code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def begin(self):
"""Make some monkeypatches to dodge progress bar.
Wrap stderr and stdout to keep other users of them from smearing the
progress bar. Wrap some pdb routines to stop showing the bar while in
the debugger.
"""
# The calls to begin/finalize end up like this: a call to begin() on
# instance A of the plugin, then a paired begin/finalize for each test
# on instance B, then a final call to finalize() on instance A.
# TODO: Do only if isatty.
self._stderr.append(sys.stderr)
sys.stderr = StreamWrapper(sys.stderr, self) # TODO: Any point?
self._stdout.append(sys.stdout)
sys.stdout = StreamWrapper(sys.stdout, self)
self._set_trace.append(pdb.set_trace)
pdb.set_trace = set_trace
self._cmdloop.append(pdb.Pdb.cmdloop)
pdb.Pdb.cmdloop = cmdloop
# nosetests changes directories to the tests dir when run from a
# distribution dir, so save the original cwd for relativizing paths.
self._cwd = '' if self.conf.options.absolute_paths else getcwd() | def function[begin, parameter[self]]:
constant[Make some monkeypatches to dodge progress bar.
Wrap stderr and stdout to keep other users of them from smearing the
progress bar. Wrap some pdb routines to stop showing the bar while in
the debugger.
]
call[name[self]._stderr.append, parameter[name[sys].stderr]]
name[sys].stderr assign[=] call[name[StreamWrapper], parameter[name[sys].stderr, name[self]]]
call[name[self]._stdout.append, parameter[name[sys].stdout]]
name[sys].stdout assign[=] call[name[StreamWrapper], parameter[name[sys].stdout, name[self]]]
call[name[self]._set_trace.append, parameter[name[pdb].set_trace]]
name[pdb].set_trace assign[=] name[set_trace]
call[name[self]._cmdloop.append, parameter[name[pdb].Pdb.cmdloop]]
name[pdb].Pdb.cmdloop assign[=] name[cmdloop]
name[self]._cwd assign[=] <ast.IfExp object at 0x7da2046226e0> | keyword[def] identifier[begin] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_stderr] . identifier[append] ( identifier[sys] . identifier[stderr] )
identifier[sys] . identifier[stderr] = identifier[StreamWrapper] ( identifier[sys] . identifier[stderr] , identifier[self] )
identifier[self] . identifier[_stdout] . identifier[append] ( identifier[sys] . identifier[stdout] )
identifier[sys] . identifier[stdout] = identifier[StreamWrapper] ( identifier[sys] . identifier[stdout] , identifier[self] )
identifier[self] . identifier[_set_trace] . identifier[append] ( identifier[pdb] . identifier[set_trace] )
identifier[pdb] . identifier[set_trace] = identifier[set_trace]
identifier[self] . identifier[_cmdloop] . identifier[append] ( identifier[pdb] . identifier[Pdb] . identifier[cmdloop] )
identifier[pdb] . identifier[Pdb] . identifier[cmdloop] = identifier[cmdloop]
identifier[self] . identifier[_cwd] = literal[string] keyword[if] identifier[self] . identifier[conf] . identifier[options] . identifier[absolute_paths] keyword[else] identifier[getcwd] () | def begin(self):
"""Make some monkeypatches to dodge progress bar.
Wrap stderr and stdout to keep other users of them from smearing the
progress bar. Wrap some pdb routines to stop showing the bar while in
the debugger.
"""
# The calls to begin/finalize end up like this: a call to begin() on
# instance A of the plugin, then a paired begin/finalize for each test
# on instance B, then a final call to finalize() on instance A.
# TODO: Do only if isatty.
self._stderr.append(sys.stderr)
sys.stderr = StreamWrapper(sys.stderr, self) # TODO: Any point?
self._stdout.append(sys.stdout)
sys.stdout = StreamWrapper(sys.stdout, self)
self._set_trace.append(pdb.set_trace)
pdb.set_trace = set_trace
self._cmdloop.append(pdb.Pdb.cmdloop)
pdb.Pdb.cmdloop = cmdloop
# nosetests changes directories to the tests dir when run from a
# distribution dir, so save the original cwd for relativizing paths.
self._cwd = '' if self.conf.options.absolute_paths else getcwd() |
def key(
seq: Sequence,
tooth: Callable[[Sequence], str] = (
lambda seq: str(random.SystemRandom().choice(seq)).strip()
),
nteeth: int = 6,
delimiter: str = ' ',
) -> str:
"""Concatenate strings generated by the tooth function."""
return delimiter.join(tooth(seq) for _ in range(nteeth)) | def function[key, parameter[seq, tooth, nteeth, delimiter]]:
constant[Concatenate strings generated by the tooth function.]
return[call[name[delimiter].join, parameter[<ast.GeneratorExp object at 0x7da1b11a42e0>]]] | keyword[def] identifier[key] (
identifier[seq] : identifier[Sequence] ,
identifier[tooth] : identifier[Callable] [[ identifier[Sequence] ], identifier[str] ]=(
keyword[lambda] identifier[seq] : identifier[str] ( identifier[random] . identifier[SystemRandom] (). identifier[choice] ( identifier[seq] )). identifier[strip] ()
),
identifier[nteeth] : identifier[int] = literal[int] ,
identifier[delimiter] : identifier[str] = literal[string] ,
)-> identifier[str] :
literal[string]
keyword[return] identifier[delimiter] . identifier[join] ( identifier[tooth] ( identifier[seq] ) keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[nteeth] )) | def key(seq: Sequence, tooth: Callable[[Sequence], str]=lambda seq: str(random.SystemRandom().choice(seq)).strip(), nteeth: int=6, delimiter: str=' ') -> str:
"""Concatenate strings generated by the tooth function."""
return delimiter.join((tooth(seq) for _ in range(nteeth))) |
def _get_port_center_position(self, width):
"""Calculates the center position of the port rectangle
The port itself can be positioned in the corner, the center of the port rectangle however is restricted by
the width of the rectangle. This method therefore calculates the center, depending on the position of the
port and the width of the rectangle.
:param float width: The width of the rectangle
:return: The center position of the rectangle
:rtype: float, float
"""
x, y = self.pos.x.value, self.pos.y.value
if self.side is SnappedSide.TOP or self.side is SnappedSide.BOTTOM:
if x - width / 2. < 0:
x = width / 2
elif x + width / 2. > self.parent.width:
x = self.parent.width - width / 2.
else:
if y - width / 2. < 0:
y = width / 2
elif y + width / 2. > self.parent.height:
y = self.parent.height - width / 2.
return x, y | def function[_get_port_center_position, parameter[self, width]]:
constant[Calculates the center position of the port rectangle
The port itself can be positioned in the corner, the center of the port rectangle however is restricted by
the width of the rectangle. This method therefore calculates the center, depending on the position of the
port and the width of the rectangle.
:param float width: The width of the rectangle
:return: The center position of the rectangle
:rtype: float, float
]
<ast.Tuple object at 0x7da1b1b6a8f0> assign[=] tuple[[<ast.Attribute object at 0x7da1b1b68640>, <ast.Attribute object at 0x7da1b1b6a740>]]
if <ast.BoolOp object at 0x7da1b1b68e20> begin[:]
if compare[binary_operation[name[x] - binary_operation[name[width] / constant[2.0]]] less[<] constant[0]] begin[:]
variable[x] assign[=] binary_operation[name[width] / constant[2]]
return[tuple[[<ast.Name object at 0x7da1b1b6b400>, <ast.Name object at 0x7da1b1b6b3d0>]]] | keyword[def] identifier[_get_port_center_position] ( identifier[self] , identifier[width] ):
literal[string]
identifier[x] , identifier[y] = identifier[self] . identifier[pos] . identifier[x] . identifier[value] , identifier[self] . identifier[pos] . identifier[y] . identifier[value]
keyword[if] identifier[self] . identifier[side] keyword[is] identifier[SnappedSide] . identifier[TOP] keyword[or] identifier[self] . identifier[side] keyword[is] identifier[SnappedSide] . identifier[BOTTOM] :
keyword[if] identifier[x] - identifier[width] / literal[int] < literal[int] :
identifier[x] = identifier[width] / literal[int]
keyword[elif] identifier[x] + identifier[width] / literal[int] > identifier[self] . identifier[parent] . identifier[width] :
identifier[x] = identifier[self] . identifier[parent] . identifier[width] - identifier[width] / literal[int]
keyword[else] :
keyword[if] identifier[y] - identifier[width] / literal[int] < literal[int] :
identifier[y] = identifier[width] / literal[int]
keyword[elif] identifier[y] + identifier[width] / literal[int] > identifier[self] . identifier[parent] . identifier[height] :
identifier[y] = identifier[self] . identifier[parent] . identifier[height] - identifier[width] / literal[int]
keyword[return] identifier[x] , identifier[y] | def _get_port_center_position(self, width):
"""Calculates the center position of the port rectangle
The port itself can be positioned in the corner, the center of the port rectangle however is restricted by
the width of the rectangle. This method therefore calculates the center, depending on the position of the
port and the width of the rectangle.
:param float width: The width of the rectangle
:return: The center position of the rectangle
:rtype: float, float
"""
(x, y) = (self.pos.x.value, self.pos.y.value)
if self.side is SnappedSide.TOP or self.side is SnappedSide.BOTTOM:
if x - width / 2.0 < 0:
x = width / 2 # depends on [control=['if'], data=[]]
elif x + width / 2.0 > self.parent.width:
x = self.parent.width - width / 2.0 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif y - width / 2.0 < 0:
y = width / 2 # depends on [control=['if'], data=[]]
elif y + width / 2.0 > self.parent.height:
y = self.parent.height - width / 2.0 # depends on [control=['if'], data=[]]
return (x, y) |
def parse_instruction(string, location, tokens):
"""Parse an x86 instruction.
"""
prefix_str = tokens.get("prefix", None)
mnemonic_str = tokens.get("mnemonic")
operands = [op for op in tokens.get("operands", [])]
infer_operands_size(operands)
# Quick hack: Capstone returns rep instead of repe for cmps and scas
# instructions.
if prefix_str == "rep" and (mnemonic_str.startswith("cmps") or mnemonic_str.startswith("scas")):
prefix_str = "repe"
instr = X86Instruction(
prefix_str,
mnemonic_str,
operands,
arch_info.architecture_mode
)
return instr | def function[parse_instruction, parameter[string, location, tokens]]:
constant[Parse an x86 instruction.
]
variable[prefix_str] assign[=] call[name[tokens].get, parameter[constant[prefix], constant[None]]]
variable[mnemonic_str] assign[=] call[name[tokens].get, parameter[constant[mnemonic]]]
variable[operands] assign[=] <ast.ListComp object at 0x7da1b086c6d0>
call[name[infer_operands_size], parameter[name[operands]]]
if <ast.BoolOp object at 0x7da1b086c2e0> begin[:]
variable[prefix_str] assign[=] constant[repe]
variable[instr] assign[=] call[name[X86Instruction], parameter[name[prefix_str], name[mnemonic_str], name[operands], name[arch_info].architecture_mode]]
return[name[instr]] | keyword[def] identifier[parse_instruction] ( identifier[string] , identifier[location] , identifier[tokens] ):
literal[string]
identifier[prefix_str] = identifier[tokens] . identifier[get] ( literal[string] , keyword[None] )
identifier[mnemonic_str] = identifier[tokens] . identifier[get] ( literal[string] )
identifier[operands] =[ identifier[op] keyword[for] identifier[op] keyword[in] identifier[tokens] . identifier[get] ( literal[string] ,[])]
identifier[infer_operands_size] ( identifier[operands] )
keyword[if] identifier[prefix_str] == literal[string] keyword[and] ( identifier[mnemonic_str] . identifier[startswith] ( literal[string] ) keyword[or] identifier[mnemonic_str] . identifier[startswith] ( literal[string] )):
identifier[prefix_str] = literal[string]
identifier[instr] = identifier[X86Instruction] (
identifier[prefix_str] ,
identifier[mnemonic_str] ,
identifier[operands] ,
identifier[arch_info] . identifier[architecture_mode]
)
keyword[return] identifier[instr] | def parse_instruction(string, location, tokens):
"""Parse an x86 instruction.
"""
prefix_str = tokens.get('prefix', None)
mnemonic_str = tokens.get('mnemonic')
operands = [op for op in tokens.get('operands', [])]
infer_operands_size(operands)
# Quick hack: Capstone returns rep instead of repe for cmps and scas
# instructions.
if prefix_str == 'rep' and (mnemonic_str.startswith('cmps') or mnemonic_str.startswith('scas')):
prefix_str = 'repe' # depends on [control=['if'], data=[]]
instr = X86Instruction(prefix_str, mnemonic_str, operands, arch_info.architecture_mode)
return instr |
def do_setup(self, arg, arguments):
"""
::
Usage:
setup init [--force]
Copies a cmd3.yaml file into ~/.cloudmesh/cmd3.yaml
"""
if arguments["init"]:
Console.ok("Initialize cmd3.yaml file")
from cmd3.yaml_setup import create_cmd3_yaml_file
force = arguments["--force"]
create_cmd3_yaml_file(force=force) | def function[do_setup, parameter[self, arg, arguments]]:
constant[
::
Usage:
setup init [--force]
Copies a cmd3.yaml file into ~/.cloudmesh/cmd3.yaml
]
if call[name[arguments]][constant[init]] begin[:]
call[name[Console].ok, parameter[constant[Initialize cmd3.yaml file]]]
from relative_module[cmd3.yaml_setup] import module[create_cmd3_yaml_file]
variable[force] assign[=] call[name[arguments]][constant[--force]]
call[name[create_cmd3_yaml_file], parameter[]] | keyword[def] identifier[do_setup] ( identifier[self] , identifier[arg] , identifier[arguments] ):
literal[string]
keyword[if] identifier[arguments] [ literal[string] ]:
identifier[Console] . identifier[ok] ( literal[string] )
keyword[from] identifier[cmd3] . identifier[yaml_setup] keyword[import] identifier[create_cmd3_yaml_file]
identifier[force] = identifier[arguments] [ literal[string] ]
identifier[create_cmd3_yaml_file] ( identifier[force] = identifier[force] ) | def do_setup(self, arg, arguments):
"""
::
Usage:
setup init [--force]
Copies a cmd3.yaml file into ~/.cloudmesh/cmd3.yaml
"""
if arguments['init']:
Console.ok('Initialize cmd3.yaml file')
from cmd3.yaml_setup import create_cmd3_yaml_file
force = arguments['--force']
create_cmd3_yaml_file(force=force) # depends on [control=['if'], data=[]] |
def is_valid_port(instance: int):
"""Validates data is a valid port"""
if not isinstance(instance, (int, str)):
return True
return int(instance) in range(65535) | def function[is_valid_port, parameter[instance]]:
constant[Validates data is a valid port]
if <ast.UnaryOp object at 0x7da1b1e93670> begin[:]
return[constant[True]]
return[compare[call[name[int], parameter[name[instance]]] in call[name[range], parameter[constant[65535]]]]] | keyword[def] identifier[is_valid_port] ( identifier[instance] : identifier[int] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[instance] ,( identifier[int] , identifier[str] )):
keyword[return] keyword[True]
keyword[return] identifier[int] ( identifier[instance] ) keyword[in] identifier[range] ( literal[int] ) | def is_valid_port(instance: int):
"""Validates data is a valid port"""
if not isinstance(instance, (int, str)):
return True # depends on [control=['if'], data=[]]
return int(instance) in range(65535) |
def find_codon_mismatches(sbjct_start, sbjct_seq, qry_seq):
"""
This function takes two alligned sequence (subject and query), and
the position on the subject where the alignment starts. The sequences
are compared codon by codon. If a mis matches is found it is saved in
'mis_matches'. If a gap is found the function get_inframe_gap is used
to find the indel sequence and keep the sequence in the correct
reading frame. The function translate_indel is used to name indel
mutations and translate the indels to amino acids
The function returns a list of tuples containing all needed informations
about the mutation in order to look it up in the database dict known
mutation and the with the output files the the user.
"""
mis_matches = []
# Find start pos of first codon in frame, i_start
codon_offset = (sbjct_start-1) % 3
i_start = 0
if codon_offset != 0:
i_start = 3 - codon_offset
sbjct_start = sbjct_start + i_start
# Set sequences in frame
sbjct_seq = sbjct_seq[i_start:]
qry_seq = qry_seq[i_start:]
# Find codon number of the first codon in the sequence, start at 0
codon_no = int((sbjct_start-1) / 3) # 1,2,3 start on 0
# s_shift and q_shift are used when gaps appears
q_shift = 0
s_shift = 0
mut_no = 0
# Find inserts and deletions in sequence
indel_no = 0
indels = get_indels(sbjct_seq, qry_seq, sbjct_start)
# Go through sequence and save mutations when found
for index in range(0, len(sbjct_seq), 3):
# Count codon number
codon_no += 1
# Shift index according to gaps
s_i = index + s_shift
q_i = index + q_shift
# Get codons
sbjct_codon = sbjct_seq[s_i:s_i+3]
qry_codon = qry_seq[q_i:q_i+3]
if len(sbjct_seq[s_i:].replace("-","")) + len(qry_codon[q_i:].replace("-","")) < 6:
break
# Check for mutations
if sbjct_codon.upper() != qry_codon.upper():
# Check for codon insertions and deletions and frameshift mutations
if "-" in sbjct_codon or "-" in qry_codon:
# Get indel info
try:
indel_data = indels[indel_no]
except IndexError:
print(sbjct_codon, qry_codon)
print(indels)
print(gene, indel_data, indel_no)
mut = indel_data[0]
codon_no_indel = indel_data[1]
seq_pos = indel_data[2] + sbjct_start - 1
indel = indel_data[3]
indel_no +=1
# Get the affected sequence in frame for both for sbjct and qry
if mut == "ins":
sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], 3)
qry_rf_indel = get_inframe_gap(qry_seq[q_i:], int(math.floor(len(sbjct_rf_indel)/3) *3))
else:
qry_rf_indel = get_inframe_gap(qry_seq[q_i:], 3)
sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], int(math.floor(len(qry_rf_indel)/3) *3))
mut_name, aa_ref, aa_alt = name_indel_mutation(sbjct_seq, indel, sbjct_rf_indel, qry_rf_indel, codon_no, mut, sbjct_start - 1)
# Set index to the correct reading frame after the indel gap
shift_diff_before = abs(s_shift - q_shift)
s_shift += len(sbjct_rf_indel) - 3
q_shift += len(qry_rf_indel) - 3
shift_diff = abs(s_shift - q_shift)
if shift_diff_before != 0 and shift_diff %3 == 0:
if s_shift > q_shift:
nucs_needed = int((len(sbjct_rf_indel)/3) *3) + shift_diff
pre_qry_indel = qry_rf_indel
qry_rf_indel = get_inframe_gap(qry_seq[q_i:], nucs_needed)
q_shift += len(qry_rf_indel) - len(pre_qry_indel)
elif q_shift > s_shift:
nucs_needed = int((len(qry_rf_indel)/3)*3) + shift_diff
pre_sbjct_indel = sbjct_rf_indel
sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], nucs_needed)
s_shift += len(sbjct_rf_indel) - len(pre_sbjct_indel)
mut_name, aa_ref, aa_alt = name_indel_mutation(sbjct_seq, indel, sbjct_rf_indel, qry_rf_indel, codon_no, mut, sbjct_start - 1)
if "Frameshift" in mut_name:
mut_name = mut_name.split("-")[0] + "- Frame restored"
mis_matches += [[mut, codon_no_indel, seq_pos, indel, mut_name, sbjct_rf_indel, qry_rf_indel, aa_ref, aa_alt]]
# Check if the next mutation in the indels list is in the current codon
# Find the number of individul gaps in the evaluated sequence
no_of_indels = len(re.findall("\-\w", sbjct_rf_indel)) + len(re.findall("\-\w", qry_rf_indel))
if no_of_indels > 1:
for j in range(indel_no, indel_no + no_of_indels - 1):
try:
indel_data = indels[j]
except IndexError:
sys.exit("indel_data list is out of range, bug!")
mut = indel_data[0]
codon_no_indel = indel_data[1]
seq_pos = indel_data[2] + sbjct_start - 1
indel = indel_data[3]
indel_no +=1
mis_matches += [[mut, codon_no_indel, seq_pos, indel, mut_name, sbjct_rf_indel, qry_rf_indel, aa_ref, aa_alt]]
# Set codon number, and save nucleotides from out of frame mutations
if mut == "del":
codon_no += int((len(sbjct_rf_indel) - 3)/3)
# If evaluated insert is only gaps codon_no should not increment
elif sbjct_rf_indel.count("-") == len(sbjct_rf_indel):
codon_no -= 1
# Check of point mutations
else:
mut = "sub"
aa_ref = aa(sbjct_codon)
aa_alt = aa(qry_codon)
if aa_ref != aa_alt:
# End search for mutation if a premature stop codon is found
mut_name = "p." + aa_ref + str(codon_no) + aa_alt
mis_matches += [[mut, codon_no, codon_no, aa_alt, mut_name, sbjct_codon, qry_codon, aa_ref, aa_alt]]
# If a Premature stop codon occur report it an stop the loop
try:
if mis_matches[-1][-1] == "*":
mut_name += " - Premature stop codon"
mis_matches[-1][4] = mis_matches[-1][4].split("-")[0] + " - Premature stop codon"
break
except IndexError:
pass
# Sort mutations on position
mis_matches = sorted(mis_matches, key = lambda x:x[1])
return mis_matches | def function[find_codon_mismatches, parameter[sbjct_start, sbjct_seq, qry_seq]]:
constant[
This function takes two alligned sequence (subject and query), and
the position on the subject where the alignment starts. The sequences
are compared codon by codon. If a mis matches is found it is saved in
'mis_matches'. If a gap is found the function get_inframe_gap is used
to find the indel sequence and keep the sequence in the correct
reading frame. The function translate_indel is used to name indel
mutations and translate the indels to amino acids
The function returns a list of tuples containing all needed informations
about the mutation in order to look it up in the database dict known
mutation and the with the output files the the user.
]
variable[mis_matches] assign[=] list[[]]
variable[codon_offset] assign[=] binary_operation[binary_operation[name[sbjct_start] - constant[1]] <ast.Mod object at 0x7da2590d6920> constant[3]]
variable[i_start] assign[=] constant[0]
if compare[name[codon_offset] not_equal[!=] constant[0]] begin[:]
variable[i_start] assign[=] binary_operation[constant[3] - name[codon_offset]]
variable[sbjct_start] assign[=] binary_operation[name[sbjct_start] + name[i_start]]
variable[sbjct_seq] assign[=] call[name[sbjct_seq]][<ast.Slice object at 0x7da1b11e1a80>]
variable[qry_seq] assign[=] call[name[qry_seq]][<ast.Slice object at 0x7da1b11e1d80>]
variable[codon_no] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[sbjct_start] - constant[1]] / constant[3]]]]
variable[q_shift] assign[=] constant[0]
variable[s_shift] assign[=] constant[0]
variable[mut_no] assign[=] constant[0]
variable[indel_no] assign[=] constant[0]
variable[indels] assign[=] call[name[get_indels], parameter[name[sbjct_seq], name[qry_seq], name[sbjct_start]]]
for taget[name[index]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[sbjct_seq]]], constant[3]]]] begin[:]
<ast.AugAssign object at 0x7da1b11e3610>
variable[s_i] assign[=] binary_operation[name[index] + name[s_shift]]
variable[q_i] assign[=] binary_operation[name[index] + name[q_shift]]
variable[sbjct_codon] assign[=] call[name[sbjct_seq]][<ast.Slice object at 0x7da1b11e1150>]
variable[qry_codon] assign[=] call[name[qry_seq]][<ast.Slice object at 0x7da1b11e3460>]
if compare[binary_operation[call[name[len], parameter[call[call[name[sbjct_seq]][<ast.Slice object at 0x7da1b11e2770>].replace, parameter[constant[-], constant[]]]]] + call[name[len], parameter[call[call[name[qry_codon]][<ast.Slice object at 0x7da1b11e0610>].replace, parameter[constant[-], constant[]]]]]] less[<] constant[6]] begin[:]
break
if compare[call[name[sbjct_codon].upper, parameter[]] not_equal[!=] call[name[qry_codon].upper, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b11e3b50> begin[:]
<ast.Try object at 0x7da1b10a79a0>
variable[mut] assign[=] call[name[indel_data]][constant[0]]
variable[codon_no_indel] assign[=] call[name[indel_data]][constant[1]]
variable[seq_pos] assign[=] binary_operation[binary_operation[call[name[indel_data]][constant[2]] + name[sbjct_start]] - constant[1]]
variable[indel] assign[=] call[name[indel_data]][constant[3]]
<ast.AugAssign object at 0x7da1b10a4c70>
if compare[name[mut] equal[==] constant[ins]] begin[:]
variable[sbjct_rf_indel] assign[=] call[name[get_inframe_gap], parameter[call[name[sbjct_seq]][<ast.Slice object at 0x7da1b10a6470>], constant[3]]]
variable[qry_rf_indel] assign[=] call[name[get_inframe_gap], parameter[call[name[qry_seq]][<ast.Slice object at 0x7da1b10a4760>], call[name[int], parameter[binary_operation[call[name[math].floor, parameter[binary_operation[call[name[len], parameter[name[sbjct_rf_indel]]] / constant[3]]]] * constant[3]]]]]]
<ast.Tuple object at 0x7da1b113acb0> assign[=] call[name[name_indel_mutation], parameter[name[sbjct_seq], name[indel], name[sbjct_rf_indel], name[qry_rf_indel], name[codon_no], name[mut], binary_operation[name[sbjct_start] - constant[1]]]]
variable[shift_diff_before] assign[=] call[name[abs], parameter[binary_operation[name[s_shift] - name[q_shift]]]]
<ast.AugAssign object at 0x7da1b1138ca0>
<ast.AugAssign object at 0x7da1b113a560>
variable[shift_diff] assign[=] call[name[abs], parameter[binary_operation[name[s_shift] - name[q_shift]]]]
if <ast.BoolOp object at 0x7da1b1138eb0> begin[:]
if compare[name[s_shift] greater[>] name[q_shift]] begin[:]
variable[nucs_needed] assign[=] binary_operation[call[name[int], parameter[binary_operation[binary_operation[call[name[len], parameter[name[sbjct_rf_indel]]] / constant[3]] * constant[3]]]] + name[shift_diff]]
variable[pre_qry_indel] assign[=] name[qry_rf_indel]
variable[qry_rf_indel] assign[=] call[name[get_inframe_gap], parameter[call[name[qry_seq]][<ast.Slice object at 0x7da1b113a170>], name[nucs_needed]]]
<ast.AugAssign object at 0x7da1b1139cf0>
<ast.Tuple object at 0x7da1b1139e10> assign[=] call[name[name_indel_mutation], parameter[name[sbjct_seq], name[indel], name[sbjct_rf_indel], name[qry_rf_indel], name[codon_no], name[mut], binary_operation[name[sbjct_start] - constant[1]]]]
if compare[constant[Frameshift] in name[mut_name]] begin[:]
variable[mut_name] assign[=] binary_operation[call[call[name[mut_name].split, parameter[constant[-]]]][constant[0]] + constant[- Frame restored]]
<ast.AugAssign object at 0x7da1b113a290>
variable[no_of_indels] assign[=] binary_operation[call[name[len], parameter[call[name[re].findall, parameter[constant[\-\w], name[sbjct_rf_indel]]]]] + call[name[len], parameter[call[name[re].findall, parameter[constant[\-\w], name[qry_rf_indel]]]]]]
if compare[name[no_of_indels] greater[>] constant[1]] begin[:]
for taget[name[j]] in starred[call[name[range], parameter[name[indel_no], binary_operation[binary_operation[name[indel_no] + name[no_of_indels]] - constant[1]]]]] begin[:]
<ast.Try object at 0x7da1b0f1ca60>
variable[mut] assign[=] call[name[indel_data]][constant[0]]
variable[codon_no_indel] assign[=] call[name[indel_data]][constant[1]]
variable[seq_pos] assign[=] binary_operation[binary_operation[call[name[indel_data]][constant[2]] + name[sbjct_start]] - constant[1]]
variable[indel] assign[=] call[name[indel_data]][constant[3]]
<ast.AugAssign object at 0x7da1b0f1eb90>
<ast.AugAssign object at 0x7da1b0f1ec50>
if compare[name[mut] equal[==] constant[del]] begin[:]
<ast.AugAssign object at 0x7da1b0f1e7d0>
<ast.Try object at 0x7da1b0f1c370>
variable[mis_matches] assign[=] call[name[sorted], parameter[name[mis_matches]]]
return[name[mis_matches]] | keyword[def] identifier[find_codon_mismatches] ( identifier[sbjct_start] , identifier[sbjct_seq] , identifier[qry_seq] ):
literal[string]
identifier[mis_matches] =[]
identifier[codon_offset] =( identifier[sbjct_start] - literal[int] )% literal[int]
identifier[i_start] = literal[int]
keyword[if] identifier[codon_offset] != literal[int] :
identifier[i_start] = literal[int] - identifier[codon_offset]
identifier[sbjct_start] = identifier[sbjct_start] + identifier[i_start]
identifier[sbjct_seq] = identifier[sbjct_seq] [ identifier[i_start] :]
identifier[qry_seq] = identifier[qry_seq] [ identifier[i_start] :]
identifier[codon_no] = identifier[int] (( identifier[sbjct_start] - literal[int] )/ literal[int] )
identifier[q_shift] = literal[int]
identifier[s_shift] = literal[int]
identifier[mut_no] = literal[int]
identifier[indel_no] = literal[int]
identifier[indels] = identifier[get_indels] ( identifier[sbjct_seq] , identifier[qry_seq] , identifier[sbjct_start] )
keyword[for] identifier[index] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[sbjct_seq] ), literal[int] ):
identifier[codon_no] += literal[int]
identifier[s_i] = identifier[index] + identifier[s_shift]
identifier[q_i] = identifier[index] + identifier[q_shift]
identifier[sbjct_codon] = identifier[sbjct_seq] [ identifier[s_i] : identifier[s_i] + literal[int] ]
identifier[qry_codon] = identifier[qry_seq] [ identifier[q_i] : identifier[q_i] + literal[int] ]
keyword[if] identifier[len] ( identifier[sbjct_seq] [ identifier[s_i] :]. identifier[replace] ( literal[string] , literal[string] ))+ identifier[len] ( identifier[qry_codon] [ identifier[q_i] :]. identifier[replace] ( literal[string] , literal[string] ))< literal[int] :
keyword[break]
keyword[if] identifier[sbjct_codon] . identifier[upper] ()!= identifier[qry_codon] . identifier[upper] ():
keyword[if] literal[string] keyword[in] identifier[sbjct_codon] keyword[or] literal[string] keyword[in] identifier[qry_codon] :
keyword[try] :
identifier[indel_data] = identifier[indels] [ identifier[indel_no] ]
keyword[except] identifier[IndexError] :
identifier[print] ( identifier[sbjct_codon] , identifier[qry_codon] )
identifier[print] ( identifier[indels] )
identifier[print] ( identifier[gene] , identifier[indel_data] , identifier[indel_no] )
identifier[mut] = identifier[indel_data] [ literal[int] ]
identifier[codon_no_indel] = identifier[indel_data] [ literal[int] ]
identifier[seq_pos] = identifier[indel_data] [ literal[int] ]+ identifier[sbjct_start] - literal[int]
identifier[indel] = identifier[indel_data] [ literal[int] ]
identifier[indel_no] += literal[int]
keyword[if] identifier[mut] == literal[string] :
identifier[sbjct_rf_indel] = identifier[get_inframe_gap] ( identifier[sbjct_seq] [ identifier[s_i] :], literal[int] )
identifier[qry_rf_indel] = identifier[get_inframe_gap] ( identifier[qry_seq] [ identifier[q_i] :], identifier[int] ( identifier[math] . identifier[floor] ( identifier[len] ( identifier[sbjct_rf_indel] )/ literal[int] )* literal[int] ))
keyword[else] :
identifier[qry_rf_indel] = identifier[get_inframe_gap] ( identifier[qry_seq] [ identifier[q_i] :], literal[int] )
identifier[sbjct_rf_indel] = identifier[get_inframe_gap] ( identifier[sbjct_seq] [ identifier[s_i] :], identifier[int] ( identifier[math] . identifier[floor] ( identifier[len] ( identifier[qry_rf_indel] )/ literal[int] )* literal[int] ))
identifier[mut_name] , identifier[aa_ref] , identifier[aa_alt] = identifier[name_indel_mutation] ( identifier[sbjct_seq] , identifier[indel] , identifier[sbjct_rf_indel] , identifier[qry_rf_indel] , identifier[codon_no] , identifier[mut] , identifier[sbjct_start] - literal[int] )
identifier[shift_diff_before] = identifier[abs] ( identifier[s_shift] - identifier[q_shift] )
identifier[s_shift] += identifier[len] ( identifier[sbjct_rf_indel] )- literal[int]
identifier[q_shift] += identifier[len] ( identifier[qry_rf_indel] )- literal[int]
identifier[shift_diff] = identifier[abs] ( identifier[s_shift] - identifier[q_shift] )
keyword[if] identifier[shift_diff_before] != literal[int] keyword[and] identifier[shift_diff] % literal[int] == literal[int] :
keyword[if] identifier[s_shift] > identifier[q_shift] :
identifier[nucs_needed] = identifier[int] (( identifier[len] ( identifier[sbjct_rf_indel] )/ literal[int] )* literal[int] )+ identifier[shift_diff]
identifier[pre_qry_indel] = identifier[qry_rf_indel]
identifier[qry_rf_indel] = identifier[get_inframe_gap] ( identifier[qry_seq] [ identifier[q_i] :], identifier[nucs_needed] )
identifier[q_shift] += identifier[len] ( identifier[qry_rf_indel] )- identifier[len] ( identifier[pre_qry_indel] )
keyword[elif] identifier[q_shift] > identifier[s_shift] :
identifier[nucs_needed] = identifier[int] (( identifier[len] ( identifier[qry_rf_indel] )/ literal[int] )* literal[int] )+ identifier[shift_diff]
identifier[pre_sbjct_indel] = identifier[sbjct_rf_indel]
identifier[sbjct_rf_indel] = identifier[get_inframe_gap] ( identifier[sbjct_seq] [ identifier[s_i] :], identifier[nucs_needed] )
identifier[s_shift] += identifier[len] ( identifier[sbjct_rf_indel] )- identifier[len] ( identifier[pre_sbjct_indel] )
identifier[mut_name] , identifier[aa_ref] , identifier[aa_alt] = identifier[name_indel_mutation] ( identifier[sbjct_seq] , identifier[indel] , identifier[sbjct_rf_indel] , identifier[qry_rf_indel] , identifier[codon_no] , identifier[mut] , identifier[sbjct_start] - literal[int] )
keyword[if] literal[string] keyword[in] identifier[mut_name] :
identifier[mut_name] = identifier[mut_name] . identifier[split] ( literal[string] )[ literal[int] ]+ literal[string]
identifier[mis_matches] +=[[ identifier[mut] , identifier[codon_no_indel] , identifier[seq_pos] , identifier[indel] , identifier[mut_name] , identifier[sbjct_rf_indel] , identifier[qry_rf_indel] , identifier[aa_ref] , identifier[aa_alt] ]]
identifier[no_of_indels] = identifier[len] ( identifier[re] . identifier[findall] ( literal[string] , identifier[sbjct_rf_indel] ))+ identifier[len] ( identifier[re] . identifier[findall] ( literal[string] , identifier[qry_rf_indel] ))
keyword[if] identifier[no_of_indels] > literal[int] :
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[indel_no] , identifier[indel_no] + identifier[no_of_indels] - literal[int] ):
keyword[try] :
identifier[indel_data] = identifier[indels] [ identifier[j] ]
keyword[except] identifier[IndexError] :
identifier[sys] . identifier[exit] ( literal[string] )
identifier[mut] = identifier[indel_data] [ literal[int] ]
identifier[codon_no_indel] = identifier[indel_data] [ literal[int] ]
identifier[seq_pos] = identifier[indel_data] [ literal[int] ]+ identifier[sbjct_start] - literal[int]
identifier[indel] = identifier[indel_data] [ literal[int] ]
identifier[indel_no] += literal[int]
identifier[mis_matches] +=[[ identifier[mut] , identifier[codon_no_indel] , identifier[seq_pos] , identifier[indel] , identifier[mut_name] , identifier[sbjct_rf_indel] , identifier[qry_rf_indel] , identifier[aa_ref] , identifier[aa_alt] ]]
keyword[if] identifier[mut] == literal[string] :
identifier[codon_no] += identifier[int] (( identifier[len] ( identifier[sbjct_rf_indel] )- literal[int] )/ literal[int] )
keyword[elif] identifier[sbjct_rf_indel] . identifier[count] ( literal[string] )== identifier[len] ( identifier[sbjct_rf_indel] ):
identifier[codon_no] -= literal[int]
keyword[else] :
identifier[mut] = literal[string]
identifier[aa_ref] = identifier[aa] ( identifier[sbjct_codon] )
identifier[aa_alt] = identifier[aa] ( identifier[qry_codon] )
keyword[if] identifier[aa_ref] != identifier[aa_alt] :
identifier[mut_name] = literal[string] + identifier[aa_ref] + identifier[str] ( identifier[codon_no] )+ identifier[aa_alt]
identifier[mis_matches] +=[[ identifier[mut] , identifier[codon_no] , identifier[codon_no] , identifier[aa_alt] , identifier[mut_name] , identifier[sbjct_codon] , identifier[qry_codon] , identifier[aa_ref] , identifier[aa_alt] ]]
keyword[try] :
keyword[if] identifier[mis_matches] [- literal[int] ][- literal[int] ]== literal[string] :
identifier[mut_name] += literal[string]
identifier[mis_matches] [- literal[int] ][ literal[int] ]= identifier[mis_matches] [- literal[int] ][ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]+ literal[string]
keyword[break]
keyword[except] identifier[IndexError] :
keyword[pass]
identifier[mis_matches] = identifier[sorted] ( identifier[mis_matches] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] [ literal[int] ])
keyword[return] identifier[mis_matches] | def find_codon_mismatches(sbjct_start, sbjct_seq, qry_seq):
"""
This function takes two alligned sequence (subject and query), and
the position on the subject where the alignment starts. The sequences
are compared codon by codon. If a mis matches is found it is saved in
'mis_matches'. If a gap is found the function get_inframe_gap is used
to find the indel sequence and keep the sequence in the correct
reading frame. The function translate_indel is used to name indel
mutations and translate the indels to amino acids
The function returns a list of tuples containing all needed informations
about the mutation in order to look it up in the database dict known
mutation and the with the output files the the user.
"""
mis_matches = []
# Find start pos of first codon in frame, i_start
codon_offset = (sbjct_start - 1) % 3
i_start = 0
if codon_offset != 0:
i_start = 3 - codon_offset # depends on [control=['if'], data=['codon_offset']]
sbjct_start = sbjct_start + i_start
# Set sequences in frame
sbjct_seq = sbjct_seq[i_start:]
qry_seq = qry_seq[i_start:]
# Find codon number of the first codon in the sequence, start at 0
codon_no = int((sbjct_start - 1) / 3) # 1,2,3 start on 0
# s_shift and q_shift are used when gaps appears
q_shift = 0
s_shift = 0
mut_no = 0
# Find inserts and deletions in sequence
indel_no = 0
indels = get_indels(sbjct_seq, qry_seq, sbjct_start)
# Go through sequence and save mutations when found
for index in range(0, len(sbjct_seq), 3):
# Count codon number
codon_no += 1
# Shift index according to gaps
s_i = index + s_shift
q_i = index + q_shift
# Get codons
sbjct_codon = sbjct_seq[s_i:s_i + 3]
qry_codon = qry_seq[q_i:q_i + 3]
if len(sbjct_seq[s_i:].replace('-', '')) + len(qry_codon[q_i:].replace('-', '')) < 6:
break # depends on [control=['if'], data=[]]
# Check for mutations
if sbjct_codon.upper() != qry_codon.upper():
# Check for codon insertions and deletions and frameshift mutations
if '-' in sbjct_codon or '-' in qry_codon:
# Get indel info
try:
indel_data = indels[indel_no] # depends on [control=['try'], data=[]]
except IndexError:
print(sbjct_codon, qry_codon)
print(indels)
print(gene, indel_data, indel_no) # depends on [control=['except'], data=[]]
mut = indel_data[0]
codon_no_indel = indel_data[1]
seq_pos = indel_data[2] + sbjct_start - 1
indel = indel_data[3]
indel_no += 1 # Get the affected sequence in frame for both for sbjct and qry
if mut == 'ins':
sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], 3)
qry_rf_indel = get_inframe_gap(qry_seq[q_i:], int(math.floor(len(sbjct_rf_indel) / 3) * 3)) # depends on [control=['if'], data=[]]
else:
qry_rf_indel = get_inframe_gap(qry_seq[q_i:], 3)
sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], int(math.floor(len(qry_rf_indel) / 3) * 3))
(mut_name, aa_ref, aa_alt) = name_indel_mutation(sbjct_seq, indel, sbjct_rf_indel, qry_rf_indel, codon_no, mut, sbjct_start - 1) # Set index to the correct reading frame after the indel gap
shift_diff_before = abs(s_shift - q_shift)
s_shift += len(sbjct_rf_indel) - 3
q_shift += len(qry_rf_indel) - 3
shift_diff = abs(s_shift - q_shift)
if shift_diff_before != 0 and shift_diff % 3 == 0:
if s_shift > q_shift:
nucs_needed = int(len(sbjct_rf_indel) / 3 * 3) + shift_diff
pre_qry_indel = qry_rf_indel
qry_rf_indel = get_inframe_gap(qry_seq[q_i:], nucs_needed)
q_shift += len(qry_rf_indel) - len(pre_qry_indel) # depends on [control=['if'], data=['q_shift']]
elif q_shift > s_shift:
nucs_needed = int(len(qry_rf_indel) / 3 * 3) + shift_diff
pre_sbjct_indel = sbjct_rf_indel
sbjct_rf_indel = get_inframe_gap(sbjct_seq[s_i:], nucs_needed)
s_shift += len(sbjct_rf_indel) - len(pre_sbjct_indel) # depends on [control=['if'], data=['s_shift']]
(mut_name, aa_ref, aa_alt) = name_indel_mutation(sbjct_seq, indel, sbjct_rf_indel, qry_rf_indel, codon_no, mut, sbjct_start - 1)
if 'Frameshift' in mut_name:
mut_name = mut_name.split('-')[0] + '- Frame restored' # depends on [control=['if'], data=['mut_name']] # depends on [control=['if'], data=[]]
mis_matches += [[mut, codon_no_indel, seq_pos, indel, mut_name, sbjct_rf_indel, qry_rf_indel, aa_ref, aa_alt]]
# Check if the next mutation in the indels list is in the current codon
# Find the number of individul gaps in the evaluated sequence
no_of_indels = len(re.findall('\\-\\w', sbjct_rf_indel)) + len(re.findall('\\-\\w', qry_rf_indel))
if no_of_indels > 1:
for j in range(indel_no, indel_no + no_of_indels - 1):
try:
indel_data = indels[j] # depends on [control=['try'], data=[]]
except IndexError:
sys.exit('indel_data list is out of range, bug!') # depends on [control=['except'], data=[]]
mut = indel_data[0]
codon_no_indel = indel_data[1]
seq_pos = indel_data[2] + sbjct_start - 1
indel = indel_data[3]
indel_no += 1
mis_matches += [[mut, codon_no_indel, seq_pos, indel, mut_name, sbjct_rf_indel, qry_rf_indel, aa_ref, aa_alt]] # depends on [control=['for'], data=['j']] # depends on [control=['if'], data=['no_of_indels']] # Set codon number, and save nucleotides from out of frame mutations
if mut == 'del':
codon_no += int((len(sbjct_rf_indel) - 3) / 3) # depends on [control=['if'], data=[]]
# If evaluated insert is only gaps codon_no should not increment
elif sbjct_rf_indel.count('-') == len(sbjct_rf_indel):
codon_no -= 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# Check of point mutations
mut = 'sub'
aa_ref = aa(sbjct_codon)
aa_alt = aa(qry_codon)
if aa_ref != aa_alt:
# End search for mutation if a premature stop codon is found
mut_name = 'p.' + aa_ref + str(codon_no) + aa_alt
mis_matches += [[mut, codon_no, codon_no, aa_alt, mut_name, sbjct_codon, qry_codon, aa_ref, aa_alt]] # depends on [control=['if'], data=['aa_ref', 'aa_alt']]
# If a Premature stop codon occur report it an stop the loop
try:
if mis_matches[-1][-1] == '*':
mut_name += ' - Premature stop codon'
mis_matches[-1][4] = mis_matches[-1][4].split('-')[0] + ' - Premature stop codon'
break # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except IndexError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['index']]
# Sort mutations on position
mis_matches = sorted(mis_matches, key=lambda x: x[1])
return mis_matches |
def analyse_text(text):
"""
Recognize JCL job by header.
"""
result = 0.0
lines = text.split('\n')
if len(lines) > 0:
if JclLexer._JOB_HEADER_PATTERN.match(lines[0]):
result = 1.0
assert 0.0 <= result <= 1.0
return result | def function[analyse_text, parameter[text]]:
constant[
Recognize JCL job by header.
]
variable[result] assign[=] constant[0.0]
variable[lines] assign[=] call[name[text].split, parameter[constant[
]]]
if compare[call[name[len], parameter[name[lines]]] greater[>] constant[0]] begin[:]
if call[name[JclLexer]._JOB_HEADER_PATTERN.match, parameter[call[name[lines]][constant[0]]]] begin[:]
variable[result] assign[=] constant[1.0]
assert[compare[constant[0.0] less_or_equal[<=] name[result]]]
return[name[result]] | keyword[def] identifier[analyse_text] ( identifier[text] ):
literal[string]
identifier[result] = literal[int]
identifier[lines] = identifier[text] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[lines] )> literal[int] :
keyword[if] identifier[JclLexer] . identifier[_JOB_HEADER_PATTERN] . identifier[match] ( identifier[lines] [ literal[int] ]):
identifier[result] = literal[int]
keyword[assert] literal[int] <= identifier[result] <= literal[int]
keyword[return] identifier[result] | def analyse_text(text):
"""
Recognize JCL job by header.
"""
result = 0.0
lines = text.split('\n')
if len(lines) > 0:
if JclLexer._JOB_HEADER_PATTERN.match(lines[0]):
result = 1.0 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
assert 0.0 <= result <= 1.0
return result |
def publish_proto_metadata_update(self):
""" Publish protobuf model in ipfs and update existing metadata file """
metadata = load_mpe_service_metadata(self.args.metadata_file)
ipfs_hash_base58 = utils_ipfs.publish_proto_in_ipfs(self._get_ipfs_client(), self.args.protodir)
metadata.set_simple_field("model_ipfs_hash", ipfs_hash_base58)
metadata.save_pretty(self.args.metadata_file) | def function[publish_proto_metadata_update, parameter[self]]:
constant[ Publish protobuf model in ipfs and update existing metadata file ]
variable[metadata] assign[=] call[name[load_mpe_service_metadata], parameter[name[self].args.metadata_file]]
variable[ipfs_hash_base58] assign[=] call[name[utils_ipfs].publish_proto_in_ipfs, parameter[call[name[self]._get_ipfs_client, parameter[]], name[self].args.protodir]]
call[name[metadata].set_simple_field, parameter[constant[model_ipfs_hash], name[ipfs_hash_base58]]]
call[name[metadata].save_pretty, parameter[name[self].args.metadata_file]] | keyword[def] identifier[publish_proto_metadata_update] ( identifier[self] ):
literal[string]
identifier[metadata] = identifier[load_mpe_service_metadata] ( identifier[self] . identifier[args] . identifier[metadata_file] )
identifier[ipfs_hash_base58] = identifier[utils_ipfs] . identifier[publish_proto_in_ipfs] ( identifier[self] . identifier[_get_ipfs_client] (), identifier[self] . identifier[args] . identifier[protodir] )
identifier[metadata] . identifier[set_simple_field] ( literal[string] , identifier[ipfs_hash_base58] )
identifier[metadata] . identifier[save_pretty] ( identifier[self] . identifier[args] . identifier[metadata_file] ) | def publish_proto_metadata_update(self):
""" Publish protobuf model in ipfs and update existing metadata file """
metadata = load_mpe_service_metadata(self.args.metadata_file)
ipfs_hash_base58 = utils_ipfs.publish_proto_in_ipfs(self._get_ipfs_client(), self.args.protodir)
metadata.set_simple_field('model_ipfs_hash', ipfs_hash_base58)
metadata.save_pretty(self.args.metadata_file) |
def get_occurrence(self, occ):
"""
Return a persisted occurrences matching the occ and remove it from
lookup since it has already been matched
"""
return self.lookup.pop(
(occ.event, occ.original_start, occ.original_end),
occ) | def function[get_occurrence, parameter[self, occ]]:
constant[
Return a persisted occurrences matching the occ and remove it from
lookup since it has already been matched
]
return[call[name[self].lookup.pop, parameter[tuple[[<ast.Attribute object at 0x7da1b26afb20>, <ast.Attribute object at 0x7da1b26ae860>, <ast.Attribute object at 0x7da1b26ac250>]], name[occ]]]] | keyword[def] identifier[get_occurrence] ( identifier[self] , identifier[occ] ):
literal[string]
keyword[return] identifier[self] . identifier[lookup] . identifier[pop] (
( identifier[occ] . identifier[event] , identifier[occ] . identifier[original_start] , identifier[occ] . identifier[original_end] ),
identifier[occ] ) | def get_occurrence(self, occ):
"""
Return a persisted occurrences matching the occ and remove it from
lookup since it has already been matched
"""
return self.lookup.pop((occ.event, occ.original_start, occ.original_end), occ) |
def add_deploy(state, deploy_func, *args, **kwargs):
'''
Prepare & add an deploy to pyinfra.state by executing it on all hosts.
Args:
state (``pyinfra.api.State`` obj): the deploy state to add the operation
deploy_func (function): the operation function from one of the modules,
ie ``server.user``
args/kwargs: passed to the operation function
'''
frameinfo = get_caller_frameinfo()
kwargs['frameinfo'] = frameinfo
for host in state.inventory:
deploy_func(state, host, *args, **kwargs) | def function[add_deploy, parameter[state, deploy_func]]:
constant[
Prepare & add an deploy to pyinfra.state by executing it on all hosts.
Args:
state (``pyinfra.api.State`` obj): the deploy state to add the operation
deploy_func (function): the operation function from one of the modules,
ie ``server.user``
args/kwargs: passed to the operation function
]
variable[frameinfo] assign[=] call[name[get_caller_frameinfo], parameter[]]
call[name[kwargs]][constant[frameinfo]] assign[=] name[frameinfo]
for taget[name[host]] in starred[name[state].inventory] begin[:]
call[name[deploy_func], parameter[name[state], name[host], <ast.Starred object at 0x7da18dc99630>]] | keyword[def] identifier[add_deploy] ( identifier[state] , identifier[deploy_func] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[frameinfo] = identifier[get_caller_frameinfo] ()
identifier[kwargs] [ literal[string] ]= identifier[frameinfo]
keyword[for] identifier[host] keyword[in] identifier[state] . identifier[inventory] :
identifier[deploy_func] ( identifier[state] , identifier[host] ,* identifier[args] ,** identifier[kwargs] ) | def add_deploy(state, deploy_func, *args, **kwargs):
"""
Prepare & add an deploy to pyinfra.state by executing it on all hosts.
Args:
state (``pyinfra.api.State`` obj): the deploy state to add the operation
deploy_func (function): the operation function from one of the modules,
ie ``server.user``
args/kwargs: passed to the operation function
"""
frameinfo = get_caller_frameinfo()
kwargs['frameinfo'] = frameinfo
for host in state.inventory:
deploy_func(state, host, *args, **kwargs) # depends on [control=['for'], data=['host']] |
def freq_from_iterators(cls, iterators):
"""
Returns the frequency corresponding to the given iterators
"""
return {
set(it): f for f, it in cls.FREQUENCIES.items()}[set(iterators)] | def function[freq_from_iterators, parameter[cls, iterators]]:
constant[
Returns the frequency corresponding to the given iterators
]
return[call[<ast.DictComp object at 0x7da18f00df90>][call[name[set], parameter[name[iterators]]]]] | keyword[def] identifier[freq_from_iterators] ( identifier[cls] , identifier[iterators] ):
literal[string]
keyword[return] {
identifier[set] ( identifier[it] ): identifier[f] keyword[for] identifier[f] , identifier[it] keyword[in] identifier[cls] . identifier[FREQUENCIES] . identifier[items] ()}[ identifier[set] ( identifier[iterators] )] | def freq_from_iterators(cls, iterators):
"""
Returns the frequency corresponding to the given iterators
"""
return {set(it): f for (f, it) in cls.FREQUENCIES.items()}[set(iterators)] |
def get_person_by_employee_id(self, employee_id):
"""
Returns a restclients.Person object for the given employee id. If the
employee id isn't found, or if there is an error communicating with the
PWS, a DataFailureException will be thrown.
"""
if not self.valid_employee_id(employee_id):
raise InvalidEmployeeID(employee_id)
url = "{}.json?{}".format(
PERSON_PREFIX, urlencode({"employee_id": employee_id}))
response = DAO.getURL(url, {"Accept": "application/json"})
if response.status != 200:
raise DataFailureException(url, response.status, response.data)
# Search does not return a full person resource
data = json.loads(response.data)
if not len(data["Persons"]):
raise DataFailureException(url, 404, "No person found")
regid = data["Persons"][0]["PersonURI"]["UWRegID"]
return self.get_person_by_regid(regid) | def function[get_person_by_employee_id, parameter[self, employee_id]]:
constant[
Returns a restclients.Person object for the given employee id. If the
employee id isn't found, or if there is an error communicating with the
PWS, a DataFailureException will be thrown.
]
if <ast.UnaryOp object at 0x7da2054a7820> begin[:]
<ast.Raise object at 0x7da20ec049a0>
variable[url] assign[=] call[constant[{}.json?{}].format, parameter[name[PERSON_PREFIX], call[name[urlencode], parameter[dictionary[[<ast.Constant object at 0x7da18ede5630>], [<ast.Name object at 0x7da18ede4340>]]]]]]
variable[response] assign[=] call[name[DAO].getURL, parameter[name[url], dictionary[[<ast.Constant object at 0x7da18ede4460>], [<ast.Constant object at 0x7da18ede7eb0>]]]]
if compare[name[response].status not_equal[!=] constant[200]] begin[:]
<ast.Raise object at 0x7da18ede4e80>
variable[data] assign[=] call[name[json].loads, parameter[name[response].data]]
if <ast.UnaryOp object at 0x7da18ede6530> begin[:]
<ast.Raise object at 0x7da18ede5ff0>
variable[regid] assign[=] call[call[call[call[name[data]][constant[Persons]]][constant[0]]][constant[PersonURI]]][constant[UWRegID]]
return[call[name[self].get_person_by_regid, parameter[name[regid]]]] | keyword[def] identifier[get_person_by_employee_id] ( identifier[self] , identifier[employee_id] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[valid_employee_id] ( identifier[employee_id] ):
keyword[raise] identifier[InvalidEmployeeID] ( identifier[employee_id] )
identifier[url] = literal[string] . identifier[format] (
identifier[PERSON_PREFIX] , identifier[urlencode] ({ literal[string] : identifier[employee_id] }))
identifier[response] = identifier[DAO] . identifier[getURL] ( identifier[url] ,{ literal[string] : literal[string] })
keyword[if] identifier[response] . identifier[status] != literal[int] :
keyword[raise] identifier[DataFailureException] ( identifier[url] , identifier[response] . identifier[status] , identifier[response] . identifier[data] )
identifier[data] = identifier[json] . identifier[loads] ( identifier[response] . identifier[data] )
keyword[if] keyword[not] identifier[len] ( identifier[data] [ literal[string] ]):
keyword[raise] identifier[DataFailureException] ( identifier[url] , literal[int] , literal[string] )
identifier[regid] = identifier[data] [ literal[string] ][ literal[int] ][ literal[string] ][ literal[string] ]
keyword[return] identifier[self] . identifier[get_person_by_regid] ( identifier[regid] ) | def get_person_by_employee_id(self, employee_id):
"""
Returns a restclients.Person object for the given employee id. If the
employee id isn't found, or if there is an error communicating with the
PWS, a DataFailureException will be thrown.
"""
if not self.valid_employee_id(employee_id):
raise InvalidEmployeeID(employee_id) # depends on [control=['if'], data=[]]
url = '{}.json?{}'.format(PERSON_PREFIX, urlencode({'employee_id': employee_id}))
response = DAO.getURL(url, {'Accept': 'application/json'})
if response.status != 200:
raise DataFailureException(url, response.status, response.data) # depends on [control=['if'], data=[]]
# Search does not return a full person resource
data = json.loads(response.data)
if not len(data['Persons']):
raise DataFailureException(url, 404, 'No person found') # depends on [control=['if'], data=[]]
regid = data['Persons'][0]['PersonURI']['UWRegID']
return self.get_person_by_regid(regid) |
def convertToNative(self, aVal):
""" Convert to native bool; interpret certain strings. """
if aVal is None:
return None
if isinstance(aVal, bool): return aVal
# otherwise interpret strings
return str(aVal).lower() in ('1','on','yes','true') | def function[convertToNative, parameter[self, aVal]]:
constant[ Convert to native bool; interpret certain strings. ]
if compare[name[aVal] is constant[None]] begin[:]
return[constant[None]]
if call[name[isinstance], parameter[name[aVal], name[bool]]] begin[:]
return[name[aVal]]
return[compare[call[call[name[str], parameter[name[aVal]]].lower, parameter[]] in tuple[[<ast.Constant object at 0x7da1b0f521d0>, <ast.Constant object at 0x7da1b0f51a50>, <ast.Constant object at 0x7da1b0f51a20>, <ast.Constant object at 0x7da1b0f519f0>]]]] | keyword[def] identifier[convertToNative] ( identifier[self] , identifier[aVal] ):
literal[string]
keyword[if] identifier[aVal] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[if] identifier[isinstance] ( identifier[aVal] , identifier[bool] ): keyword[return] identifier[aVal]
keyword[return] identifier[str] ( identifier[aVal] ). identifier[lower] () keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ) | def convertToNative(self, aVal):
""" Convert to native bool; interpret certain strings. """
if aVal is None:
return None # depends on [control=['if'], data=[]]
if isinstance(aVal, bool):
return aVal # depends on [control=['if'], data=[]]
# otherwise interpret strings
return str(aVal).lower() in ('1', 'on', 'yes', 'true') |
def parse_memory(s):
"""Converts bytes expression to number of mebibytes.
If no unit is specified, ``MiB`` is used."""
if isinstance(s, integer):
out = s
elif isinstance(s, float):
out = math_ceil(s)
elif isinstance(s, string):
s = s.replace(' ', '')
if not s:
raise context.ValueError("Could not interpret %r as a byte unit" % s)
if s[0].isdigit():
for i, c in enumerate(reversed(s)):
if not c.isalpha():
break
index = len(s) - i
prefix = s[:index]
suffix = s[index:]
try:
n = float(prefix)
except ValueError:
raise context.ValueError("Could not interpret %r as a number" % prefix)
else:
n = 1
suffix = s
try:
multiplier = _byte_sizes[suffix.lower()]
except KeyError:
raise context.ValueError("Could not interpret %r as a byte unit" % suffix)
out = math_ceil(n * multiplier / (2 ** 20))
else:
raise context.TypeError("memory must be an integer, got %r"
% type(s).__name__)
if out < 0:
raise context.ValueError("memory must be positive")
return out | def function[parse_memory, parameter[s]]:
constant[Converts bytes expression to number of mebibytes.
If no unit is specified, ``MiB`` is used.]
if call[name[isinstance], parameter[name[s], name[integer]]] begin[:]
variable[out] assign[=] name[s]
if compare[name[out] less[<] constant[0]] begin[:]
<ast.Raise object at 0x7da1b078ef80>
return[name[out]] | keyword[def] identifier[parse_memory] ( identifier[s] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[s] , identifier[integer] ):
identifier[out] = identifier[s]
keyword[elif] identifier[isinstance] ( identifier[s] , identifier[float] ):
identifier[out] = identifier[math_ceil] ( identifier[s] )
keyword[elif] identifier[isinstance] ( identifier[s] , identifier[string] ):
identifier[s] = identifier[s] . identifier[replace] ( literal[string] , literal[string] )
keyword[if] keyword[not] identifier[s] :
keyword[raise] identifier[context] . identifier[ValueError] ( literal[string] % identifier[s] )
keyword[if] identifier[s] [ literal[int] ]. identifier[isdigit] ():
keyword[for] identifier[i] , identifier[c] keyword[in] identifier[enumerate] ( identifier[reversed] ( identifier[s] )):
keyword[if] keyword[not] identifier[c] . identifier[isalpha] ():
keyword[break]
identifier[index] = identifier[len] ( identifier[s] )- identifier[i]
identifier[prefix] = identifier[s] [: identifier[index] ]
identifier[suffix] = identifier[s] [ identifier[index] :]
keyword[try] :
identifier[n] = identifier[float] ( identifier[prefix] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[context] . identifier[ValueError] ( literal[string] % identifier[prefix] )
keyword[else] :
identifier[n] = literal[int]
identifier[suffix] = identifier[s]
keyword[try] :
identifier[multiplier] = identifier[_byte_sizes] [ identifier[suffix] . identifier[lower] ()]
keyword[except] identifier[KeyError] :
keyword[raise] identifier[context] . identifier[ValueError] ( literal[string] % identifier[suffix] )
identifier[out] = identifier[math_ceil] ( identifier[n] * identifier[multiplier] /( literal[int] ** literal[int] ))
keyword[else] :
keyword[raise] identifier[context] . identifier[TypeError] ( literal[string]
% identifier[type] ( identifier[s] ). identifier[__name__] )
keyword[if] identifier[out] < literal[int] :
keyword[raise] identifier[context] . identifier[ValueError] ( literal[string] )
keyword[return] identifier[out] | def parse_memory(s):
"""Converts bytes expression to number of mebibytes.
If no unit is specified, ``MiB`` is used."""
if isinstance(s, integer):
out = s # depends on [control=['if'], data=[]]
elif isinstance(s, float):
out = math_ceil(s) # depends on [control=['if'], data=[]]
elif isinstance(s, string):
s = s.replace(' ', '')
if not s:
raise context.ValueError('Could not interpret %r as a byte unit' % s) # depends on [control=['if'], data=[]]
if s[0].isdigit():
for (i, c) in enumerate(reversed(s)):
if not c.isalpha():
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
index = len(s) - i
prefix = s[:index]
suffix = s[index:]
try:
n = float(prefix) # depends on [control=['try'], data=[]]
except ValueError:
raise context.ValueError('Could not interpret %r as a number' % prefix) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
n = 1
suffix = s
try:
multiplier = _byte_sizes[suffix.lower()] # depends on [control=['try'], data=[]]
except KeyError:
raise context.ValueError('Could not interpret %r as a byte unit' % suffix) # depends on [control=['except'], data=[]]
out = math_ceil(n * multiplier / 2 ** 20) # depends on [control=['if'], data=[]]
else:
raise context.TypeError('memory must be an integer, got %r' % type(s).__name__)
if out < 0:
raise context.ValueError('memory must be positive') # depends on [control=['if'], data=[]]
return out |
def ccmod_setcoef(k):
"""Set the coefficient maps for the ccmod stage. The only parameter is
the slice index `k` and there are no return values; all inputs and
outputs are from and to global variables.
"""
# Set working coefficient maps for ccmod step and compute DFT of
# coefficient maps Z and Z^T S
mp_Zf[k] = sl.rfftn(mp_Z_Y[k], mp_cri.Nv, mp_cri.axisN)
mp_ZSf[k] = np.conj(mp_Zf[k]) * mp_Sf[k] | def function[ccmod_setcoef, parameter[k]]:
constant[Set the coefficient maps for the ccmod stage. The only parameter is
the slice index `k` and there are no return values; all inputs and
outputs are from and to global variables.
]
call[name[mp_Zf]][name[k]] assign[=] call[name[sl].rfftn, parameter[call[name[mp_Z_Y]][name[k]], name[mp_cri].Nv, name[mp_cri].axisN]]
call[name[mp_ZSf]][name[k]] assign[=] binary_operation[call[name[np].conj, parameter[call[name[mp_Zf]][name[k]]]] * call[name[mp_Sf]][name[k]]] | keyword[def] identifier[ccmod_setcoef] ( identifier[k] ):
literal[string]
identifier[mp_Zf] [ identifier[k] ]= identifier[sl] . identifier[rfftn] ( identifier[mp_Z_Y] [ identifier[k] ], identifier[mp_cri] . identifier[Nv] , identifier[mp_cri] . identifier[axisN] )
identifier[mp_ZSf] [ identifier[k] ]= identifier[np] . identifier[conj] ( identifier[mp_Zf] [ identifier[k] ])* identifier[mp_Sf] [ identifier[k] ] | def ccmod_setcoef(k):
"""Set the coefficient maps for the ccmod stage. The only parameter is
the slice index `k` and there are no return values; all inputs and
outputs are from and to global variables.
"""
# Set working coefficient maps for ccmod step and compute DFT of
# coefficient maps Z and Z^T S
mp_Zf[k] = sl.rfftn(mp_Z_Y[k], mp_cri.Nv, mp_cri.axisN)
mp_ZSf[k] = np.conj(mp_Zf[k]) * mp_Sf[k] |
def combine(files: List[str], output_file: str, key: str = None, file_attrs: Dict[str, str] = None, batch_size: int = 1000, convert_attrs: bool = False) -> None:
"""
Combine two or more loom files and save as a new loom file
Args:
files (list of str): the list of input files (full paths)
output_file (str): full path of the output loom file
key (string): Row attribute to use to verify row ordering
file_attrs (dict): file attributes (title, description, url, etc.)
batch_size (int): limits the batch or cols/rows read in memory (default: 1000)
convert_attrs (bool): convert file attributes that differ between files into column attributes
Returns:
Nothing, but creates a new loom file combining the input files.
Note that the loom files must have exactly the same
number of rows, and must have exactly the same column attributes.
Named layers not present in the first file are discarded.
.. warning::
If you don't give a ``key`` argument, the files will be combined without changing
the ordering of rows or columns. Row attributes will be taken from the first file.
Hence, if rows are not in the same order in all files, the result may be meaningless.
To guard against this issue, you are strongly advised to provide a ``key`` argument,
which is used to sort files while merging. The ``key`` should be the name of a row
atrribute that contains a unique value for each row. For example, to order rows by
the attribute ``Accession``:
.. highlight:: python
.. code-block:: python
import loompy
loompy.combine(files, key="Accession")
"""
if file_attrs is None:
file_attrs = {}
if len(files) == 0:
raise ValueError("The input file list was empty")
copyfile(files[0], output_file)
ds = connect(output_file)
for a in file_attrs:
ds.attrs[a] = file_attrs[a]
if len(files) >= 2:
for f in files[1:]:
ds.add_loom(f, key, batch_size=batch_size, convert_attrs=convert_attrs)
ds.close() | def function[combine, parameter[files, output_file, key, file_attrs, batch_size, convert_attrs]]:
constant[
Combine two or more loom files and save as a new loom file
Args:
files (list of str): the list of input files (full paths)
output_file (str): full path of the output loom file
key (string): Row attribute to use to verify row ordering
file_attrs (dict): file attributes (title, description, url, etc.)
batch_size (int): limits the batch or cols/rows read in memory (default: 1000)
convert_attrs (bool): convert file attributes that differ between files into column attributes
Returns:
Nothing, but creates a new loom file combining the input files.
Note that the loom files must have exactly the same
number of rows, and must have exactly the same column attributes.
Named layers not present in the first file are discarded.
.. warning::
If you don't give a ``key`` argument, the files will be combined without changing
the ordering of rows or columns. Row attributes will be taken from the first file.
Hence, if rows are not in the same order in all files, the result may be meaningless.
To guard against this issue, you are strongly advised to provide a ``key`` argument,
which is used to sort files while merging. The ``key`` should be the name of a row
atrribute that contains a unique value for each row. For example, to order rows by
the attribute ``Accession``:
.. highlight:: python
.. code-block:: python
import loompy
loompy.combine(files, key="Accession")
]
if compare[name[file_attrs] is constant[None]] begin[:]
variable[file_attrs] assign[=] dictionary[[], []]
if compare[call[name[len], parameter[name[files]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da18f00c8e0>
call[name[copyfile], parameter[call[name[files]][constant[0]], name[output_file]]]
variable[ds] assign[=] call[name[connect], parameter[name[output_file]]]
for taget[name[a]] in starred[name[file_attrs]] begin[:]
call[name[ds].attrs][name[a]] assign[=] call[name[file_attrs]][name[a]]
if compare[call[name[len], parameter[name[files]]] greater_or_equal[>=] constant[2]] begin[:]
for taget[name[f]] in starred[call[name[files]][<ast.Slice object at 0x7da18f00eef0>]] begin[:]
call[name[ds].add_loom, parameter[name[f], name[key]]]
call[name[ds].close, parameter[]] | keyword[def] identifier[combine] ( identifier[files] : identifier[List] [ identifier[str] ], identifier[output_file] : identifier[str] , identifier[key] : identifier[str] = keyword[None] , identifier[file_attrs] : identifier[Dict] [ identifier[str] , identifier[str] ]= keyword[None] , identifier[batch_size] : identifier[int] = literal[int] , identifier[convert_attrs] : identifier[bool] = keyword[False] )-> keyword[None] :
literal[string]
keyword[if] identifier[file_attrs] keyword[is] keyword[None] :
identifier[file_attrs] ={}
keyword[if] identifier[len] ( identifier[files] )== literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[copyfile] ( identifier[files] [ literal[int] ], identifier[output_file] )
identifier[ds] = identifier[connect] ( identifier[output_file] )
keyword[for] identifier[a] keyword[in] identifier[file_attrs] :
identifier[ds] . identifier[attrs] [ identifier[a] ]= identifier[file_attrs] [ identifier[a] ]
keyword[if] identifier[len] ( identifier[files] )>= literal[int] :
keyword[for] identifier[f] keyword[in] identifier[files] [ literal[int] :]:
identifier[ds] . identifier[add_loom] ( identifier[f] , identifier[key] , identifier[batch_size] = identifier[batch_size] , identifier[convert_attrs] = identifier[convert_attrs] )
identifier[ds] . identifier[close] () | def combine(files: List[str], output_file: str, key: str=None, file_attrs: Dict[str, str]=None, batch_size: int=1000, convert_attrs: bool=False) -> None:
"""
Combine two or more loom files and save as a new loom file
Args:
files (list of str): the list of input files (full paths)
output_file (str): full path of the output loom file
key (string): Row attribute to use to verify row ordering
file_attrs (dict): file attributes (title, description, url, etc.)
batch_size (int): limits the batch or cols/rows read in memory (default: 1000)
convert_attrs (bool): convert file attributes that differ between files into column attributes
Returns:
Nothing, but creates a new loom file combining the input files.
Note that the loom files must have exactly the same
number of rows, and must have exactly the same column attributes.
Named layers not present in the first file are discarded.
.. warning::
If you don't give a ``key`` argument, the files will be combined without changing
the ordering of rows or columns. Row attributes will be taken from the first file.
Hence, if rows are not in the same order in all files, the result may be meaningless.
To guard against this issue, you are strongly advised to provide a ``key`` argument,
which is used to sort files while merging. The ``key`` should be the name of a row
atrribute that contains a unique value for each row. For example, to order rows by
the attribute ``Accession``:
.. highlight:: python
.. code-block:: python
import loompy
loompy.combine(files, key="Accession")
"""
if file_attrs is None:
file_attrs = {} # depends on [control=['if'], data=['file_attrs']]
if len(files) == 0:
raise ValueError('The input file list was empty') # depends on [control=['if'], data=[]]
copyfile(files[0], output_file)
ds = connect(output_file)
for a in file_attrs:
ds.attrs[a] = file_attrs[a] # depends on [control=['for'], data=['a']]
if len(files) >= 2:
for f in files[1:]:
ds.add_loom(f, key, batch_size=batch_size, convert_attrs=convert_attrs) # depends on [control=['for'], data=['f']] # depends on [control=['if'], data=[]]
ds.close() |
def read_values(target_usage):
"""read feature report values"""
# browse all devices
all_devices = hid.HidDeviceFilter().get_devices()
if not all_devices:
print("Can't find any non system HID device connected")
else:
# search for our target usage
usage_found = False
for device in all_devices:
try:
device.open()
# browse feature reports
for report in device.find_feature_reports():
if target_usage in report:
# we found our usage
report.get()
# print result
print("The value:", list(report[target_usage]))
print("All the report: {0}".format(report.get_raw_data()))
usage_found = True
finally:
device.close()
if not usage_found:
print("The target device was found, but the requested usage does not exist!\n") | def function[read_values, parameter[target_usage]]:
constant[read feature report values]
variable[all_devices] assign[=] call[call[name[hid].HidDeviceFilter, parameter[]].get_devices, parameter[]]
if <ast.UnaryOp object at 0x7da1b06fc0a0> begin[:]
call[name[print], parameter[constant[Can't find any non system HID device connected]]] | keyword[def] identifier[read_values] ( identifier[target_usage] ):
literal[string]
identifier[all_devices] = identifier[hid] . identifier[HidDeviceFilter] (). identifier[get_devices] ()
keyword[if] keyword[not] identifier[all_devices] :
identifier[print] ( literal[string] )
keyword[else] :
identifier[usage_found] = keyword[False]
keyword[for] identifier[device] keyword[in] identifier[all_devices] :
keyword[try] :
identifier[device] . identifier[open] ()
keyword[for] identifier[report] keyword[in] identifier[device] . identifier[find_feature_reports] ():
keyword[if] identifier[target_usage] keyword[in] identifier[report] :
identifier[report] . identifier[get] ()
identifier[print] ( literal[string] , identifier[list] ( identifier[report] [ identifier[target_usage] ]))
identifier[print] ( literal[string] . identifier[format] ( identifier[report] . identifier[get_raw_data] ()))
identifier[usage_found] = keyword[True]
keyword[finally] :
identifier[device] . identifier[close] ()
keyword[if] keyword[not] identifier[usage_found] :
identifier[print] ( literal[string] ) | def read_values(target_usage):
"""read feature report values""" # browse all devices
all_devices = hid.HidDeviceFilter().get_devices()
if not all_devices:
print("Can't find any non system HID device connected") # depends on [control=['if'], data=[]]
else: # search for our target usage
usage_found = False
for device in all_devices:
try:
device.open() # browse feature reports
for report in device.find_feature_reports():
if target_usage in report: # we found our usage
report.get() # print result
print('The value:', list(report[target_usage]))
print('All the report: {0}'.format(report.get_raw_data()))
usage_found = True # depends on [control=['if'], data=['target_usage', 'report']] # depends on [control=['for'], data=['report']] # depends on [control=['try'], data=[]]
finally:
device.close() # depends on [control=['for'], data=['device']]
if not usage_found:
print('The target device was found, but the requested usage does not exist!\n') # depends on [control=['if'], data=[]] |
def make_message(self, data):
"""
Create a Message instance from data, data will be loaded
via munge according to the codec specified in the
transport_content_type attribute
Returns:
Message: message object
"""
data = self.codec.loads(data)
msg = Message(
data.get("data"),
*data.get("args",[]),
**data.get("kwargs",{})
)
msg.meta.update(data.get("meta"))
self.trigger("make_message", data, msg)
return msg | def function[make_message, parameter[self, data]]:
constant[
Create a Message instance from data, data will be loaded
via munge according to the codec specified in the
transport_content_type attribute
Returns:
Message: message object
]
variable[data] assign[=] call[name[self].codec.loads, parameter[name[data]]]
variable[msg] assign[=] call[name[Message], parameter[call[name[data].get, parameter[constant[data]]], <ast.Starred object at 0x7da1b1588640>]]
call[name[msg].meta.update, parameter[call[name[data].get, parameter[constant[meta]]]]]
call[name[self].trigger, parameter[constant[make_message], name[data], name[msg]]]
return[name[msg]] | keyword[def] identifier[make_message] ( identifier[self] , identifier[data] ):
literal[string]
identifier[data] = identifier[self] . identifier[codec] . identifier[loads] ( identifier[data] )
identifier[msg] = identifier[Message] (
identifier[data] . identifier[get] ( literal[string] ),
* identifier[data] . identifier[get] ( literal[string] ,[]),
** identifier[data] . identifier[get] ( literal[string] ,{})
)
identifier[msg] . identifier[meta] . identifier[update] ( identifier[data] . identifier[get] ( literal[string] ))
identifier[self] . identifier[trigger] ( literal[string] , identifier[data] , identifier[msg] )
keyword[return] identifier[msg] | def make_message(self, data):
"""
Create a Message instance from data, data will be loaded
via munge according to the codec specified in the
transport_content_type attribute
Returns:
Message: message object
"""
data = self.codec.loads(data)
msg = Message(data.get('data'), *data.get('args', []), **data.get('kwargs', {}))
msg.meta.update(data.get('meta'))
self.trigger('make_message', data, msg)
return msg |
def set_sdk_enabled(cls, value):
"""
Modifies the enabled flag if the "AWS_XRAY_SDK_ENABLED" environment variable is not set,
otherwise, set the enabled flag to be equal to the environment variable. If the
env variable is an invalid string boolean, it will default to true.
:param bool value: Flag to set whether the SDK is enabled or disabled.
Environment variables AWS_XRAY_SDK_ENABLED overrides argument value.
"""
# Environment Variables take precedence over hardcoded configurations.
if cls.XRAY_ENABLED_KEY in os.environ:
cls.__SDK_ENABLED = str(os.getenv(cls.XRAY_ENABLED_KEY, 'true')).lower() != 'false'
else:
if type(value) == bool:
cls.__SDK_ENABLED = value
else:
cls.__SDK_ENABLED = True
log.warning("Invalid parameter type passed into set_sdk_enabled(). Defaulting to True...") | def function[set_sdk_enabled, parameter[cls, value]]:
constant[
Modifies the enabled flag if the "AWS_XRAY_SDK_ENABLED" environment variable is not set,
otherwise, set the enabled flag to be equal to the environment variable. If the
env variable is an invalid string boolean, it will default to true.
:param bool value: Flag to set whether the SDK is enabled or disabled.
Environment variables AWS_XRAY_SDK_ENABLED overrides argument value.
]
if compare[name[cls].XRAY_ENABLED_KEY in name[os].environ] begin[:]
name[cls].__SDK_ENABLED assign[=] compare[call[call[name[str], parameter[call[name[os].getenv, parameter[name[cls].XRAY_ENABLED_KEY, constant[true]]]]].lower, parameter[]] not_equal[!=] constant[false]] | keyword[def] identifier[set_sdk_enabled] ( identifier[cls] , identifier[value] ):
literal[string]
keyword[if] identifier[cls] . identifier[XRAY_ENABLED_KEY] keyword[in] identifier[os] . identifier[environ] :
identifier[cls] . identifier[__SDK_ENABLED] = identifier[str] ( identifier[os] . identifier[getenv] ( identifier[cls] . identifier[XRAY_ENABLED_KEY] , literal[string] )). identifier[lower] ()!= literal[string]
keyword[else] :
keyword[if] identifier[type] ( identifier[value] )== identifier[bool] :
identifier[cls] . identifier[__SDK_ENABLED] = identifier[value]
keyword[else] :
identifier[cls] . identifier[__SDK_ENABLED] = keyword[True]
identifier[log] . identifier[warning] ( literal[string] ) | def set_sdk_enabled(cls, value):
"""
Modifies the enabled flag if the "AWS_XRAY_SDK_ENABLED" environment variable is not set,
otherwise, set the enabled flag to be equal to the environment variable. If the
env variable is an invalid string boolean, it will default to true.
:param bool value: Flag to set whether the SDK is enabled or disabled.
Environment variables AWS_XRAY_SDK_ENABLED overrides argument value.
"""
# Environment Variables take precedence over hardcoded configurations.
if cls.XRAY_ENABLED_KEY in os.environ:
cls.__SDK_ENABLED = str(os.getenv(cls.XRAY_ENABLED_KEY, 'true')).lower() != 'false' # depends on [control=['if'], data=[]]
elif type(value) == bool:
cls.__SDK_ENABLED = value # depends on [control=['if'], data=[]]
else:
cls.__SDK_ENABLED = True
log.warning('Invalid parameter type passed into set_sdk_enabled(). Defaulting to True...') |
def _galaxy_loc_iter(loc_file, galaxy_dt, need_remap=False):
"""Iterator returning genome build and references from Galaxy *.loc file.
"""
if "column" in galaxy_dt:
dbkey_i = galaxy_dt["column"].index("dbkey")
path_i = galaxy_dt["column"].index("path")
else:
dbkey_i = None
if os.path.exists(loc_file):
with open(loc_file) as in_handle:
for line in in_handle:
if line.strip() and not line.startswith("#"):
parts = [x.strip() for x in line.strip().split("\t")]
# Detect and report spaces instead of tabs
if len(parts) == 1:
parts = [x.strip() for x in line.strip().split(" ") if x.strip()]
if len(parts) > 1:
raise IOError("Galaxy location file uses spaces instead of "
"tabs to separate fields: %s" % loc_file)
if dbkey_i is not None and not need_remap:
dbkey = parts[dbkey_i]
cur_ref = parts[path_i]
else:
if parts[0] == "index":
parts = parts[1:]
dbkey = parts[0]
cur_ref = parts[-1]
yield (dbkey, cur_ref) | def function[_galaxy_loc_iter, parameter[loc_file, galaxy_dt, need_remap]]:
constant[Iterator returning genome build and references from Galaxy *.loc file.
]
if compare[constant[column] in name[galaxy_dt]] begin[:]
variable[dbkey_i] assign[=] call[call[name[galaxy_dt]][constant[column]].index, parameter[constant[dbkey]]]
variable[path_i] assign[=] call[call[name[galaxy_dt]][constant[column]].index, parameter[constant[path]]]
if call[name[os].path.exists, parameter[name[loc_file]]] begin[:]
with call[name[open], parameter[name[loc_file]]] begin[:]
for taget[name[line]] in starred[name[in_handle]] begin[:]
if <ast.BoolOp object at 0x7da1b18d1630> begin[:]
variable[parts] assign[=] <ast.ListComp object at 0x7da1b18d1d20>
if compare[call[name[len], parameter[name[parts]]] equal[==] constant[1]] begin[:]
variable[parts] assign[=] <ast.ListComp object at 0x7da1b18d2380>
if compare[call[name[len], parameter[name[parts]]] greater[>] constant[1]] begin[:]
<ast.Raise object at 0x7da1b18d2500>
if <ast.BoolOp object at 0x7da1b18d2f80> begin[:]
variable[dbkey] assign[=] call[name[parts]][name[dbkey_i]]
variable[cur_ref] assign[=] call[name[parts]][name[path_i]]
<ast.Yield object at 0x7da1b1881a20> | keyword[def] identifier[_galaxy_loc_iter] ( identifier[loc_file] , identifier[galaxy_dt] , identifier[need_remap] = keyword[False] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[galaxy_dt] :
identifier[dbkey_i] = identifier[galaxy_dt] [ literal[string] ]. identifier[index] ( literal[string] )
identifier[path_i] = identifier[galaxy_dt] [ literal[string] ]. identifier[index] ( literal[string] )
keyword[else] :
identifier[dbkey_i] = keyword[None]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[loc_file] ):
keyword[with] identifier[open] ( identifier[loc_file] ) keyword[as] identifier[in_handle] :
keyword[for] identifier[line] keyword[in] identifier[in_handle] :
keyword[if] identifier[line] . identifier[strip] () keyword[and] keyword[not] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[parts] =[ identifier[x] . identifier[strip] () keyword[for] identifier[x] keyword[in] identifier[line] . identifier[strip] (). identifier[split] ( literal[string] )]
keyword[if] identifier[len] ( identifier[parts] )== literal[int] :
identifier[parts] =[ identifier[x] . identifier[strip] () keyword[for] identifier[x] keyword[in] identifier[line] . identifier[strip] (). identifier[split] ( literal[string] ) keyword[if] identifier[x] . identifier[strip] ()]
keyword[if] identifier[len] ( identifier[parts] )> literal[int] :
keyword[raise] identifier[IOError] ( literal[string]
literal[string] % identifier[loc_file] )
keyword[if] identifier[dbkey_i] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[need_remap] :
identifier[dbkey] = identifier[parts] [ identifier[dbkey_i] ]
identifier[cur_ref] = identifier[parts] [ identifier[path_i] ]
keyword[else] :
keyword[if] identifier[parts] [ literal[int] ]== literal[string] :
identifier[parts] = identifier[parts] [ literal[int] :]
identifier[dbkey] = identifier[parts] [ literal[int] ]
identifier[cur_ref] = identifier[parts] [- literal[int] ]
keyword[yield] ( identifier[dbkey] , identifier[cur_ref] ) | def _galaxy_loc_iter(loc_file, galaxy_dt, need_remap=False):
"""Iterator returning genome build and references from Galaxy *.loc file.
"""
if 'column' in galaxy_dt:
dbkey_i = galaxy_dt['column'].index('dbkey')
path_i = galaxy_dt['column'].index('path') # depends on [control=['if'], data=['galaxy_dt']]
else:
dbkey_i = None
if os.path.exists(loc_file):
with open(loc_file) as in_handle:
for line in in_handle:
if line.strip() and (not line.startswith('#')):
parts = [x.strip() for x in line.strip().split('\t')]
# Detect and report spaces instead of tabs
if len(parts) == 1:
parts = [x.strip() for x in line.strip().split(' ') if x.strip()]
if len(parts) > 1:
raise IOError('Galaxy location file uses spaces instead of tabs to separate fields: %s' % loc_file) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if dbkey_i is not None and (not need_remap):
dbkey = parts[dbkey_i]
cur_ref = parts[path_i] # depends on [control=['if'], data=[]]
else:
if parts[0] == 'index':
parts = parts[1:] # depends on [control=['if'], data=[]]
dbkey = parts[0]
cur_ref = parts[-1]
yield (dbkey, cur_ref) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['in_handle']] # depends on [control=['if'], data=[]] |
def multinterp(x, y, xquery, slow=False):
"""Multiple linear interpolations
Parameters
----------
x : array_like, shape=(N,)
sorted array of x values
y : array_like, shape=(N, M)
array of y values corresponding to each x value
xquery : array_like, shape=(M,)
array of query values
slow : boolean, default=False
if True, use slow method (used mainly for unit testing)
Returns
-------
yquery : ndarray, shape=(M,)
The interpolated values corresponding to each x query.
"""
x, y, xquery = map(np.asarray, (x, y, xquery))
assert x.ndim == 1
assert xquery.ndim == 1
assert y.shape == x.shape + xquery.shape
# make sure xmin < xquery < xmax in all cases
xquery = np.clip(xquery, x.min(), x.max())
if slow:
from scipy.interpolate import interp1d
return np.array([interp1d(x, y)(xq) for xq, y in zip(xquery, y.T)])
elif len(x) == 3:
# Most common case: use a faster approach
yq_lower = y[0] + (xquery - x[0]) * (y[1] - y[0]) / (x[1] - x[0])
yq_upper = y[1] + (xquery - x[1]) * (y[2] - y[1]) / (x[2] - x[1])
return np.where(xquery < x[1], yq_lower, yq_upper)
else:
i = np.clip(np.searchsorted(x, xquery, side='right') - 1,
0, len(x) - 2)
j = np.arange(len(xquery))
return y[i, j] + ((xquery - x[i]) *
(y[i + 1, j] - y[i, j]) / (x[i + 1] - x[i])) | def function[multinterp, parameter[x, y, xquery, slow]]:
constant[Multiple linear interpolations
Parameters
----------
x : array_like, shape=(N,)
sorted array of x values
y : array_like, shape=(N, M)
array of y values corresponding to each x value
xquery : array_like, shape=(M,)
array of query values
slow : boolean, default=False
if True, use slow method (used mainly for unit testing)
Returns
-------
yquery : ndarray, shape=(M,)
The interpolated values corresponding to each x query.
]
<ast.Tuple object at 0x7da204962cb0> assign[=] call[name[map], parameter[name[np].asarray, tuple[[<ast.Name object at 0x7da204963bb0>, <ast.Name object at 0x7da204960940>, <ast.Name object at 0x7da204960580>]]]]
assert[compare[name[x].ndim equal[==] constant[1]]]
assert[compare[name[xquery].ndim equal[==] constant[1]]]
assert[compare[name[y].shape equal[==] binary_operation[name[x].shape + name[xquery].shape]]]
variable[xquery] assign[=] call[name[np].clip, parameter[name[xquery], call[name[x].min, parameter[]], call[name[x].max, parameter[]]]]
if name[slow] begin[:]
from relative_module[scipy.interpolate] import module[interp1d]
return[call[name[np].array, parameter[<ast.ListComp object at 0x7da2049638e0>]]] | keyword[def] identifier[multinterp] ( identifier[x] , identifier[y] , identifier[xquery] , identifier[slow] = keyword[False] ):
literal[string]
identifier[x] , identifier[y] , identifier[xquery] = identifier[map] ( identifier[np] . identifier[asarray] ,( identifier[x] , identifier[y] , identifier[xquery] ))
keyword[assert] identifier[x] . identifier[ndim] == literal[int]
keyword[assert] identifier[xquery] . identifier[ndim] == literal[int]
keyword[assert] identifier[y] . identifier[shape] == identifier[x] . identifier[shape] + identifier[xquery] . identifier[shape]
identifier[xquery] = identifier[np] . identifier[clip] ( identifier[xquery] , identifier[x] . identifier[min] (), identifier[x] . identifier[max] ())
keyword[if] identifier[slow] :
keyword[from] identifier[scipy] . identifier[interpolate] keyword[import] identifier[interp1d]
keyword[return] identifier[np] . identifier[array] ([ identifier[interp1d] ( identifier[x] , identifier[y] )( identifier[xq] ) keyword[for] identifier[xq] , identifier[y] keyword[in] identifier[zip] ( identifier[xquery] , identifier[y] . identifier[T] )])
keyword[elif] identifier[len] ( identifier[x] )== literal[int] :
identifier[yq_lower] = identifier[y] [ literal[int] ]+( identifier[xquery] - identifier[x] [ literal[int] ])*( identifier[y] [ literal[int] ]- identifier[y] [ literal[int] ])/( identifier[x] [ literal[int] ]- identifier[x] [ literal[int] ])
identifier[yq_upper] = identifier[y] [ literal[int] ]+( identifier[xquery] - identifier[x] [ literal[int] ])*( identifier[y] [ literal[int] ]- identifier[y] [ literal[int] ])/( identifier[x] [ literal[int] ]- identifier[x] [ literal[int] ])
keyword[return] identifier[np] . identifier[where] ( identifier[xquery] < identifier[x] [ literal[int] ], identifier[yq_lower] , identifier[yq_upper] )
keyword[else] :
identifier[i] = identifier[np] . identifier[clip] ( identifier[np] . identifier[searchsorted] ( identifier[x] , identifier[xquery] , identifier[side] = literal[string] )- literal[int] ,
literal[int] , identifier[len] ( identifier[x] )- literal[int] )
identifier[j] = identifier[np] . identifier[arange] ( identifier[len] ( identifier[xquery] ))
keyword[return] identifier[y] [ identifier[i] , identifier[j] ]+(( identifier[xquery] - identifier[x] [ identifier[i] ])*
( identifier[y] [ identifier[i] + literal[int] , identifier[j] ]- identifier[y] [ identifier[i] , identifier[j] ])/( identifier[x] [ identifier[i] + literal[int] ]- identifier[x] [ identifier[i] ])) | def multinterp(x, y, xquery, slow=False):
"""Multiple linear interpolations
Parameters
----------
x : array_like, shape=(N,)
sorted array of x values
y : array_like, shape=(N, M)
array of y values corresponding to each x value
xquery : array_like, shape=(M,)
array of query values
slow : boolean, default=False
if True, use slow method (used mainly for unit testing)
Returns
-------
yquery : ndarray, shape=(M,)
The interpolated values corresponding to each x query.
"""
(x, y, xquery) = map(np.asarray, (x, y, xquery))
assert x.ndim == 1
assert xquery.ndim == 1
assert y.shape == x.shape + xquery.shape
# make sure xmin < xquery < xmax in all cases
xquery = np.clip(xquery, x.min(), x.max())
if slow:
from scipy.interpolate import interp1d
return np.array([interp1d(x, y)(xq) for (xq, y) in zip(xquery, y.T)]) # depends on [control=['if'], data=[]]
elif len(x) == 3:
# Most common case: use a faster approach
yq_lower = y[0] + (xquery - x[0]) * (y[1] - y[0]) / (x[1] - x[0])
yq_upper = y[1] + (xquery - x[1]) * (y[2] - y[1]) / (x[2] - x[1])
return np.where(xquery < x[1], yq_lower, yq_upper) # depends on [control=['if'], data=[]]
else:
i = np.clip(np.searchsorted(x, xquery, side='right') - 1, 0, len(x) - 2)
j = np.arange(len(xquery))
return y[i, j] + (xquery - x[i]) * (y[i + 1, j] - y[i, j]) / (x[i + 1] - x[i]) |
def _process_policy_eval_results(to_eval, eval_results, active_episodes,
active_envs, off_policy_actions, policies,
clip_actions):
"""Process the output of policy neural network evaluation.
Records policy evaluation results into the given episode objects and
returns replies to send back to agents in the env.
Returns:
actions_to_send: nested dict of env id -> agent id -> agent replies.
"""
actions_to_send = defaultdict(dict)
for env_id in active_envs:
actions_to_send[env_id] = {} # at minimum send empty dict
for policy_id, eval_data in to_eval.items():
rnn_in_cols = _to_column_format([t.rnn_state for t in eval_data])
actions, rnn_out_cols, pi_info_cols = eval_results[policy_id]
if len(rnn_in_cols) != len(rnn_out_cols):
raise ValueError("Length of RNN in did not match RNN out, got: "
"{} vs {}".format(rnn_in_cols, rnn_out_cols))
# Add RNN state info
for f_i, column in enumerate(rnn_in_cols):
pi_info_cols["state_in_{}".format(f_i)] = column
for f_i, column in enumerate(rnn_out_cols):
pi_info_cols["state_out_{}".format(f_i)] = column
# Save output rows
actions = _unbatch_tuple_actions(actions)
policy = _get_or_raise(policies, policy_id)
for i, action in enumerate(actions):
env_id = eval_data[i].env_id
agent_id = eval_data[i].agent_id
if clip_actions:
actions_to_send[env_id][agent_id] = clip_action(
action, policy.action_space)
else:
actions_to_send[env_id][agent_id] = action
episode = active_episodes[env_id]
episode._set_rnn_state(agent_id, [c[i] for c in rnn_out_cols])
episode._set_last_pi_info(
agent_id, {k: v[i]
for k, v in pi_info_cols.items()})
if env_id in off_policy_actions and \
agent_id in off_policy_actions[env_id]:
episode._set_last_action(agent_id,
off_policy_actions[env_id][agent_id])
else:
episode._set_last_action(agent_id, action)
return actions_to_send | def function[_process_policy_eval_results, parameter[to_eval, eval_results, active_episodes, active_envs, off_policy_actions, policies, clip_actions]]:
constant[Process the output of policy neural network evaluation.
Records policy evaluation results into the given episode objects and
returns replies to send back to agents in the env.
Returns:
actions_to_send: nested dict of env id -> agent id -> agent replies.
]
variable[actions_to_send] assign[=] call[name[defaultdict], parameter[name[dict]]]
for taget[name[env_id]] in starred[name[active_envs]] begin[:]
call[name[actions_to_send]][name[env_id]] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da18fe920b0>, <ast.Name object at 0x7da18fe900a0>]]] in starred[call[name[to_eval].items, parameter[]]] begin[:]
variable[rnn_in_cols] assign[=] call[name[_to_column_format], parameter[<ast.ListComp object at 0x7da18fe91360>]]
<ast.Tuple object at 0x7da18fe92500> assign[=] call[name[eval_results]][name[policy_id]]
if compare[call[name[len], parameter[name[rnn_in_cols]]] not_equal[!=] call[name[len], parameter[name[rnn_out_cols]]]] begin[:]
<ast.Raise object at 0x7da18fe910f0>
for taget[tuple[[<ast.Name object at 0x7da18fe93760>, <ast.Name object at 0x7da18fe92f20>]]] in starred[call[name[enumerate], parameter[name[rnn_in_cols]]]] begin[:]
call[name[pi_info_cols]][call[constant[state_in_{}].format, parameter[name[f_i]]]] assign[=] name[column]
for taget[tuple[[<ast.Name object at 0x7da18fe912d0>, <ast.Name object at 0x7da18fe92590>]]] in starred[call[name[enumerate], parameter[name[rnn_out_cols]]]] begin[:]
call[name[pi_info_cols]][call[constant[state_out_{}].format, parameter[name[f_i]]]] assign[=] name[column]
variable[actions] assign[=] call[name[_unbatch_tuple_actions], parameter[name[actions]]]
variable[policy] assign[=] call[name[_get_or_raise], parameter[name[policies], name[policy_id]]]
for taget[tuple[[<ast.Name object at 0x7da18fe92890>, <ast.Name object at 0x7da18fe91a50>]]] in starred[call[name[enumerate], parameter[name[actions]]]] begin[:]
variable[env_id] assign[=] call[name[eval_data]][name[i]].env_id
variable[agent_id] assign[=] call[name[eval_data]][name[i]].agent_id
if name[clip_actions] begin[:]
call[call[name[actions_to_send]][name[env_id]]][name[agent_id]] assign[=] call[name[clip_action], parameter[name[action], name[policy].action_space]]
variable[episode] assign[=] call[name[active_episodes]][name[env_id]]
call[name[episode]._set_rnn_state, parameter[name[agent_id], <ast.ListComp object at 0x7da18fe91ff0>]]
call[name[episode]._set_last_pi_info, parameter[name[agent_id], <ast.DictComp object at 0x7da18fe908b0>]]
if <ast.BoolOp object at 0x7da18fe93c70> begin[:]
call[name[episode]._set_last_action, parameter[name[agent_id], call[call[name[off_policy_actions]][name[env_id]]][name[agent_id]]]]
return[name[actions_to_send]] | keyword[def] identifier[_process_policy_eval_results] ( identifier[to_eval] , identifier[eval_results] , identifier[active_episodes] ,
identifier[active_envs] , identifier[off_policy_actions] , identifier[policies] ,
identifier[clip_actions] ):
literal[string]
identifier[actions_to_send] = identifier[defaultdict] ( identifier[dict] )
keyword[for] identifier[env_id] keyword[in] identifier[active_envs] :
identifier[actions_to_send] [ identifier[env_id] ]={}
keyword[for] identifier[policy_id] , identifier[eval_data] keyword[in] identifier[to_eval] . identifier[items] ():
identifier[rnn_in_cols] = identifier[_to_column_format] ([ identifier[t] . identifier[rnn_state] keyword[for] identifier[t] keyword[in] identifier[eval_data] ])
identifier[actions] , identifier[rnn_out_cols] , identifier[pi_info_cols] = identifier[eval_results] [ identifier[policy_id] ]
keyword[if] identifier[len] ( identifier[rnn_in_cols] )!= identifier[len] ( identifier[rnn_out_cols] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[rnn_in_cols] , identifier[rnn_out_cols] ))
keyword[for] identifier[f_i] , identifier[column] keyword[in] identifier[enumerate] ( identifier[rnn_in_cols] ):
identifier[pi_info_cols] [ literal[string] . identifier[format] ( identifier[f_i] )]= identifier[column]
keyword[for] identifier[f_i] , identifier[column] keyword[in] identifier[enumerate] ( identifier[rnn_out_cols] ):
identifier[pi_info_cols] [ literal[string] . identifier[format] ( identifier[f_i] )]= identifier[column]
identifier[actions] = identifier[_unbatch_tuple_actions] ( identifier[actions] )
identifier[policy] = identifier[_get_or_raise] ( identifier[policies] , identifier[policy_id] )
keyword[for] identifier[i] , identifier[action] keyword[in] identifier[enumerate] ( identifier[actions] ):
identifier[env_id] = identifier[eval_data] [ identifier[i] ]. identifier[env_id]
identifier[agent_id] = identifier[eval_data] [ identifier[i] ]. identifier[agent_id]
keyword[if] identifier[clip_actions] :
identifier[actions_to_send] [ identifier[env_id] ][ identifier[agent_id] ]= identifier[clip_action] (
identifier[action] , identifier[policy] . identifier[action_space] )
keyword[else] :
identifier[actions_to_send] [ identifier[env_id] ][ identifier[agent_id] ]= identifier[action]
identifier[episode] = identifier[active_episodes] [ identifier[env_id] ]
identifier[episode] . identifier[_set_rnn_state] ( identifier[agent_id] ,[ identifier[c] [ identifier[i] ] keyword[for] identifier[c] keyword[in] identifier[rnn_out_cols] ])
identifier[episode] . identifier[_set_last_pi_info] (
identifier[agent_id] ,{ identifier[k] : identifier[v] [ identifier[i] ]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[pi_info_cols] . identifier[items] ()})
keyword[if] identifier[env_id] keyword[in] identifier[off_policy_actions] keyword[and] identifier[agent_id] keyword[in] identifier[off_policy_actions] [ identifier[env_id] ]:
identifier[episode] . identifier[_set_last_action] ( identifier[agent_id] ,
identifier[off_policy_actions] [ identifier[env_id] ][ identifier[agent_id] ])
keyword[else] :
identifier[episode] . identifier[_set_last_action] ( identifier[agent_id] , identifier[action] )
keyword[return] identifier[actions_to_send] | def _process_policy_eval_results(to_eval, eval_results, active_episodes, active_envs, off_policy_actions, policies, clip_actions):
"""Process the output of policy neural network evaluation.
Records policy evaluation results into the given episode objects and
returns replies to send back to agents in the env.
Returns:
actions_to_send: nested dict of env id -> agent id -> agent replies.
"""
actions_to_send = defaultdict(dict)
for env_id in active_envs:
actions_to_send[env_id] = {} # at minimum send empty dict # depends on [control=['for'], data=['env_id']]
for (policy_id, eval_data) in to_eval.items():
rnn_in_cols = _to_column_format([t.rnn_state for t in eval_data])
(actions, rnn_out_cols, pi_info_cols) = eval_results[policy_id]
if len(rnn_in_cols) != len(rnn_out_cols):
raise ValueError('Length of RNN in did not match RNN out, got: {} vs {}'.format(rnn_in_cols, rnn_out_cols)) # depends on [control=['if'], data=[]]
# Add RNN state info
for (f_i, column) in enumerate(rnn_in_cols):
pi_info_cols['state_in_{}'.format(f_i)] = column # depends on [control=['for'], data=[]]
for (f_i, column) in enumerate(rnn_out_cols):
pi_info_cols['state_out_{}'.format(f_i)] = column # depends on [control=['for'], data=[]]
# Save output rows
actions = _unbatch_tuple_actions(actions)
policy = _get_or_raise(policies, policy_id)
for (i, action) in enumerate(actions):
env_id = eval_data[i].env_id
agent_id = eval_data[i].agent_id
if clip_actions:
actions_to_send[env_id][agent_id] = clip_action(action, policy.action_space) # depends on [control=['if'], data=[]]
else:
actions_to_send[env_id][agent_id] = action
episode = active_episodes[env_id]
episode._set_rnn_state(agent_id, [c[i] for c in rnn_out_cols])
episode._set_last_pi_info(agent_id, {k: v[i] for (k, v) in pi_info_cols.items()})
if env_id in off_policy_actions and agent_id in off_policy_actions[env_id]:
episode._set_last_action(agent_id, off_policy_actions[env_id][agent_id]) # depends on [control=['if'], data=[]]
else:
episode._set_last_action(agent_id, action) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return actions_to_send |
def create_wav_link(f, metadata):
"""
This will return three MOBs for the given `metadata`: master_mob, source_mob,
tape_mob
The parameter `metadata` is presumed to be a dictionary from a run of ffprobe.
It's not clear for the purposes of Pro Tools that a tape_mob needs to be made,
it'll open the AAF perfectly well without out one.
A lot of this recaps the AMA link code but it's subtly different enough, but it
could all bear to be refactored.
"""
path = metadata['format']['filename']
master_mob = f.create.MasterMob()
source_mob = f.create.SourceMob()
tape_mob = f.create.SourceMob()
edit_rate = metadata['streams'][0]['sample_rate']
length = metadata['streams'][0]['duration_ts']
master_mob.name = os.path.basename(path)
source_mob.name = os.path.basename(path) + " Source MOB"
tape_mob.name = os.path.basename(path) + " Tape MOB"
container_guid = AUID("3711d3cc-62d0-49d7-b0ae-c118101d1a16") # WAVE/AIFF
f.content.mobs.append(master_mob)
f.content.mobs.append(source_mob)
f.content.mobs.append(tape_mob)
tape_mob.descriptor = f.create.TapeDescriptor()
tape_mob.descriptor["VideoSignal"].value = "VideoSignalNull"
# Tape timecode
t = tape_mob.create_empty_sequence_slot(edit_rate, media_kind='timecode')
tc = f.create.Timecode(int(float(edit_rate)+0.5), drop=False)
tc.length = int(length)
if 'tags' not in metadata['format'].keys() or \
'time_reference' not in metadata['format']['tags']:
tc.start = 0
else:
tc.start = metadata['format']['tags']['time_reference'] or 0
t.segment.length = int(length)
t.segment.components.append(tc)
descriptor = create_wav_descriptor(f, source_mob, path, metadata['streams'][0])
source_mob.descriptor = descriptor
for channel_index in range(metadata['streams'][0]['channels']):
tape_slot = tape_mob.create_empty_sequence_slot(edit_rate, media_kind='sound')
tape_slot.segment.length = length
nul_ref = f.create.SourceClip(media_kind='sound')
nul_ref.length = length
tape_slot.segment.components.append(nul_ref)
tape_clip = tape_mob.create_source_clip(tape_slot.slot_id)
tape_clip.length = length
tape_clip.media_kind = 'sound'
src_slot = source_mob.create_empty_sequence_slot(edit_rate, media_kind='sound')
src_slot.segment.length = length
src_slot.segment.components.append(tape_clip)
src_slot['PhysicalTrackNumber'].value = channel_index + 1
clip = source_mob.create_source_clip(src_slot.slot_id)
clip.length = length
clip.media_kind = 'sound'
master_slot = master_mob.create_empty_sequence_slot(edit_rate, media_kind='sound')
master_slot.segment.components.append(clip)
master_slot.segment.length = length
master_slot['PhysicalTrackNumber'].value = channel_index + 1
return master_mob, source_mob, tape_mob | def function[create_wav_link, parameter[f, metadata]]:
constant[
This will return three MOBs for the given `metadata`: master_mob, source_mob,
tape_mob
The parameter `metadata` is presumed to be a dictionary from a run of ffprobe.
It's not clear for the purposes of Pro Tools that a tape_mob needs to be made,
it'll open the AAF perfectly well without out one.
A lot of this recaps the AMA link code but it's subtly different enough, but it
could all bear to be refactored.
]
variable[path] assign[=] call[call[name[metadata]][constant[format]]][constant[filename]]
variable[master_mob] assign[=] call[name[f].create.MasterMob, parameter[]]
variable[source_mob] assign[=] call[name[f].create.SourceMob, parameter[]]
variable[tape_mob] assign[=] call[name[f].create.SourceMob, parameter[]]
variable[edit_rate] assign[=] call[call[call[name[metadata]][constant[streams]]][constant[0]]][constant[sample_rate]]
variable[length] assign[=] call[call[call[name[metadata]][constant[streams]]][constant[0]]][constant[duration_ts]]
name[master_mob].name assign[=] call[name[os].path.basename, parameter[name[path]]]
name[source_mob].name assign[=] binary_operation[call[name[os].path.basename, parameter[name[path]]] + constant[ Source MOB]]
name[tape_mob].name assign[=] binary_operation[call[name[os].path.basename, parameter[name[path]]] + constant[ Tape MOB]]
variable[container_guid] assign[=] call[name[AUID], parameter[constant[3711d3cc-62d0-49d7-b0ae-c118101d1a16]]]
call[name[f].content.mobs.append, parameter[name[master_mob]]]
call[name[f].content.mobs.append, parameter[name[source_mob]]]
call[name[f].content.mobs.append, parameter[name[tape_mob]]]
name[tape_mob].descriptor assign[=] call[name[f].create.TapeDescriptor, parameter[]]
call[name[tape_mob].descriptor][constant[VideoSignal]].value assign[=] constant[VideoSignalNull]
variable[t] assign[=] call[name[tape_mob].create_empty_sequence_slot, parameter[name[edit_rate]]]
variable[tc] assign[=] call[name[f].create.Timecode, parameter[call[name[int], parameter[binary_operation[call[name[float], parameter[name[edit_rate]]] + constant[0.5]]]]]]
name[tc].length assign[=] call[name[int], parameter[name[length]]]
if <ast.BoolOp object at 0x7da204347340> begin[:]
name[tc].start assign[=] constant[0]
name[t].segment.length assign[=] call[name[int], parameter[name[length]]]
call[name[t].segment.components.append, parameter[name[tc]]]
variable[descriptor] assign[=] call[name[create_wav_descriptor], parameter[name[f], name[source_mob], name[path], call[call[name[metadata]][constant[streams]]][constant[0]]]]
name[source_mob].descriptor assign[=] name[descriptor]
for taget[name[channel_index]] in starred[call[name[range], parameter[call[call[call[name[metadata]][constant[streams]]][constant[0]]][constant[channels]]]]] begin[:]
variable[tape_slot] assign[=] call[name[tape_mob].create_empty_sequence_slot, parameter[name[edit_rate]]]
name[tape_slot].segment.length assign[=] name[length]
variable[nul_ref] assign[=] call[name[f].create.SourceClip, parameter[]]
name[nul_ref].length assign[=] name[length]
call[name[tape_slot].segment.components.append, parameter[name[nul_ref]]]
variable[tape_clip] assign[=] call[name[tape_mob].create_source_clip, parameter[name[tape_slot].slot_id]]
name[tape_clip].length assign[=] name[length]
name[tape_clip].media_kind assign[=] constant[sound]
variable[src_slot] assign[=] call[name[source_mob].create_empty_sequence_slot, parameter[name[edit_rate]]]
name[src_slot].segment.length assign[=] name[length]
call[name[src_slot].segment.components.append, parameter[name[tape_clip]]]
call[name[src_slot]][constant[PhysicalTrackNumber]].value assign[=] binary_operation[name[channel_index] + constant[1]]
variable[clip] assign[=] call[name[source_mob].create_source_clip, parameter[name[src_slot].slot_id]]
name[clip].length assign[=] name[length]
name[clip].media_kind assign[=] constant[sound]
variable[master_slot] assign[=] call[name[master_mob].create_empty_sequence_slot, parameter[name[edit_rate]]]
call[name[master_slot].segment.components.append, parameter[name[clip]]]
name[master_slot].segment.length assign[=] name[length]
call[name[master_slot]][constant[PhysicalTrackNumber]].value assign[=] binary_operation[name[channel_index] + constant[1]]
return[tuple[[<ast.Name object at 0x7da2043478e0>, <ast.Name object at 0x7da204346560>, <ast.Name object at 0x7da204344f70>]]] | keyword[def] identifier[create_wav_link] ( identifier[f] , identifier[metadata] ):
literal[string]
identifier[path] = identifier[metadata] [ literal[string] ][ literal[string] ]
identifier[master_mob] = identifier[f] . identifier[create] . identifier[MasterMob] ()
identifier[source_mob] = identifier[f] . identifier[create] . identifier[SourceMob] ()
identifier[tape_mob] = identifier[f] . identifier[create] . identifier[SourceMob] ()
identifier[edit_rate] = identifier[metadata] [ literal[string] ][ literal[int] ][ literal[string] ]
identifier[length] = identifier[metadata] [ literal[string] ][ literal[int] ][ literal[string] ]
identifier[master_mob] . identifier[name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[path] )
identifier[source_mob] . identifier[name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[path] )+ literal[string]
identifier[tape_mob] . identifier[name] = identifier[os] . identifier[path] . identifier[basename] ( identifier[path] )+ literal[string]
identifier[container_guid] = identifier[AUID] ( literal[string] )
identifier[f] . identifier[content] . identifier[mobs] . identifier[append] ( identifier[master_mob] )
identifier[f] . identifier[content] . identifier[mobs] . identifier[append] ( identifier[source_mob] )
identifier[f] . identifier[content] . identifier[mobs] . identifier[append] ( identifier[tape_mob] )
identifier[tape_mob] . identifier[descriptor] = identifier[f] . identifier[create] . identifier[TapeDescriptor] ()
identifier[tape_mob] . identifier[descriptor] [ literal[string] ]. identifier[value] = literal[string]
identifier[t] = identifier[tape_mob] . identifier[create_empty_sequence_slot] ( identifier[edit_rate] , identifier[media_kind] = literal[string] )
identifier[tc] = identifier[f] . identifier[create] . identifier[Timecode] ( identifier[int] ( identifier[float] ( identifier[edit_rate] )+ literal[int] ), identifier[drop] = keyword[False] )
identifier[tc] . identifier[length] = identifier[int] ( identifier[length] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[metadata] [ literal[string] ]. identifier[keys] () keyword[or] literal[string] keyword[not] keyword[in] identifier[metadata] [ literal[string] ][ literal[string] ]:
identifier[tc] . identifier[start] = literal[int]
keyword[else] :
identifier[tc] . identifier[start] = identifier[metadata] [ literal[string] ][ literal[string] ][ literal[string] ] keyword[or] literal[int]
identifier[t] . identifier[segment] . identifier[length] = identifier[int] ( identifier[length] )
identifier[t] . identifier[segment] . identifier[components] . identifier[append] ( identifier[tc] )
identifier[descriptor] = identifier[create_wav_descriptor] ( identifier[f] , identifier[source_mob] , identifier[path] , identifier[metadata] [ literal[string] ][ literal[int] ])
identifier[source_mob] . identifier[descriptor] = identifier[descriptor]
keyword[for] identifier[channel_index] keyword[in] identifier[range] ( identifier[metadata] [ literal[string] ][ literal[int] ][ literal[string] ]):
identifier[tape_slot] = identifier[tape_mob] . identifier[create_empty_sequence_slot] ( identifier[edit_rate] , identifier[media_kind] = literal[string] )
identifier[tape_slot] . identifier[segment] . identifier[length] = identifier[length]
identifier[nul_ref] = identifier[f] . identifier[create] . identifier[SourceClip] ( identifier[media_kind] = literal[string] )
identifier[nul_ref] . identifier[length] = identifier[length]
identifier[tape_slot] . identifier[segment] . identifier[components] . identifier[append] ( identifier[nul_ref] )
identifier[tape_clip] = identifier[tape_mob] . identifier[create_source_clip] ( identifier[tape_slot] . identifier[slot_id] )
identifier[tape_clip] . identifier[length] = identifier[length]
identifier[tape_clip] . identifier[media_kind] = literal[string]
identifier[src_slot] = identifier[source_mob] . identifier[create_empty_sequence_slot] ( identifier[edit_rate] , identifier[media_kind] = literal[string] )
identifier[src_slot] . identifier[segment] . identifier[length] = identifier[length]
identifier[src_slot] . identifier[segment] . identifier[components] . identifier[append] ( identifier[tape_clip] )
identifier[src_slot] [ literal[string] ]. identifier[value] = identifier[channel_index] + literal[int]
identifier[clip] = identifier[source_mob] . identifier[create_source_clip] ( identifier[src_slot] . identifier[slot_id] )
identifier[clip] . identifier[length] = identifier[length]
identifier[clip] . identifier[media_kind] = literal[string]
identifier[master_slot] = identifier[master_mob] . identifier[create_empty_sequence_slot] ( identifier[edit_rate] , identifier[media_kind] = literal[string] )
identifier[master_slot] . identifier[segment] . identifier[components] . identifier[append] ( identifier[clip] )
identifier[master_slot] . identifier[segment] . identifier[length] = identifier[length]
identifier[master_slot] [ literal[string] ]. identifier[value] = identifier[channel_index] + literal[int]
keyword[return] identifier[master_mob] , identifier[source_mob] , identifier[tape_mob] | def create_wav_link(f, metadata):
"""
This will return three MOBs for the given `metadata`: master_mob, source_mob,
tape_mob
The parameter `metadata` is presumed to be a dictionary from a run of ffprobe.
It's not clear for the purposes of Pro Tools that a tape_mob needs to be made,
it'll open the AAF perfectly well without out one.
A lot of this recaps the AMA link code but it's subtly different enough, but it
could all bear to be refactored.
"""
path = metadata['format']['filename']
master_mob = f.create.MasterMob()
source_mob = f.create.SourceMob()
tape_mob = f.create.SourceMob()
edit_rate = metadata['streams'][0]['sample_rate']
length = metadata['streams'][0]['duration_ts']
master_mob.name = os.path.basename(path)
source_mob.name = os.path.basename(path) + ' Source MOB'
tape_mob.name = os.path.basename(path) + ' Tape MOB'
container_guid = AUID('3711d3cc-62d0-49d7-b0ae-c118101d1a16') # WAVE/AIFF
f.content.mobs.append(master_mob)
f.content.mobs.append(source_mob)
f.content.mobs.append(tape_mob)
tape_mob.descriptor = f.create.TapeDescriptor()
tape_mob.descriptor['VideoSignal'].value = 'VideoSignalNull'
# Tape timecode
t = tape_mob.create_empty_sequence_slot(edit_rate, media_kind='timecode')
tc = f.create.Timecode(int(float(edit_rate) + 0.5), drop=False)
tc.length = int(length)
if 'tags' not in metadata['format'].keys() or 'time_reference' not in metadata['format']['tags']:
tc.start = 0 # depends on [control=['if'], data=[]]
else:
tc.start = metadata['format']['tags']['time_reference'] or 0
t.segment.length = int(length)
t.segment.components.append(tc)
descriptor = create_wav_descriptor(f, source_mob, path, metadata['streams'][0])
source_mob.descriptor = descriptor
for channel_index in range(metadata['streams'][0]['channels']):
tape_slot = tape_mob.create_empty_sequence_slot(edit_rate, media_kind='sound')
tape_slot.segment.length = length
nul_ref = f.create.SourceClip(media_kind='sound')
nul_ref.length = length
tape_slot.segment.components.append(nul_ref)
tape_clip = tape_mob.create_source_clip(tape_slot.slot_id)
tape_clip.length = length
tape_clip.media_kind = 'sound'
src_slot = source_mob.create_empty_sequence_slot(edit_rate, media_kind='sound')
src_slot.segment.length = length
src_slot.segment.components.append(tape_clip)
src_slot['PhysicalTrackNumber'].value = channel_index + 1
clip = source_mob.create_source_clip(src_slot.slot_id)
clip.length = length
clip.media_kind = 'sound'
master_slot = master_mob.create_empty_sequence_slot(edit_rate, media_kind='sound')
master_slot.segment.components.append(clip)
master_slot.segment.length = length
master_slot['PhysicalTrackNumber'].value = channel_index + 1 # depends on [control=['for'], data=['channel_index']]
return (master_mob, source_mob, tape_mob) |
def codestr2rst(codestr, lang='python', lineno=None):
"""Return reStructuredText code block from code string"""
if lineno is not None:
if LooseVersion(sphinx.__version__) >= '1.3':
# Sphinx only starts numbering from the first non-empty line.
blank_lines = codestr.count('\n', 0, -len(codestr.lstrip()))
lineno = ' :lineno-start: {0}\n'.format(lineno + blank_lines)
else:
lineno = ' :linenos:\n'
else:
lineno = ''
code_directive = "\n.. code-block:: {0}\n{1}\n".format(lang, lineno)
indented_block = indent(codestr, ' ' * 4)
return code_directive + indented_block | def function[codestr2rst, parameter[codestr, lang, lineno]]:
constant[Return reStructuredText code block from code string]
if compare[name[lineno] is_not constant[None]] begin[:]
if compare[call[name[LooseVersion], parameter[name[sphinx].__version__]] greater_or_equal[>=] constant[1.3]] begin[:]
variable[blank_lines] assign[=] call[name[codestr].count, parameter[constant[
], constant[0], <ast.UnaryOp object at 0x7da18f00d450>]]
variable[lineno] assign[=] call[constant[ :lineno-start: {0}
].format, parameter[binary_operation[name[lineno] + name[blank_lines]]]]
variable[code_directive] assign[=] call[constant[
.. code-block:: {0}
{1}
].format, parameter[name[lang], name[lineno]]]
variable[indented_block] assign[=] call[name[indent], parameter[name[codestr], binary_operation[constant[ ] * constant[4]]]]
return[binary_operation[name[code_directive] + name[indented_block]]] | keyword[def] identifier[codestr2rst] ( identifier[codestr] , identifier[lang] = literal[string] , identifier[lineno] = keyword[None] ):
literal[string]
keyword[if] identifier[lineno] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[LooseVersion] ( identifier[sphinx] . identifier[__version__] )>= literal[string] :
identifier[blank_lines] = identifier[codestr] . identifier[count] ( literal[string] , literal[int] ,- identifier[len] ( identifier[codestr] . identifier[lstrip] ()))
identifier[lineno] = literal[string] . identifier[format] ( identifier[lineno] + identifier[blank_lines] )
keyword[else] :
identifier[lineno] = literal[string]
keyword[else] :
identifier[lineno] = literal[string]
identifier[code_directive] = literal[string] . identifier[format] ( identifier[lang] , identifier[lineno] )
identifier[indented_block] = identifier[indent] ( identifier[codestr] , literal[string] * literal[int] )
keyword[return] identifier[code_directive] + identifier[indented_block] | def codestr2rst(codestr, lang='python', lineno=None):
"""Return reStructuredText code block from code string"""
if lineno is not None:
if LooseVersion(sphinx.__version__) >= '1.3':
# Sphinx only starts numbering from the first non-empty line.
blank_lines = codestr.count('\n', 0, -len(codestr.lstrip()))
lineno = ' :lineno-start: {0}\n'.format(lineno + blank_lines) # depends on [control=['if'], data=[]]
else:
lineno = ' :linenos:\n' # depends on [control=['if'], data=['lineno']]
else:
lineno = ''
code_directive = '\n.. code-block:: {0}\n{1}\n'.format(lang, lineno)
indented_block = indent(codestr, ' ' * 4)
return code_directive + indented_block |
def exit_code(self, code):
"""Set the App exit code.
For TC Exchange Apps there are 3 supported exit codes.
* 0 indicates a normal exit
* 1 indicates a failure during execution
* 3 indicates a partial failure
Args:
code (integer): The exit code value for the app.
"""
if code is not None and code in [0, 1, 3]:
self._exit_code = code
else:
self.log.warning(u'Invalid exit code') | def function[exit_code, parameter[self, code]]:
constant[Set the App exit code.
For TC Exchange Apps there are 3 supported exit codes.
* 0 indicates a normal exit
* 1 indicates a failure during execution
* 3 indicates a partial failure
Args:
code (integer): The exit code value for the app.
]
if <ast.BoolOp object at 0x7da2043458d0> begin[:]
name[self]._exit_code assign[=] name[code] | keyword[def] identifier[exit_code] ( identifier[self] , identifier[code] ):
literal[string]
keyword[if] identifier[code] keyword[is] keyword[not] keyword[None] keyword[and] identifier[code] keyword[in] [ literal[int] , literal[int] , literal[int] ]:
identifier[self] . identifier[_exit_code] = identifier[code]
keyword[else] :
identifier[self] . identifier[log] . identifier[warning] ( literal[string] ) | def exit_code(self, code):
"""Set the App exit code.
For TC Exchange Apps there are 3 supported exit codes.
* 0 indicates a normal exit
* 1 indicates a failure during execution
* 3 indicates a partial failure
Args:
code (integer): The exit code value for the app.
"""
if code is not None and code in [0, 1, 3]:
self._exit_code = code # depends on [control=['if'], data=[]]
else:
self.log.warning(u'Invalid exit code') |
def clean_egginfo(self):
"""Clean .egginfo directory"""
dir_name = os.path.join(self.root, self.get_egginfo_dir())
self._clean_directory(dir_name) | def function[clean_egginfo, parameter[self]]:
constant[Clean .egginfo directory]
variable[dir_name] assign[=] call[name[os].path.join, parameter[name[self].root, call[name[self].get_egginfo_dir, parameter[]]]]
call[name[self]._clean_directory, parameter[name[dir_name]]] | keyword[def] identifier[clean_egginfo] ( identifier[self] ):
literal[string]
identifier[dir_name] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[root] , identifier[self] . identifier[get_egginfo_dir] ())
identifier[self] . identifier[_clean_directory] ( identifier[dir_name] ) | def clean_egginfo(self):
"""Clean .egginfo directory"""
dir_name = os.path.join(self.root, self.get_egginfo_dir())
self._clean_directory(dir_name) |
def set_network(ip, netmask, gateway, host=None,
admin_username=None, admin_password=None):
'''
Configure Network on the CMC or individual iDRAC.
Use ``set_niccfg`` for blade and switch addresses.
CLI Example:
.. code-block:: bash
salt dell dracr.set_network [DRAC IP] [NETMASK] [GATEWAY]
salt dell dracr.set_network 192.168.0.2 255.255.255.0 192.168.0.1
admin_username=root admin_password=calvin host=192.168.1.1
'''
return __execute_cmd('setniccfg -s {0} {1} {2}'.format(
ip, netmask, gateway, host=host, admin_username=admin_username,
admin_password=admin_password
)) | def function[set_network, parameter[ip, netmask, gateway, host, admin_username, admin_password]]:
constant[
Configure Network on the CMC or individual iDRAC.
Use ``set_niccfg`` for blade and switch addresses.
CLI Example:
.. code-block:: bash
salt dell dracr.set_network [DRAC IP] [NETMASK] [GATEWAY]
salt dell dracr.set_network 192.168.0.2 255.255.255.0 192.168.0.1
admin_username=root admin_password=calvin host=192.168.1.1
]
return[call[name[__execute_cmd], parameter[call[constant[setniccfg -s {0} {1} {2}].format, parameter[name[ip], name[netmask], name[gateway]]]]]] | keyword[def] identifier[set_network] ( identifier[ip] , identifier[netmask] , identifier[gateway] , identifier[host] = keyword[None] ,
identifier[admin_username] = keyword[None] , identifier[admin_password] = keyword[None] ):
literal[string]
keyword[return] identifier[__execute_cmd] ( literal[string] . identifier[format] (
identifier[ip] , identifier[netmask] , identifier[gateway] , identifier[host] = identifier[host] , identifier[admin_username] = identifier[admin_username] ,
identifier[admin_password] = identifier[admin_password]
)) | def set_network(ip, netmask, gateway, host=None, admin_username=None, admin_password=None):
"""
Configure Network on the CMC or individual iDRAC.
Use ``set_niccfg`` for blade and switch addresses.
CLI Example:
.. code-block:: bash
salt dell dracr.set_network [DRAC IP] [NETMASK] [GATEWAY]
salt dell dracr.set_network 192.168.0.2 255.255.255.0 192.168.0.1
admin_username=root admin_password=calvin host=192.168.1.1
"""
return __execute_cmd('setniccfg -s {0} {1} {2}'.format(ip, netmask, gateway, host=host, admin_username=admin_username, admin_password=admin_password)) |
def _write_vdr(self, f, cdataType, numElems, numDims, dimSizes, name,
dimVary, recVary, sparse, blockingfactor, compression,
pad, zVar):
'''
Writes a VDR block to the end of the file.
Parameters:
f : file
The open CDF file
cdataType : int
The CDF data type
numElems : int
The number of elements in the variable
numDims : int
The number of dimensions in the variable
dimSizes : int
The size of each dimension
name : str
The name of the variable
dimVary : array of bool
Bool array of size numDims.
True if a dimension is physical, False if a dimension is not physical
recVary : bool
True if each record is unique
sparse : bool
True if using sparse records
blockingfactor: int
No idea
compression : int
The level of compression between 0-9
pad : num
The pad values to insert
zVar : bool
True if this variable is a z variable
Returns:
num : int
The number of the variable
byte_loc : int
The current byte location within the file
'''
if zVar:
block_size = CDF.zVDR_BASE_SIZE64
section_type = CDF.zVDR_
else:
block_size = CDF.rVDR_BASE_SIZE64
section_type = CDF.rVDR_
nextVDR = 0
dataType = cdataType
if dataType == -1:
raise ValueError('Bad data type.')
maxRec = -1
headVXR = 0
tailVXR = 0
flags = 0
if recVary:
flags = CDF._set_bit(flags, 0)
flags = CDF._set_bit(flags, 1)
sRecords = sparse
rfuB = 0
rfuC = -1
rfuF = -1
if zVar:
num = len(self.zvars)
else:
num = len(self.rvars)
if compression > 0:
offsetCPRorSPR = self._write_cpr(f, CDF.GZIP_COMPRESSION,
compression)
else:
offsetCPRorSPR = -1
if blockingfactor is None:
blockingFactor = 1
else:
blockingFactor = blockingfactor
# Increase the block size to account for "zDimSizes" and "DimVarys" fields
if numDims > 0:
if zVar:
block_size = block_size + numDims * 8
else:
block_size = block_size + numDims * 4
# Determine pad value
if pad is not None:
if (dataType == 51 or dataType == 52):
# pad needs to be the correct number of elements
if (len(pad) < numElems):
pad += '\0'*(numElems-len(pad))
elif (len(pad) > numElems):
pad = pad[:numElems]
pad = pad.encode()
else:
dummy, pad = self._convert_data(dataType, numElems, 1, pad)
else:
pad = self._default_pad(dataType, numElems)
f.seek(0, 2)
byte_loc = f.tell()
block_size += len(pad)
vdr = bytearray(block_size)
# if (dataType == 51):
# numElems = len(pad)
vdr[0:8] = struct.pack('>q', block_size)
vdr[8:12] = struct.pack('>i', section_type)
vdr[12:20] = struct.pack('>q', nextVDR)
vdr[20:24] = struct.pack('>i', dataType)
vdr[24:28] = struct.pack('>i', maxRec)
vdr[28:36] = struct.pack('>q', headVXR)
vdr[36:44] = struct.pack('>q', tailVXR)
vdr[44:48] = struct.pack('>i', flags)
vdr[48:52] = struct.pack('>i', sRecords)
vdr[52:56] = struct.pack('>i', rfuB)
vdr[56:60] = struct.pack('>i', rfuC)
vdr[60:64] = struct.pack('>i', rfuF)
vdr[64:68] = struct.pack('>i', numElems)
vdr[68:72] = struct.pack('>i', num)
vdr[72:80] = struct.pack('>q', offsetCPRorSPR)
vdr[80:84] = struct.pack('>i', blockingFactor)
tofill = 256 - len(name)
vdr[84:340] = (name+'\0'*tofill).encode()
if zVar:
vdr[340:344] = struct.pack('>i', numDims)
if (numDims > 0):
for i in range(0, numDims):
vdr[344+i*4:344+(i+1)*4] = struct.pack('>i', dimSizes[i])
ist = 344+numDims*4
for i in range(0, numDims):
vdr[ist+i*4:ist+(i+1)*4] = struct.pack('>i', CDF.VARY)
ist = 344 + 8 * numDims
else:
if (numDims > 0):
for i in range(0, numDims):
if (dimVary[i] == True or dimVary[i] != 0):
vdr[340+i*4:344+i*4] = struct.pack('>i', CDF.VARY)
else:
vdr[340+i*4:344+i*4] = struct.pack('>i', CDF.NOVARY)
ist = 340 + 4 * numDims
vdr[ist:block_size] = pad
f.write(vdr)
# Set variable info
info = []
info.append(name)
info.append(byte_loc)
if zVar:
info.append(numDims)
info.append(dimSizes)
else:
info.append(self.num_rdim)
info.append(self.rdim_sizes)
info.append(dimVary)
# Update the pointers from the CDR/previous VDR
if zVar:
self.zvarsinfo[num] = info
self.zvars.append(name)
if (num > 0):
# VDR's VDRnext
self._update_offset_value(f, self.zvarsinfo[num-1][1]+12, 8,
byte_loc)
# GDR's NzVars
self._update_offset_value(f, self.gdr_head+60, 4, num+1)
else:
self.rvarsinfo[num] = info
self.rvars.append(name)
if (num > 0):
# VDR's VDRnext
self._update_offset_value(f, self.rvarsinfo[num-1][1]+12, 8,
byte_loc)
# GDR's NrVars
self._update_offset_value(f, self.gdr_head+44, 4, num+1)
return num, byte_loc | def function[_write_vdr, parameter[self, f, cdataType, numElems, numDims, dimSizes, name, dimVary, recVary, sparse, blockingfactor, compression, pad, zVar]]:
constant[
Writes a VDR block to the end of the file.
Parameters:
f : file
The open CDF file
cdataType : int
The CDF data type
numElems : int
The number of elements in the variable
numDims : int
The number of dimensions in the variable
dimSizes : int
The size of each dimension
name : str
The name of the variable
dimVary : array of bool
Bool array of size numDims.
True if a dimension is physical, False if a dimension is not physical
recVary : bool
True if each record is unique
sparse : bool
True if using sparse records
blockingfactor: int
No idea
compression : int
The level of compression between 0-9
pad : num
The pad values to insert
zVar : bool
True if this variable is a z variable
Returns:
num : int
The number of the variable
byte_loc : int
The current byte location within the file
]
if name[zVar] begin[:]
variable[block_size] assign[=] name[CDF].zVDR_BASE_SIZE64
variable[section_type] assign[=] name[CDF].zVDR_
variable[nextVDR] assign[=] constant[0]
variable[dataType] assign[=] name[cdataType]
if compare[name[dataType] equal[==] <ast.UnaryOp object at 0x7da1b06ff700>] begin[:]
<ast.Raise object at 0x7da1b06ff6a0>
variable[maxRec] assign[=] <ast.UnaryOp object at 0x7da1b06ff580>
variable[headVXR] assign[=] constant[0]
variable[tailVXR] assign[=] constant[0]
variable[flags] assign[=] constant[0]
if name[recVary] begin[:]
variable[flags] assign[=] call[name[CDF]._set_bit, parameter[name[flags], constant[0]]]
variable[flags] assign[=] call[name[CDF]._set_bit, parameter[name[flags], constant[1]]]
variable[sRecords] assign[=] name[sparse]
variable[rfuB] assign[=] constant[0]
variable[rfuC] assign[=] <ast.UnaryOp object at 0x7da1b06feef0>
variable[rfuF] assign[=] <ast.UnaryOp object at 0x7da1b06fee30>
if name[zVar] begin[:]
variable[num] assign[=] call[name[len], parameter[name[self].zvars]]
if compare[name[compression] greater[>] constant[0]] begin[:]
variable[offsetCPRorSPR] assign[=] call[name[self]._write_cpr, parameter[name[f], name[CDF].GZIP_COMPRESSION, name[compression]]]
if compare[name[blockingfactor] is constant[None]] begin[:]
variable[blockingFactor] assign[=] constant[1]
if compare[name[numDims] greater[>] constant[0]] begin[:]
if name[zVar] begin[:]
variable[block_size] assign[=] binary_operation[name[block_size] + binary_operation[name[numDims] * constant[8]]]
if compare[name[pad] is_not constant[None]] begin[:]
if <ast.BoolOp object at 0x7da1b06fe170> begin[:]
if compare[call[name[len], parameter[name[pad]]] less[<] name[numElems]] begin[:]
<ast.AugAssign object at 0x7da1b06fdf00>
variable[pad] assign[=] call[name[pad].encode, parameter[]]
call[name[f].seek, parameter[constant[0], constant[2]]]
variable[byte_loc] assign[=] call[name[f].tell, parameter[]]
<ast.AugAssign object at 0x7da1b06fd480>
variable[vdr] assign[=] call[name[bytearray], parameter[name[block_size]]]
call[name[vdr]][<ast.Slice object at 0x7da1b06fd210>] assign[=] call[name[struct].pack, parameter[constant[>q], name[block_size]]]
call[name[vdr]][<ast.Slice object at 0x7da1b06fd000>] assign[=] call[name[struct].pack, parameter[constant[>i], name[section_type]]]
call[name[vdr]][<ast.Slice object at 0x7da1b06fcdf0>] assign[=] call[name[struct].pack, parameter[constant[>q], name[nextVDR]]]
call[name[vdr]][<ast.Slice object at 0x7da1b06fcbe0>] assign[=] call[name[struct].pack, parameter[constant[>i], name[dataType]]]
call[name[vdr]][<ast.Slice object at 0x7da1b06fc9d0>] assign[=] call[name[struct].pack, parameter[constant[>i], name[maxRec]]]
call[name[vdr]][<ast.Slice object at 0x7da1b06fc7c0>] assign[=] call[name[struct].pack, parameter[constant[>q], name[headVXR]]]
call[name[vdr]][<ast.Slice object at 0x7da1b06fa2f0>] assign[=] call[name[struct].pack, parameter[constant[>q], name[tailVXR]]]
call[name[vdr]][<ast.Slice object at 0x7da1b06fa0e0>] assign[=] call[name[struct].pack, parameter[constant[>i], name[flags]]]
call[name[vdr]][<ast.Slice object at 0x7da1b06f9ed0>] assign[=] call[name[struct].pack, parameter[constant[>i], name[sRecords]]]
call[name[vdr]][<ast.Slice object at 0x7da1b06f9cc0>] assign[=] call[name[struct].pack, parameter[constant[>i], name[rfuB]]]
call[name[vdr]][<ast.Slice object at 0x7da1b07a1540>] assign[=] call[name[struct].pack, parameter[constant[>i], name[rfuC]]]
call[name[vdr]][<ast.Slice object at 0x7da1b07a0f10>] assign[=] call[name[struct].pack, parameter[constant[>i], name[rfuF]]]
call[name[vdr]][<ast.Slice object at 0x7da1b06a2ec0>] assign[=] call[name[struct].pack, parameter[constant[>i], name[numElems]]]
call[name[vdr]][<ast.Slice object at 0x7da1b06a3640>] assign[=] call[name[struct].pack, parameter[constant[>i], name[num]]]
call[name[vdr]][<ast.Slice object at 0x7da1b06a36a0>] assign[=] call[name[struct].pack, parameter[constant[>q], name[offsetCPRorSPR]]]
call[name[vdr]][<ast.Slice object at 0x7da1b06a3a60>] assign[=] call[name[struct].pack, parameter[constant[>i], name[blockingFactor]]]
variable[tofill] assign[=] binary_operation[constant[256] - call[name[len], parameter[name[name]]]]
call[name[vdr]][<ast.Slice object at 0x7da1b06a3250>] assign[=] call[binary_operation[name[name] + binary_operation[constant[ ] * name[tofill]]].encode, parameter[]]
if name[zVar] begin[:]
call[name[vdr]][<ast.Slice object at 0x7da1b06a0250>] assign[=] call[name[struct].pack, parameter[constant[>i], name[numDims]]]
if compare[name[numDims] greater[>] constant[0]] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], name[numDims]]]] begin[:]
call[name[vdr]][<ast.Slice object at 0x7da1b06a0520>] assign[=] call[name[struct].pack, parameter[constant[>i], call[name[dimSizes]][name[i]]]]
variable[ist] assign[=] binary_operation[constant[344] + binary_operation[name[numDims] * constant[4]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], name[numDims]]]] begin[:]
call[name[vdr]][<ast.Slice object at 0x7da1b06a26b0>] assign[=] call[name[struct].pack, parameter[constant[>i], name[CDF].VARY]]
variable[ist] assign[=] binary_operation[constant[344] + binary_operation[constant[8] * name[numDims]]]
call[name[vdr]][<ast.Slice object at 0x7da1b07f4f40>] assign[=] name[pad]
call[name[f].write, parameter[name[vdr]]]
variable[info] assign[=] list[[]]
call[name[info].append, parameter[name[name]]]
call[name[info].append, parameter[name[byte_loc]]]
if name[zVar] begin[:]
call[name[info].append, parameter[name[numDims]]]
call[name[info].append, parameter[name[dimSizes]]]
call[name[info].append, parameter[name[dimVary]]]
if name[zVar] begin[:]
call[name[self].zvarsinfo][name[num]] assign[=] name[info]
call[name[self].zvars.append, parameter[name[name]]]
if compare[name[num] greater[>] constant[0]] begin[:]
call[name[self]._update_offset_value, parameter[name[f], binary_operation[call[call[name[self].zvarsinfo][binary_operation[name[num] - constant[1]]]][constant[1]] + constant[12]], constant[8], name[byte_loc]]]
call[name[self]._update_offset_value, parameter[name[f], binary_operation[name[self].gdr_head + constant[60]], constant[4], binary_operation[name[num] + constant[1]]]]
return[tuple[[<ast.Name object at 0x7da1b06a2650>, <ast.Name object at 0x7da1b06a1180>]]] | keyword[def] identifier[_write_vdr] ( identifier[self] , identifier[f] , identifier[cdataType] , identifier[numElems] , identifier[numDims] , identifier[dimSizes] , identifier[name] ,
identifier[dimVary] , identifier[recVary] , identifier[sparse] , identifier[blockingfactor] , identifier[compression] ,
identifier[pad] , identifier[zVar] ):
literal[string]
keyword[if] identifier[zVar] :
identifier[block_size] = identifier[CDF] . identifier[zVDR_BASE_SIZE64]
identifier[section_type] = identifier[CDF] . identifier[zVDR_]
keyword[else] :
identifier[block_size] = identifier[CDF] . identifier[rVDR_BASE_SIZE64]
identifier[section_type] = identifier[CDF] . identifier[rVDR_]
identifier[nextVDR] = literal[int]
identifier[dataType] = identifier[cdataType]
keyword[if] identifier[dataType] ==- literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[maxRec] =- literal[int]
identifier[headVXR] = literal[int]
identifier[tailVXR] = literal[int]
identifier[flags] = literal[int]
keyword[if] identifier[recVary] :
identifier[flags] = identifier[CDF] . identifier[_set_bit] ( identifier[flags] , literal[int] )
identifier[flags] = identifier[CDF] . identifier[_set_bit] ( identifier[flags] , literal[int] )
identifier[sRecords] = identifier[sparse]
identifier[rfuB] = literal[int]
identifier[rfuC] =- literal[int]
identifier[rfuF] =- literal[int]
keyword[if] identifier[zVar] :
identifier[num] = identifier[len] ( identifier[self] . identifier[zvars] )
keyword[else] :
identifier[num] = identifier[len] ( identifier[self] . identifier[rvars] )
keyword[if] identifier[compression] > literal[int] :
identifier[offsetCPRorSPR] = identifier[self] . identifier[_write_cpr] ( identifier[f] , identifier[CDF] . identifier[GZIP_COMPRESSION] ,
identifier[compression] )
keyword[else] :
identifier[offsetCPRorSPR] =- literal[int]
keyword[if] identifier[blockingfactor] keyword[is] keyword[None] :
identifier[blockingFactor] = literal[int]
keyword[else] :
identifier[blockingFactor] = identifier[blockingfactor]
keyword[if] identifier[numDims] > literal[int] :
keyword[if] identifier[zVar] :
identifier[block_size] = identifier[block_size] + identifier[numDims] * literal[int]
keyword[else] :
identifier[block_size] = identifier[block_size] + identifier[numDims] * literal[int]
keyword[if] identifier[pad] keyword[is] keyword[not] keyword[None] :
keyword[if] ( identifier[dataType] == literal[int] keyword[or] identifier[dataType] == literal[int] ):
keyword[if] ( identifier[len] ( identifier[pad] )< identifier[numElems] ):
identifier[pad] += literal[string] *( identifier[numElems] - identifier[len] ( identifier[pad] ))
keyword[elif] ( identifier[len] ( identifier[pad] )> identifier[numElems] ):
identifier[pad] = identifier[pad] [: identifier[numElems] ]
identifier[pad] = identifier[pad] . identifier[encode] ()
keyword[else] :
identifier[dummy] , identifier[pad] = identifier[self] . identifier[_convert_data] ( identifier[dataType] , identifier[numElems] , literal[int] , identifier[pad] )
keyword[else] :
identifier[pad] = identifier[self] . identifier[_default_pad] ( identifier[dataType] , identifier[numElems] )
identifier[f] . identifier[seek] ( literal[int] , literal[int] )
identifier[byte_loc] = identifier[f] . identifier[tell] ()
identifier[block_size] += identifier[len] ( identifier[pad] )
identifier[vdr] = identifier[bytearray] ( identifier[block_size] )
identifier[vdr] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[block_size] )
identifier[vdr] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[section_type] )
identifier[vdr] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[nextVDR] )
identifier[vdr] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[dataType] )
identifier[vdr] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[maxRec] )
identifier[vdr] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[headVXR] )
identifier[vdr] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[tailVXR] )
identifier[vdr] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[flags] )
identifier[vdr] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[sRecords] )
identifier[vdr] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[rfuB] )
identifier[vdr] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[rfuC] )
identifier[vdr] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[rfuF] )
identifier[vdr] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[numElems] )
identifier[vdr] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[num] )
identifier[vdr] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[offsetCPRorSPR] )
identifier[vdr] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[blockingFactor] )
identifier[tofill] = literal[int] - identifier[len] ( identifier[name] )
identifier[vdr] [ literal[int] : literal[int] ]=( identifier[name] + literal[string] * identifier[tofill] ). identifier[encode] ()
keyword[if] identifier[zVar] :
identifier[vdr] [ literal[int] : literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[numDims] )
keyword[if] ( identifier[numDims] > literal[int] ):
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[numDims] ):
identifier[vdr] [ literal[int] + identifier[i] * literal[int] : literal[int] +( identifier[i] + literal[int] )* literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[dimSizes] [ identifier[i] ])
identifier[ist] = literal[int] + identifier[numDims] * literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[numDims] ):
identifier[vdr] [ identifier[ist] + identifier[i] * literal[int] : identifier[ist] +( identifier[i] + literal[int] )* literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[CDF] . identifier[VARY] )
identifier[ist] = literal[int] + literal[int] * identifier[numDims]
keyword[else] :
keyword[if] ( identifier[numDims] > literal[int] ):
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[numDims] ):
keyword[if] ( identifier[dimVary] [ identifier[i] ]== keyword[True] keyword[or] identifier[dimVary] [ identifier[i] ]!= literal[int] ):
identifier[vdr] [ literal[int] + identifier[i] * literal[int] : literal[int] + identifier[i] * literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[CDF] . identifier[VARY] )
keyword[else] :
identifier[vdr] [ literal[int] + identifier[i] * literal[int] : literal[int] + identifier[i] * literal[int] ]= identifier[struct] . identifier[pack] ( literal[string] , identifier[CDF] . identifier[NOVARY] )
identifier[ist] = literal[int] + literal[int] * identifier[numDims]
identifier[vdr] [ identifier[ist] : identifier[block_size] ]= identifier[pad]
identifier[f] . identifier[write] ( identifier[vdr] )
identifier[info] =[]
identifier[info] . identifier[append] ( identifier[name] )
identifier[info] . identifier[append] ( identifier[byte_loc] )
keyword[if] identifier[zVar] :
identifier[info] . identifier[append] ( identifier[numDims] )
identifier[info] . identifier[append] ( identifier[dimSizes] )
keyword[else] :
identifier[info] . identifier[append] ( identifier[self] . identifier[num_rdim] )
identifier[info] . identifier[append] ( identifier[self] . identifier[rdim_sizes] )
identifier[info] . identifier[append] ( identifier[dimVary] )
keyword[if] identifier[zVar] :
identifier[self] . identifier[zvarsinfo] [ identifier[num] ]= identifier[info]
identifier[self] . identifier[zvars] . identifier[append] ( identifier[name] )
keyword[if] ( identifier[num] > literal[int] ):
identifier[self] . identifier[_update_offset_value] ( identifier[f] , identifier[self] . identifier[zvarsinfo] [ identifier[num] - literal[int] ][ literal[int] ]+ literal[int] , literal[int] ,
identifier[byte_loc] )
identifier[self] . identifier[_update_offset_value] ( identifier[f] , identifier[self] . identifier[gdr_head] + literal[int] , literal[int] , identifier[num] + literal[int] )
keyword[else] :
identifier[self] . identifier[rvarsinfo] [ identifier[num] ]= identifier[info]
identifier[self] . identifier[rvars] . identifier[append] ( identifier[name] )
keyword[if] ( identifier[num] > literal[int] ):
identifier[self] . identifier[_update_offset_value] ( identifier[f] , identifier[self] . identifier[rvarsinfo] [ identifier[num] - literal[int] ][ literal[int] ]+ literal[int] , literal[int] ,
identifier[byte_loc] )
identifier[self] . identifier[_update_offset_value] ( identifier[f] , identifier[self] . identifier[gdr_head] + literal[int] , literal[int] , identifier[num] + literal[int] )
keyword[return] identifier[num] , identifier[byte_loc] | def _write_vdr(self, f, cdataType, numElems, numDims, dimSizes, name, dimVary, recVary, sparse, blockingfactor, compression, pad, zVar):
"""
Writes a VDR block to the end of the file.
Parameters:
f : file
The open CDF file
cdataType : int
The CDF data type
numElems : int
The number of elements in the variable
numDims : int
The number of dimensions in the variable
dimSizes : int
The size of each dimension
name : str
The name of the variable
dimVary : array of bool
Bool array of size numDims.
True if a dimension is physical, False if a dimension is not physical
recVary : bool
True if each record is unique
sparse : bool
True if using sparse records
blockingfactor: int
No idea
compression : int
The level of compression between 0-9
pad : num
The pad values to insert
zVar : bool
True if this variable is a z variable
Returns:
num : int
The number of the variable
byte_loc : int
The current byte location within the file
"""
if zVar:
block_size = CDF.zVDR_BASE_SIZE64
section_type = CDF.zVDR_ # depends on [control=['if'], data=[]]
else:
block_size = CDF.rVDR_BASE_SIZE64
section_type = CDF.rVDR_
nextVDR = 0
dataType = cdataType
if dataType == -1:
raise ValueError('Bad data type.') # depends on [control=['if'], data=[]]
maxRec = -1
headVXR = 0
tailVXR = 0
flags = 0
if recVary:
flags = CDF._set_bit(flags, 0) # depends on [control=['if'], data=[]]
flags = CDF._set_bit(flags, 1)
sRecords = sparse
rfuB = 0
rfuC = -1
rfuF = -1
if zVar:
num = len(self.zvars) # depends on [control=['if'], data=[]]
else:
num = len(self.rvars)
if compression > 0:
offsetCPRorSPR = self._write_cpr(f, CDF.GZIP_COMPRESSION, compression) # depends on [control=['if'], data=['compression']]
else:
offsetCPRorSPR = -1
if blockingfactor is None:
blockingFactor = 1 # depends on [control=['if'], data=[]]
else:
blockingFactor = blockingfactor
# Increase the block size to account for "zDimSizes" and "DimVarys" fields
if numDims > 0:
if zVar:
block_size = block_size + numDims * 8 # depends on [control=['if'], data=[]]
else:
block_size = block_size + numDims * 4 # depends on [control=['if'], data=['numDims']]
# Determine pad value
if pad is not None:
if dataType == 51 or dataType == 52:
# pad needs to be the correct number of elements
if len(pad) < numElems:
pad += '\x00' * (numElems - len(pad)) # depends on [control=['if'], data=['numElems']]
elif len(pad) > numElems:
pad = pad[:numElems] # depends on [control=['if'], data=['numElems']]
pad = pad.encode() # depends on [control=['if'], data=[]]
else:
(dummy, pad) = self._convert_data(dataType, numElems, 1, pad) # depends on [control=['if'], data=['pad']]
else:
pad = self._default_pad(dataType, numElems)
f.seek(0, 2)
byte_loc = f.tell()
block_size += len(pad)
vdr = bytearray(block_size)
# if (dataType == 51):
# numElems = len(pad)
vdr[0:8] = struct.pack('>q', block_size)
vdr[8:12] = struct.pack('>i', section_type)
vdr[12:20] = struct.pack('>q', nextVDR)
vdr[20:24] = struct.pack('>i', dataType)
vdr[24:28] = struct.pack('>i', maxRec)
vdr[28:36] = struct.pack('>q', headVXR)
vdr[36:44] = struct.pack('>q', tailVXR)
vdr[44:48] = struct.pack('>i', flags)
vdr[48:52] = struct.pack('>i', sRecords)
vdr[52:56] = struct.pack('>i', rfuB)
vdr[56:60] = struct.pack('>i', rfuC)
vdr[60:64] = struct.pack('>i', rfuF)
vdr[64:68] = struct.pack('>i', numElems)
vdr[68:72] = struct.pack('>i', num)
vdr[72:80] = struct.pack('>q', offsetCPRorSPR)
vdr[80:84] = struct.pack('>i', blockingFactor)
tofill = 256 - len(name)
vdr[84:340] = (name + '\x00' * tofill).encode()
if zVar:
vdr[340:344] = struct.pack('>i', numDims)
if numDims > 0:
for i in range(0, numDims):
vdr[344 + i * 4:344 + (i + 1) * 4] = struct.pack('>i', dimSizes[i]) # depends on [control=['for'], data=['i']]
ist = 344 + numDims * 4
for i in range(0, numDims):
vdr[ist + i * 4:ist + (i + 1) * 4] = struct.pack('>i', CDF.VARY) # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=['numDims']]
ist = 344 + 8 * numDims # depends on [control=['if'], data=[]]
else:
if numDims > 0:
for i in range(0, numDims):
if dimVary[i] == True or dimVary[i] != 0:
vdr[340 + i * 4:344 + i * 4] = struct.pack('>i', CDF.VARY) # depends on [control=['if'], data=[]]
else:
vdr[340 + i * 4:344 + i * 4] = struct.pack('>i', CDF.NOVARY) # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=['numDims']]
ist = 340 + 4 * numDims
vdr[ist:block_size] = pad
f.write(vdr)
# Set variable info
info = []
info.append(name)
info.append(byte_loc)
if zVar:
info.append(numDims)
info.append(dimSizes) # depends on [control=['if'], data=[]]
else:
info.append(self.num_rdim)
info.append(self.rdim_sizes)
info.append(dimVary)
# Update the pointers from the CDR/previous VDR
if zVar:
self.zvarsinfo[num] = info
self.zvars.append(name)
if num > 0:
# VDR's VDRnext
self._update_offset_value(f, self.zvarsinfo[num - 1][1] + 12, 8, byte_loc) # depends on [control=['if'], data=['num']]
# GDR's NzVars
self._update_offset_value(f, self.gdr_head + 60, 4, num + 1) # depends on [control=['if'], data=[]]
else:
self.rvarsinfo[num] = info
self.rvars.append(name)
if num > 0:
# VDR's VDRnext
self._update_offset_value(f, self.rvarsinfo[num - 1][1] + 12, 8, byte_loc) # depends on [control=['if'], data=['num']]
# GDR's NrVars
self._update_offset_value(f, self.gdr_head + 44, 4, num + 1)
return (num, byte_loc) |
def fetch(self, url, path, filename):
"""Verify if the file is already downloaded and complete. If they don't
exists or if are not complete, use homura download function to fetch
files. Return a list with the path of the downloaded file and the size
of the remote file.
"""
logger.debug('initializing download in ', url)
remote_file_size = self.get_remote_file_size(url)
if exists(join(path, filename)):
size = getsize(join(path, filename))
if size == remote_file_size:
logger.error('%s already exists on your system' % filename)
print('%s already exists on your system' % filename)
return [join(path, filename), size]
logger.debug('Downloading: %s' % filename)
print('Downloading: %s' % filename)
fetch(url, path)
print('stored at %s' % path)
logger.debug('stored at %s' % path)
return [join(path, filename), remote_file_size] | def function[fetch, parameter[self, url, path, filename]]:
constant[Verify if the file is already downloaded and complete. If they don't
exists or if are not complete, use homura download function to fetch
files. Return a list with the path of the downloaded file and the size
of the remote file.
]
call[name[logger].debug, parameter[constant[initializing download in ], name[url]]]
variable[remote_file_size] assign[=] call[name[self].get_remote_file_size, parameter[name[url]]]
if call[name[exists], parameter[call[name[join], parameter[name[path], name[filename]]]]] begin[:]
variable[size] assign[=] call[name[getsize], parameter[call[name[join], parameter[name[path], name[filename]]]]]
if compare[name[size] equal[==] name[remote_file_size]] begin[:]
call[name[logger].error, parameter[binary_operation[constant[%s already exists on your system] <ast.Mod object at 0x7da2590d6920> name[filename]]]]
call[name[print], parameter[binary_operation[constant[%s already exists on your system] <ast.Mod object at 0x7da2590d6920> name[filename]]]]
return[list[[<ast.Call object at 0x7da18ede6d40>, <ast.Name object at 0x7da18ede5e10>]]]
call[name[logger].debug, parameter[binary_operation[constant[Downloading: %s] <ast.Mod object at 0x7da2590d6920> name[filename]]]]
call[name[print], parameter[binary_operation[constant[Downloading: %s] <ast.Mod object at 0x7da2590d6920> name[filename]]]]
call[name[fetch], parameter[name[url], name[path]]]
call[name[print], parameter[binary_operation[constant[stored at %s] <ast.Mod object at 0x7da2590d6920> name[path]]]]
call[name[logger].debug, parameter[binary_operation[constant[stored at %s] <ast.Mod object at 0x7da2590d6920> name[path]]]]
return[list[[<ast.Call object at 0x7da1b00f5fc0>, <ast.Name object at 0x7da1b00f4880>]]] | keyword[def] identifier[fetch] ( identifier[self] , identifier[url] , identifier[path] , identifier[filename] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] , identifier[url] )
identifier[remote_file_size] = identifier[self] . identifier[get_remote_file_size] ( identifier[url] )
keyword[if] identifier[exists] ( identifier[join] ( identifier[path] , identifier[filename] )):
identifier[size] = identifier[getsize] ( identifier[join] ( identifier[path] , identifier[filename] ))
keyword[if] identifier[size] == identifier[remote_file_size] :
identifier[logger] . identifier[error] ( literal[string] % identifier[filename] )
identifier[print] ( literal[string] % identifier[filename] )
keyword[return] [ identifier[join] ( identifier[path] , identifier[filename] ), identifier[size] ]
identifier[logger] . identifier[debug] ( literal[string] % identifier[filename] )
identifier[print] ( literal[string] % identifier[filename] )
identifier[fetch] ( identifier[url] , identifier[path] )
identifier[print] ( literal[string] % identifier[path] )
identifier[logger] . identifier[debug] ( literal[string] % identifier[path] )
keyword[return] [ identifier[join] ( identifier[path] , identifier[filename] ), identifier[remote_file_size] ] | def fetch(self, url, path, filename):
"""Verify if the file is already downloaded and complete. If they don't
exists or if are not complete, use homura download function to fetch
files. Return a list with the path of the downloaded file and the size
of the remote file.
"""
logger.debug('initializing download in ', url)
remote_file_size = self.get_remote_file_size(url)
if exists(join(path, filename)):
size = getsize(join(path, filename))
if size == remote_file_size:
logger.error('%s already exists on your system' % filename)
print('%s already exists on your system' % filename)
return [join(path, filename), size] # depends on [control=['if'], data=['size']] # depends on [control=['if'], data=[]]
logger.debug('Downloading: %s' % filename)
print('Downloading: %s' % filename)
fetch(url, path)
print('stored at %s' % path)
logger.debug('stored at %s' % path)
return [join(path, filename), remote_file_size] |
def getShocks(self):
'''
Gets new Markov states and permanent and transitory income shocks for this period. Samples
from IncomeDstn for each period-state in the cycle.
Parameters
----------
None
Returns
-------
None
'''
# Get new Markov states for each agent
if self.global_markov:
base_draws = np.ones(self.AgentCount)*drawUniform(1,seed=self.RNG.randint(0,2**31-1))
else:
base_draws = self.RNG.permutation(np.arange(self.AgentCount,dtype=float)/self.AgentCount + 1.0/(2*self.AgentCount))
newborn = self.t_age == 0 # Don't change Markov state for those who were just born (unless global_markov)
MrkvPrev = self.MrkvNow
MrkvNow = np.zeros(self.AgentCount,dtype=int)
for t in range(self.T_cycle):
Cutoffs = np.cumsum(self.MrkvArray[t],axis=1)
for j in range(self.MrkvArray[t].shape[0]):
these = np.logical_and(self.t_cycle == t,MrkvPrev == j)
MrkvNow[these] = np.searchsorted(Cutoffs[j,:],base_draws[these]).astype(int)
if not self.global_markov:
MrkvNow[newborn] = MrkvPrev[newborn]
self.MrkvNow = MrkvNow.astype(int)
# Now get income shocks for each consumer, by cycle-time and discrete state
PermShkNow = np.zeros(self.AgentCount) # Initialize shock arrays
TranShkNow = np.zeros(self.AgentCount)
for t in range(self.T_cycle):
for j in range(self.MrkvArray[t].shape[0]):
these = np.logical_and(t == self.t_cycle, j == MrkvNow)
N = np.sum(these)
if N > 0:
IncomeDstnNow = self.IncomeDstn[t-1][j] # set current income distribution
PermGroFacNow = self.PermGroFac[t-1][j] # and permanent growth factor
Indices = np.arange(IncomeDstnNow[0].size) # just a list of integers
# Get random draws of income shocks from the discrete distribution
EventDraws = drawDiscrete(N,X=Indices,P=IncomeDstnNow[0],exact_match=False,seed=self.RNG.randint(0,2**31-1))
PermShkNow[these] = IncomeDstnNow[1][EventDraws]*PermGroFacNow # permanent "shock" includes expected growth
TranShkNow[these] = IncomeDstnNow[2][EventDraws]
newborn = self.t_age == 0
PermShkNow[newborn] = 1.0
TranShkNow[newborn] = 1.0
self.PermShkNow = PermShkNow
self.TranShkNow = TranShkNow | def function[getShocks, parameter[self]]:
constant[
Gets new Markov states and permanent and transitory income shocks for this period. Samples
from IncomeDstn for each period-state in the cycle.
Parameters
----------
None
Returns
-------
None
]
if name[self].global_markov begin[:]
variable[base_draws] assign[=] binary_operation[call[name[np].ones, parameter[name[self].AgentCount]] * call[name[drawUniform], parameter[constant[1]]]]
variable[newborn] assign[=] compare[name[self].t_age equal[==] constant[0]]
variable[MrkvPrev] assign[=] name[self].MrkvNow
variable[MrkvNow] assign[=] call[name[np].zeros, parameter[name[self].AgentCount]]
for taget[name[t]] in starred[call[name[range], parameter[name[self].T_cycle]]] begin[:]
variable[Cutoffs] assign[=] call[name[np].cumsum, parameter[call[name[self].MrkvArray][name[t]]]]
for taget[name[j]] in starred[call[name[range], parameter[call[call[name[self].MrkvArray][name[t]].shape][constant[0]]]]] begin[:]
variable[these] assign[=] call[name[np].logical_and, parameter[compare[name[self].t_cycle equal[==] name[t]], compare[name[MrkvPrev] equal[==] name[j]]]]
call[name[MrkvNow]][name[these]] assign[=] call[call[name[np].searchsorted, parameter[call[name[Cutoffs]][tuple[[<ast.Name object at 0x7da18ede6470>, <ast.Slice object at 0x7da18ede4a30>]]], call[name[base_draws]][name[these]]]].astype, parameter[name[int]]]
if <ast.UnaryOp object at 0x7da18ede6230> begin[:]
call[name[MrkvNow]][name[newborn]] assign[=] call[name[MrkvPrev]][name[newborn]]
name[self].MrkvNow assign[=] call[name[MrkvNow].astype, parameter[name[int]]]
variable[PermShkNow] assign[=] call[name[np].zeros, parameter[name[self].AgentCount]]
variable[TranShkNow] assign[=] call[name[np].zeros, parameter[name[self].AgentCount]]
for taget[name[t]] in starred[call[name[range], parameter[name[self].T_cycle]]] begin[:]
for taget[name[j]] in starred[call[name[range], parameter[call[call[name[self].MrkvArray][name[t]].shape][constant[0]]]]] begin[:]
variable[these] assign[=] call[name[np].logical_and, parameter[compare[name[t] equal[==] name[self].t_cycle], compare[name[j] equal[==] name[MrkvNow]]]]
variable[N] assign[=] call[name[np].sum, parameter[name[these]]]
if compare[name[N] greater[>] constant[0]] begin[:]
variable[IncomeDstnNow] assign[=] call[call[name[self].IncomeDstn][binary_operation[name[t] - constant[1]]]][name[j]]
variable[PermGroFacNow] assign[=] call[call[name[self].PermGroFac][binary_operation[name[t] - constant[1]]]][name[j]]
variable[Indices] assign[=] call[name[np].arange, parameter[call[name[IncomeDstnNow]][constant[0]].size]]
variable[EventDraws] assign[=] call[name[drawDiscrete], parameter[name[N]]]
call[name[PermShkNow]][name[these]] assign[=] binary_operation[call[call[name[IncomeDstnNow]][constant[1]]][name[EventDraws]] * name[PermGroFacNow]]
call[name[TranShkNow]][name[these]] assign[=] call[call[name[IncomeDstnNow]][constant[2]]][name[EventDraws]]
variable[newborn] assign[=] compare[name[self].t_age equal[==] constant[0]]
call[name[PermShkNow]][name[newborn]] assign[=] constant[1.0]
call[name[TranShkNow]][name[newborn]] assign[=] constant[1.0]
name[self].PermShkNow assign[=] name[PermShkNow]
name[self].TranShkNow assign[=] name[TranShkNow] | keyword[def] identifier[getShocks] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[global_markov] :
identifier[base_draws] = identifier[np] . identifier[ones] ( identifier[self] . identifier[AgentCount] )* identifier[drawUniform] ( literal[int] , identifier[seed] = identifier[self] . identifier[RNG] . identifier[randint] ( literal[int] , literal[int] ** literal[int] - literal[int] ))
keyword[else] :
identifier[base_draws] = identifier[self] . identifier[RNG] . identifier[permutation] ( identifier[np] . identifier[arange] ( identifier[self] . identifier[AgentCount] , identifier[dtype] = identifier[float] )/ identifier[self] . identifier[AgentCount] + literal[int] /( literal[int] * identifier[self] . identifier[AgentCount] ))
identifier[newborn] = identifier[self] . identifier[t_age] == literal[int]
identifier[MrkvPrev] = identifier[self] . identifier[MrkvNow]
identifier[MrkvNow] = identifier[np] . identifier[zeros] ( identifier[self] . identifier[AgentCount] , identifier[dtype] = identifier[int] )
keyword[for] identifier[t] keyword[in] identifier[range] ( identifier[self] . identifier[T_cycle] ):
identifier[Cutoffs] = identifier[np] . identifier[cumsum] ( identifier[self] . identifier[MrkvArray] [ identifier[t] ], identifier[axis] = literal[int] )
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[self] . identifier[MrkvArray] [ identifier[t] ]. identifier[shape] [ literal[int] ]):
identifier[these] = identifier[np] . identifier[logical_and] ( identifier[self] . identifier[t_cycle] == identifier[t] , identifier[MrkvPrev] == identifier[j] )
identifier[MrkvNow] [ identifier[these] ]= identifier[np] . identifier[searchsorted] ( identifier[Cutoffs] [ identifier[j] ,:], identifier[base_draws] [ identifier[these] ]). identifier[astype] ( identifier[int] )
keyword[if] keyword[not] identifier[self] . identifier[global_markov] :
identifier[MrkvNow] [ identifier[newborn] ]= identifier[MrkvPrev] [ identifier[newborn] ]
identifier[self] . identifier[MrkvNow] = identifier[MrkvNow] . identifier[astype] ( identifier[int] )
identifier[PermShkNow] = identifier[np] . identifier[zeros] ( identifier[self] . identifier[AgentCount] )
identifier[TranShkNow] = identifier[np] . identifier[zeros] ( identifier[self] . identifier[AgentCount] )
keyword[for] identifier[t] keyword[in] identifier[range] ( identifier[self] . identifier[T_cycle] ):
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[self] . identifier[MrkvArray] [ identifier[t] ]. identifier[shape] [ literal[int] ]):
identifier[these] = identifier[np] . identifier[logical_and] ( identifier[t] == identifier[self] . identifier[t_cycle] , identifier[j] == identifier[MrkvNow] )
identifier[N] = identifier[np] . identifier[sum] ( identifier[these] )
keyword[if] identifier[N] > literal[int] :
identifier[IncomeDstnNow] = identifier[self] . identifier[IncomeDstn] [ identifier[t] - literal[int] ][ identifier[j] ]
identifier[PermGroFacNow] = identifier[self] . identifier[PermGroFac] [ identifier[t] - literal[int] ][ identifier[j] ]
identifier[Indices] = identifier[np] . identifier[arange] ( identifier[IncomeDstnNow] [ literal[int] ]. identifier[size] )
identifier[EventDraws] = identifier[drawDiscrete] ( identifier[N] , identifier[X] = identifier[Indices] , identifier[P] = identifier[IncomeDstnNow] [ literal[int] ], identifier[exact_match] = keyword[False] , identifier[seed] = identifier[self] . identifier[RNG] . identifier[randint] ( literal[int] , literal[int] ** literal[int] - literal[int] ))
identifier[PermShkNow] [ identifier[these] ]= identifier[IncomeDstnNow] [ literal[int] ][ identifier[EventDraws] ]* identifier[PermGroFacNow]
identifier[TranShkNow] [ identifier[these] ]= identifier[IncomeDstnNow] [ literal[int] ][ identifier[EventDraws] ]
identifier[newborn] = identifier[self] . identifier[t_age] == literal[int]
identifier[PermShkNow] [ identifier[newborn] ]= literal[int]
identifier[TranShkNow] [ identifier[newborn] ]= literal[int]
identifier[self] . identifier[PermShkNow] = identifier[PermShkNow]
identifier[self] . identifier[TranShkNow] = identifier[TranShkNow] | def getShocks(self):
"""
Gets new Markov states and permanent and transitory income shocks for this period. Samples
from IncomeDstn for each period-state in the cycle.
Parameters
----------
None
Returns
-------
None
"""
# Get new Markov states for each agent
if self.global_markov:
base_draws = np.ones(self.AgentCount) * drawUniform(1, seed=self.RNG.randint(0, 2 ** 31 - 1)) # depends on [control=['if'], data=[]]
else:
base_draws = self.RNG.permutation(np.arange(self.AgentCount, dtype=float) / self.AgentCount + 1.0 / (2 * self.AgentCount))
newborn = self.t_age == 0 # Don't change Markov state for those who were just born (unless global_markov)
MrkvPrev = self.MrkvNow
MrkvNow = np.zeros(self.AgentCount, dtype=int)
for t in range(self.T_cycle):
Cutoffs = np.cumsum(self.MrkvArray[t], axis=1)
for j in range(self.MrkvArray[t].shape[0]):
these = np.logical_and(self.t_cycle == t, MrkvPrev == j)
MrkvNow[these] = np.searchsorted(Cutoffs[j, :], base_draws[these]).astype(int) # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['t']]
if not self.global_markov:
MrkvNow[newborn] = MrkvPrev[newborn] # depends on [control=['if'], data=[]]
self.MrkvNow = MrkvNow.astype(int)
# Now get income shocks for each consumer, by cycle-time and discrete state
PermShkNow = np.zeros(self.AgentCount) # Initialize shock arrays
TranShkNow = np.zeros(self.AgentCount)
for t in range(self.T_cycle):
for j in range(self.MrkvArray[t].shape[0]):
these = np.logical_and(t == self.t_cycle, j == MrkvNow)
N = np.sum(these)
if N > 0:
IncomeDstnNow = self.IncomeDstn[t - 1][j] # set current income distribution
PermGroFacNow = self.PermGroFac[t - 1][j] # and permanent growth factor
Indices = np.arange(IncomeDstnNow[0].size) # just a list of integers
# Get random draws of income shocks from the discrete distribution
EventDraws = drawDiscrete(N, X=Indices, P=IncomeDstnNow[0], exact_match=False, seed=self.RNG.randint(0, 2 ** 31 - 1))
PermShkNow[these] = IncomeDstnNow[1][EventDraws] * PermGroFacNow # permanent "shock" includes expected growth
TranShkNow[these] = IncomeDstnNow[2][EventDraws] # depends on [control=['if'], data=['N']] # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['t']]
newborn = self.t_age == 0
PermShkNow[newborn] = 1.0
TranShkNow[newborn] = 1.0
self.PermShkNow = PermShkNow
self.TranShkNow = TranShkNow |
def incidence(boundary):
"""
given an Nxm matrix containing boundary info between simplices,
compute indidence info matrix
not very reusable; should probably not be in this lib
"""
return GroupBy(boundary).split(np.arange(boundary.size) // boundary.shape[1]) | def function[incidence, parameter[boundary]]:
constant[
given an Nxm matrix containing boundary info between simplices,
compute indidence info matrix
not very reusable; should probably not be in this lib
]
return[call[call[name[GroupBy], parameter[name[boundary]]].split, parameter[binary_operation[call[name[np].arange, parameter[name[boundary].size]] <ast.FloorDiv object at 0x7da2590d6bc0> call[name[boundary].shape][constant[1]]]]]] | keyword[def] identifier[incidence] ( identifier[boundary] ):
literal[string]
keyword[return] identifier[GroupBy] ( identifier[boundary] ). identifier[split] ( identifier[np] . identifier[arange] ( identifier[boundary] . identifier[size] )// identifier[boundary] . identifier[shape] [ literal[int] ]) | def incidence(boundary):
"""
given an Nxm matrix containing boundary info between simplices,
compute indidence info matrix
not very reusable; should probably not be in this lib
"""
return GroupBy(boundary).split(np.arange(boundary.size) // boundary.shape[1]) |
def parse(self, data):
"""Parse a 9 bytes packet in the Humidity format and return a
dictionary containing the data extracted. An example of a return value
would be:
.. code-block:: python
{
'id': "0x2EB2",
'packet_length': 8,
'packet_type': 81,
'packet_type_name': 'Humidity sensors',
'sequence_number': 0,
'packet_subtype': 1,
'packet_subtype_name': "LaCrosse TX3",
'humidity': 91,
'humidity_status': "Wet"
'signal_level': 9,
'battery_level': 6,
}
:param data: bytearray to be parsed
:type data: bytearray
:return: Data dictionary containing the parsed values
:rtype: dict
"""
self.validate_packet(data)
id_ = self.dump_hex(data[4:6])
# channel = data[5] TBC
humidity = data[6]
humidity_status = self._extract_humidity_status(data[7])
sensor_specific = {
'id': id_,
# 'channel': channel, TBC
'humidity': humidity,
'humidity_status': humidity_status
}
results = self.parse_header_part(data)
results.update(RfxPacketUtils.parse_signal_and_battery(data[8]))
results.update(sensor_specific)
return results | def function[parse, parameter[self, data]]:
constant[Parse a 9 bytes packet in the Humidity format and return a
dictionary containing the data extracted. An example of a return value
would be:
.. code-block:: python
{
'id': "0x2EB2",
'packet_length': 8,
'packet_type': 81,
'packet_type_name': 'Humidity sensors',
'sequence_number': 0,
'packet_subtype': 1,
'packet_subtype_name': "LaCrosse TX3",
'humidity': 91,
'humidity_status': "Wet"
'signal_level': 9,
'battery_level': 6,
}
:param data: bytearray to be parsed
:type data: bytearray
:return: Data dictionary containing the parsed values
:rtype: dict
]
call[name[self].validate_packet, parameter[name[data]]]
variable[id_] assign[=] call[name[self].dump_hex, parameter[call[name[data]][<ast.Slice object at 0x7da1b1932b00>]]]
variable[humidity] assign[=] call[name[data]][constant[6]]
variable[humidity_status] assign[=] call[name[self]._extract_humidity_status, parameter[call[name[data]][constant[7]]]]
variable[sensor_specific] assign[=] dictionary[[<ast.Constant object at 0x7da1b1931210>, <ast.Constant object at 0x7da1b1931660>, <ast.Constant object at 0x7da1b1931840>], [<ast.Name object at 0x7da1b1931600>, <ast.Name object at 0x7da1b19322f0>, <ast.Name object at 0x7da1b1931750>]]
variable[results] assign[=] call[name[self].parse_header_part, parameter[name[data]]]
call[name[results].update, parameter[call[name[RfxPacketUtils].parse_signal_and_battery, parameter[call[name[data]][constant[8]]]]]]
call[name[results].update, parameter[name[sensor_specific]]]
return[name[results]] | keyword[def] identifier[parse] ( identifier[self] , identifier[data] ):
literal[string]
identifier[self] . identifier[validate_packet] ( identifier[data] )
identifier[id_] = identifier[self] . identifier[dump_hex] ( identifier[data] [ literal[int] : literal[int] ])
identifier[humidity] = identifier[data] [ literal[int] ]
identifier[humidity_status] = identifier[self] . identifier[_extract_humidity_status] ( identifier[data] [ literal[int] ])
identifier[sensor_specific] ={
literal[string] : identifier[id_] ,
literal[string] : identifier[humidity] ,
literal[string] : identifier[humidity_status]
}
identifier[results] = identifier[self] . identifier[parse_header_part] ( identifier[data] )
identifier[results] . identifier[update] ( identifier[RfxPacketUtils] . identifier[parse_signal_and_battery] ( identifier[data] [ literal[int] ]))
identifier[results] . identifier[update] ( identifier[sensor_specific] )
keyword[return] identifier[results] | def parse(self, data):
"""Parse a 9 bytes packet in the Humidity format and return a
dictionary containing the data extracted. An example of a return value
would be:
.. code-block:: python
{
'id': "0x2EB2",
'packet_length': 8,
'packet_type': 81,
'packet_type_name': 'Humidity sensors',
'sequence_number': 0,
'packet_subtype': 1,
'packet_subtype_name': "LaCrosse TX3",
'humidity': 91,
'humidity_status': "Wet"
'signal_level': 9,
'battery_level': 6,
}
:param data: bytearray to be parsed
:type data: bytearray
:return: Data dictionary containing the parsed values
:rtype: dict
"""
self.validate_packet(data)
id_ = self.dump_hex(data[4:6])
# channel = data[5] TBC
humidity = data[6]
humidity_status = self._extract_humidity_status(data[7])
# 'channel': channel, TBC
sensor_specific = {'id': id_, 'humidity': humidity, 'humidity_status': humidity_status}
results = self.parse_header_part(data)
results.update(RfxPacketUtils.parse_signal_and_battery(data[8]))
results.update(sensor_specific)
return results |
def add_cache_bypass(url):
"""
Adds the current time to the querystring of the URL to force a
cache reload. Used for when a form post redirects back to a
page that should display updated content, such as new comments or
ratings.
"""
if not cache_installed():
return url
hash_str = ""
if "#" in url:
url, hash_str = url.split("#", 1)
hash_str = "#" + hash_str
url += "?" if "?" not in url else "&"
return url + "t=" + str(time()).replace(".", "") + hash_str | def function[add_cache_bypass, parameter[url]]:
constant[
Adds the current time to the querystring of the URL to force a
cache reload. Used for when a form post redirects back to a
page that should display updated content, such as new comments or
ratings.
]
if <ast.UnaryOp object at 0x7da20c6a8790> begin[:]
return[name[url]]
variable[hash_str] assign[=] constant[]
if compare[constant[#] in name[url]] begin[:]
<ast.Tuple object at 0x7da20c6a8250> assign[=] call[name[url].split, parameter[constant[#], constant[1]]]
variable[hash_str] assign[=] binary_operation[constant[#] + name[hash_str]]
<ast.AugAssign object at 0x7da20c6a8490>
return[binary_operation[binary_operation[binary_operation[name[url] + constant[t=]] + call[call[name[str], parameter[call[name[time], parameter[]]]].replace, parameter[constant[.], constant[]]]] + name[hash_str]]] | keyword[def] identifier[add_cache_bypass] ( identifier[url] ):
literal[string]
keyword[if] keyword[not] identifier[cache_installed] ():
keyword[return] identifier[url]
identifier[hash_str] = literal[string]
keyword[if] literal[string] keyword[in] identifier[url] :
identifier[url] , identifier[hash_str] = identifier[url] . identifier[split] ( literal[string] , literal[int] )
identifier[hash_str] = literal[string] + identifier[hash_str]
identifier[url] += literal[string] keyword[if] literal[string] keyword[not] keyword[in] identifier[url] keyword[else] literal[string]
keyword[return] identifier[url] + literal[string] + identifier[str] ( identifier[time] ()). identifier[replace] ( literal[string] , literal[string] )+ identifier[hash_str] | def add_cache_bypass(url):
"""
Adds the current time to the querystring of the URL to force a
cache reload. Used for when a form post redirects back to a
page that should display updated content, such as new comments or
ratings.
"""
if not cache_installed():
return url # depends on [control=['if'], data=[]]
hash_str = ''
if '#' in url:
(url, hash_str) = url.split('#', 1)
hash_str = '#' + hash_str # depends on [control=['if'], data=['url']]
url += '?' if '?' not in url else '&'
return url + 't=' + str(time()).replace('.', '') + hash_str |
def build_config(config : Dict[str, Any]) -> Dict[str, str]:
"""Will build the actual config for Jinja2, based on SDK config.
"""
result = config.copy()
# Manage the classifier stable/beta
is_stable = result.pop("is_stable", False)
if is_stable:
result["classifier"] = "Development Status :: 5 - Production/Stable"
else:
result["classifier"] = "Development Status :: 4 - Beta"
# Manage the nspkg
package_name = result["package_name"]
result["package_nspkg"] = result.pop(
"package_nspkg",
package_name[:package_name.rindex('-')]+"-nspkg"
)
# ARM?
result['is_arm'] = result.pop("is_arm", True)
# Do I need msrestazure for this package?
result['need_msrestazure'] = result.pop("need_msrestazure", True)
# Pre-compute some Jinja variable that are complicated to do inside the templates
package_parts = result["package_nspkg"][:-len('-nspkg')].split('-')
result['nspkg_names'] = [
".".join(package_parts[:i+1])
for i in range(len(package_parts))
]
result['init_names'] = [
"/".join(package_parts[:i+1])+"/__init__.py"
for i in range(len(package_parts))
]
# Return result
return result | def function[build_config, parameter[config]]:
constant[Will build the actual config for Jinja2, based on SDK config.
]
variable[result] assign[=] call[name[config].copy, parameter[]]
variable[is_stable] assign[=] call[name[result].pop, parameter[constant[is_stable], constant[False]]]
if name[is_stable] begin[:]
call[name[result]][constant[classifier]] assign[=] constant[Development Status :: 5 - Production/Stable]
variable[package_name] assign[=] call[name[result]][constant[package_name]]
call[name[result]][constant[package_nspkg]] assign[=] call[name[result].pop, parameter[constant[package_nspkg], binary_operation[call[name[package_name]][<ast.Slice object at 0x7da18f58c820>] + constant[-nspkg]]]]
call[name[result]][constant[is_arm]] assign[=] call[name[result].pop, parameter[constant[is_arm], constant[True]]]
call[name[result]][constant[need_msrestazure]] assign[=] call[name[result].pop, parameter[constant[need_msrestazure], constant[True]]]
variable[package_parts] assign[=] call[call[call[name[result]][constant[package_nspkg]]][<ast.Slice object at 0x7da18f58cfd0>].split, parameter[constant[-]]]
call[name[result]][constant[nspkg_names]] assign[=] <ast.ListComp object at 0x7da18f58f040>
call[name[result]][constant[init_names]] assign[=] <ast.ListComp object at 0x7da20c6e6a10>
return[name[result]] | keyword[def] identifier[build_config] ( identifier[config] : identifier[Dict] [ identifier[str] , identifier[Any] ])-> identifier[Dict] [ identifier[str] , identifier[str] ]:
literal[string]
identifier[result] = identifier[config] . identifier[copy] ()
identifier[is_stable] = identifier[result] . identifier[pop] ( literal[string] , keyword[False] )
keyword[if] identifier[is_stable] :
identifier[result] [ literal[string] ]= literal[string]
keyword[else] :
identifier[result] [ literal[string] ]= literal[string]
identifier[package_name] = identifier[result] [ literal[string] ]
identifier[result] [ literal[string] ]= identifier[result] . identifier[pop] (
literal[string] ,
identifier[package_name] [: identifier[package_name] . identifier[rindex] ( literal[string] )]+ literal[string]
)
identifier[result] [ literal[string] ]= identifier[result] . identifier[pop] ( literal[string] , keyword[True] )
identifier[result] [ literal[string] ]= identifier[result] . identifier[pop] ( literal[string] , keyword[True] )
identifier[package_parts] = identifier[result] [ literal[string] ][:- identifier[len] ( literal[string] )]. identifier[split] ( literal[string] )
identifier[result] [ literal[string] ]=[
literal[string] . identifier[join] ( identifier[package_parts] [: identifier[i] + literal[int] ])
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[package_parts] ))
]
identifier[result] [ literal[string] ]=[
literal[string] . identifier[join] ( identifier[package_parts] [: identifier[i] + literal[int] ])+ literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[package_parts] ))
]
keyword[return] identifier[result] | def build_config(config: Dict[str, Any]) -> Dict[str, str]:
"""Will build the actual config for Jinja2, based on SDK config.
"""
result = config.copy()
# Manage the classifier stable/beta
is_stable = result.pop('is_stable', False)
if is_stable:
result['classifier'] = 'Development Status :: 5 - Production/Stable' # depends on [control=['if'], data=[]]
else:
result['classifier'] = 'Development Status :: 4 - Beta'
# Manage the nspkg
package_name = result['package_name']
result['package_nspkg'] = result.pop('package_nspkg', package_name[:package_name.rindex('-')] + '-nspkg')
# ARM?
result['is_arm'] = result.pop('is_arm', True)
# Do I need msrestazure for this package?
result['need_msrestazure'] = result.pop('need_msrestazure', True)
# Pre-compute some Jinja variable that are complicated to do inside the templates
package_parts = result['package_nspkg'][:-len('-nspkg')].split('-')
result['nspkg_names'] = ['.'.join(package_parts[:i + 1]) for i in range(len(package_parts))]
result['init_names'] = ['/'.join(package_parts[:i + 1]) + '/__init__.py' for i in range(len(package_parts))]
# Return result
return result |
def read_tgf(filename):
"""Reads a file in Trivial Graph Format."""
g = Graph()
with open(filename) as file:
states = {}
# Nodes
for line in file:
line = line.strip()
if line == "":
continue
elif line == "#":
break
i, q = line.split(None, 1)
q, attrs = syntax.string_to_state(q)
states[i] = q
g.add_node(q, attrs)
# Edges
for line in file:
line = line.strip()
if line == "":
continue
i, j, t = line.split(None, 2)
q, r = states[i], states[j]
t = syntax.string_to_transition(t)
g.add_edge(q, r, {'label':t})
return from_graph(g) | def function[read_tgf, parameter[filename]]:
constant[Reads a file in Trivial Graph Format.]
variable[g] assign[=] call[name[Graph], parameter[]]
with call[name[open], parameter[name[filename]]] begin[:]
variable[states] assign[=] dictionary[[], []]
for taget[name[line]] in starred[name[file]] begin[:]
variable[line] assign[=] call[name[line].strip, parameter[]]
if compare[name[line] equal[==] constant[]] begin[:]
continue
<ast.Tuple object at 0x7da20c795840> assign[=] call[name[line].split, parameter[constant[None], constant[1]]]
<ast.Tuple object at 0x7da20c794370> assign[=] call[name[syntax].string_to_state, parameter[name[q]]]
call[name[states]][name[i]] assign[=] name[q]
call[name[g].add_node, parameter[name[q], name[attrs]]]
for taget[name[line]] in starred[name[file]] begin[:]
variable[line] assign[=] call[name[line].strip, parameter[]]
if compare[name[line] equal[==] constant[]] begin[:]
continue
<ast.Tuple object at 0x7da20c796080> assign[=] call[name[line].split, parameter[constant[None], constant[2]]]
<ast.Tuple object at 0x7da2049619f0> assign[=] tuple[[<ast.Subscript object at 0x7da204961600>, <ast.Subscript object at 0x7da204961360>]]
variable[t] assign[=] call[name[syntax].string_to_transition, parameter[name[t]]]
call[name[g].add_edge, parameter[name[q], name[r], dictionary[[<ast.Constant object at 0x7da204961e10>], [<ast.Name object at 0x7da204961f30>]]]]
return[call[name[from_graph], parameter[name[g]]]] | keyword[def] identifier[read_tgf] ( identifier[filename] ):
literal[string]
identifier[g] = identifier[Graph] ()
keyword[with] identifier[open] ( identifier[filename] ) keyword[as] identifier[file] :
identifier[states] ={}
keyword[for] identifier[line] keyword[in] identifier[file] :
identifier[line] = identifier[line] . identifier[strip] ()
keyword[if] identifier[line] == literal[string] :
keyword[continue]
keyword[elif] identifier[line] == literal[string] :
keyword[break]
identifier[i] , identifier[q] = identifier[line] . identifier[split] ( keyword[None] , literal[int] )
identifier[q] , identifier[attrs] = identifier[syntax] . identifier[string_to_state] ( identifier[q] )
identifier[states] [ identifier[i] ]= identifier[q]
identifier[g] . identifier[add_node] ( identifier[q] , identifier[attrs] )
keyword[for] identifier[line] keyword[in] identifier[file] :
identifier[line] = identifier[line] . identifier[strip] ()
keyword[if] identifier[line] == literal[string] :
keyword[continue]
identifier[i] , identifier[j] , identifier[t] = identifier[line] . identifier[split] ( keyword[None] , literal[int] )
identifier[q] , identifier[r] = identifier[states] [ identifier[i] ], identifier[states] [ identifier[j] ]
identifier[t] = identifier[syntax] . identifier[string_to_transition] ( identifier[t] )
identifier[g] . identifier[add_edge] ( identifier[q] , identifier[r] ,{ literal[string] : identifier[t] })
keyword[return] identifier[from_graph] ( identifier[g] ) | def read_tgf(filename):
"""Reads a file in Trivial Graph Format."""
g = Graph()
with open(filename) as file:
states = {}
# Nodes
for line in file:
line = line.strip()
if line == '':
continue # depends on [control=['if'], data=[]]
elif line == '#':
break # depends on [control=['if'], data=[]]
(i, q) = line.split(None, 1)
(q, attrs) = syntax.string_to_state(q)
states[i] = q
g.add_node(q, attrs) # depends on [control=['for'], data=['line']]
# Edges
for line in file:
line = line.strip()
if line == '':
continue # depends on [control=['if'], data=[]]
(i, j, t) = line.split(None, 2)
(q, r) = (states[i], states[j])
t = syntax.string_to_transition(t)
g.add_edge(q, r, {'label': t}) # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['file']]
return from_graph(g) |
def _create_chrome_options(self):
"""Create and configure a chrome options object
:returns: chrome options object
"""
# Create Chrome options
options = webdriver.ChromeOptions()
if self.config.getboolean_optional('Driver', 'headless'):
self.logger.debug("Running Chrome in headless mode")
options.add_argument('--headless')
if os.name == 'nt': # Temporarily needed if running on Windows.
options.add_argument('--disable-gpu')
# Add Chrome preferences, mobile emulation options and chrome arguments
self._add_chrome_options(options, 'prefs')
self._add_chrome_options(options, 'mobileEmulation')
self._add_chrome_arguments(options)
return options | def function[_create_chrome_options, parameter[self]]:
constant[Create and configure a chrome options object
:returns: chrome options object
]
variable[options] assign[=] call[name[webdriver].ChromeOptions, parameter[]]
if call[name[self].config.getboolean_optional, parameter[constant[Driver], constant[headless]]] begin[:]
call[name[self].logger.debug, parameter[constant[Running Chrome in headless mode]]]
call[name[options].add_argument, parameter[constant[--headless]]]
if compare[name[os].name equal[==] constant[nt]] begin[:]
call[name[options].add_argument, parameter[constant[--disable-gpu]]]
call[name[self]._add_chrome_options, parameter[name[options], constant[prefs]]]
call[name[self]._add_chrome_options, parameter[name[options], constant[mobileEmulation]]]
call[name[self]._add_chrome_arguments, parameter[name[options]]]
return[name[options]] | keyword[def] identifier[_create_chrome_options] ( identifier[self] ):
literal[string]
identifier[options] = identifier[webdriver] . identifier[ChromeOptions] ()
keyword[if] identifier[self] . identifier[config] . identifier[getboolean_optional] ( literal[string] , literal[string] ):
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] )
identifier[options] . identifier[add_argument] ( literal[string] )
keyword[if] identifier[os] . identifier[name] == literal[string] :
identifier[options] . identifier[add_argument] ( literal[string] )
identifier[self] . identifier[_add_chrome_options] ( identifier[options] , literal[string] )
identifier[self] . identifier[_add_chrome_options] ( identifier[options] , literal[string] )
identifier[self] . identifier[_add_chrome_arguments] ( identifier[options] )
keyword[return] identifier[options] | def _create_chrome_options(self):
"""Create and configure a chrome options object
:returns: chrome options object
"""
# Create Chrome options
options = webdriver.ChromeOptions()
if self.config.getboolean_optional('Driver', 'headless'):
self.logger.debug('Running Chrome in headless mode')
options.add_argument('--headless')
if os.name == 'nt': # Temporarily needed if running on Windows.
options.add_argument('--disable-gpu') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Add Chrome preferences, mobile emulation options and chrome arguments
self._add_chrome_options(options, 'prefs')
self._add_chrome_options(options, 'mobileEmulation')
self._add_chrome_arguments(options)
return options |
def cli(inargs=None):
"""
Commandline interface for receiving stem files
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'--version', '-V',
action='version',
version='%%(prog)s %s' % __version__
)
parser.add_argument(
'filename',
metavar="filename",
help="Input STEM file"
)
parser.add_argument(
'--id',
metavar='id',
type=int,
nargs='+',
help="A list of stem_ids"
)
parser.add_argument(
'-s',
type=float,
nargs='?',
help="start offset in seconds"
)
parser.add_argument(
'-t',
type=float,
nargs='?',
help="read duration"
)
parser.add_argument(
'outdir',
metavar='outdir',
nargs='?',
help="Output folder"
)
args = parser.parse_args(inargs)
stem2wav(args.filename, args.outdir, args.id, args.s, args.t) | def function[cli, parameter[inargs]]:
constant[
Commandline interface for receiving stem files
]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[--version], constant[-V]]]
call[name[parser].add_argument, parameter[constant[filename]]]
call[name[parser].add_argument, parameter[constant[--id]]]
call[name[parser].add_argument, parameter[constant[-s]]]
call[name[parser].add_argument, parameter[constant[-t]]]
call[name[parser].add_argument, parameter[constant[outdir]]]
variable[args] assign[=] call[name[parser].parse_args, parameter[name[inargs]]]
call[name[stem2wav], parameter[name[args].filename, name[args].outdir, name[args].id, name[args].s, name[args].t]] | keyword[def] identifier[cli] ( identifier[inargs] = keyword[None] ):
literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ()
identifier[parser] . identifier[add_argument] (
literal[string] , literal[string] ,
identifier[action] = literal[string] ,
identifier[version] = literal[string] % identifier[__version__]
)
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[metavar] = literal[string] ,
identifier[help] = literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[metavar] = literal[string] ,
identifier[type] = identifier[int] ,
identifier[nargs] = literal[string] ,
identifier[help] = literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[type] = identifier[float] ,
identifier[nargs] = literal[string] ,
identifier[help] = literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[type] = identifier[float] ,
identifier[nargs] = literal[string] ,
identifier[help] = literal[string]
)
identifier[parser] . identifier[add_argument] (
literal[string] ,
identifier[metavar] = literal[string] ,
identifier[nargs] = literal[string] ,
identifier[help] = literal[string]
)
identifier[args] = identifier[parser] . identifier[parse_args] ( identifier[inargs] )
identifier[stem2wav] ( identifier[args] . identifier[filename] , identifier[args] . identifier[outdir] , identifier[args] . identifier[id] , identifier[args] . identifier[s] , identifier[args] . identifier[t] ) | def cli(inargs=None):
"""
Commandline interface for receiving stem files
"""
parser = argparse.ArgumentParser()
parser.add_argument('--version', '-V', action='version', version='%%(prog)s %s' % __version__)
parser.add_argument('filename', metavar='filename', help='Input STEM file')
parser.add_argument('--id', metavar='id', type=int, nargs='+', help='A list of stem_ids')
parser.add_argument('-s', type=float, nargs='?', help='start offset in seconds')
parser.add_argument('-t', type=float, nargs='?', help='read duration')
parser.add_argument('outdir', metavar='outdir', nargs='?', help='Output folder')
args = parser.parse_args(inargs)
stem2wav(args.filename, args.outdir, args.id, args.s, args.t) |
def start_replication(mysql_settings,
binlog_pos_memory=(None, 2),
**kwargs):
""" Start replication on server specified by *mysql_settings*
Args:
mysql_settings (dict): mysql settings that is used to connect to
mysql via pymysql
binlog_pos_memory (_bpm.BaseBinlogPosMemory):
Binlog Position Memory, it should be an instance of subclass of
:py:class:`_bpm.BaseBinlogPosMemory`.
If a tuple (str, float) is passed, it will be initialize parameters
for default :py:class:`_bpm.FileBasedBinlogPosMemory`. It the file-
name is None, it will be *`cwd`\mysqlbinlog2blinker.binlog.pos*
**kwargs: any arguments that are accepted by
:py:class:`pymysqlreplication.BinLogStreamReader`'s constructor
"""
if not isinstance(binlog_pos_memory, _bpm.BaseBinlogPosMemory):
if not isinstance(binlog_pos_memory, (tuple, list)):
raise ValueError('Invalid binlog position memory: %s'
% binlog_pos_memory)
binlog_pos_memory = _bpm.FileBasedBinlogPosMemory(*binlog_pos_memory)
mysql_settings.setdefault('connect_timeout', 5)
kwargs.setdefault('blocking', True)
kwargs.setdefault('resume_stream', True)
with binlog_pos_memory:
kwargs.setdefault('log_file', binlog_pos_memory.log_file)
kwargs.setdefault('log_pos', binlog_pos_memory.log_pos)
_logger.info('Start replication from %s with:\n%s'
% (mysql_settings, kwargs))
start_publishing(mysql_settings, **kwargs) | def function[start_replication, parameter[mysql_settings, binlog_pos_memory]]:
constant[ Start replication on server specified by *mysql_settings*
Args:
mysql_settings (dict): mysql settings that is used to connect to
mysql via pymysql
binlog_pos_memory (_bpm.BaseBinlogPosMemory):
Binlog Position Memory, it should be an instance of subclass of
:py:class:`_bpm.BaseBinlogPosMemory`.
If a tuple (str, float) is passed, it will be initialize parameters
for default :py:class:`_bpm.FileBasedBinlogPosMemory`. It the file-
name is None, it will be *`cwd`\mysqlbinlog2blinker.binlog.pos*
**kwargs: any arguments that are accepted by
:py:class:`pymysqlreplication.BinLogStreamReader`'s constructor
]
if <ast.UnaryOp object at 0x7da1b26611e0> begin[:]
if <ast.UnaryOp object at 0x7da1b26e0640> begin[:]
<ast.Raise object at 0x7da1b26e2950>
variable[binlog_pos_memory] assign[=] call[name[_bpm].FileBasedBinlogPosMemory, parameter[<ast.Starred object at 0x7da1b26e3190>]]
call[name[mysql_settings].setdefault, parameter[constant[connect_timeout], constant[5]]]
call[name[kwargs].setdefault, parameter[constant[blocking], constant[True]]]
call[name[kwargs].setdefault, parameter[constant[resume_stream], constant[True]]]
with name[binlog_pos_memory] begin[:]
call[name[kwargs].setdefault, parameter[constant[log_file], name[binlog_pos_memory].log_file]]
call[name[kwargs].setdefault, parameter[constant[log_pos], name[binlog_pos_memory].log_pos]]
call[name[_logger].info, parameter[binary_operation[constant[Start replication from %s with:
%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b26ee920>, <ast.Name object at 0x7da1b26edcc0>]]]]]
call[name[start_publishing], parameter[name[mysql_settings]]] | keyword[def] identifier[start_replication] ( identifier[mysql_settings] ,
identifier[binlog_pos_memory] =( keyword[None] , literal[int] ),
** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[binlog_pos_memory] , identifier[_bpm] . identifier[BaseBinlogPosMemory] ):
keyword[if] keyword[not] identifier[isinstance] ( identifier[binlog_pos_memory] ,( identifier[tuple] , identifier[list] )):
keyword[raise] identifier[ValueError] ( literal[string]
% identifier[binlog_pos_memory] )
identifier[binlog_pos_memory] = identifier[_bpm] . identifier[FileBasedBinlogPosMemory] (* identifier[binlog_pos_memory] )
identifier[mysql_settings] . identifier[setdefault] ( literal[string] , literal[int] )
identifier[kwargs] . identifier[setdefault] ( literal[string] , keyword[True] )
identifier[kwargs] . identifier[setdefault] ( literal[string] , keyword[True] )
keyword[with] identifier[binlog_pos_memory] :
identifier[kwargs] . identifier[setdefault] ( literal[string] , identifier[binlog_pos_memory] . identifier[log_file] )
identifier[kwargs] . identifier[setdefault] ( literal[string] , identifier[binlog_pos_memory] . identifier[log_pos] )
identifier[_logger] . identifier[info] ( literal[string]
%( identifier[mysql_settings] , identifier[kwargs] ))
identifier[start_publishing] ( identifier[mysql_settings] ,** identifier[kwargs] ) | def start_replication(mysql_settings, binlog_pos_memory=(None, 2), **kwargs):
""" Start replication on server specified by *mysql_settings*
Args:
mysql_settings (dict): mysql settings that is used to connect to
mysql via pymysql
binlog_pos_memory (_bpm.BaseBinlogPosMemory):
Binlog Position Memory, it should be an instance of subclass of
:py:class:`_bpm.BaseBinlogPosMemory`.
If a tuple (str, float) is passed, it will be initialize parameters
for default :py:class:`_bpm.FileBasedBinlogPosMemory`. It the file-
name is None, it will be *`cwd`\\mysqlbinlog2blinker.binlog.pos*
**kwargs: any arguments that are accepted by
:py:class:`pymysqlreplication.BinLogStreamReader`'s constructor
"""
if not isinstance(binlog_pos_memory, _bpm.BaseBinlogPosMemory):
if not isinstance(binlog_pos_memory, (tuple, list)):
raise ValueError('Invalid binlog position memory: %s' % binlog_pos_memory) # depends on [control=['if'], data=[]]
binlog_pos_memory = _bpm.FileBasedBinlogPosMemory(*binlog_pos_memory) # depends on [control=['if'], data=[]]
mysql_settings.setdefault('connect_timeout', 5)
kwargs.setdefault('blocking', True)
kwargs.setdefault('resume_stream', True)
with binlog_pos_memory:
kwargs.setdefault('log_file', binlog_pos_memory.log_file)
kwargs.setdefault('log_pos', binlog_pos_memory.log_pos)
_logger.info('Start replication from %s with:\n%s' % (mysql_settings, kwargs))
start_publishing(mysql_settings, **kwargs) # depends on [control=['with'], data=[]] |
def find_user(session, username):
"""Find user by name - returns user ID."""
resp = _make_request(session, FIND_USER_URL, username)
if not resp:
raise VooblyError('user not found')
try:
return int(resp[0]['uid'])
except ValueError:
raise VooblyError('user not found') | def function[find_user, parameter[session, username]]:
constant[Find user by name - returns user ID.]
variable[resp] assign[=] call[name[_make_request], parameter[name[session], name[FIND_USER_URL], name[username]]]
if <ast.UnaryOp object at 0x7da1b18b9540> begin[:]
<ast.Raise object at 0x7da1b18b8280>
<ast.Try object at 0x7da1b18b9420> | keyword[def] identifier[find_user] ( identifier[session] , identifier[username] ):
literal[string]
identifier[resp] = identifier[_make_request] ( identifier[session] , identifier[FIND_USER_URL] , identifier[username] )
keyword[if] keyword[not] identifier[resp] :
keyword[raise] identifier[VooblyError] ( literal[string] )
keyword[try] :
keyword[return] identifier[int] ( identifier[resp] [ literal[int] ][ literal[string] ])
keyword[except] identifier[ValueError] :
keyword[raise] identifier[VooblyError] ( literal[string] ) | def find_user(session, username):
"""Find user by name - returns user ID."""
resp = _make_request(session, FIND_USER_URL, username)
if not resp:
raise VooblyError('user not found') # depends on [control=['if'], data=[]]
try:
return int(resp[0]['uid']) # depends on [control=['try'], data=[]]
except ValueError:
raise VooblyError('user not found') # depends on [control=['except'], data=[]] |
def from_translation_key(
cls,
translation_key,
translations,
overlapping_reads,
ref_reads,
alt_reads,
alt_reads_supporting_protein_sequence,
transcripts_overlapping_variant,
transcripts_supporting_protein_sequence,
gene):
"""
Create a ProteinSequence object from a TranslationKey, along with
all the extra fields a ProteinSequence requires.
"""
return cls(
amino_acids=translation_key.amino_acids,
variant_aa_interval_start=translation_key.variant_aa_interval_start,
variant_aa_interval_end=translation_key.variant_aa_interval_end,
ends_with_stop_codon=translation_key.ends_with_stop_codon,
frameshift=translation_key.frameshift,
translations=translations,
overlapping_reads=overlapping_reads,
ref_reads=ref_reads,
alt_reads=alt_reads,
alt_reads_supporting_protein_sequence=(
alt_reads_supporting_protein_sequence),
transcripts_overlapping_variant=transcripts_overlapping_variant,
transcripts_supporting_protein_sequence=(
transcripts_supporting_protein_sequence),
gene=gene) | def function[from_translation_key, parameter[cls, translation_key, translations, overlapping_reads, ref_reads, alt_reads, alt_reads_supporting_protein_sequence, transcripts_overlapping_variant, transcripts_supporting_protein_sequence, gene]]:
constant[
Create a ProteinSequence object from a TranslationKey, along with
all the extra fields a ProteinSequence requires.
]
return[call[name[cls], parameter[]]] | keyword[def] identifier[from_translation_key] (
identifier[cls] ,
identifier[translation_key] ,
identifier[translations] ,
identifier[overlapping_reads] ,
identifier[ref_reads] ,
identifier[alt_reads] ,
identifier[alt_reads_supporting_protein_sequence] ,
identifier[transcripts_overlapping_variant] ,
identifier[transcripts_supporting_protein_sequence] ,
identifier[gene] ):
literal[string]
keyword[return] identifier[cls] (
identifier[amino_acids] = identifier[translation_key] . identifier[amino_acids] ,
identifier[variant_aa_interval_start] = identifier[translation_key] . identifier[variant_aa_interval_start] ,
identifier[variant_aa_interval_end] = identifier[translation_key] . identifier[variant_aa_interval_end] ,
identifier[ends_with_stop_codon] = identifier[translation_key] . identifier[ends_with_stop_codon] ,
identifier[frameshift] = identifier[translation_key] . identifier[frameshift] ,
identifier[translations] = identifier[translations] ,
identifier[overlapping_reads] = identifier[overlapping_reads] ,
identifier[ref_reads] = identifier[ref_reads] ,
identifier[alt_reads] = identifier[alt_reads] ,
identifier[alt_reads_supporting_protein_sequence] =(
identifier[alt_reads_supporting_protein_sequence] ),
identifier[transcripts_overlapping_variant] = identifier[transcripts_overlapping_variant] ,
identifier[transcripts_supporting_protein_sequence] =(
identifier[transcripts_supporting_protein_sequence] ),
identifier[gene] = identifier[gene] ) | def from_translation_key(cls, translation_key, translations, overlapping_reads, ref_reads, alt_reads, alt_reads_supporting_protein_sequence, transcripts_overlapping_variant, transcripts_supporting_protein_sequence, gene):
"""
Create a ProteinSequence object from a TranslationKey, along with
all the extra fields a ProteinSequence requires.
"""
return cls(amino_acids=translation_key.amino_acids, variant_aa_interval_start=translation_key.variant_aa_interval_start, variant_aa_interval_end=translation_key.variant_aa_interval_end, ends_with_stop_codon=translation_key.ends_with_stop_codon, frameshift=translation_key.frameshift, translations=translations, overlapping_reads=overlapping_reads, ref_reads=ref_reads, alt_reads=alt_reads, alt_reads_supporting_protein_sequence=alt_reads_supporting_protein_sequence, transcripts_overlapping_variant=transcripts_overlapping_variant, transcripts_supporting_protein_sequence=transcripts_supporting_protein_sequence, gene=gene) |
def save_yaml(dictionary, path, pretty=False, sortkeys=False):
# type: (Dict, str, bool, bool) -> None
"""Save dictionary to YAML file preserving order if it is an OrderedDict
Args:
dictionary (Dict): Python dictionary to save
path (str): Path to YAML file
pretty (bool): Whether to pretty print. Defaults to False.
sortkeys (bool): Whether to sort dictionary keys. Defaults to False.
Returns:
None
"""
if sortkeys:
dictionary = dict(dictionary)
with open(path, 'w') as f:
if pretty:
pyaml.dump(dictionary, f)
else:
yaml.dump(dictionary, f, default_flow_style=None, Dumper=yamlloader.ordereddict.CDumper) | def function[save_yaml, parameter[dictionary, path, pretty, sortkeys]]:
constant[Save dictionary to YAML file preserving order if it is an OrderedDict
Args:
dictionary (Dict): Python dictionary to save
path (str): Path to YAML file
pretty (bool): Whether to pretty print. Defaults to False.
sortkeys (bool): Whether to sort dictionary keys. Defaults to False.
Returns:
None
]
if name[sortkeys] begin[:]
variable[dictionary] assign[=] call[name[dict], parameter[name[dictionary]]]
with call[name[open], parameter[name[path], constant[w]]] begin[:]
if name[pretty] begin[:]
call[name[pyaml].dump, parameter[name[dictionary], name[f]]] | keyword[def] identifier[save_yaml] ( identifier[dictionary] , identifier[path] , identifier[pretty] = keyword[False] , identifier[sortkeys] = keyword[False] ):
literal[string]
keyword[if] identifier[sortkeys] :
identifier[dictionary] = identifier[dict] ( identifier[dictionary] )
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[f] :
keyword[if] identifier[pretty] :
identifier[pyaml] . identifier[dump] ( identifier[dictionary] , identifier[f] )
keyword[else] :
identifier[yaml] . identifier[dump] ( identifier[dictionary] , identifier[f] , identifier[default_flow_style] = keyword[None] , identifier[Dumper] = identifier[yamlloader] . identifier[ordereddict] . identifier[CDumper] ) | def save_yaml(dictionary, path, pretty=False, sortkeys=False):
# type: (Dict, str, bool, bool) -> None
'Save dictionary to YAML file preserving order if it is an OrderedDict\n\n Args:\n dictionary (Dict): Python dictionary to save\n path (str): Path to YAML file\n pretty (bool): Whether to pretty print. Defaults to False.\n sortkeys (bool): Whether to sort dictionary keys. Defaults to False.\n\n Returns:\n None\n '
if sortkeys:
dictionary = dict(dictionary) # depends on [control=['if'], data=[]]
with open(path, 'w') as f:
if pretty:
pyaml.dump(dictionary, f) # depends on [control=['if'], data=[]]
else:
yaml.dump(dictionary, f, default_flow_style=None, Dumper=yamlloader.ordereddict.CDumper) # depends on [control=['with'], data=['f']] |
def _wrap_result(self, func):
""" Wrap result in Parser instance """
def wrapper(*args):
result = func(*args)
if hasattr(result, '__iter__') and not isinstance(result, etree._Element):
return [self._wrap_element(element) for element in result]
else:
return self._wrap_element(result)
return wrapper | def function[_wrap_result, parameter[self, func]]:
constant[ Wrap result in Parser instance ]
def function[wrapper, parameter[]]:
variable[result] assign[=] call[name[func], parameter[<ast.Starred object at 0x7da2047e9ff0>]]
if <ast.BoolOp object at 0x7da2047eaad0> begin[:]
return[<ast.ListComp object at 0x7da2047e91e0>]
return[name[wrapper]] | keyword[def] identifier[_wrap_result] ( identifier[self] , identifier[func] ):
literal[string]
keyword[def] identifier[wrapper] (* identifier[args] ):
identifier[result] = identifier[func] (* identifier[args] )
keyword[if] identifier[hasattr] ( identifier[result] , literal[string] ) keyword[and] keyword[not] identifier[isinstance] ( identifier[result] , identifier[etree] . identifier[_Element] ):
keyword[return] [ identifier[self] . identifier[_wrap_element] ( identifier[element] ) keyword[for] identifier[element] keyword[in] identifier[result] ]
keyword[else] :
keyword[return] identifier[self] . identifier[_wrap_element] ( identifier[result] )
keyword[return] identifier[wrapper] | def _wrap_result(self, func):
""" Wrap result in Parser instance """
def wrapper(*args):
result = func(*args)
if hasattr(result, '__iter__') and (not isinstance(result, etree._Element)):
return [self._wrap_element(element) for element in result] # depends on [control=['if'], data=[]]
else:
return self._wrap_element(result)
return wrapper |
def send_file_url(self, recipient_id, file_url, notification_type=NotificationType.regular):
"""Send file to the specified recipient.
https://developers.facebook.com/docs/messenger-platform/send-api-reference/file-attachment
Input:
recipient_id: recipient id to send to
file_url: url of file to be sent
Output:
Response from API as <dict>
"""
return self.send_attachment_url(recipient_id, "file", file_url, notification_type) | def function[send_file_url, parameter[self, recipient_id, file_url, notification_type]]:
constant[Send file to the specified recipient.
https://developers.facebook.com/docs/messenger-platform/send-api-reference/file-attachment
Input:
recipient_id: recipient id to send to
file_url: url of file to be sent
Output:
Response from API as <dict>
]
return[call[name[self].send_attachment_url, parameter[name[recipient_id], constant[file], name[file_url], name[notification_type]]]] | keyword[def] identifier[send_file_url] ( identifier[self] , identifier[recipient_id] , identifier[file_url] , identifier[notification_type] = identifier[NotificationType] . identifier[regular] ):
literal[string]
keyword[return] identifier[self] . identifier[send_attachment_url] ( identifier[recipient_id] , literal[string] , identifier[file_url] , identifier[notification_type] ) | def send_file_url(self, recipient_id, file_url, notification_type=NotificationType.regular):
"""Send file to the specified recipient.
https://developers.facebook.com/docs/messenger-platform/send-api-reference/file-attachment
Input:
recipient_id: recipient id to send to
file_url: url of file to be sent
Output:
Response from API as <dict>
"""
return self.send_attachment_url(recipient_id, 'file', file_url, notification_type) |
def check_types(func):
"""
Check if annotated function arguments are of the correct type
"""
call = PythonCall(func)
@wraps(func)
def decorator(*args, **kwargs):
parameters = call.bind(args, kwargs)
for arg_name, expected_type in func.__annotations__.items():
if not isinstance(parameters[arg_name], expected_type):
raise TypeError("{} must be a {}".format(
arg_name, expected_type))
return call.apply(args, kwargs)
return decorator | def function[check_types, parameter[func]]:
constant[
Check if annotated function arguments are of the correct type
]
variable[call] assign[=] call[name[PythonCall], parameter[name[func]]]
def function[decorator, parameter[]]:
variable[parameters] assign[=] call[name[call].bind, parameter[name[args], name[kwargs]]]
for taget[tuple[[<ast.Name object at 0x7da1b23475e0>, <ast.Name object at 0x7da1b2345c90>]]] in starred[call[name[func].__annotations__.items, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b2344820> begin[:]
<ast.Raise object at 0x7da1b2344670>
return[call[name[call].apply, parameter[name[args], name[kwargs]]]]
return[name[decorator]] | keyword[def] identifier[check_types] ( identifier[func] ):
literal[string]
identifier[call] = identifier[PythonCall] ( identifier[func] )
@ identifier[wraps] ( identifier[func] )
keyword[def] identifier[decorator] (* identifier[args] ,** identifier[kwargs] ):
identifier[parameters] = identifier[call] . identifier[bind] ( identifier[args] , identifier[kwargs] )
keyword[for] identifier[arg_name] , identifier[expected_type] keyword[in] identifier[func] . identifier[__annotations__] . identifier[items] ():
keyword[if] keyword[not] identifier[isinstance] ( identifier[parameters] [ identifier[arg_name] ], identifier[expected_type] ):
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] (
identifier[arg_name] , identifier[expected_type] ))
keyword[return] identifier[call] . identifier[apply] ( identifier[args] , identifier[kwargs] )
keyword[return] identifier[decorator] | def check_types(func):
"""
Check if annotated function arguments are of the correct type
"""
call = PythonCall(func)
@wraps(func)
def decorator(*args, **kwargs):
parameters = call.bind(args, kwargs)
for (arg_name, expected_type) in func.__annotations__.items():
if not isinstance(parameters[arg_name], expected_type):
raise TypeError('{} must be a {}'.format(arg_name, expected_type)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return call.apply(args, kwargs)
return decorator |
def leave_room(self, sid, room, namespace=None):
"""Leave a room.
The only difference with the :func:`socketio.Server.leave_room` method
is that when the ``namespace`` argument is not given the namespace
associated with the class is used.
"""
return self.server.leave_room(sid, room,
namespace=namespace or self.namespace) | def function[leave_room, parameter[self, sid, room, namespace]]:
constant[Leave a room.
The only difference with the :func:`socketio.Server.leave_room` method
is that when the ``namespace`` argument is not given the namespace
associated with the class is used.
]
return[call[name[self].server.leave_room, parameter[name[sid], name[room]]]] | keyword[def] identifier[leave_room] ( identifier[self] , identifier[sid] , identifier[room] , identifier[namespace] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[server] . identifier[leave_room] ( identifier[sid] , identifier[room] ,
identifier[namespace] = identifier[namespace] keyword[or] identifier[self] . identifier[namespace] ) | def leave_room(self, sid, room, namespace=None):
"""Leave a room.
The only difference with the :func:`socketio.Server.leave_room` method
is that when the ``namespace`` argument is not given the namespace
associated with the class is used.
"""
return self.server.leave_room(sid, room, namespace=namespace or self.namespace) |
def ReportConfiguration(self, file):
"""
:param file: Destination for report details
:return: None
"""
global encodingpar
print >> file, BuildReportLine("FAM FILE", self.fam_details)
print >> file, BuildReportLine("IMPUTE_ARCHIVES", "%s:%s" % (str(self.chroms[0]), self.archives[0]))
idx = 0
for arch in self.archives[1:]:
print >> file, BuildReportLine("", "%s:%s" % (str(self.chroms[idx+1]), arch))
idx += 1
print >> file, BuildReportLine("ENCODING", ["Additive", "Dominant", "Recessive", "Genotype", "Raw"][encoding])
print >> file, BuildReportLine("INFO-EXT", Parser.info_ext)
print >> file, BuildReportLine("INFO-THRESH", Parser.info_threshold) | def function[ReportConfiguration, parameter[self, file]]:
constant[
:param file: Destination for report details
:return: None
]
<ast.Global object at 0x7da20c795360>
tuple[[<ast.BinOp object at 0x7da18ede4a60>, <ast.Call object at 0x7da18ede73d0>]]
tuple[[<ast.BinOp object at 0x7da18ede7bb0>, <ast.Call object at 0x7da18ede4520>]]
variable[idx] assign[=] constant[0]
for taget[name[arch]] in starred[call[name[self].archives][<ast.Slice object at 0x7da18ede60b0>]] begin[:]
tuple[[<ast.BinOp object at 0x7da18ede6650>, <ast.Call object at 0x7da18ede63b0>]]
<ast.AugAssign object at 0x7da18ede55d0>
tuple[[<ast.BinOp object at 0x7da18ede5b40>, <ast.Call object at 0x7da18ede7580>]]
tuple[[<ast.BinOp object at 0x7da18ede7460>, <ast.Call object at 0x7da18ede5c60>]]
tuple[[<ast.BinOp object at 0x7da18ede5780>, <ast.Call object at 0x7da18ede46d0>]] | keyword[def] identifier[ReportConfiguration] ( identifier[self] , identifier[file] ):
literal[string]
keyword[global] identifier[encodingpar]
identifier[print] >> identifier[file] , identifier[BuildReportLine] ( literal[string] , identifier[self] . identifier[fam_details] )
identifier[print] >> identifier[file] , identifier[BuildReportLine] ( literal[string] , literal[string] %( identifier[str] ( identifier[self] . identifier[chroms] [ literal[int] ]), identifier[self] . identifier[archives] [ literal[int] ]))
identifier[idx] = literal[int]
keyword[for] identifier[arch] keyword[in] identifier[self] . identifier[archives] [ literal[int] :]:
identifier[print] >> identifier[file] , identifier[BuildReportLine] ( literal[string] , literal[string] %( identifier[str] ( identifier[self] . identifier[chroms] [ identifier[idx] + literal[int] ]), identifier[arch] ))
identifier[idx] += literal[int]
identifier[print] >> identifier[file] , identifier[BuildReportLine] ( literal[string] ,[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ][ identifier[encoding] ])
identifier[print] >> identifier[file] , identifier[BuildReportLine] ( literal[string] , identifier[Parser] . identifier[info_ext] )
identifier[print] >> identifier[file] , identifier[BuildReportLine] ( literal[string] , identifier[Parser] . identifier[info_threshold] ) | def ReportConfiguration(self, file):
"""
:param file: Destination for report details
:return: None
"""
global encodingpar
(print >> file, BuildReportLine('FAM FILE', self.fam_details))
(print >> file, BuildReportLine('IMPUTE_ARCHIVES', '%s:%s' % (str(self.chroms[0]), self.archives[0])))
idx = 0
for arch in self.archives[1:]:
(print >> file, BuildReportLine('', '%s:%s' % (str(self.chroms[idx + 1]), arch)))
idx += 1 # depends on [control=['for'], data=['arch']]
(print >> file, BuildReportLine('ENCODING', ['Additive', 'Dominant', 'Recessive', 'Genotype', 'Raw'][encoding]))
(print >> file, BuildReportLine('INFO-EXT', Parser.info_ext))
(print >> file, BuildReportLine('INFO-THRESH', Parser.info_threshold)) |
def _ParseRecords(self, parser_mediator, evt_file):
"""Parses Windows EventLog (EVT) records.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
evt_file (pyevt.file): Windows EventLog (EVT) file.
"""
# To handle errors when parsing a Windows EventLog (EVT) file in the most
# granular way the following code iterates over every event record. The
# call to evt_file.get_record() and access to members of evt_record should
# be called within a try-except.
for record_index in range(evt_file.number_of_records):
if parser_mediator.abort:
break
try:
evt_record = evt_file.get_record(record_index)
self._ParseRecord(parser_mediator, record_index, evt_record)
except IOError as exception:
parser_mediator.ProduceExtractionWarning(
'unable to parse event record: {0:d} with error: {1!s}'.format(
record_index, exception))
for record_index in range(evt_file.number_of_recovered_records):
if parser_mediator.abort:
break
try:
evt_record = evt_file.get_recovered_record(record_index)
self._ParseRecord(
parser_mediator, record_index, evt_record, recovered=True)
except IOError as exception:
parser_mediator.ProduceExtractionWarning((
'unable to parse recovered event record: {0:d} with error: '
'{1!s}').format(record_index, exception)) | def function[_ParseRecords, parameter[self, parser_mediator, evt_file]]:
constant[Parses Windows EventLog (EVT) records.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
evt_file (pyevt.file): Windows EventLog (EVT) file.
]
for taget[name[record_index]] in starred[call[name[range], parameter[name[evt_file].number_of_records]]] begin[:]
if name[parser_mediator].abort begin[:]
break
<ast.Try object at 0x7da20e9b2590>
for taget[name[record_index]] in starred[call[name[range], parameter[name[evt_file].number_of_recovered_records]]] begin[:]
if name[parser_mediator].abort begin[:]
break
<ast.Try object at 0x7da20e9b39a0> | keyword[def] identifier[_ParseRecords] ( identifier[self] , identifier[parser_mediator] , identifier[evt_file] ):
literal[string]
keyword[for] identifier[record_index] keyword[in] identifier[range] ( identifier[evt_file] . identifier[number_of_records] ):
keyword[if] identifier[parser_mediator] . identifier[abort] :
keyword[break]
keyword[try] :
identifier[evt_record] = identifier[evt_file] . identifier[get_record] ( identifier[record_index] )
identifier[self] . identifier[_ParseRecord] ( identifier[parser_mediator] , identifier[record_index] , identifier[evt_record] )
keyword[except] identifier[IOError] keyword[as] identifier[exception] :
identifier[parser_mediator] . identifier[ProduceExtractionWarning] (
literal[string] . identifier[format] (
identifier[record_index] , identifier[exception] ))
keyword[for] identifier[record_index] keyword[in] identifier[range] ( identifier[evt_file] . identifier[number_of_recovered_records] ):
keyword[if] identifier[parser_mediator] . identifier[abort] :
keyword[break]
keyword[try] :
identifier[evt_record] = identifier[evt_file] . identifier[get_recovered_record] ( identifier[record_index] )
identifier[self] . identifier[_ParseRecord] (
identifier[parser_mediator] , identifier[record_index] , identifier[evt_record] , identifier[recovered] = keyword[True] )
keyword[except] identifier[IOError] keyword[as] identifier[exception] :
identifier[parser_mediator] . identifier[ProduceExtractionWarning] ((
literal[string]
literal[string] ). identifier[format] ( identifier[record_index] , identifier[exception] )) | def _ParseRecords(self, parser_mediator, evt_file):
"""Parses Windows EventLog (EVT) records.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
evt_file (pyevt.file): Windows EventLog (EVT) file.
"""
# To handle errors when parsing a Windows EventLog (EVT) file in the most
# granular way the following code iterates over every event record. The
# call to evt_file.get_record() and access to members of evt_record should
# be called within a try-except.
for record_index in range(evt_file.number_of_records):
if parser_mediator.abort:
break # depends on [control=['if'], data=[]]
try:
evt_record = evt_file.get_record(record_index)
self._ParseRecord(parser_mediator, record_index, evt_record) # depends on [control=['try'], data=[]]
except IOError as exception:
parser_mediator.ProduceExtractionWarning('unable to parse event record: {0:d} with error: {1!s}'.format(record_index, exception)) # depends on [control=['except'], data=['exception']] # depends on [control=['for'], data=['record_index']]
for record_index in range(evt_file.number_of_recovered_records):
if parser_mediator.abort:
break # depends on [control=['if'], data=[]]
try:
evt_record = evt_file.get_recovered_record(record_index)
self._ParseRecord(parser_mediator, record_index, evt_record, recovered=True) # depends on [control=['try'], data=[]]
except IOError as exception:
parser_mediator.ProduceExtractionWarning('unable to parse recovered event record: {0:d} with error: {1!s}'.format(record_index, exception)) # depends on [control=['except'], data=['exception']] # depends on [control=['for'], data=['record_index']] |
def get_description(self, description_type=DescriptionTypeEnum.FULL):
"""Generates a human readable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception: if throw_exception_on_parse_error is True
"""
try:
if self._parsed is False:
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
self._parsed = True
choices = {
DescriptionTypeEnum.FULL: self.get_full_description,
DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description,
DescriptionTypeEnum.HOURS: self.get_hours_description,
DescriptionTypeEnum.MINUTES: self.get_minutes_description,
DescriptionTypeEnum.SECONDS: self.get_seconds_description,
DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description,
DescriptionTypeEnum.MONTH: self.get_month_description,
DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description,
DescriptionTypeEnum.YEAR: self.get_year_description,
}
description = choices.get(description_type, self.get_seconds_description)()
except Exception as ex:
if self._options.throw_exception_on_parse_error:
raise
else:
description = str(ex)
return description | def function[get_description, parameter[self, description_type]]:
constant[Generates a human readable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception: if throw_exception_on_parse_error is True
]
<ast.Try object at 0x7da1b04f4a90>
return[name[description]] | keyword[def] identifier[get_description] ( identifier[self] , identifier[description_type] = identifier[DescriptionTypeEnum] . identifier[FULL] ):
literal[string]
keyword[try] :
keyword[if] identifier[self] . identifier[_parsed] keyword[is] keyword[False] :
identifier[parser] = identifier[ExpressionParser] ( identifier[self] . identifier[_expression] , identifier[self] . identifier[_options] )
identifier[self] . identifier[_expression_parts] = identifier[parser] . identifier[parse] ()
identifier[self] . identifier[_parsed] = keyword[True]
identifier[choices] ={
identifier[DescriptionTypeEnum] . identifier[FULL] : identifier[self] . identifier[get_full_description] ,
identifier[DescriptionTypeEnum] . identifier[TIMEOFDAY] : identifier[self] . identifier[get_time_of_day_description] ,
identifier[DescriptionTypeEnum] . identifier[HOURS] : identifier[self] . identifier[get_hours_description] ,
identifier[DescriptionTypeEnum] . identifier[MINUTES] : identifier[self] . identifier[get_minutes_description] ,
identifier[DescriptionTypeEnum] . identifier[SECONDS] : identifier[self] . identifier[get_seconds_description] ,
identifier[DescriptionTypeEnum] . identifier[DAYOFMONTH] : identifier[self] . identifier[get_day_of_month_description] ,
identifier[DescriptionTypeEnum] . identifier[MONTH] : identifier[self] . identifier[get_month_description] ,
identifier[DescriptionTypeEnum] . identifier[DAYOFWEEK] : identifier[self] . identifier[get_day_of_week_description] ,
identifier[DescriptionTypeEnum] . identifier[YEAR] : identifier[self] . identifier[get_year_description] ,
}
identifier[description] = identifier[choices] . identifier[get] ( identifier[description_type] , identifier[self] . identifier[get_seconds_description] )()
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
keyword[if] identifier[self] . identifier[_options] . identifier[throw_exception_on_parse_error] :
keyword[raise]
keyword[else] :
identifier[description] = identifier[str] ( identifier[ex] )
keyword[return] identifier[description] | def get_description(self, description_type=DescriptionTypeEnum.FULL):
"""Generates a human readable string for the Cron Expression
Args:
description_type: Which part(s) of the expression to describe
Returns:
The cron expression description
Raises:
Exception: if throw_exception_on_parse_error is True
"""
try:
if self._parsed is False:
parser = ExpressionParser(self._expression, self._options)
self._expression_parts = parser.parse()
self._parsed = True # depends on [control=['if'], data=[]]
choices = {DescriptionTypeEnum.FULL: self.get_full_description, DescriptionTypeEnum.TIMEOFDAY: self.get_time_of_day_description, DescriptionTypeEnum.HOURS: self.get_hours_description, DescriptionTypeEnum.MINUTES: self.get_minutes_description, DescriptionTypeEnum.SECONDS: self.get_seconds_description, DescriptionTypeEnum.DAYOFMONTH: self.get_day_of_month_description, DescriptionTypeEnum.MONTH: self.get_month_description, DescriptionTypeEnum.DAYOFWEEK: self.get_day_of_week_description, DescriptionTypeEnum.YEAR: self.get_year_description}
description = choices.get(description_type, self.get_seconds_description)() # depends on [control=['try'], data=[]]
except Exception as ex:
if self._options.throw_exception_on_parse_error:
raise # depends on [control=['if'], data=[]]
else:
description = str(ex) # depends on [control=['except'], data=['ex']]
return description |
def pad_to_size(text, x, y):
"""
Adds whitespace to text to center it within a frame of the given
dimensions.
"""
input_lines = text.rstrip().split("\n")
longest_input_line = max(map(len, input_lines))
number_of_input_lines = len(input_lines)
x = max(x, longest_input_line)
y = max(y, number_of_input_lines)
output = ""
padding_top = int((y - number_of_input_lines) / 2)
padding_bottom = y - number_of_input_lines - padding_top
padding_left = int((x - longest_input_line) / 2)
output += padding_top * (" " * x + "\n")
for line in input_lines:
output += padding_left * " " + line + " " * (x - padding_left - len(line)) + "\n"
output += padding_bottom * (" " * x + "\n")
return output | def function[pad_to_size, parameter[text, x, y]]:
constant[
Adds whitespace to text to center it within a frame of the given
dimensions.
]
variable[input_lines] assign[=] call[call[name[text].rstrip, parameter[]].split, parameter[constant[
]]]
variable[longest_input_line] assign[=] call[name[max], parameter[call[name[map], parameter[name[len], name[input_lines]]]]]
variable[number_of_input_lines] assign[=] call[name[len], parameter[name[input_lines]]]
variable[x] assign[=] call[name[max], parameter[name[x], name[longest_input_line]]]
variable[y] assign[=] call[name[max], parameter[name[y], name[number_of_input_lines]]]
variable[output] assign[=] constant[]
variable[padding_top] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[y] - name[number_of_input_lines]] / constant[2]]]]
variable[padding_bottom] assign[=] binary_operation[binary_operation[name[y] - name[number_of_input_lines]] - name[padding_top]]
variable[padding_left] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[x] - name[longest_input_line]] / constant[2]]]]
<ast.AugAssign object at 0x7da1b12c8820>
for taget[name[line]] in starred[name[input_lines]] begin[:]
<ast.AugAssign object at 0x7da1b12ca320>
<ast.AugAssign object at 0x7da1b12c8850>
return[name[output]] | keyword[def] identifier[pad_to_size] ( identifier[text] , identifier[x] , identifier[y] ):
literal[string]
identifier[input_lines] = identifier[text] . identifier[rstrip] (). identifier[split] ( literal[string] )
identifier[longest_input_line] = identifier[max] ( identifier[map] ( identifier[len] , identifier[input_lines] ))
identifier[number_of_input_lines] = identifier[len] ( identifier[input_lines] )
identifier[x] = identifier[max] ( identifier[x] , identifier[longest_input_line] )
identifier[y] = identifier[max] ( identifier[y] , identifier[number_of_input_lines] )
identifier[output] = literal[string]
identifier[padding_top] = identifier[int] (( identifier[y] - identifier[number_of_input_lines] )/ literal[int] )
identifier[padding_bottom] = identifier[y] - identifier[number_of_input_lines] - identifier[padding_top]
identifier[padding_left] = identifier[int] (( identifier[x] - identifier[longest_input_line] )/ literal[int] )
identifier[output] += identifier[padding_top] *( literal[string] * identifier[x] + literal[string] )
keyword[for] identifier[line] keyword[in] identifier[input_lines] :
identifier[output] += identifier[padding_left] * literal[string] + identifier[line] + literal[string] *( identifier[x] - identifier[padding_left] - identifier[len] ( identifier[line] ))+ literal[string]
identifier[output] += identifier[padding_bottom] *( literal[string] * identifier[x] + literal[string] )
keyword[return] identifier[output] | def pad_to_size(text, x, y):
"""
Adds whitespace to text to center it within a frame of the given
dimensions.
"""
input_lines = text.rstrip().split('\n')
longest_input_line = max(map(len, input_lines))
number_of_input_lines = len(input_lines)
x = max(x, longest_input_line)
y = max(y, number_of_input_lines)
output = ''
padding_top = int((y - number_of_input_lines) / 2)
padding_bottom = y - number_of_input_lines - padding_top
padding_left = int((x - longest_input_line) / 2)
output += padding_top * (' ' * x + '\n')
for line in input_lines:
output += padding_left * ' ' + line + ' ' * (x - padding_left - len(line)) + '\n' # depends on [control=['for'], data=['line']]
output += padding_bottom * (' ' * x + '\n')
return output |
def parse_resource(library, session, resource_name):
"""Parse a resource string to get the interface information.
Corresponds to viParseRsrc function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Resource Manager session (should always be the Default Resource Manager for VISA
returned from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:return: Resource information with interface type and board number, return value of the library call.
:rtype: :class:`pyvisa.highlevel.ResourceInfo`, :class:`pyvisa.constants.StatusCode`
"""
interface_type = ViUInt16()
interface_board_number = ViUInt16()
# [ViSession, ViRsrc, ViPUInt16, ViPUInt16]
# ViRsrc converts from (str, unicode, bytes) to bytes
ret = library.viParseRsrc(session, resource_name, byref(interface_type),
byref(interface_board_number))
return ResourceInfo(constants.InterfaceType(interface_type.value),
interface_board_number.value,
None, None, None), ret | def function[parse_resource, parameter[library, session, resource_name]]:
constant[Parse a resource string to get the interface information.
Corresponds to viParseRsrc function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Resource Manager session (should always be the Default Resource Manager for VISA
returned from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:return: Resource information with interface type and board number, return value of the library call.
:rtype: :class:`pyvisa.highlevel.ResourceInfo`, :class:`pyvisa.constants.StatusCode`
]
variable[interface_type] assign[=] call[name[ViUInt16], parameter[]]
variable[interface_board_number] assign[=] call[name[ViUInt16], parameter[]]
variable[ret] assign[=] call[name[library].viParseRsrc, parameter[name[session], name[resource_name], call[name[byref], parameter[name[interface_type]]], call[name[byref], parameter[name[interface_board_number]]]]]
return[tuple[[<ast.Call object at 0x7da18dc9a080>, <ast.Name object at 0x7da18dc98310>]]] | keyword[def] identifier[parse_resource] ( identifier[library] , identifier[session] , identifier[resource_name] ):
literal[string]
identifier[interface_type] = identifier[ViUInt16] ()
identifier[interface_board_number] = identifier[ViUInt16] ()
identifier[ret] = identifier[library] . identifier[viParseRsrc] ( identifier[session] , identifier[resource_name] , identifier[byref] ( identifier[interface_type] ),
identifier[byref] ( identifier[interface_board_number] ))
keyword[return] identifier[ResourceInfo] ( identifier[constants] . identifier[InterfaceType] ( identifier[interface_type] . identifier[value] ),
identifier[interface_board_number] . identifier[value] ,
keyword[None] , keyword[None] , keyword[None] ), identifier[ret] | def parse_resource(library, session, resource_name):
"""Parse a resource string to get the interface information.
Corresponds to viParseRsrc function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Resource Manager session (should always be the Default Resource Manager for VISA
returned from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:return: Resource information with interface type and board number, return value of the library call.
:rtype: :class:`pyvisa.highlevel.ResourceInfo`, :class:`pyvisa.constants.StatusCode`
"""
interface_type = ViUInt16()
interface_board_number = ViUInt16()
# [ViSession, ViRsrc, ViPUInt16, ViPUInt16]
# ViRsrc converts from (str, unicode, bytes) to bytes
ret = library.viParseRsrc(session, resource_name, byref(interface_type), byref(interface_board_number))
return (ResourceInfo(constants.InterfaceType(interface_type.value), interface_board_number.value, None, None, None), ret) |
def from_record(cls, record):
"""
Factory methods to create Record from pymarc.Record object.
"""
if not isinstance(record, pymarc.Record):
raise TypeError('record must be of type pymarc.Record')
record.__class__ = Record
return record | def function[from_record, parameter[cls, record]]:
constant[
Factory methods to create Record from pymarc.Record object.
]
if <ast.UnaryOp object at 0x7da1b0a20cd0> begin[:]
<ast.Raise object at 0x7da1b0a23d00>
name[record].__class__ assign[=] name[Record]
return[name[record]] | keyword[def] identifier[from_record] ( identifier[cls] , identifier[record] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[record] , identifier[pymarc] . identifier[Record] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[record] . identifier[__class__] = identifier[Record]
keyword[return] identifier[record] | def from_record(cls, record):
"""
Factory methods to create Record from pymarc.Record object.
"""
if not isinstance(record, pymarc.Record):
raise TypeError('record must be of type pymarc.Record') # depends on [control=['if'], data=[]]
record.__class__ = Record
return record |
def process_metrics(self, snmp_data):
"Build list with metrics"
self.metrics = {}
for i in range(0, len(snmp_data)):
metric_id = self.models[self.modem_type]['metrics'][i]
value = int(snmp_data[i])
self.metrics[metric_id] = value | def function[process_metrics, parameter[self, snmp_data]]:
constant[Build list with metrics]
name[self].metrics assign[=] dictionary[[], []]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], call[name[len], parameter[name[snmp_data]]]]]] begin[:]
variable[metric_id] assign[=] call[call[call[name[self].models][name[self].modem_type]][constant[metrics]]][name[i]]
variable[value] assign[=] call[name[int], parameter[call[name[snmp_data]][name[i]]]]
call[name[self].metrics][name[metric_id]] assign[=] name[value] | keyword[def] identifier[process_metrics] ( identifier[self] , identifier[snmp_data] ):
literal[string]
identifier[self] . identifier[metrics] ={}
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[snmp_data] )):
identifier[metric_id] = identifier[self] . identifier[models] [ identifier[self] . identifier[modem_type] ][ literal[string] ][ identifier[i] ]
identifier[value] = identifier[int] ( identifier[snmp_data] [ identifier[i] ])
identifier[self] . identifier[metrics] [ identifier[metric_id] ]= identifier[value] | def process_metrics(self, snmp_data):
"""Build list with metrics"""
self.metrics = {}
for i in range(0, len(snmp_data)):
metric_id = self.models[self.modem_type]['metrics'][i]
value = int(snmp_data[i])
self.metrics[metric_id] = value # depends on [control=['for'], data=['i']] |
def foreignkey(element, exceptions):
'''
function to determine if each select field needs a create button or not
'''
label = element.field.__dict__['label']
try:
label = unicode(label)
except NameError:
pass
if (not label) or (label in exceptions):
return False
else:
return "_queryset" in element.field.__dict__ | def function[foreignkey, parameter[element, exceptions]]:
constant[
function to determine if each select field needs a create button or not
]
variable[label] assign[=] call[name[element].field.__dict__][constant[label]]
<ast.Try object at 0x7da18fe909a0>
if <ast.BoolOp object at 0x7da18fe90250> begin[:]
return[constant[False]] | keyword[def] identifier[foreignkey] ( identifier[element] , identifier[exceptions] ):
literal[string]
identifier[label] = identifier[element] . identifier[field] . identifier[__dict__] [ literal[string] ]
keyword[try] :
identifier[label] = identifier[unicode] ( identifier[label] )
keyword[except] identifier[NameError] :
keyword[pass]
keyword[if] ( keyword[not] identifier[label] ) keyword[or] ( identifier[label] keyword[in] identifier[exceptions] ):
keyword[return] keyword[False]
keyword[else] :
keyword[return] literal[string] keyword[in] identifier[element] . identifier[field] . identifier[__dict__] | def foreignkey(element, exceptions):
"""
function to determine if each select field needs a create button or not
"""
label = element.field.__dict__['label']
try:
label = unicode(label) # depends on [control=['try'], data=[]]
except NameError:
pass # depends on [control=['except'], data=[]]
if not label or label in exceptions:
return False # depends on [control=['if'], data=[]]
else:
return '_queryset' in element.field.__dict__ |
def _get_lineage(self, tax_id, merge_obsolete=True):
"""Return a list of [(rank, tax_id)] describing the lineage of
tax_id. If ``merge_obsolete`` is True and ``tax_id`` has been
replaced, use the corresponding value in table merged.
"""
# Be sure we aren't working with an obsolete tax_id
if merge_obsolete:
tax_id = self._get_merged(tax_id)
# Note: joining with ranks seems like a no-op, but for some
# reason it results in a faster query using sqlite, as well as
# an ordering from leaf --> root. Might be a better idea to
# sort explicitly if this is the expected behavior, but it
# seems like for the most part, the lineage is converted to a
# dict and the order is irrelevant.
cmd = """
WITH RECURSIVE a AS (
SELECT tax_id, parent_id, rank
FROM {nodes}
WHERE tax_id = {}
UNION ALL
SELECT p.tax_id, p.parent_id, p.rank
FROM a JOIN {nodes} p ON a.parent_id = p.tax_id
)
SELECT a.rank, a.tax_id FROM a
JOIN {ranks} using(rank)
""".format(self.placeholder, nodes=self.nodes, ranks=self.ranks_table)
# with some versions of sqlite3, an error is raised when no
# rows are returned; with others, an empty list is returned.
try:
with self.engine.connect() as con:
result = con.execute(cmd, (tax_id,))
# reorder so that root is first
lineage = result.fetchall()[::-1]
except sqlalchemy.exc.ResourceClosedError:
lineage = []
if not lineage:
raise ValueError('tax id "{}" not found'.format(tax_id))
return lineage | def function[_get_lineage, parameter[self, tax_id, merge_obsolete]]:
constant[Return a list of [(rank, tax_id)] describing the lineage of
tax_id. If ``merge_obsolete`` is True and ``tax_id`` has been
replaced, use the corresponding value in table merged.
]
if name[merge_obsolete] begin[:]
variable[tax_id] assign[=] call[name[self]._get_merged, parameter[name[tax_id]]]
variable[cmd] assign[=] call[constant[
WITH RECURSIVE a AS (
SELECT tax_id, parent_id, rank
FROM {nodes}
WHERE tax_id = {}
UNION ALL
SELECT p.tax_id, p.parent_id, p.rank
FROM a JOIN {nodes} p ON a.parent_id = p.tax_id
)
SELECT a.rank, a.tax_id FROM a
JOIN {ranks} using(rank)
].format, parameter[name[self].placeholder]]
<ast.Try object at 0x7da1b1951270>
if <ast.UnaryOp object at 0x7da1b19517b0> begin[:]
<ast.Raise object at 0x7da1b1950e50>
return[name[lineage]] | keyword[def] identifier[_get_lineage] ( identifier[self] , identifier[tax_id] , identifier[merge_obsolete] = keyword[True] ):
literal[string]
keyword[if] identifier[merge_obsolete] :
identifier[tax_id] = identifier[self] . identifier[_get_merged] ( identifier[tax_id] )
identifier[cmd] = literal[string] . identifier[format] ( identifier[self] . identifier[placeholder] , identifier[nodes] = identifier[self] . identifier[nodes] , identifier[ranks] = identifier[self] . identifier[ranks_table] )
keyword[try] :
keyword[with] identifier[self] . identifier[engine] . identifier[connect] () keyword[as] identifier[con] :
identifier[result] = identifier[con] . identifier[execute] ( identifier[cmd] ,( identifier[tax_id] ,))
identifier[lineage] = identifier[result] . identifier[fetchall] ()[::- literal[int] ]
keyword[except] identifier[sqlalchemy] . identifier[exc] . identifier[ResourceClosedError] :
identifier[lineage] =[]
keyword[if] keyword[not] identifier[lineage] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[tax_id] ))
keyword[return] identifier[lineage] | def _get_lineage(self, tax_id, merge_obsolete=True):
"""Return a list of [(rank, tax_id)] describing the lineage of
tax_id. If ``merge_obsolete`` is True and ``tax_id`` has been
replaced, use the corresponding value in table merged.
"""
# Be sure we aren't working with an obsolete tax_id
if merge_obsolete:
tax_id = self._get_merged(tax_id) # depends on [control=['if'], data=[]]
# Note: joining with ranks seems like a no-op, but for some
# reason it results in a faster query using sqlite, as well as
# an ordering from leaf --> root. Might be a better idea to
# sort explicitly if this is the expected behavior, but it
# seems like for the most part, the lineage is converted to a
# dict and the order is irrelevant.
cmd = '\n WITH RECURSIVE a AS (\n SELECT tax_id, parent_id, rank\n FROM {nodes}\n WHERE tax_id = {}\n UNION ALL\n SELECT p.tax_id, p.parent_id, p.rank\n FROM a JOIN {nodes} p ON a.parent_id = p.tax_id\n )\n SELECT a.rank, a.tax_id FROM a\n JOIN {ranks} using(rank)\n '.format(self.placeholder, nodes=self.nodes, ranks=self.ranks_table)
# with some versions of sqlite3, an error is raised when no
# rows are returned; with others, an empty list is returned.
try:
with self.engine.connect() as con:
result = con.execute(cmd, (tax_id,))
# reorder so that root is first
lineage = result.fetchall()[::-1] # depends on [control=['with'], data=['con']] # depends on [control=['try'], data=[]]
except sqlalchemy.exc.ResourceClosedError:
lineage = [] # depends on [control=['except'], data=[]]
if not lineage:
raise ValueError('tax id "{}" not found'.format(tax_id)) # depends on [control=['if'], data=[]]
return lineage |
def view(self, dtype=None):
"""
Create a new view of the Series.
This function will return a new Series with a view of the same
underlying values in memory, optionally reinterpreted with a new data
type. The new data type must preserve the same size in bytes as to not
cause index misalignment.
Parameters
----------
dtype : data type
Data type object or one of their string representations.
Returns
-------
Series
A new Series object as a view of the same data in memory.
See Also
--------
numpy.ndarray.view : Equivalent numpy function to create a new view of
the same data in memory.
Notes
-----
Series are instantiated with ``dtype=float64`` by default. While
``numpy.ndarray.view()`` will return a view with the same data type as
the original array, ``Series.view()`` (without specified dtype)
will try using ``float64`` and may fail if the original data type size
in bytes is not the same.
Examples
--------
>>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8')
>>> s
0 -2
1 -1
2 0
3 1
4 2
dtype: int8
The 8 bit signed integer representation of `-1` is `0b11111111`, but
the same bytes represent 255 if read as an 8 bit unsigned integer:
>>> us = s.view('uint8')
>>> us
0 254
1 255
2 0
3 1
4 2
dtype: uint8
The views share the same underlying values:
>>> us[0] = 128
>>> s
0 -128
1 -1
2 0
3 1
4 2
dtype: int8
"""
return self._constructor(self._values.view(dtype),
index=self.index).__finalize__(self) | def function[view, parameter[self, dtype]]:
constant[
Create a new view of the Series.
This function will return a new Series with a view of the same
underlying values in memory, optionally reinterpreted with a new data
type. The new data type must preserve the same size in bytes as to not
cause index misalignment.
Parameters
----------
dtype : data type
Data type object or one of their string representations.
Returns
-------
Series
A new Series object as a view of the same data in memory.
See Also
--------
numpy.ndarray.view : Equivalent numpy function to create a new view of
the same data in memory.
Notes
-----
Series are instantiated with ``dtype=float64`` by default. While
``numpy.ndarray.view()`` will return a view with the same data type as
the original array, ``Series.view()`` (without specified dtype)
will try using ``float64`` and may fail if the original data type size
in bytes is not the same.
Examples
--------
>>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8')
>>> s
0 -2
1 -1
2 0
3 1
4 2
dtype: int8
The 8 bit signed integer representation of `-1` is `0b11111111`, but
the same bytes represent 255 if read as an 8 bit unsigned integer:
>>> us = s.view('uint8')
>>> us
0 254
1 255
2 0
3 1
4 2
dtype: uint8
The views share the same underlying values:
>>> us[0] = 128
>>> s
0 -128
1 -1
2 0
3 1
4 2
dtype: int8
]
return[call[call[name[self]._constructor, parameter[call[name[self]._values.view, parameter[name[dtype]]]]].__finalize__, parameter[name[self]]]] | keyword[def] identifier[view] ( identifier[self] , identifier[dtype] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[_constructor] ( identifier[self] . identifier[_values] . identifier[view] ( identifier[dtype] ),
identifier[index] = identifier[self] . identifier[index] ). identifier[__finalize__] ( identifier[self] ) | def view(self, dtype=None):
"""
Create a new view of the Series.
This function will return a new Series with a view of the same
underlying values in memory, optionally reinterpreted with a new data
type. The new data type must preserve the same size in bytes as to not
cause index misalignment.
Parameters
----------
dtype : data type
Data type object or one of their string representations.
Returns
-------
Series
A new Series object as a view of the same data in memory.
See Also
--------
numpy.ndarray.view : Equivalent numpy function to create a new view of
the same data in memory.
Notes
-----
Series are instantiated with ``dtype=float64`` by default. While
``numpy.ndarray.view()`` will return a view with the same data type as
the original array, ``Series.view()`` (without specified dtype)
will try using ``float64`` and may fail if the original data type size
in bytes is not the same.
Examples
--------
>>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8')
>>> s
0 -2
1 -1
2 0
3 1
4 2
dtype: int8
The 8 bit signed integer representation of `-1` is `0b11111111`, but
the same bytes represent 255 if read as an 8 bit unsigned integer:
>>> us = s.view('uint8')
>>> us
0 254
1 255
2 0
3 1
4 2
dtype: uint8
The views share the same underlying values:
>>> us[0] = 128
>>> s
0 -128
1 -1
2 0
3 1
4 2
dtype: int8
"""
return self._constructor(self._values.view(dtype), index=self.index).__finalize__(self) |
def init_from_acceptor(self, acceptor):
"""
Adds a sink state
Args:
alphabet (list): The input alphabet
Returns:
None
"""
states = sorted(
acceptor.states,
key=attrgetter('initial'),
reverse=True)
for state in states:
for arc in state.arcs:
itext = acceptor.isyms.find(arc.ilabel)
if itext in self.alphabet:
self.add_arc(state.stateid, arc.nextstate, itext)
if state.final:
self[state.stateid].final = True
if state.initial:
self[state.stateid].initial = True | def function[init_from_acceptor, parameter[self, acceptor]]:
constant[
Adds a sink state
Args:
alphabet (list): The input alphabet
Returns:
None
]
variable[states] assign[=] call[name[sorted], parameter[name[acceptor].states]]
for taget[name[state]] in starred[name[states]] begin[:]
for taget[name[arc]] in starred[name[state].arcs] begin[:]
variable[itext] assign[=] call[name[acceptor].isyms.find, parameter[name[arc].ilabel]]
if compare[name[itext] in name[self].alphabet] begin[:]
call[name[self].add_arc, parameter[name[state].stateid, name[arc].nextstate, name[itext]]]
if name[state].final begin[:]
call[name[self]][name[state].stateid].final assign[=] constant[True]
if name[state].initial begin[:]
call[name[self]][name[state].stateid].initial assign[=] constant[True] | keyword[def] identifier[init_from_acceptor] ( identifier[self] , identifier[acceptor] ):
literal[string]
identifier[states] = identifier[sorted] (
identifier[acceptor] . identifier[states] ,
identifier[key] = identifier[attrgetter] ( literal[string] ),
identifier[reverse] = keyword[True] )
keyword[for] identifier[state] keyword[in] identifier[states] :
keyword[for] identifier[arc] keyword[in] identifier[state] . identifier[arcs] :
identifier[itext] = identifier[acceptor] . identifier[isyms] . identifier[find] ( identifier[arc] . identifier[ilabel] )
keyword[if] identifier[itext] keyword[in] identifier[self] . identifier[alphabet] :
identifier[self] . identifier[add_arc] ( identifier[state] . identifier[stateid] , identifier[arc] . identifier[nextstate] , identifier[itext] )
keyword[if] identifier[state] . identifier[final] :
identifier[self] [ identifier[state] . identifier[stateid] ]. identifier[final] = keyword[True]
keyword[if] identifier[state] . identifier[initial] :
identifier[self] [ identifier[state] . identifier[stateid] ]. identifier[initial] = keyword[True] | def init_from_acceptor(self, acceptor):
"""
Adds a sink state
Args:
alphabet (list): The input alphabet
Returns:
None
"""
states = sorted(acceptor.states, key=attrgetter('initial'), reverse=True)
for state in states:
for arc in state.arcs:
itext = acceptor.isyms.find(arc.ilabel)
if itext in self.alphabet:
self.add_arc(state.stateid, arc.nextstate, itext) # depends on [control=['if'], data=['itext']] # depends on [control=['for'], data=['arc']]
if state.final:
self[state.stateid].final = True # depends on [control=['if'], data=[]]
if state.initial:
self[state.stateid].initial = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['state']] |
def tokenize(self, config):
"""
Break the config into a series of tokens
"""
tokens = []
reg_ex = re.compile(self.TOKENS[0], re.M | re.I)
for token in re.finditer(reg_ex, config):
value = token.group(0)
if token.group("operator"):
t_type = "operator"
elif token.group("literal"):
t_type = "literal"
elif token.group("newline"):
t_type = "newline"
elif token.group("function"):
t_type = "function"
elif token.group("unknown"):
t_type = "unknown"
else:
continue
tokens.append(
{"type": t_type, "value": value, "match": token, "start": token.start()}
)
self.tokens = tokens | def function[tokenize, parameter[self, config]]:
constant[
Break the config into a series of tokens
]
variable[tokens] assign[=] list[[]]
variable[reg_ex] assign[=] call[name[re].compile, parameter[call[name[self].TOKENS][constant[0]], binary_operation[name[re].M <ast.BitOr object at 0x7da2590d6aa0> name[re].I]]]
for taget[name[token]] in starred[call[name[re].finditer, parameter[name[reg_ex], name[config]]]] begin[:]
variable[value] assign[=] call[name[token].group, parameter[constant[0]]]
if call[name[token].group, parameter[constant[operator]]] begin[:]
variable[t_type] assign[=] constant[operator]
call[name[tokens].append, parameter[dictionary[[<ast.Constant object at 0x7da18f58ee60>, <ast.Constant object at 0x7da18f58f7f0>, <ast.Constant object at 0x7da18f58ceb0>, <ast.Constant object at 0x7da18f58ea10>], [<ast.Name object at 0x7da18f58fe80>, <ast.Name object at 0x7da18f58c910>, <ast.Name object at 0x7da18f58d9f0>, <ast.Call object at 0x7da18f58f040>]]]]
name[self].tokens assign[=] name[tokens] | keyword[def] identifier[tokenize] ( identifier[self] , identifier[config] ):
literal[string]
identifier[tokens] =[]
identifier[reg_ex] = identifier[re] . identifier[compile] ( identifier[self] . identifier[TOKENS] [ literal[int] ], identifier[re] . identifier[M] | identifier[re] . identifier[I] )
keyword[for] identifier[token] keyword[in] identifier[re] . identifier[finditer] ( identifier[reg_ex] , identifier[config] ):
identifier[value] = identifier[token] . identifier[group] ( literal[int] )
keyword[if] identifier[token] . identifier[group] ( literal[string] ):
identifier[t_type] = literal[string]
keyword[elif] identifier[token] . identifier[group] ( literal[string] ):
identifier[t_type] = literal[string]
keyword[elif] identifier[token] . identifier[group] ( literal[string] ):
identifier[t_type] = literal[string]
keyword[elif] identifier[token] . identifier[group] ( literal[string] ):
identifier[t_type] = literal[string]
keyword[elif] identifier[token] . identifier[group] ( literal[string] ):
identifier[t_type] = literal[string]
keyword[else] :
keyword[continue]
identifier[tokens] . identifier[append] (
{ literal[string] : identifier[t_type] , literal[string] : identifier[value] , literal[string] : identifier[token] , literal[string] : identifier[token] . identifier[start] ()}
)
identifier[self] . identifier[tokens] = identifier[tokens] | def tokenize(self, config):
"""
Break the config into a series of tokens
"""
tokens = []
reg_ex = re.compile(self.TOKENS[0], re.M | re.I)
for token in re.finditer(reg_ex, config):
value = token.group(0)
if token.group('operator'):
t_type = 'operator' # depends on [control=['if'], data=[]]
elif token.group('literal'):
t_type = 'literal' # depends on [control=['if'], data=[]]
elif token.group('newline'):
t_type = 'newline' # depends on [control=['if'], data=[]]
elif token.group('function'):
t_type = 'function' # depends on [control=['if'], data=[]]
elif token.group('unknown'):
t_type = 'unknown' # depends on [control=['if'], data=[]]
else:
continue
tokens.append({'type': t_type, 'value': value, 'match': token, 'start': token.start()}) # depends on [control=['for'], data=['token']]
self.tokens = tokens |
def isSAFESEHEnabled(self):
"""
Determines if the current L{PE} instance has the SAFESEH (Image has Safe Exception Handlers) flag enabled.
@see: U{http://msdn.microsoft.com/en-us/library/9a89h429.aspx}
@rtype: bool
@return: Returns C{True} if the current L{PE} instance has the SAFESEH flag enabled. Returns C{False} if SAFESEH is off or -1 if SAFESEH is set to NO.
"""
NOSEH = -1
SAFESEH_OFF = 0
SAFESEH_ON = 1
if self.ntHeaders.optionalHeader.dllCharacteristics.value & consts.IMAGE_DLL_CHARACTERISTICS_NO_SEH:
return NOSEH
loadConfigDir = self.ntHeaders.optionalHeader.dataDirectory[consts.CONFIGURATION_DIRECTORY]
if loadConfigDir.info:
if loadConfigDir.info.SEHandlerTable.value:
return SAFESEH_ON
return SAFESEH_OFF | def function[isSAFESEHEnabled, parameter[self]]:
constant[
Determines if the current L{PE} instance has the SAFESEH (Image has Safe Exception Handlers) flag enabled.
@see: U{http://msdn.microsoft.com/en-us/library/9a89h429.aspx}
@rtype: bool
@return: Returns C{True} if the current L{PE} instance has the SAFESEH flag enabled. Returns C{False} if SAFESEH is off or -1 if SAFESEH is set to NO.
]
variable[NOSEH] assign[=] <ast.UnaryOp object at 0x7da1b0f2e650>
variable[SAFESEH_OFF] assign[=] constant[0]
variable[SAFESEH_ON] assign[=] constant[1]
if binary_operation[name[self].ntHeaders.optionalHeader.dllCharacteristics.value <ast.BitAnd object at 0x7da2590d6b60> name[consts].IMAGE_DLL_CHARACTERISTICS_NO_SEH] begin[:]
return[name[NOSEH]]
variable[loadConfigDir] assign[=] call[name[self].ntHeaders.optionalHeader.dataDirectory][name[consts].CONFIGURATION_DIRECTORY]
if name[loadConfigDir].info begin[:]
if name[loadConfigDir].info.SEHandlerTable.value begin[:]
return[name[SAFESEH_ON]]
return[name[SAFESEH_OFF]] | keyword[def] identifier[isSAFESEHEnabled] ( identifier[self] ):
literal[string]
identifier[NOSEH] =- literal[int]
identifier[SAFESEH_OFF] = literal[int]
identifier[SAFESEH_ON] = literal[int]
keyword[if] identifier[self] . identifier[ntHeaders] . identifier[optionalHeader] . identifier[dllCharacteristics] . identifier[value] & identifier[consts] . identifier[IMAGE_DLL_CHARACTERISTICS_NO_SEH] :
keyword[return] identifier[NOSEH]
identifier[loadConfigDir] = identifier[self] . identifier[ntHeaders] . identifier[optionalHeader] . identifier[dataDirectory] [ identifier[consts] . identifier[CONFIGURATION_DIRECTORY] ]
keyword[if] identifier[loadConfigDir] . identifier[info] :
keyword[if] identifier[loadConfigDir] . identifier[info] . identifier[SEHandlerTable] . identifier[value] :
keyword[return] identifier[SAFESEH_ON]
keyword[return] identifier[SAFESEH_OFF] | def isSAFESEHEnabled(self):
"""
Determines if the current L{PE} instance has the SAFESEH (Image has Safe Exception Handlers) flag enabled.
@see: U{http://msdn.microsoft.com/en-us/library/9a89h429.aspx}
@rtype: bool
@return: Returns C{True} if the current L{PE} instance has the SAFESEH flag enabled. Returns C{False} if SAFESEH is off or -1 if SAFESEH is set to NO.
"""
NOSEH = -1
SAFESEH_OFF = 0
SAFESEH_ON = 1
if self.ntHeaders.optionalHeader.dllCharacteristics.value & consts.IMAGE_DLL_CHARACTERISTICS_NO_SEH:
return NOSEH # depends on [control=['if'], data=[]]
loadConfigDir = self.ntHeaders.optionalHeader.dataDirectory[consts.CONFIGURATION_DIRECTORY]
if loadConfigDir.info:
if loadConfigDir.info.SEHandlerTable.value:
return SAFESEH_ON # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return SAFESEH_OFF |
def dump_options_header(header, options):
"""The reverse function to :func:`parse_options_header`.
:param header: the header to dump
:param options: a dict of options to append.
"""
segments = []
if header is not None:
segments.append(header)
for key, value in iteritems(options):
if value is None:
segments.append(key)
else:
segments.append("%s=%s" % (key, quote_header_value(value)))
return "; ".join(segments) | def function[dump_options_header, parameter[header, options]]:
constant[The reverse function to :func:`parse_options_header`.
:param header: the header to dump
:param options: a dict of options to append.
]
variable[segments] assign[=] list[[]]
if compare[name[header] is_not constant[None]] begin[:]
call[name[segments].append, parameter[name[header]]]
for taget[tuple[[<ast.Name object at 0x7da20e9b2b30>, <ast.Name object at 0x7da20e9b3ee0>]]] in starred[call[name[iteritems], parameter[name[options]]]] begin[:]
if compare[name[value] is constant[None]] begin[:]
call[name[segments].append, parameter[name[key]]]
return[call[constant[; ].join, parameter[name[segments]]]] | keyword[def] identifier[dump_options_header] ( identifier[header] , identifier[options] ):
literal[string]
identifier[segments] =[]
keyword[if] identifier[header] keyword[is] keyword[not] keyword[None] :
identifier[segments] . identifier[append] ( identifier[header] )
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[iteritems] ( identifier[options] ):
keyword[if] identifier[value] keyword[is] keyword[None] :
identifier[segments] . identifier[append] ( identifier[key] )
keyword[else] :
identifier[segments] . identifier[append] ( literal[string] %( identifier[key] , identifier[quote_header_value] ( identifier[value] )))
keyword[return] literal[string] . identifier[join] ( identifier[segments] ) | def dump_options_header(header, options):
"""The reverse function to :func:`parse_options_header`.
:param header: the header to dump
:param options: a dict of options to append.
"""
segments = []
if header is not None:
segments.append(header) # depends on [control=['if'], data=['header']]
for (key, value) in iteritems(options):
if value is None:
segments.append(key) # depends on [control=['if'], data=[]]
else:
segments.append('%s=%s' % (key, quote_header_value(value))) # depends on [control=['for'], data=[]]
return '; '.join(segments) |
def _define_helper(flag_name, default_value, docstring, flagtype, required):
"""Registers 'flag_name' with 'default_value' and 'docstring'."""
option_name = flag_name if required else "--%s" % flag_name
get_context_parser().add_argument(
option_name, default=default_value, help=docstring, type=flagtype) | def function[_define_helper, parameter[flag_name, default_value, docstring, flagtype, required]]:
constant[Registers 'flag_name' with 'default_value' and 'docstring'.]
variable[option_name] assign[=] <ast.IfExp object at 0x7da18fe93cd0>
call[call[name[get_context_parser], parameter[]].add_argument, parameter[name[option_name]]] | keyword[def] identifier[_define_helper] ( identifier[flag_name] , identifier[default_value] , identifier[docstring] , identifier[flagtype] , identifier[required] ):
literal[string]
identifier[option_name] = identifier[flag_name] keyword[if] identifier[required] keyword[else] literal[string] % identifier[flag_name]
identifier[get_context_parser] (). identifier[add_argument] (
identifier[option_name] , identifier[default] = identifier[default_value] , identifier[help] = identifier[docstring] , identifier[type] = identifier[flagtype] ) | def _define_helper(flag_name, default_value, docstring, flagtype, required):
"""Registers 'flag_name' with 'default_value' and 'docstring'."""
option_name = flag_name if required else '--%s' % flag_name
get_context_parser().add_argument(option_name, default=default_value, help=docstring, type=flagtype) |
def ridge_regression(X, Y, c1=0.0, c2=0.0, offset=None, ix=None):
"""
Also known as Tikhonov regularization. This solves the minimization problem:
min_{beta} ||(beta X - Y)||^2 + c1||beta||^2 + c2||beta - offset||^2
One can find more information here: http://en.wikipedia.org/wiki/Tikhonov_regularization
Parameters
----------
X: a (n,d) numpy array
Y: a (n,) numpy array
c1: float
c2: float
offset: a (d,) numpy array.
ix: a boolean array of index to slice.
Returns
-------
beta_hat: numpy array
the solution to the minimization problem. V = (X*X^T + (c1+c2)I)^{-1} X^T
"""
_, d = X.shape
if c1 > 0 or c2 > 0:
penalizer_matrix = (c1 + c2) * np.eye(d)
A = np.dot(X.T, X) + penalizer_matrix
else:
A = np.dot(X.T, X)
if offset is None or c2 == 0:
b = np.dot(X.T, Y)
else:
b = np.dot(X.T, Y) + c2 * offset
if ix is not None:
M = np.c_[X.T[:, ix], b]
else:
M = np.c_[X.T, b]
R = solve(A, M, assume_a="pos", check_finite=False)
return R[:, -1], R[:, :-1] | def function[ridge_regression, parameter[X, Y, c1, c2, offset, ix]]:
constant[
Also known as Tikhonov regularization. This solves the minimization problem:
min_{beta} ||(beta X - Y)||^2 + c1||beta||^2 + c2||beta - offset||^2
One can find more information here: http://en.wikipedia.org/wiki/Tikhonov_regularization
Parameters
----------
X: a (n,d) numpy array
Y: a (n,) numpy array
c1: float
c2: float
offset: a (d,) numpy array.
ix: a boolean array of index to slice.
Returns
-------
beta_hat: numpy array
the solution to the minimization problem. V = (X*X^T + (c1+c2)I)^{-1} X^T
]
<ast.Tuple object at 0x7da20c7ca590> assign[=] name[X].shape
if <ast.BoolOp object at 0x7da20c7cbb20> begin[:]
variable[penalizer_matrix] assign[=] binary_operation[binary_operation[name[c1] + name[c2]] * call[name[np].eye, parameter[name[d]]]]
variable[A] assign[=] binary_operation[call[name[np].dot, parameter[name[X].T, name[X]]] + name[penalizer_matrix]]
if <ast.BoolOp object at 0x7da20c7cba30> begin[:]
variable[b] assign[=] call[name[np].dot, parameter[name[X].T, name[Y]]]
if compare[name[ix] is_not constant[None]] begin[:]
variable[M] assign[=] call[name[np].c_][tuple[[<ast.Subscript object at 0x7da20c7c8580>, <ast.Name object at 0x7da20c7cb370>]]]
variable[R] assign[=] call[name[solve], parameter[name[A], name[M]]]
return[tuple[[<ast.Subscript object at 0x7da20c6e5840>, <ast.Subscript object at 0x7da20c6e7640>]]] | keyword[def] identifier[ridge_regression] ( identifier[X] , identifier[Y] , identifier[c1] = literal[int] , identifier[c2] = literal[int] , identifier[offset] = keyword[None] , identifier[ix] = keyword[None] ):
literal[string]
identifier[_] , identifier[d] = identifier[X] . identifier[shape]
keyword[if] identifier[c1] > literal[int] keyword[or] identifier[c2] > literal[int] :
identifier[penalizer_matrix] =( identifier[c1] + identifier[c2] )* identifier[np] . identifier[eye] ( identifier[d] )
identifier[A] = identifier[np] . identifier[dot] ( identifier[X] . identifier[T] , identifier[X] )+ identifier[penalizer_matrix]
keyword[else] :
identifier[A] = identifier[np] . identifier[dot] ( identifier[X] . identifier[T] , identifier[X] )
keyword[if] identifier[offset] keyword[is] keyword[None] keyword[or] identifier[c2] == literal[int] :
identifier[b] = identifier[np] . identifier[dot] ( identifier[X] . identifier[T] , identifier[Y] )
keyword[else] :
identifier[b] = identifier[np] . identifier[dot] ( identifier[X] . identifier[T] , identifier[Y] )+ identifier[c2] * identifier[offset]
keyword[if] identifier[ix] keyword[is] keyword[not] keyword[None] :
identifier[M] = identifier[np] . identifier[c_] [ identifier[X] . identifier[T] [:, identifier[ix] ], identifier[b] ]
keyword[else] :
identifier[M] = identifier[np] . identifier[c_] [ identifier[X] . identifier[T] , identifier[b] ]
identifier[R] = identifier[solve] ( identifier[A] , identifier[M] , identifier[assume_a] = literal[string] , identifier[check_finite] = keyword[False] )
keyword[return] identifier[R] [:,- literal[int] ], identifier[R] [:,:- literal[int] ] | def ridge_regression(X, Y, c1=0.0, c2=0.0, offset=None, ix=None):
"""
Also known as Tikhonov regularization. This solves the minimization problem:
min_{beta} ||(beta X - Y)||^2 + c1||beta||^2 + c2||beta - offset||^2
One can find more information here: http://en.wikipedia.org/wiki/Tikhonov_regularization
Parameters
----------
X: a (n,d) numpy array
Y: a (n,) numpy array
c1: float
c2: float
offset: a (d,) numpy array.
ix: a boolean array of index to slice.
Returns
-------
beta_hat: numpy array
the solution to the minimization problem. V = (X*X^T + (c1+c2)I)^{-1} X^T
"""
(_, d) = X.shape
if c1 > 0 or c2 > 0:
penalizer_matrix = (c1 + c2) * np.eye(d)
A = np.dot(X.T, X) + penalizer_matrix # depends on [control=['if'], data=[]]
else:
A = np.dot(X.T, X)
if offset is None or c2 == 0:
b = np.dot(X.T, Y) # depends on [control=['if'], data=[]]
else:
b = np.dot(X.T, Y) + c2 * offset
if ix is not None:
M = np.c_[X.T[:, ix], b] # depends on [control=['if'], data=['ix']]
else:
M = np.c_[X.T, b]
R = solve(A, M, assume_a='pos', check_finite=False)
return (R[:, -1], R[:, :-1]) |
def archive_wheelfile(base_name, base_dir):
"""Archive all files under `base_dir` in a whl file and name it like
`base_name`.
"""
olddir = os.path.abspath(os.curdir)
base_name = os.path.abspath(base_name)
try:
os.chdir(base_dir)
return make_wheelfile_inner(base_name)
finally:
os.chdir(olddir) | def function[archive_wheelfile, parameter[base_name, base_dir]]:
constant[Archive all files under `base_dir` in a whl file and name it like
`base_name`.
]
variable[olddir] assign[=] call[name[os].path.abspath, parameter[name[os].curdir]]
variable[base_name] assign[=] call[name[os].path.abspath, parameter[name[base_name]]]
<ast.Try object at 0x7da18fe93940> | keyword[def] identifier[archive_wheelfile] ( identifier[base_name] , identifier[base_dir] ):
literal[string]
identifier[olddir] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[curdir] )
identifier[base_name] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[base_name] )
keyword[try] :
identifier[os] . identifier[chdir] ( identifier[base_dir] )
keyword[return] identifier[make_wheelfile_inner] ( identifier[base_name] )
keyword[finally] :
identifier[os] . identifier[chdir] ( identifier[olddir] ) | def archive_wheelfile(base_name, base_dir):
"""Archive all files under `base_dir` in a whl file and name it like
`base_name`.
"""
olddir = os.path.abspath(os.curdir)
base_name = os.path.abspath(base_name)
try:
os.chdir(base_dir)
return make_wheelfile_inner(base_name) # depends on [control=['try'], data=[]]
finally:
os.chdir(olddir) |
def cci(self, n, array=False):
"""CCI指标"""
result = talib.CCI(self.high, self.low, self.close, n)
if array:
return result
return result[-1] | def function[cci, parameter[self, n, array]]:
constant[CCI指标]
variable[result] assign[=] call[name[talib].CCI, parameter[name[self].high, name[self].low, name[self].close, name[n]]]
if name[array] begin[:]
return[name[result]]
return[call[name[result]][<ast.UnaryOp object at 0x7da18dc048b0>]] | keyword[def] identifier[cci] ( identifier[self] , identifier[n] , identifier[array] = keyword[False] ):
literal[string]
identifier[result] = identifier[talib] . identifier[CCI] ( identifier[self] . identifier[high] , identifier[self] . identifier[low] , identifier[self] . identifier[close] , identifier[n] )
keyword[if] identifier[array] :
keyword[return] identifier[result]
keyword[return] identifier[result] [- literal[int] ] | def cci(self, n, array=False):
"""CCI指标"""
result = talib.CCI(self.high, self.low, self.close, n)
if array:
return result # depends on [control=['if'], data=[]]
return result[-1] |
def basic_map(proj):
"""Make our basic default map for plotting"""
fig = plt.figure(figsize=(15, 10))
add_metpy_logo(fig, 0, 80, size='large')
view = fig.add_axes([0, 0, 1, 1], projection=proj)
view.set_extent([-120, -70, 20, 50])
view.add_feature(cfeature.STATES.with_scale('50m'))
view.add_feature(cfeature.OCEAN)
view.add_feature(cfeature.COASTLINE)
view.add_feature(cfeature.BORDERS, linestyle=':')
return fig, view | def function[basic_map, parameter[proj]]:
constant[Make our basic default map for plotting]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
call[name[add_metpy_logo], parameter[name[fig], constant[0], constant[80]]]
variable[view] assign[=] call[name[fig].add_axes, parameter[list[[<ast.Constant object at 0x7da1b1d37730>, <ast.Constant object at 0x7da1b1d37df0>, <ast.Constant object at 0x7da1b1d37c10>, <ast.Constant object at 0x7da1b1d36740>]]]]
call[name[view].set_extent, parameter[list[[<ast.UnaryOp object at 0x7da1b1d34580>, <ast.UnaryOp object at 0x7da1b1d36ef0>, <ast.Constant object at 0x7da1b1d35450>, <ast.Constant object at 0x7da1b1d35570>]]]]
call[name[view].add_feature, parameter[call[name[cfeature].STATES.with_scale, parameter[constant[50m]]]]]
call[name[view].add_feature, parameter[name[cfeature].OCEAN]]
call[name[view].add_feature, parameter[name[cfeature].COASTLINE]]
call[name[view].add_feature, parameter[name[cfeature].BORDERS]]
return[tuple[[<ast.Name object at 0x7da1b2295030>, <ast.Name object at 0x7da1b2294f70>]]] | keyword[def] identifier[basic_map] ( identifier[proj] ):
literal[string]
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] , literal[int] ))
identifier[add_metpy_logo] ( identifier[fig] , literal[int] , literal[int] , identifier[size] = literal[string] )
identifier[view] = identifier[fig] . identifier[add_axes] ([ literal[int] , literal[int] , literal[int] , literal[int] ], identifier[projection] = identifier[proj] )
identifier[view] . identifier[set_extent] ([- literal[int] ,- literal[int] , literal[int] , literal[int] ])
identifier[view] . identifier[add_feature] ( identifier[cfeature] . identifier[STATES] . identifier[with_scale] ( literal[string] ))
identifier[view] . identifier[add_feature] ( identifier[cfeature] . identifier[OCEAN] )
identifier[view] . identifier[add_feature] ( identifier[cfeature] . identifier[COASTLINE] )
identifier[view] . identifier[add_feature] ( identifier[cfeature] . identifier[BORDERS] , identifier[linestyle] = literal[string] )
keyword[return] identifier[fig] , identifier[view] | def basic_map(proj):
"""Make our basic default map for plotting"""
fig = plt.figure(figsize=(15, 10))
add_metpy_logo(fig, 0, 80, size='large')
view = fig.add_axes([0, 0, 1, 1], projection=proj)
view.set_extent([-120, -70, 20, 50])
view.add_feature(cfeature.STATES.with_scale('50m'))
view.add_feature(cfeature.OCEAN)
view.add_feature(cfeature.COASTLINE)
view.add_feature(cfeature.BORDERS, linestyle=':')
return (fig, view) |
def _validate_signal(self, sig):
"""Internal helper to validate a signal.
Raise ValueError if the signal number is invalid or uncatchable.
Raise RuntimeError if there is a problem setting up the handler.
"""
if not isinstance(sig, int):
raise TypeError('sig must be an int, not {!r}'.format(sig))
if signal is None:
raise RuntimeError('Signals are not supported')
if not (1 <= sig < signal.NSIG):
raise ValueError('sig {} out of range(1, {})'.format(sig, signal.NSIG))
if sys.platform == 'win32':
raise RuntimeError('Signals are not really supported on Windows') | def function[_validate_signal, parameter[self, sig]]:
constant[Internal helper to validate a signal.
Raise ValueError if the signal number is invalid or uncatchable.
Raise RuntimeError if there is a problem setting up the handler.
]
if <ast.UnaryOp object at 0x7da1b14e4be0> begin[:]
<ast.Raise object at 0x7da1b14e77f0>
if compare[name[signal] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b14e5e10>
if <ast.UnaryOp object at 0x7da1b14e56c0> begin[:]
<ast.Raise object at 0x7da1b14e7430>
if compare[name[sys].platform equal[==] constant[win32]] begin[:]
<ast.Raise object at 0x7da1b14e5ed0> | keyword[def] identifier[_validate_signal] ( identifier[self] , identifier[sig] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[sig] , identifier[int] ):
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[sig] ))
keyword[if] identifier[signal] keyword[is] keyword[None] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[if] keyword[not] ( literal[int] <= identifier[sig] < identifier[signal] . identifier[NSIG] ):
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[sig] , identifier[signal] . identifier[NSIG] ))
keyword[if] identifier[sys] . identifier[platform] == literal[string] :
keyword[raise] identifier[RuntimeError] ( literal[string] ) | def _validate_signal(self, sig):
"""Internal helper to validate a signal.
Raise ValueError if the signal number is invalid or uncatchable.
Raise RuntimeError if there is a problem setting up the handler.
"""
if not isinstance(sig, int):
raise TypeError('sig must be an int, not {!r}'.format(sig)) # depends on [control=['if'], data=[]]
if signal is None:
raise RuntimeError('Signals are not supported') # depends on [control=['if'], data=[]]
if not 1 <= sig < signal.NSIG:
raise ValueError('sig {} out of range(1, {})'.format(sig, signal.NSIG)) # depends on [control=['if'], data=[]]
if sys.platform == 'win32':
raise RuntimeError('Signals are not really supported on Windows') # depends on [control=['if'], data=[]] |
def hicpro_contact_chart (self):
""" Generate the HiC-Pro interaction plot """
# Specify the order of the different possible categories
keys = OrderedDict()
keys['cis_shortRange'] = { 'color': '#0039e6', 'name': 'Unique: cis <= 20Kbp' }
keys['cis_longRange'] = { 'color': '#809fff', 'name': 'Unique: cis > 20Kbp' }
keys['trans_interaction'] = { 'color': '#009933', 'name': 'Unique: trans' }
keys['duplicates'] = { 'color': '#a9a2a2', 'name': 'Duplicate read pairs' }
# Config for the plot
config = {
'id': 'hicpro_contact_plot',
'title': 'HiC-Pro: Contact Statistics',
'ylab': '# Pairs',
'cpswitch_counts_label': 'Number of Pairs'
}
return bargraph.plot(self.hicpro_data, keys, config) | def function[hicpro_contact_chart, parameter[self]]:
constant[ Generate the HiC-Pro interaction plot ]
variable[keys] assign[=] call[name[OrderedDict], parameter[]]
call[name[keys]][constant[cis_shortRange]] assign[=] dictionary[[<ast.Constant object at 0x7da207f98e80>, <ast.Constant object at 0x7da207f98640>], [<ast.Constant object at 0x7da207f9aa10>, <ast.Constant object at 0x7da207f987f0>]]
call[name[keys]][constant[cis_longRange]] assign[=] dictionary[[<ast.Constant object at 0x7da207f98580>, <ast.Constant object at 0x7da207f9b040>], [<ast.Constant object at 0x7da207f98d90>, <ast.Constant object at 0x7da207f9a920>]]
call[name[keys]][constant[trans_interaction]] assign[=] dictionary[[<ast.Constant object at 0x7da207f9aad0>, <ast.Constant object at 0x7da207f98880>], [<ast.Constant object at 0x7da207f99030>, <ast.Constant object at 0x7da207f99960>]]
call[name[keys]][constant[duplicates]] assign[=] dictionary[[<ast.Constant object at 0x7da207f9b850>, <ast.Constant object at 0x7da207f9a710>], [<ast.Constant object at 0x7da207f99ed0>, <ast.Constant object at 0x7da207f9a050>]]
variable[config] assign[=] dictionary[[<ast.Constant object at 0x7da207f99a50>, <ast.Constant object at 0x7da207f9a8f0>, <ast.Constant object at 0x7da207f9a1a0>, <ast.Constant object at 0x7da207f9a620>], [<ast.Constant object at 0x7da207f9bf10>, <ast.Constant object at 0x7da207f9ad70>, <ast.Constant object at 0x7da207f9a500>, <ast.Constant object at 0x7da207f982b0>]]
return[call[name[bargraph].plot, parameter[name[self].hicpro_data, name[keys], name[config]]]] | keyword[def] identifier[hicpro_contact_chart] ( identifier[self] ):
literal[string]
identifier[keys] = identifier[OrderedDict] ()
identifier[keys] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] }
identifier[keys] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] }
identifier[keys] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] }
identifier[keys] [ literal[string] ]={ literal[string] : literal[string] , literal[string] : literal[string] }
identifier[config] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}
keyword[return] identifier[bargraph] . identifier[plot] ( identifier[self] . identifier[hicpro_data] , identifier[keys] , identifier[config] ) | def hicpro_contact_chart(self):
""" Generate the HiC-Pro interaction plot """
# Specify the order of the different possible categories
keys = OrderedDict()
keys['cis_shortRange'] = {'color': '#0039e6', 'name': 'Unique: cis <= 20Kbp'}
keys['cis_longRange'] = {'color': '#809fff', 'name': 'Unique: cis > 20Kbp'}
keys['trans_interaction'] = {'color': '#009933', 'name': 'Unique: trans'}
keys['duplicates'] = {'color': '#a9a2a2', 'name': 'Duplicate read pairs'}
# Config for the plot
config = {'id': 'hicpro_contact_plot', 'title': 'HiC-Pro: Contact Statistics', 'ylab': '# Pairs', 'cpswitch_counts_label': 'Number of Pairs'}
return bargraph.plot(self.hicpro_data, keys, config) |
def listified_tokenizer(source):
"""Tokenizes *source* and returns the tokens as a list of lists."""
io_obj = io.StringIO(source)
return [list(a) for a in tokenize.generate_tokens(io_obj.readline)] | def function[listified_tokenizer, parameter[source]]:
constant[Tokenizes *source* and returns the tokens as a list of lists.]
variable[io_obj] assign[=] call[name[io].StringIO, parameter[name[source]]]
return[<ast.ListComp object at 0x7da204566770>] | keyword[def] identifier[listified_tokenizer] ( identifier[source] ):
literal[string]
identifier[io_obj] = identifier[io] . identifier[StringIO] ( identifier[source] )
keyword[return] [ identifier[list] ( identifier[a] ) keyword[for] identifier[a] keyword[in] identifier[tokenize] . identifier[generate_tokens] ( identifier[io_obj] . identifier[readline] )] | def listified_tokenizer(source):
"""Tokenizes *source* and returns the tokens as a list of lists."""
io_obj = io.StringIO(source)
return [list(a) for a in tokenize.generate_tokens(io_obj.readline)] |
def DeleteClientActionRequests(self, requests):
"""Deletes a list of client messages from the db."""
if not requests:
return
to_delete = []
for r in requests:
to_delete.append((r.client_id, r.flow_id, r.request_id))
if len(set(to_delete)) != len(to_delete):
raise ValueError(
"Received multiple copies of the same message to delete.")
self._DeleteClientActionRequest(to_delete) | def function[DeleteClientActionRequests, parameter[self, requests]]:
constant[Deletes a list of client messages from the db.]
if <ast.UnaryOp object at 0x7da1b1b45fc0> begin[:]
return[None]
variable[to_delete] assign[=] list[[]]
for taget[name[r]] in starred[name[requests]] begin[:]
call[name[to_delete].append, parameter[tuple[[<ast.Attribute object at 0x7da1b1b44100>, <ast.Attribute object at 0x7da1b1b45c00>, <ast.Attribute object at 0x7da1b1b46f80>]]]]
if compare[call[name[len], parameter[call[name[set], parameter[name[to_delete]]]]] not_equal[!=] call[name[len], parameter[name[to_delete]]]] begin[:]
<ast.Raise object at 0x7da1b1b450f0>
call[name[self]._DeleteClientActionRequest, parameter[name[to_delete]]] | keyword[def] identifier[DeleteClientActionRequests] ( identifier[self] , identifier[requests] ):
literal[string]
keyword[if] keyword[not] identifier[requests] :
keyword[return]
identifier[to_delete] =[]
keyword[for] identifier[r] keyword[in] identifier[requests] :
identifier[to_delete] . identifier[append] (( identifier[r] . identifier[client_id] , identifier[r] . identifier[flow_id] , identifier[r] . identifier[request_id] ))
keyword[if] identifier[len] ( identifier[set] ( identifier[to_delete] ))!= identifier[len] ( identifier[to_delete] ):
keyword[raise] identifier[ValueError] (
literal[string] )
identifier[self] . identifier[_DeleteClientActionRequest] ( identifier[to_delete] ) | def DeleteClientActionRequests(self, requests):
"""Deletes a list of client messages from the db."""
if not requests:
return # depends on [control=['if'], data=[]]
to_delete = []
for r in requests:
to_delete.append((r.client_id, r.flow_id, r.request_id)) # depends on [control=['for'], data=['r']]
if len(set(to_delete)) != len(to_delete):
raise ValueError('Received multiple copies of the same message to delete.') # depends on [control=['if'], data=[]]
self._DeleteClientActionRequest(to_delete) |
def has_previous_assessment_section(self, assessment_section_id):
"""Tests if there is a previous assessment section in the assessment following the given assessment section ``Id``.
arg: assessment_section_id (osid.id.Id): ``Id`` of the
``AssessmentSection``
return: (boolean) - ``true`` if there is a previous assessment
section, ``false`` otherwise
raise: IllegalState - ``has_assessment_begun()`` is ``false``
raise: NotFound - ``assessment_section_id`` is not found
raise: NullArgument - ``assessment_section_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
try:
self.get_previous_assessment_section(assessment_section_id)
except errors.IllegalState:
return False
else:
return True | def function[has_previous_assessment_section, parameter[self, assessment_section_id]]:
constant[Tests if there is a previous assessment section in the assessment following the given assessment section ``Id``.
arg: assessment_section_id (osid.id.Id): ``Id`` of the
``AssessmentSection``
return: (boolean) - ``true`` if there is a previous assessment
section, ``false`` otherwise
raise: IllegalState - ``has_assessment_begun()`` is ``false``
raise: NotFound - ``assessment_section_id`` is not found
raise: NullArgument - ``assessment_section_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
]
<ast.Try object at 0x7da18f09ca00> | keyword[def] identifier[has_previous_assessment_section] ( identifier[self] , identifier[assessment_section_id] ):
literal[string]
keyword[try] :
identifier[self] . identifier[get_previous_assessment_section] ( identifier[assessment_section_id] )
keyword[except] identifier[errors] . identifier[IllegalState] :
keyword[return] keyword[False]
keyword[else] :
keyword[return] keyword[True] | def has_previous_assessment_section(self, assessment_section_id):
"""Tests if there is a previous assessment section in the assessment following the given assessment section ``Id``.
arg: assessment_section_id (osid.id.Id): ``Id`` of the
``AssessmentSection``
return: (boolean) - ``true`` if there is a previous assessment
section, ``false`` otherwise
raise: IllegalState - ``has_assessment_begun()`` is ``false``
raise: NotFound - ``assessment_section_id`` is not found
raise: NullArgument - ``assessment_section_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.*
"""
try:
self.get_previous_assessment_section(assessment_section_id) # depends on [control=['try'], data=[]]
except errors.IllegalState:
return False # depends on [control=['except'], data=[]]
else:
return True |
def _create_eval_metric(metric_name: str) -> mx.metric.EvalMetric:
"""
Creates an EvalMetric given a metric names.
"""
# output_names refers to the list of outputs this metric should use to update itself, e.g. the softmax output
if metric_name == C.ACCURACY:
return utils.Accuracy(ignore_label=C.PAD_ID, output_names=[C.SOFTMAX_OUTPUT_NAME], label_names=[C.TARGET_LABEL_NAME])
elif metric_name == C.PERPLEXITY:
return mx.metric.Perplexity(ignore_label=C.PAD_ID, output_names=[C.SOFTMAX_OUTPUT_NAME], label_names=[C.TARGET_LABEL_NAME], name=C.PERPLEXITY)
elif metric_name == C.LENRATIO_MSE:
return loss.LengthRatioMSEMetric(name=C.LENRATIO_MSE,
output_names=[C.LENRATIO_OUTPUT_NAME], label_names=[C.LENRATIO_LABEL_OUTPUT_NAME])
else:
raise ValueError("unknown metric name") | def function[_create_eval_metric, parameter[metric_name]]:
constant[
Creates an EvalMetric given a metric names.
]
if compare[name[metric_name] equal[==] name[C].ACCURACY] begin[:]
return[call[name[utils].Accuracy, parameter[]]] | keyword[def] identifier[_create_eval_metric] ( identifier[metric_name] : identifier[str] )-> identifier[mx] . identifier[metric] . identifier[EvalMetric] :
literal[string]
keyword[if] identifier[metric_name] == identifier[C] . identifier[ACCURACY] :
keyword[return] identifier[utils] . identifier[Accuracy] ( identifier[ignore_label] = identifier[C] . identifier[PAD_ID] , identifier[output_names] =[ identifier[C] . identifier[SOFTMAX_OUTPUT_NAME] ], identifier[label_names] =[ identifier[C] . identifier[TARGET_LABEL_NAME] ])
keyword[elif] identifier[metric_name] == identifier[C] . identifier[PERPLEXITY] :
keyword[return] identifier[mx] . identifier[metric] . identifier[Perplexity] ( identifier[ignore_label] = identifier[C] . identifier[PAD_ID] , identifier[output_names] =[ identifier[C] . identifier[SOFTMAX_OUTPUT_NAME] ], identifier[label_names] =[ identifier[C] . identifier[TARGET_LABEL_NAME] ], identifier[name] = identifier[C] . identifier[PERPLEXITY] )
keyword[elif] identifier[metric_name] == identifier[C] . identifier[LENRATIO_MSE] :
keyword[return] identifier[loss] . identifier[LengthRatioMSEMetric] ( identifier[name] = identifier[C] . identifier[LENRATIO_MSE] ,
identifier[output_names] =[ identifier[C] . identifier[LENRATIO_OUTPUT_NAME] ], identifier[label_names] =[ identifier[C] . identifier[LENRATIO_LABEL_OUTPUT_NAME] ])
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] ) | def _create_eval_metric(metric_name: str) -> mx.metric.EvalMetric:
"""
Creates an EvalMetric given a metric names.
"""
# output_names refers to the list of outputs this metric should use to update itself, e.g. the softmax output
if metric_name == C.ACCURACY:
return utils.Accuracy(ignore_label=C.PAD_ID, output_names=[C.SOFTMAX_OUTPUT_NAME], label_names=[C.TARGET_LABEL_NAME]) # depends on [control=['if'], data=[]]
elif metric_name == C.PERPLEXITY:
return mx.metric.Perplexity(ignore_label=C.PAD_ID, output_names=[C.SOFTMAX_OUTPUT_NAME], label_names=[C.TARGET_LABEL_NAME], name=C.PERPLEXITY) # depends on [control=['if'], data=[]]
elif metric_name == C.LENRATIO_MSE:
return loss.LengthRatioMSEMetric(name=C.LENRATIO_MSE, output_names=[C.LENRATIO_OUTPUT_NAME], label_names=[C.LENRATIO_LABEL_OUTPUT_NAME]) # depends on [control=['if'], data=[]]
else:
raise ValueError('unknown metric name') |
def authenticated(func):
"""
Decorator to check if Smappee's access token has expired.
If it has, use the refresh token to request a new access token
"""
@wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
if self.refresh_token is not None and \
self.token_expiration_time <= dt.datetime.utcnow():
self.re_authenticate()
return func(*args, **kwargs)
return wrapper | def function[authenticated, parameter[func]]:
constant[
Decorator to check if Smappee's access token has expired.
If it has, use the refresh token to request a new access token
]
def function[wrapper, parameter[]]:
variable[self] assign[=] call[name[args]][constant[0]]
if <ast.BoolOp object at 0x7da1b10e4160> begin[:]
call[name[self].re_authenticate, parameter[]]
return[call[name[func], parameter[<ast.Starred object at 0x7da1b10e6410>]]]
return[name[wrapper]] | keyword[def] identifier[authenticated] ( identifier[func] ):
literal[string]
@ identifier[wraps] ( identifier[func] )
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
identifier[self] = identifier[args] [ literal[int] ]
keyword[if] identifier[self] . identifier[refresh_token] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[token_expiration_time] <= identifier[dt] . identifier[datetime] . identifier[utcnow] ():
identifier[self] . identifier[re_authenticate] ()
keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapper] | def authenticated(func):
"""
Decorator to check if Smappee's access token has expired.
If it has, use the refresh token to request a new access token
"""
@wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
if self.refresh_token is not None and self.token_expiration_time <= dt.datetime.utcnow():
self.re_authenticate() # depends on [control=['if'], data=[]]
return func(*args, **kwargs)
return wrapper |
def main(argv=None):
"""
:param argv: Argument list to parse or None (sys.argv will be set).
"""
args = _parse_args((argv if argv else sys.argv)[1:])
cnf = os.environ.copy() if args.env else {}
extra_opts = dict()
if args.extra_opts:
extra_opts = anyconfig.parser.parse(args.extra_opts)
diff = _load_diff(args, extra_opts)
if cnf:
API.merge(cnf, diff)
else:
cnf = diff
if args.args:
diff = anyconfig.parser.parse(args.args)
API.merge(cnf, diff)
if args.validate:
_exit_with_output("Validation succeds")
cnf = API.gen_schema(cnf) if args.gen_schema else _do_filter(cnf, args)
_output_result(cnf, args.output, args.otype, args.inputs, args.itype,
extra_opts=extra_opts) | def function[main, parameter[argv]]:
constant[
:param argv: Argument list to parse or None (sys.argv will be set).
]
variable[args] assign[=] call[name[_parse_args], parameter[call[<ast.IfExp object at 0x7da204345000>][<ast.Slice object at 0x7da204347b80>]]]
variable[cnf] assign[=] <ast.IfExp object at 0x7da204345270>
variable[extra_opts] assign[=] call[name[dict], parameter[]]
if name[args].extra_opts begin[:]
variable[extra_opts] assign[=] call[name[anyconfig].parser.parse, parameter[name[args].extra_opts]]
variable[diff] assign[=] call[name[_load_diff], parameter[name[args], name[extra_opts]]]
if name[cnf] begin[:]
call[name[API].merge, parameter[name[cnf], name[diff]]]
if name[args].args begin[:]
variable[diff] assign[=] call[name[anyconfig].parser.parse, parameter[name[args].args]]
call[name[API].merge, parameter[name[cnf], name[diff]]]
if name[args].validate begin[:]
call[name[_exit_with_output], parameter[constant[Validation succeds]]]
variable[cnf] assign[=] <ast.IfExp object at 0x7da20c6c65f0>
call[name[_output_result], parameter[name[cnf], name[args].output, name[args].otype, name[args].inputs, name[args].itype]] | keyword[def] identifier[main] ( identifier[argv] = keyword[None] ):
literal[string]
identifier[args] = identifier[_parse_args] (( identifier[argv] keyword[if] identifier[argv] keyword[else] identifier[sys] . identifier[argv] )[ literal[int] :])
identifier[cnf] = identifier[os] . identifier[environ] . identifier[copy] () keyword[if] identifier[args] . identifier[env] keyword[else] {}
identifier[extra_opts] = identifier[dict] ()
keyword[if] identifier[args] . identifier[extra_opts] :
identifier[extra_opts] = identifier[anyconfig] . identifier[parser] . identifier[parse] ( identifier[args] . identifier[extra_opts] )
identifier[diff] = identifier[_load_diff] ( identifier[args] , identifier[extra_opts] )
keyword[if] identifier[cnf] :
identifier[API] . identifier[merge] ( identifier[cnf] , identifier[diff] )
keyword[else] :
identifier[cnf] = identifier[diff]
keyword[if] identifier[args] . identifier[args] :
identifier[diff] = identifier[anyconfig] . identifier[parser] . identifier[parse] ( identifier[args] . identifier[args] )
identifier[API] . identifier[merge] ( identifier[cnf] , identifier[diff] )
keyword[if] identifier[args] . identifier[validate] :
identifier[_exit_with_output] ( literal[string] )
identifier[cnf] = identifier[API] . identifier[gen_schema] ( identifier[cnf] ) keyword[if] identifier[args] . identifier[gen_schema] keyword[else] identifier[_do_filter] ( identifier[cnf] , identifier[args] )
identifier[_output_result] ( identifier[cnf] , identifier[args] . identifier[output] , identifier[args] . identifier[otype] , identifier[args] . identifier[inputs] , identifier[args] . identifier[itype] ,
identifier[extra_opts] = identifier[extra_opts] ) | def main(argv=None):
"""
:param argv: Argument list to parse or None (sys.argv will be set).
"""
args = _parse_args((argv if argv else sys.argv)[1:])
cnf = os.environ.copy() if args.env else {}
extra_opts = dict()
if args.extra_opts:
extra_opts = anyconfig.parser.parse(args.extra_opts) # depends on [control=['if'], data=[]]
diff = _load_diff(args, extra_opts)
if cnf:
API.merge(cnf, diff) # depends on [control=['if'], data=[]]
else:
cnf = diff
if args.args:
diff = anyconfig.parser.parse(args.args)
API.merge(cnf, diff) # depends on [control=['if'], data=[]]
if args.validate:
_exit_with_output('Validation succeds') # depends on [control=['if'], data=[]]
cnf = API.gen_schema(cnf) if args.gen_schema else _do_filter(cnf, args)
_output_result(cnf, args.output, args.otype, args.inputs, args.itype, extra_opts=extra_opts) |
def _decrypt_object(obj, translate_newlines=False):
'''
Recursively try to decrypt any object. If the object is a six.string_types
(string or unicode), and it contains a valid GPG header, decrypt it,
otherwise keep going until a string is found.
'''
if salt.utils.stringio.is_readable(obj):
return _decrypt_object(obj.getvalue(), translate_newlines)
if isinstance(obj, six.string_types):
return _decrypt_ciphertexts(obj, translate_newlines=translate_newlines)
elif isinstance(obj, dict):
for key, value in six.iteritems(obj):
obj[key] = _decrypt_object(value,
translate_newlines=translate_newlines)
return obj
elif isinstance(obj, list):
for key, value in enumerate(obj):
obj[key] = _decrypt_object(value,
translate_newlines=translate_newlines)
return obj
else:
return obj | def function[_decrypt_object, parameter[obj, translate_newlines]]:
constant[
Recursively try to decrypt any object. If the object is a six.string_types
(string or unicode), and it contains a valid GPG header, decrypt it,
otherwise keep going until a string is found.
]
if call[name[salt].utils.stringio.is_readable, parameter[name[obj]]] begin[:]
return[call[name[_decrypt_object], parameter[call[name[obj].getvalue, parameter[]], name[translate_newlines]]]]
if call[name[isinstance], parameter[name[obj], name[six].string_types]] begin[:]
return[call[name[_decrypt_ciphertexts], parameter[name[obj]]]] | keyword[def] identifier[_decrypt_object] ( identifier[obj] , identifier[translate_newlines] = keyword[False] ):
literal[string]
keyword[if] identifier[salt] . identifier[utils] . identifier[stringio] . identifier[is_readable] ( identifier[obj] ):
keyword[return] identifier[_decrypt_object] ( identifier[obj] . identifier[getvalue] (), identifier[translate_newlines] )
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[six] . identifier[string_types] ):
keyword[return] identifier[_decrypt_ciphertexts] ( identifier[obj] , identifier[translate_newlines] = identifier[translate_newlines] )
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[dict] ):
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[six] . identifier[iteritems] ( identifier[obj] ):
identifier[obj] [ identifier[key] ]= identifier[_decrypt_object] ( identifier[value] ,
identifier[translate_newlines] = identifier[translate_newlines] )
keyword[return] identifier[obj]
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[list] ):
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[enumerate] ( identifier[obj] ):
identifier[obj] [ identifier[key] ]= identifier[_decrypt_object] ( identifier[value] ,
identifier[translate_newlines] = identifier[translate_newlines] )
keyword[return] identifier[obj]
keyword[else] :
keyword[return] identifier[obj] | def _decrypt_object(obj, translate_newlines=False):
"""
Recursively try to decrypt any object. If the object is a six.string_types
(string or unicode), and it contains a valid GPG header, decrypt it,
otherwise keep going until a string is found.
"""
if salt.utils.stringio.is_readable(obj):
return _decrypt_object(obj.getvalue(), translate_newlines) # depends on [control=['if'], data=[]]
if isinstance(obj, six.string_types):
return _decrypt_ciphertexts(obj, translate_newlines=translate_newlines) # depends on [control=['if'], data=[]]
elif isinstance(obj, dict):
for (key, value) in six.iteritems(obj):
obj[key] = _decrypt_object(value, translate_newlines=translate_newlines) # depends on [control=['for'], data=[]]
return obj # depends on [control=['if'], data=[]]
elif isinstance(obj, list):
for (key, value) in enumerate(obj):
obj[key] = _decrypt_object(value, translate_newlines=translate_newlines) # depends on [control=['for'], data=[]]
return obj # depends on [control=['if'], data=[]]
else:
return obj |
def tenant_present(name, description=None, enabled=True, profile=None,
**connection_args):
'''
Ensures that the keystone tenant exists
name
The name of the tenant to manage
description
The description to use for this tenant
enabled
Availability state for this tenant
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Tenant / project "{0}" already exists'.format(name)}
_api_version(profile=profile, **connection_args)
# Check if tenant is already present
tenant = __salt__['keystone.tenant_get'](name=name,
profile=profile,
**connection_args)
if 'Error' not in tenant:
if tenant[name].get('description', None) != description:
if __opts__.get('test'):
ret['result'] = None
ret['comment'] = 'Tenant / project "{0}" will be updated'.format(name)
ret['changes']['Description'] = 'Will be updated'
return ret
__salt__['keystone.tenant_update'](name=name,
description=description,
enabled=enabled,
profile=profile,
**connection_args)
ret['comment'] = 'Tenant / project "{0}" has been updated'.format(name)
ret['changes']['Description'] = 'Updated'
if tenant[name].get('enabled', None) != enabled:
if __opts__.get('test'):
ret['result'] = None
ret['comment'] = 'Tenant / project "{0}" will be updated'.format(name)
ret['changes']['Enabled'] = 'Will be {0}'.format(enabled)
return ret
__salt__['keystone.tenant_update'](name=name,
description=description,
enabled=enabled,
profile=profile,
**connection_args)
ret['comment'] = 'Tenant / project "{0}" has been updated'.format(name)
ret['changes']['Enabled'] = 'Now {0}'.format(enabled)
else:
if __opts__.get('test'):
ret['result'] = None
ret['comment'] = 'Tenant / project "{0}" will be added'.format(name)
ret['changes']['Tenant'] = 'Will be created'
return ret
# Create tenant
if _OS_IDENTITY_API_VERSION > 2:
created = __salt__['keystone.project_create'](name=name, domain='default', description=description,
enabled=enabled, profile=profile, **connection_args)
else:
created = __salt__['keystone.tenant_create'](name=name, description=description, enabled=enabled,
profile=profile, **connection_args)
ret['changes']['Tenant'] = 'Created' if created is True else 'Failed'
ret['result'] = created
ret['comment'] = 'Tenant / project "{0}" has been added'.format(name)
return ret | def function[tenant_present, parameter[name, description, enabled, profile]]:
constant[
Ensures that the keystone tenant exists
name
The name of the tenant to manage
description
The description to use for this tenant
enabled
Availability state for this tenant
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b2347940>, <ast.Constant object at 0x7da1b2345a80>, <ast.Constant object at 0x7da1b23440d0>, <ast.Constant object at 0x7da1b2345450>], [<ast.Name object at 0x7da1b23474f0>, <ast.Dict object at 0x7da1b23467a0>, <ast.Constant object at 0x7da1b2345150>, <ast.Call object at 0x7da1b23471c0>]]
call[name[_api_version], parameter[]]
variable[tenant] assign[=] call[call[name[__salt__]][constant[keystone.tenant_get]], parameter[]]
if compare[constant[Error] <ast.NotIn object at 0x7da2590d7190> name[tenant]] begin[:]
if compare[call[call[name[tenant]][name[name]].get, parameter[constant[description], constant[None]]] not_equal[!=] name[description]] begin[:]
if call[name[__opts__].get, parameter[constant[test]]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[None]
call[name[ret]][constant[comment]] assign[=] call[constant[Tenant / project "{0}" will be updated].format, parameter[name[name]]]
call[call[name[ret]][constant[changes]]][constant[Description]] assign[=] constant[Will be updated]
return[name[ret]]
call[call[name[__salt__]][constant[keystone.tenant_update]], parameter[]]
call[name[ret]][constant[comment]] assign[=] call[constant[Tenant / project "{0}" has been updated].format, parameter[name[name]]]
call[call[name[ret]][constant[changes]]][constant[Description]] assign[=] constant[Updated]
if compare[call[call[name[tenant]][name[name]].get, parameter[constant[enabled], constant[None]]] not_equal[!=] name[enabled]] begin[:]
if call[name[__opts__].get, parameter[constant[test]]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[None]
call[name[ret]][constant[comment]] assign[=] call[constant[Tenant / project "{0}" will be updated].format, parameter[name[name]]]
call[call[name[ret]][constant[changes]]][constant[Enabled]] assign[=] call[constant[Will be {0}].format, parameter[name[enabled]]]
return[name[ret]]
call[call[name[__salt__]][constant[keystone.tenant_update]], parameter[]]
call[name[ret]][constant[comment]] assign[=] call[constant[Tenant / project "{0}" has been updated].format, parameter[name[name]]]
call[call[name[ret]][constant[changes]]][constant[Enabled]] assign[=] call[constant[Now {0}].format, parameter[name[enabled]]]
return[name[ret]] | keyword[def] identifier[tenant_present] ( identifier[name] , identifier[description] = keyword[None] , identifier[enabled] = keyword[True] , identifier[profile] = keyword[None] ,
** identifier[connection_args] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : keyword[True] ,
literal[string] : literal[string] . identifier[format] ( identifier[name] )}
identifier[_api_version] ( identifier[profile] = identifier[profile] ,** identifier[connection_args] )
identifier[tenant] = identifier[__salt__] [ literal[string] ]( identifier[name] = identifier[name] ,
identifier[profile] = identifier[profile] ,
** identifier[connection_args] )
keyword[if] literal[string] keyword[not] keyword[in] identifier[tenant] :
keyword[if] identifier[tenant] [ identifier[name] ]. identifier[get] ( literal[string] , keyword[None] )!= identifier[description] :
keyword[if] identifier[__opts__] . identifier[get] ( literal[string] ):
identifier[ret] [ literal[string] ]= keyword[None]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ][ literal[string] ]= literal[string]
keyword[return] identifier[ret]
identifier[__salt__] [ literal[string] ]( identifier[name] = identifier[name] ,
identifier[description] = identifier[description] ,
identifier[enabled] = identifier[enabled] ,
identifier[profile] = identifier[profile] ,
** identifier[connection_args] )
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ][ literal[string] ]= literal[string]
keyword[if] identifier[tenant] [ identifier[name] ]. identifier[get] ( literal[string] , keyword[None] )!= identifier[enabled] :
keyword[if] identifier[__opts__] . identifier[get] ( literal[string] ):
identifier[ret] [ literal[string] ]= keyword[None]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ][ literal[string] ]= literal[string] . identifier[format] ( identifier[enabled] )
keyword[return] identifier[ret]
identifier[__salt__] [ literal[string] ]( identifier[name] = identifier[name] ,
identifier[description] = identifier[description] ,
identifier[enabled] = identifier[enabled] ,
identifier[profile] = identifier[profile] ,
** identifier[connection_args] )
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ][ literal[string] ]= literal[string] . identifier[format] ( identifier[enabled] )
keyword[else] :
keyword[if] identifier[__opts__] . identifier[get] ( literal[string] ):
identifier[ret] [ literal[string] ]= keyword[None]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ][ literal[string] ]= literal[string]
keyword[return] identifier[ret]
keyword[if] identifier[_OS_IDENTITY_API_VERSION] > literal[int] :
identifier[created] = identifier[__salt__] [ literal[string] ]( identifier[name] = identifier[name] , identifier[domain] = literal[string] , identifier[description] = identifier[description] ,
identifier[enabled] = identifier[enabled] , identifier[profile] = identifier[profile] ,** identifier[connection_args] )
keyword[else] :
identifier[created] = identifier[__salt__] [ literal[string] ]( identifier[name] = identifier[name] , identifier[description] = identifier[description] , identifier[enabled] = identifier[enabled] ,
identifier[profile] = identifier[profile] ,** identifier[connection_args] )
identifier[ret] [ literal[string] ][ literal[string] ]= literal[string] keyword[if] identifier[created] keyword[is] keyword[True] keyword[else] literal[string]
identifier[ret] [ literal[string] ]= identifier[created]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[return] identifier[ret] | def tenant_present(name, description=None, enabled=True, profile=None, **connection_args):
"""
Ensures that the keystone tenant exists
name
The name of the tenant to manage
description
The description to use for this tenant
enabled
Availability state for this tenant
"""
ret = {'name': name, 'changes': {}, 'result': True, 'comment': 'Tenant / project "{0}" already exists'.format(name)}
_api_version(profile=profile, **connection_args)
# Check if tenant is already present
tenant = __salt__['keystone.tenant_get'](name=name, profile=profile, **connection_args)
if 'Error' not in tenant:
if tenant[name].get('description', None) != description:
if __opts__.get('test'):
ret['result'] = None
ret['comment'] = 'Tenant / project "{0}" will be updated'.format(name)
ret['changes']['Description'] = 'Will be updated'
return ret # depends on [control=['if'], data=[]]
__salt__['keystone.tenant_update'](name=name, description=description, enabled=enabled, profile=profile, **connection_args)
ret['comment'] = 'Tenant / project "{0}" has been updated'.format(name)
ret['changes']['Description'] = 'Updated' # depends on [control=['if'], data=['description']]
if tenant[name].get('enabled', None) != enabled:
if __opts__.get('test'):
ret['result'] = None
ret['comment'] = 'Tenant / project "{0}" will be updated'.format(name)
ret['changes']['Enabled'] = 'Will be {0}'.format(enabled)
return ret # depends on [control=['if'], data=[]]
__salt__['keystone.tenant_update'](name=name, description=description, enabled=enabled, profile=profile, **connection_args)
ret['comment'] = 'Tenant / project "{0}" has been updated'.format(name)
ret['changes']['Enabled'] = 'Now {0}'.format(enabled) # depends on [control=['if'], data=['enabled']] # depends on [control=['if'], data=['tenant']]
else:
if __opts__.get('test'):
ret['result'] = None
ret['comment'] = 'Tenant / project "{0}" will be added'.format(name)
ret['changes']['Tenant'] = 'Will be created'
return ret # depends on [control=['if'], data=[]]
# Create tenant
if _OS_IDENTITY_API_VERSION > 2:
created = __salt__['keystone.project_create'](name=name, domain='default', description=description, enabled=enabled, profile=profile, **connection_args) # depends on [control=['if'], data=[]]
else:
created = __salt__['keystone.tenant_create'](name=name, description=description, enabled=enabled, profile=profile, **connection_args)
ret['changes']['Tenant'] = 'Created' if created is True else 'Failed'
ret['result'] = created
ret['comment'] = 'Tenant / project "{0}" has been added'.format(name)
return ret |
def mask_binary(self, binary_im):
"""Create a new image by zeroing out data at locations
where binary_im == 0.0.
Parameters
----------
binary_im : :obj:`BinaryImage`
A BinaryImage of the same size as this image, with pixel values of either
zero or one. Wherever this image has zero pixels, we'll zero out the
pixels of the new image.
Returns
-------
:obj:`Image`
A new Image of the same type, masked by the given binary image.
"""
data = np.copy(self._data)
ind = np.where(binary_im.data == 0)
data[ind[0], ind[1], :] = 0
return SegmentationImage(data, self._frame) | def function[mask_binary, parameter[self, binary_im]]:
constant[Create a new image by zeroing out data at locations
where binary_im == 0.0.
Parameters
----------
binary_im : :obj:`BinaryImage`
A BinaryImage of the same size as this image, with pixel values of either
zero or one. Wherever this image has zero pixels, we'll zero out the
pixels of the new image.
Returns
-------
:obj:`Image`
A new Image of the same type, masked by the given binary image.
]
variable[data] assign[=] call[name[np].copy, parameter[name[self]._data]]
variable[ind] assign[=] call[name[np].where, parameter[compare[name[binary_im].data equal[==] constant[0]]]]
call[name[data]][tuple[[<ast.Subscript object at 0x7da1b04b3070>, <ast.Subscript object at 0x7da1b04b1cf0>, <ast.Slice object at 0x7da1b04b32b0>]]] assign[=] constant[0]
return[call[name[SegmentationImage], parameter[name[data], name[self]._frame]]] | keyword[def] identifier[mask_binary] ( identifier[self] , identifier[binary_im] ):
literal[string]
identifier[data] = identifier[np] . identifier[copy] ( identifier[self] . identifier[_data] )
identifier[ind] = identifier[np] . identifier[where] ( identifier[binary_im] . identifier[data] == literal[int] )
identifier[data] [ identifier[ind] [ literal[int] ], identifier[ind] [ literal[int] ],:]= literal[int]
keyword[return] identifier[SegmentationImage] ( identifier[data] , identifier[self] . identifier[_frame] ) | def mask_binary(self, binary_im):
"""Create a new image by zeroing out data at locations
where binary_im == 0.0.
Parameters
----------
binary_im : :obj:`BinaryImage`
A BinaryImage of the same size as this image, with pixel values of either
zero or one. Wherever this image has zero pixels, we'll zero out the
pixels of the new image.
Returns
-------
:obj:`Image`
A new Image of the same type, masked by the given binary image.
"""
data = np.copy(self._data)
ind = np.where(binary_im.data == 0)
data[ind[0], ind[1], :] = 0
return SegmentationImage(data, self._frame) |
def kwargs_to_list(kwargs):
"""
Turns {'a': 1, 'b': 2} into ["a-1", "b-2"]
"""
kwargs_list = []
# Kwargs are sorted in alphabetic order by their keys.
# Taken from http://www.saltycrane.com/blog/2007/09/how-to-sort-python-dictionary-by-keys/
for k, v in items_sorted_by_key(kwargs):
kwargs_list.append(str(k) + '-' + str(force_string(v)))
return kwargs_list | def function[kwargs_to_list, parameter[kwargs]]:
constant[
Turns {'a': 1, 'b': 2} into ["a-1", "b-2"]
]
variable[kwargs_list] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0864d60>, <ast.Name object at 0x7da1b0866590>]]] in starred[call[name[items_sorted_by_key], parameter[name[kwargs]]]] begin[:]
call[name[kwargs_list].append, parameter[binary_operation[binary_operation[call[name[str], parameter[name[k]]] + constant[-]] + call[name[str], parameter[call[name[force_string], parameter[name[v]]]]]]]]
return[name[kwargs_list]] | keyword[def] identifier[kwargs_to_list] ( identifier[kwargs] ):
literal[string]
identifier[kwargs_list] =[]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[items_sorted_by_key] ( identifier[kwargs] ):
identifier[kwargs_list] . identifier[append] ( identifier[str] ( identifier[k] )+ literal[string] + identifier[str] ( identifier[force_string] ( identifier[v] )))
keyword[return] identifier[kwargs_list] | def kwargs_to_list(kwargs):
"""
Turns {'a': 1, 'b': 2} into ["a-1", "b-2"]
"""
kwargs_list = []
# Kwargs are sorted in alphabetic order by their keys.
# Taken from http://www.saltycrane.com/blog/2007/09/how-to-sort-python-dictionary-by-keys/
for (k, v) in items_sorted_by_key(kwargs):
kwargs_list.append(str(k) + '-' + str(force_string(v))) # depends on [control=['for'], data=[]]
return kwargs_list |
def _perform_request(self, request, parser=None, parser_args=None, operation_context=None, expected_errors=None):
'''
Sends the request and return response. Catches HTTPError and hands it
to error handler
'''
operation_context = operation_context or _OperationContext()
retry_context = RetryContext()
retry_context.is_emulated = self.is_emulated
# if request body is a stream, we need to remember its current position in case retries happen
if hasattr(request.body, 'read'):
try:
retry_context.body_position = request.body.tell()
except (AttributeError, UnsupportedOperation):
# if body position cannot be obtained, then retries will not work
pass
# Apply the appropriate host based on the location mode
self._apply_host(request, operation_context, retry_context)
# Apply common settings to the request
_update_request(request, self._X_MS_VERSION, self._USER_AGENT_STRING)
client_request_id_prefix = str.format("Client-Request-ID={0}", request.headers['x-ms-client-request-id'])
while True:
try:
try:
# Execute the request callback
if self.request_callback:
self.request_callback(request)
# Add date and auth after the callback so date doesn't get too old and
# authentication is still correct if signed headers are added in the request
# callback. This also ensures retry policies with long back offs
# will work as it resets the time sensitive headers.
_add_date_header(request)
try:
# request can be signed individually
self.authentication.sign_request(request)
except AttributeError:
# session can also be signed
self.request_session = self.authentication.signed_session(self.request_session)
# Set the request context
retry_context.request = request
# Log the request before it goes out
# Avoid unnecessary scrubbing if the logger is not on
if logger.isEnabledFor(logging.INFO):
logger.info("%s Outgoing request: Method=%s, Path=%s, Query=%s, Headers=%s.",
client_request_id_prefix,
request.method,
request.path,
self._scrub_query_parameters(request.query),
str(self._scrub_headers(request.headers)).replace('\n', ''))
# Perform the request
response = self._httpclient.perform_request(request)
# Execute the response callback
if self.response_callback:
self.response_callback(response)
# Set the response context
retry_context.response = response
# Log the response when it comes back
logger.info("%s Receiving Response: "
"%s, HTTP Status Code=%s, Message=%s, Headers=%s.",
client_request_id_prefix,
self.extract_date_and_request_id(retry_context),
response.status,
response.message,
str(response.headers).replace('\n', ''))
# Parse and wrap HTTP errors in AzureHttpError which inherits from AzureException
if response.status >= 300:
# This exception will be caught by the general error handler
# and raised as an azure http exception
_http_error_handler(
HTTPError(response.status, response.message, response.headers, response.body))
# Parse the response
if parser:
if parser_args:
args = [response]
args.extend(parser_args)
return parser(*args)
else:
return parser(response)
else:
return
except AzureException as ex:
retry_context.exception = ex
raise ex
except Exception as ex:
retry_context.exception = ex
raise _wrap_exception(ex, AzureException)
except AzureException as ex:
# only parse the strings used for logging if logging is at least enabled for CRITICAL
exception_str_in_one_line = ''
status_code = ''
timestamp_and_request_id = ''
if logger.isEnabledFor(logging.CRITICAL):
exception_str_in_one_line = str(ex).replace('\n', '')
status_code = retry_context.response.status if retry_context.response is not None else 'Unknown'
timestamp_and_request_id = self.extract_date_and_request_id(retry_context)
# if the http error was expected, we should short-circuit
if isinstance(ex, AzureHttpError) and expected_errors is not None and ex.error_code in expected_errors:
logger.info("%s Received expected http error: "
"%s, HTTP status code=%s, Exception=%s.",
client_request_id_prefix,
timestamp_and_request_id,
status_code,
exception_str_in_one_line)
raise ex
elif isinstance(ex, AzureSigningError):
logger.info("%s Unable to sign the request: Exception=%s.",
client_request_id_prefix,
exception_str_in_one_line)
raise ex
logger.info("%s Operation failed: checking if the operation should be retried. "
"Current retry count=%s, %s, HTTP status code=%s, Exception=%s.",
client_request_id_prefix,
retry_context.count if hasattr(retry_context, 'count') else 0,
timestamp_and_request_id,
status_code,
exception_str_in_one_line)
# Decryption failures (invalid objects, invalid algorithms, data unencrypted in strict mode, etc)
# will not be resolved with retries.
if str(ex) == _ERROR_DECRYPTION_FAILURE:
logger.error("%s Encountered decryption failure: this cannot be retried. "
"%s, HTTP status code=%s, Exception=%s.",
client_request_id_prefix,
timestamp_and_request_id,
status_code,
exception_str_in_one_line)
raise ex
# Determine whether a retry should be performed and if so, how
# long to wait before performing retry.
retry_interval = self.retry(retry_context)
if retry_interval is not None:
# Execute the callback
if self.retry_callback:
self.retry_callback(retry_context)
logger.info(
"%s Retry policy is allowing a retry: Retry count=%s, Interval=%s.",
client_request_id_prefix,
retry_context.count,
retry_interval)
# Sleep for the desired retry interval
sleep(retry_interval)
else:
logger.error("%s Retry policy did not allow for a retry: "
"%s, HTTP status code=%s, Exception=%s.",
client_request_id_prefix,
timestamp_and_request_id,
status_code,
exception_str_in_one_line)
raise ex
finally:
# If this is a location locked operation and the location is not set,
# this is the first request of that operation. Set the location to
# be used for subsequent requests in the operation.
if operation_context.location_lock and not operation_context.host_location:
# note: to cover the emulator scenario, the host_location is grabbed
# from request.host_locations(which includes the dev account name)
# instead of request.host(which at this point no longer includes the dev account name)
operation_context.host_location = {
retry_context.location_mode: request.host_locations[retry_context.location_mode]} | def function[_perform_request, parameter[self, request, parser, parser_args, operation_context, expected_errors]]:
constant[
Sends the request and return response. Catches HTTPError and hands it
to error handler
]
variable[operation_context] assign[=] <ast.BoolOp object at 0x7da1b1dfb430>
variable[retry_context] assign[=] call[name[RetryContext], parameter[]]
name[retry_context].is_emulated assign[=] name[self].is_emulated
if call[name[hasattr], parameter[name[request].body, constant[read]]] begin[:]
<ast.Try object at 0x7da1b1dfb0a0>
call[name[self]._apply_host, parameter[name[request], name[operation_context], name[retry_context]]]
call[name[_update_request], parameter[name[request], name[self]._X_MS_VERSION, name[self]._USER_AGENT_STRING]]
variable[client_request_id_prefix] assign[=] call[name[str].format, parameter[constant[Client-Request-ID={0}], call[name[request].headers][constant[x-ms-client-request-id]]]]
while constant[True] begin[:]
<ast.Try object at 0x7da1b1dfa890> | keyword[def] identifier[_perform_request] ( identifier[self] , identifier[request] , identifier[parser] = keyword[None] , identifier[parser_args] = keyword[None] , identifier[operation_context] = keyword[None] , identifier[expected_errors] = keyword[None] ):
literal[string]
identifier[operation_context] = identifier[operation_context] keyword[or] identifier[_OperationContext] ()
identifier[retry_context] = identifier[RetryContext] ()
identifier[retry_context] . identifier[is_emulated] = identifier[self] . identifier[is_emulated]
keyword[if] identifier[hasattr] ( identifier[request] . identifier[body] , literal[string] ):
keyword[try] :
identifier[retry_context] . identifier[body_position] = identifier[request] . identifier[body] . identifier[tell] ()
keyword[except] ( identifier[AttributeError] , identifier[UnsupportedOperation] ):
keyword[pass]
identifier[self] . identifier[_apply_host] ( identifier[request] , identifier[operation_context] , identifier[retry_context] )
identifier[_update_request] ( identifier[request] , identifier[self] . identifier[_X_MS_VERSION] , identifier[self] . identifier[_USER_AGENT_STRING] )
identifier[client_request_id_prefix] = identifier[str] . identifier[format] ( literal[string] , identifier[request] . identifier[headers] [ literal[string] ])
keyword[while] keyword[True] :
keyword[try] :
keyword[try] :
keyword[if] identifier[self] . identifier[request_callback] :
identifier[self] . identifier[request_callback] ( identifier[request] )
identifier[_add_date_header] ( identifier[request] )
keyword[try] :
identifier[self] . identifier[authentication] . identifier[sign_request] ( identifier[request] )
keyword[except] identifier[AttributeError] :
identifier[self] . identifier[request_session] = identifier[self] . identifier[authentication] . identifier[signed_session] ( identifier[self] . identifier[request_session] )
identifier[retry_context] . identifier[request] = identifier[request]
keyword[if] identifier[logger] . identifier[isEnabledFor] ( identifier[logging] . identifier[INFO] ):
identifier[logger] . identifier[info] ( literal[string] ,
identifier[client_request_id_prefix] ,
identifier[request] . identifier[method] ,
identifier[request] . identifier[path] ,
identifier[self] . identifier[_scrub_query_parameters] ( identifier[request] . identifier[query] ),
identifier[str] ( identifier[self] . identifier[_scrub_headers] ( identifier[request] . identifier[headers] )). identifier[replace] ( literal[string] , literal[string] ))
identifier[response] = identifier[self] . identifier[_httpclient] . identifier[perform_request] ( identifier[request] )
keyword[if] identifier[self] . identifier[response_callback] :
identifier[self] . identifier[response_callback] ( identifier[response] )
identifier[retry_context] . identifier[response] = identifier[response]
identifier[logger] . identifier[info] ( literal[string]
literal[string] ,
identifier[client_request_id_prefix] ,
identifier[self] . identifier[extract_date_and_request_id] ( identifier[retry_context] ),
identifier[response] . identifier[status] ,
identifier[response] . identifier[message] ,
identifier[str] ( identifier[response] . identifier[headers] ). identifier[replace] ( literal[string] , literal[string] ))
keyword[if] identifier[response] . identifier[status] >= literal[int] :
identifier[_http_error_handler] (
identifier[HTTPError] ( identifier[response] . identifier[status] , identifier[response] . identifier[message] , identifier[response] . identifier[headers] , identifier[response] . identifier[body] ))
keyword[if] identifier[parser] :
keyword[if] identifier[parser_args] :
identifier[args] =[ identifier[response] ]
identifier[args] . identifier[extend] ( identifier[parser_args] )
keyword[return] identifier[parser] (* identifier[args] )
keyword[else] :
keyword[return] identifier[parser] ( identifier[response] )
keyword[else] :
keyword[return]
keyword[except] identifier[AzureException] keyword[as] identifier[ex] :
identifier[retry_context] . identifier[exception] = identifier[ex]
keyword[raise] identifier[ex]
keyword[except] identifier[Exception] keyword[as] identifier[ex] :
identifier[retry_context] . identifier[exception] = identifier[ex]
keyword[raise] identifier[_wrap_exception] ( identifier[ex] , identifier[AzureException] )
keyword[except] identifier[AzureException] keyword[as] identifier[ex] :
identifier[exception_str_in_one_line] = literal[string]
identifier[status_code] = literal[string]
identifier[timestamp_and_request_id] = literal[string]
keyword[if] identifier[logger] . identifier[isEnabledFor] ( identifier[logging] . identifier[CRITICAL] ):
identifier[exception_str_in_one_line] = identifier[str] ( identifier[ex] ). identifier[replace] ( literal[string] , literal[string] )
identifier[status_code] = identifier[retry_context] . identifier[response] . identifier[status] keyword[if] identifier[retry_context] . identifier[response] keyword[is] keyword[not] keyword[None] keyword[else] literal[string]
identifier[timestamp_and_request_id] = identifier[self] . identifier[extract_date_and_request_id] ( identifier[retry_context] )
keyword[if] identifier[isinstance] ( identifier[ex] , identifier[AzureHttpError] ) keyword[and] identifier[expected_errors] keyword[is] keyword[not] keyword[None] keyword[and] identifier[ex] . identifier[error_code] keyword[in] identifier[expected_errors] :
identifier[logger] . identifier[info] ( literal[string]
literal[string] ,
identifier[client_request_id_prefix] ,
identifier[timestamp_and_request_id] ,
identifier[status_code] ,
identifier[exception_str_in_one_line] )
keyword[raise] identifier[ex]
keyword[elif] identifier[isinstance] ( identifier[ex] , identifier[AzureSigningError] ):
identifier[logger] . identifier[info] ( literal[string] ,
identifier[client_request_id_prefix] ,
identifier[exception_str_in_one_line] )
keyword[raise] identifier[ex]
identifier[logger] . identifier[info] ( literal[string]
literal[string] ,
identifier[client_request_id_prefix] ,
identifier[retry_context] . identifier[count] keyword[if] identifier[hasattr] ( identifier[retry_context] , literal[string] ) keyword[else] literal[int] ,
identifier[timestamp_and_request_id] ,
identifier[status_code] ,
identifier[exception_str_in_one_line] )
keyword[if] identifier[str] ( identifier[ex] )== identifier[_ERROR_DECRYPTION_FAILURE] :
identifier[logger] . identifier[error] ( literal[string]
literal[string] ,
identifier[client_request_id_prefix] ,
identifier[timestamp_and_request_id] ,
identifier[status_code] ,
identifier[exception_str_in_one_line] )
keyword[raise] identifier[ex]
identifier[retry_interval] = identifier[self] . identifier[retry] ( identifier[retry_context] )
keyword[if] identifier[retry_interval] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[self] . identifier[retry_callback] :
identifier[self] . identifier[retry_callback] ( identifier[retry_context] )
identifier[logger] . identifier[info] (
literal[string] ,
identifier[client_request_id_prefix] ,
identifier[retry_context] . identifier[count] ,
identifier[retry_interval] )
identifier[sleep] ( identifier[retry_interval] )
keyword[else] :
identifier[logger] . identifier[error] ( literal[string]
literal[string] ,
identifier[client_request_id_prefix] ,
identifier[timestamp_and_request_id] ,
identifier[status_code] ,
identifier[exception_str_in_one_line] )
keyword[raise] identifier[ex]
keyword[finally] :
keyword[if] identifier[operation_context] . identifier[location_lock] keyword[and] keyword[not] identifier[operation_context] . identifier[host_location] :
identifier[operation_context] . identifier[host_location] ={
identifier[retry_context] . identifier[location_mode] : identifier[request] . identifier[host_locations] [ identifier[retry_context] . identifier[location_mode] ]} | def _perform_request(self, request, parser=None, parser_args=None, operation_context=None, expected_errors=None):
"""
Sends the request and return response. Catches HTTPError and hands it
to error handler
"""
operation_context = operation_context or _OperationContext()
retry_context = RetryContext()
retry_context.is_emulated = self.is_emulated
# if request body is a stream, we need to remember its current position in case retries happen
if hasattr(request.body, 'read'):
try:
retry_context.body_position = request.body.tell() # depends on [control=['try'], data=[]]
except (AttributeError, UnsupportedOperation):
# if body position cannot be obtained, then retries will not work
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
# Apply the appropriate host based on the location mode
self._apply_host(request, operation_context, retry_context)
# Apply common settings to the request
_update_request(request, self._X_MS_VERSION, self._USER_AGENT_STRING)
client_request_id_prefix = str.format('Client-Request-ID={0}', request.headers['x-ms-client-request-id'])
while True:
try:
try: # Execute the request callback
if self.request_callback:
self.request_callback(request) # depends on [control=['if'], data=[]] # Add date and auth after the callback so date doesn't get too old and
# authentication is still correct if signed headers are added in the request
# callback. This also ensures retry policies with long back offs
# will work as it resets the time sensitive headers.
_add_date_header(request)
try:
# request can be signed individually
self.authentication.sign_request(request) # depends on [control=['try'], data=[]]
except AttributeError:
# session can also be signed
self.request_session = self.authentication.signed_session(self.request_session) # depends on [control=['except'], data=[]]
# Set the request context
retry_context.request = request
# Log the request before it goes out
# Avoid unnecessary scrubbing if the logger is not on
if logger.isEnabledFor(logging.INFO):
logger.info('%s Outgoing request: Method=%s, Path=%s, Query=%s, Headers=%s.', client_request_id_prefix, request.method, request.path, self._scrub_query_parameters(request.query), str(self._scrub_headers(request.headers)).replace('\n', '')) # depends on [control=['if'], data=[]]
# Perform the request
response = self._httpclient.perform_request(request)
# Execute the response callback
if self.response_callback:
self.response_callback(response) # depends on [control=['if'], data=[]]
# Set the response context
retry_context.response = response
# Log the response when it comes back
logger.info('%s Receiving Response: %s, HTTP Status Code=%s, Message=%s, Headers=%s.', client_request_id_prefix, self.extract_date_and_request_id(retry_context), response.status, response.message, str(response.headers).replace('\n', ''))
# Parse and wrap HTTP errors in AzureHttpError which inherits from AzureException
if response.status >= 300:
# This exception will be caught by the general error handler
# and raised as an azure http exception
_http_error_handler(HTTPError(response.status, response.message, response.headers, response.body)) # depends on [control=['if'], data=[]]
# Parse the response
if parser:
if parser_args:
args = [response]
args.extend(parser_args)
return parser(*args) # depends on [control=['if'], data=[]]
else:
return parser(response) # depends on [control=['if'], data=[]]
else:
return # depends on [control=['try'], data=[]]
except AzureException as ex:
retry_context.exception = ex
raise ex # depends on [control=['except'], data=['ex']]
except Exception as ex:
retry_context.exception = ex
raise _wrap_exception(ex, AzureException) # depends on [control=['except'], data=['ex']] # depends on [control=['try'], data=[]]
except AzureException as ex:
# only parse the strings used for logging if logging is at least enabled for CRITICAL
exception_str_in_one_line = ''
status_code = ''
timestamp_and_request_id = ''
if logger.isEnabledFor(logging.CRITICAL):
exception_str_in_one_line = str(ex).replace('\n', '')
status_code = retry_context.response.status if retry_context.response is not None else 'Unknown'
timestamp_and_request_id = self.extract_date_and_request_id(retry_context) # depends on [control=['if'], data=[]]
# if the http error was expected, we should short-circuit
if isinstance(ex, AzureHttpError) and expected_errors is not None and (ex.error_code in expected_errors):
logger.info('%s Received expected http error: %s, HTTP status code=%s, Exception=%s.', client_request_id_prefix, timestamp_and_request_id, status_code, exception_str_in_one_line)
raise ex # depends on [control=['if'], data=[]]
elif isinstance(ex, AzureSigningError):
logger.info('%s Unable to sign the request: Exception=%s.', client_request_id_prefix, exception_str_in_one_line)
raise ex # depends on [control=['if'], data=[]]
logger.info('%s Operation failed: checking if the operation should be retried. Current retry count=%s, %s, HTTP status code=%s, Exception=%s.', client_request_id_prefix, retry_context.count if hasattr(retry_context, 'count') else 0, timestamp_and_request_id, status_code, exception_str_in_one_line)
# Decryption failures (invalid objects, invalid algorithms, data unencrypted in strict mode, etc)
# will not be resolved with retries.
if str(ex) == _ERROR_DECRYPTION_FAILURE:
logger.error('%s Encountered decryption failure: this cannot be retried. %s, HTTP status code=%s, Exception=%s.', client_request_id_prefix, timestamp_and_request_id, status_code, exception_str_in_one_line)
raise ex # depends on [control=['if'], data=[]] # Determine whether a retry should be performed and if so, how
# long to wait before performing retry.
retry_interval = self.retry(retry_context)
if retry_interval is not None:
# Execute the callback
if self.retry_callback:
self.retry_callback(retry_context) # depends on [control=['if'], data=[]]
logger.info('%s Retry policy is allowing a retry: Retry count=%s, Interval=%s.', client_request_id_prefix, retry_context.count, retry_interval)
# Sleep for the desired retry interval
sleep(retry_interval) # depends on [control=['if'], data=['retry_interval']]
else:
logger.error('%s Retry policy did not allow for a retry: %s, HTTP status code=%s, Exception=%s.', client_request_id_prefix, timestamp_and_request_id, status_code, exception_str_in_one_line)
raise ex # depends on [control=['except'], data=['ex']]
finally: # If this is a location locked operation and the location is not set,
# this is the first request of that operation. Set the location to
# be used for subsequent requests in the operation.
if operation_context.location_lock and (not operation_context.host_location):
# note: to cover the emulator scenario, the host_location is grabbed
# from request.host_locations(which includes the dev account name)
# instead of request.host(which at this point no longer includes the dev account name)
operation_context.host_location = {retry_context.location_mode: request.host_locations[retry_context.location_mode]} # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] |
def stop_service(self, instance, service):
"""
Stops a single service.
:param str instance: A Yamcs instance name.
:param str service: The name of the service.
"""
req = rest_pb2.EditServiceRequest()
req.state = 'stopped'
url = '/services/{}/{}'.format(instance, service)
self.patch_proto(url, data=req.SerializeToString()) | def function[stop_service, parameter[self, instance, service]]:
constant[
Stops a single service.
:param str instance: A Yamcs instance name.
:param str service: The name of the service.
]
variable[req] assign[=] call[name[rest_pb2].EditServiceRequest, parameter[]]
name[req].state assign[=] constant[stopped]
variable[url] assign[=] call[constant[/services/{}/{}].format, parameter[name[instance], name[service]]]
call[name[self].patch_proto, parameter[name[url]]] | keyword[def] identifier[stop_service] ( identifier[self] , identifier[instance] , identifier[service] ):
literal[string]
identifier[req] = identifier[rest_pb2] . identifier[EditServiceRequest] ()
identifier[req] . identifier[state] = literal[string]
identifier[url] = literal[string] . identifier[format] ( identifier[instance] , identifier[service] )
identifier[self] . identifier[patch_proto] ( identifier[url] , identifier[data] = identifier[req] . identifier[SerializeToString] ()) | def stop_service(self, instance, service):
"""
Stops a single service.
:param str instance: A Yamcs instance name.
:param str service: The name of the service.
"""
req = rest_pb2.EditServiceRequest()
req.state = 'stopped'
url = '/services/{}/{}'.format(instance, service)
self.patch_proto(url, data=req.SerializeToString()) |
def render_scene(self):
"render scene one time"
self.init_gl() # should be a no-op after the first frame is rendered
SDL_GL_MakeCurrent ( self.window, self.context )
self.renderer.render_scene()
# Done rendering
# SDL_GL_SwapWindow(self.window)
glFlush() | def function[render_scene, parameter[self]]:
constant[render scene one time]
call[name[self].init_gl, parameter[]]
call[name[SDL_GL_MakeCurrent], parameter[name[self].window, name[self].context]]
call[name[self].renderer.render_scene, parameter[]]
call[name[glFlush], parameter[]] | keyword[def] identifier[render_scene] ( identifier[self] ):
literal[string]
identifier[self] . identifier[init_gl] ()
identifier[SDL_GL_MakeCurrent] ( identifier[self] . identifier[window] , identifier[self] . identifier[context] )
identifier[self] . identifier[renderer] . identifier[render_scene] ()
identifier[glFlush] () | def render_scene(self):
"""render scene one time"""
self.init_gl() # should be a no-op after the first frame is rendered
SDL_GL_MakeCurrent(self.window, self.context)
self.renderer.render_scene() # Done rendering
# SDL_GL_SwapWindow(self.window)
glFlush() |
def btc_is_p2pkh_address( address ):
"""
Is the given address a p2pkh address?
"""
vb = keylib.b58check.b58check_version_byte( address )
if vb == version_byte:
return True
else:
return False | def function[btc_is_p2pkh_address, parameter[address]]:
constant[
Is the given address a p2pkh address?
]
variable[vb] assign[=] call[name[keylib].b58check.b58check_version_byte, parameter[name[address]]]
if compare[name[vb] equal[==] name[version_byte]] begin[:]
return[constant[True]] | keyword[def] identifier[btc_is_p2pkh_address] ( identifier[address] ):
literal[string]
identifier[vb] = identifier[keylib] . identifier[b58check] . identifier[b58check_version_byte] ( identifier[address] )
keyword[if] identifier[vb] == identifier[version_byte] :
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False] | def btc_is_p2pkh_address(address):
"""
Is the given address a p2pkh address?
"""
vb = keylib.b58check.b58check_version_byte(address)
if vb == version_byte:
return True # depends on [control=['if'], data=[]]
else:
return False |
def tagExplicitly(self, superTag):
"""Return explicitly tagged *TagSet*
Create a new *TagSet* representing callee *TagSet* explicitly tagged
with passed tag(s). With explicit tagging mode, new tags are appended
to existing tag(s).
Parameters
----------
superTag: :class:`~pyasn1.type.tag.Tag`
*Tag* object to tag this *TagSet*
Returns
-------
: :class:`~pyasn1.type.tag.TagSet`
New *TagSet* object
"""
if superTag.tagClass == tagClassUniversal:
raise error.PyAsn1Error("Can't tag with UNIVERSAL class tag")
if superTag.tagFormat != tagFormatConstructed:
superTag = Tag(superTag.tagClass, tagFormatConstructed, superTag.tagId)
return self + superTag | def function[tagExplicitly, parameter[self, superTag]]:
constant[Return explicitly tagged *TagSet*
Create a new *TagSet* representing callee *TagSet* explicitly tagged
with passed tag(s). With explicit tagging mode, new tags are appended
to existing tag(s).
Parameters
----------
superTag: :class:`~pyasn1.type.tag.Tag`
*Tag* object to tag this *TagSet*
Returns
-------
: :class:`~pyasn1.type.tag.TagSet`
New *TagSet* object
]
if compare[name[superTag].tagClass equal[==] name[tagClassUniversal]] begin[:]
<ast.Raise object at 0x7da20e9568c0>
if compare[name[superTag].tagFormat not_equal[!=] name[tagFormatConstructed]] begin[:]
variable[superTag] assign[=] call[name[Tag], parameter[name[superTag].tagClass, name[tagFormatConstructed], name[superTag].tagId]]
return[binary_operation[name[self] + name[superTag]]] | keyword[def] identifier[tagExplicitly] ( identifier[self] , identifier[superTag] ):
literal[string]
keyword[if] identifier[superTag] . identifier[tagClass] == identifier[tagClassUniversal] :
keyword[raise] identifier[error] . identifier[PyAsn1Error] ( literal[string] )
keyword[if] identifier[superTag] . identifier[tagFormat] != identifier[tagFormatConstructed] :
identifier[superTag] = identifier[Tag] ( identifier[superTag] . identifier[tagClass] , identifier[tagFormatConstructed] , identifier[superTag] . identifier[tagId] )
keyword[return] identifier[self] + identifier[superTag] | def tagExplicitly(self, superTag):
"""Return explicitly tagged *TagSet*
Create a new *TagSet* representing callee *TagSet* explicitly tagged
with passed tag(s). With explicit tagging mode, new tags are appended
to existing tag(s).
Parameters
----------
superTag: :class:`~pyasn1.type.tag.Tag`
*Tag* object to tag this *TagSet*
Returns
-------
: :class:`~pyasn1.type.tag.TagSet`
New *TagSet* object
"""
if superTag.tagClass == tagClassUniversal:
raise error.PyAsn1Error("Can't tag with UNIVERSAL class tag") # depends on [control=['if'], data=[]]
if superTag.tagFormat != tagFormatConstructed:
superTag = Tag(superTag.tagClass, tagFormatConstructed, superTag.tagId) # depends on [control=['if'], data=['tagFormatConstructed']]
return self + superTag |
def os_discovery():
"""
Performs os (and domain) discovery of smb hosts.
"""
hs = HostSearch()
hosts = hs.get_hosts(ports=[445], tags=['!nmap_os'])
# TODO fix filter for emtpy fields.
hosts = [host for host in hosts if not host.os]
host_dict = {}
for host in hosts:
host_dict[str(host.address)] = host
arguments = "--script smb-os-discovery.nse -p 445 -Pn -n --disable-arp-ping".split(' ')
if len(hosts):
count = 0
print_notification("Checking OS of {} systems".format(len(hosts)))
result = nmap(arguments, [str(h.address) for h in hosts])
parser = NmapParser()
report = parser.parse_fromstring(result)
for nmap_host in report.hosts:
for script_result in nmap_host.scripts_results:
script_result = script_result.get('elements', {})
host = host_dict[str(nmap_host.address)]
if 'fqdn' in script_result:
host.hostname.append(script_result['fqdn'])
if 'os' in script_result:
count += 1
host.os = script_result['os']
host_dict[str(nmap_host.address)] = host
for host in hosts:
host.add_tag('nmap_os')
host.save()
print_notification("Done, found the os of {} systems".format(count))
else:
print_notification("No systems found to be checked.") | def function[os_discovery, parameter[]]:
constant[
Performs os (and domain) discovery of smb hosts.
]
variable[hs] assign[=] call[name[HostSearch], parameter[]]
variable[hosts] assign[=] call[name[hs].get_hosts, parameter[]]
variable[hosts] assign[=] <ast.ListComp object at 0x7da1b004f400>
variable[host_dict] assign[=] dictionary[[], []]
for taget[name[host]] in starred[name[hosts]] begin[:]
call[name[host_dict]][call[name[str], parameter[name[host].address]]] assign[=] name[host]
variable[arguments] assign[=] call[constant[--script smb-os-discovery.nse -p 445 -Pn -n --disable-arp-ping].split, parameter[constant[ ]]]
if call[name[len], parameter[name[hosts]]] begin[:]
variable[count] assign[=] constant[0]
call[name[print_notification], parameter[call[constant[Checking OS of {} systems].format, parameter[call[name[len], parameter[name[hosts]]]]]]]
variable[result] assign[=] call[name[nmap], parameter[name[arguments], <ast.ListComp object at 0x7da1b009e560>]]
variable[parser] assign[=] call[name[NmapParser], parameter[]]
variable[report] assign[=] call[name[parser].parse_fromstring, parameter[name[result]]]
for taget[name[nmap_host]] in starred[name[report].hosts] begin[:]
for taget[name[script_result]] in starred[name[nmap_host].scripts_results] begin[:]
variable[script_result] assign[=] call[name[script_result].get, parameter[constant[elements], dictionary[[], []]]]
variable[host] assign[=] call[name[host_dict]][call[name[str], parameter[name[nmap_host].address]]]
if compare[constant[fqdn] in name[script_result]] begin[:]
call[name[host].hostname.append, parameter[call[name[script_result]][constant[fqdn]]]]
if compare[constant[os] in name[script_result]] begin[:]
<ast.AugAssign object at 0x7da1afe38d00>
name[host].os assign[=] call[name[script_result]][constant[os]]
call[name[host_dict]][call[name[str], parameter[name[nmap_host].address]]] assign[=] name[host]
for taget[name[host]] in starred[name[hosts]] begin[:]
call[name[host].add_tag, parameter[constant[nmap_os]]]
call[name[host].save, parameter[]]
call[name[print_notification], parameter[call[constant[Done, found the os of {} systems].format, parameter[name[count]]]]] | keyword[def] identifier[os_discovery] ():
literal[string]
identifier[hs] = identifier[HostSearch] ()
identifier[hosts] = identifier[hs] . identifier[get_hosts] ( identifier[ports] =[ literal[int] ], identifier[tags] =[ literal[string] ])
identifier[hosts] =[ identifier[host] keyword[for] identifier[host] keyword[in] identifier[hosts] keyword[if] keyword[not] identifier[host] . identifier[os] ]
identifier[host_dict] ={}
keyword[for] identifier[host] keyword[in] identifier[hosts] :
identifier[host_dict] [ identifier[str] ( identifier[host] . identifier[address] )]= identifier[host]
identifier[arguments] = literal[string] . identifier[split] ( literal[string] )
keyword[if] identifier[len] ( identifier[hosts] ):
identifier[count] = literal[int]
identifier[print_notification] ( literal[string] . identifier[format] ( identifier[len] ( identifier[hosts] )))
identifier[result] = identifier[nmap] ( identifier[arguments] ,[ identifier[str] ( identifier[h] . identifier[address] ) keyword[for] identifier[h] keyword[in] identifier[hosts] ])
identifier[parser] = identifier[NmapParser] ()
identifier[report] = identifier[parser] . identifier[parse_fromstring] ( identifier[result] )
keyword[for] identifier[nmap_host] keyword[in] identifier[report] . identifier[hosts] :
keyword[for] identifier[script_result] keyword[in] identifier[nmap_host] . identifier[scripts_results] :
identifier[script_result] = identifier[script_result] . identifier[get] ( literal[string] ,{})
identifier[host] = identifier[host_dict] [ identifier[str] ( identifier[nmap_host] . identifier[address] )]
keyword[if] literal[string] keyword[in] identifier[script_result] :
identifier[host] . identifier[hostname] . identifier[append] ( identifier[script_result] [ literal[string] ])
keyword[if] literal[string] keyword[in] identifier[script_result] :
identifier[count] += literal[int]
identifier[host] . identifier[os] = identifier[script_result] [ literal[string] ]
identifier[host_dict] [ identifier[str] ( identifier[nmap_host] . identifier[address] )]= identifier[host]
keyword[for] identifier[host] keyword[in] identifier[hosts] :
identifier[host] . identifier[add_tag] ( literal[string] )
identifier[host] . identifier[save] ()
identifier[print_notification] ( literal[string] . identifier[format] ( identifier[count] ))
keyword[else] :
identifier[print_notification] ( literal[string] ) | def os_discovery():
"""
Performs os (and domain) discovery of smb hosts.
"""
hs = HostSearch()
hosts = hs.get_hosts(ports=[445], tags=['!nmap_os'])
# TODO fix filter for emtpy fields.
hosts = [host for host in hosts if not host.os]
host_dict = {}
for host in hosts:
host_dict[str(host.address)] = host # depends on [control=['for'], data=['host']]
arguments = '--script smb-os-discovery.nse -p 445 -Pn -n --disable-arp-ping'.split(' ')
if len(hosts):
count = 0
print_notification('Checking OS of {} systems'.format(len(hosts)))
result = nmap(arguments, [str(h.address) for h in hosts])
parser = NmapParser()
report = parser.parse_fromstring(result)
for nmap_host in report.hosts:
for script_result in nmap_host.scripts_results:
script_result = script_result.get('elements', {})
host = host_dict[str(nmap_host.address)]
if 'fqdn' in script_result:
host.hostname.append(script_result['fqdn']) # depends on [control=['if'], data=['script_result']]
if 'os' in script_result:
count += 1
host.os = script_result['os'] # depends on [control=['if'], data=['script_result']]
host_dict[str(nmap_host.address)] = host # depends on [control=['for'], data=['script_result']] # depends on [control=['for'], data=['nmap_host']]
for host in hosts:
host.add_tag('nmap_os')
host.save() # depends on [control=['for'], data=['host']]
print_notification('Done, found the os of {} systems'.format(count)) # depends on [control=['if'], data=[]]
else:
print_notification('No systems found to be checked.') |
def update_xml_element(self):
"""
Updates the xml element contents to matches the instance contents.
:returns: Updated XML element.
:rtype: lxml.etree._Element
"""
super(Description, self).update_xml_element()
if hasattr(self, 'lang'):
self.xml_element.set(
'{http://www.w3.org/XML/1998/namespace}lang', self.lang)
if hasattr(self, 'override'):
self.xml_element.set('override', str(self.override))
return self.xml_element | def function[update_xml_element, parameter[self]]:
constant[
Updates the xml element contents to matches the instance contents.
:returns: Updated XML element.
:rtype: lxml.etree._Element
]
call[call[name[super], parameter[name[Description], name[self]]].update_xml_element, parameter[]]
if call[name[hasattr], parameter[name[self], constant[lang]]] begin[:]
call[name[self].xml_element.set, parameter[constant[{http://www.w3.org/XML/1998/namespace}lang], name[self].lang]]
if call[name[hasattr], parameter[name[self], constant[override]]] begin[:]
call[name[self].xml_element.set, parameter[constant[override], call[name[str], parameter[name[self].override]]]]
return[name[self].xml_element] | keyword[def] identifier[update_xml_element] ( identifier[self] ):
literal[string]
identifier[super] ( identifier[Description] , identifier[self] ). identifier[update_xml_element] ()
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[xml_element] . identifier[set] (
literal[string] , identifier[self] . identifier[lang] )
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[xml_element] . identifier[set] ( literal[string] , identifier[str] ( identifier[self] . identifier[override] ))
keyword[return] identifier[self] . identifier[xml_element] | def update_xml_element(self):
"""
Updates the xml element contents to matches the instance contents.
:returns: Updated XML element.
:rtype: lxml.etree._Element
"""
super(Description, self).update_xml_element()
if hasattr(self, 'lang'):
self.xml_element.set('{http://www.w3.org/XML/1998/namespace}lang', self.lang) # depends on [control=['if'], data=[]]
if hasattr(self, 'override'):
self.xml_element.set('override', str(self.override)) # depends on [control=['if'], data=[]]
return self.xml_element |
def parse_response(self, connection, command_name, **options):
"Parses a response from the Redis server"
try:
response = connection.read_response()
except ResponseError:
if EMPTY_RESPONSE in options:
return options[EMPTY_RESPONSE]
raise
if command_name in self.response_callbacks:
return self.response_callbacks[command_name](response, **options)
return response | def function[parse_response, parameter[self, connection, command_name]]:
constant[Parses a response from the Redis server]
<ast.Try object at 0x7da2041d8040>
if compare[name[command_name] in name[self].response_callbacks] begin[:]
return[call[call[name[self].response_callbacks][name[command_name]], parameter[name[response]]]]
return[name[response]] | keyword[def] identifier[parse_response] ( identifier[self] , identifier[connection] , identifier[command_name] ,** identifier[options] ):
literal[string]
keyword[try] :
identifier[response] = identifier[connection] . identifier[read_response] ()
keyword[except] identifier[ResponseError] :
keyword[if] identifier[EMPTY_RESPONSE] keyword[in] identifier[options] :
keyword[return] identifier[options] [ identifier[EMPTY_RESPONSE] ]
keyword[raise]
keyword[if] identifier[command_name] keyword[in] identifier[self] . identifier[response_callbacks] :
keyword[return] identifier[self] . identifier[response_callbacks] [ identifier[command_name] ]( identifier[response] ,** identifier[options] )
keyword[return] identifier[response] | def parse_response(self, connection, command_name, **options):
"""Parses a response from the Redis server"""
try:
response = connection.read_response() # depends on [control=['try'], data=[]]
except ResponseError:
if EMPTY_RESPONSE in options:
return options[EMPTY_RESPONSE] # depends on [control=['if'], data=['EMPTY_RESPONSE', 'options']]
raise # depends on [control=['except'], data=[]]
if command_name in self.response_callbacks:
return self.response_callbacks[command_name](response, **options) # depends on [control=['if'], data=['command_name']]
return response |
def write_command(self, request_id, msg):
"""Send "insert" etc. command, returning response as a dict.
Can raise ConnectionFailure or OperationFailure.
:Parameters:
- `request_id`: an int.
- `msg`: bytes, the command message.
"""
self.send_message(msg, 0)
response = helpers._unpack_response(self.receive_message(1, request_id))
assert response['number_returned'] == 1
result = response['data'][0]
# Raises NotMasterError or OperationFailure.
helpers._check_command_response(result)
return result | def function[write_command, parameter[self, request_id, msg]]:
constant[Send "insert" etc. command, returning response as a dict.
Can raise ConnectionFailure or OperationFailure.
:Parameters:
- `request_id`: an int.
- `msg`: bytes, the command message.
]
call[name[self].send_message, parameter[name[msg], constant[0]]]
variable[response] assign[=] call[name[helpers]._unpack_response, parameter[call[name[self].receive_message, parameter[constant[1], name[request_id]]]]]
assert[compare[call[name[response]][constant[number_returned]] equal[==] constant[1]]]
variable[result] assign[=] call[call[name[response]][constant[data]]][constant[0]]
call[name[helpers]._check_command_response, parameter[name[result]]]
return[name[result]] | keyword[def] identifier[write_command] ( identifier[self] , identifier[request_id] , identifier[msg] ):
literal[string]
identifier[self] . identifier[send_message] ( identifier[msg] , literal[int] )
identifier[response] = identifier[helpers] . identifier[_unpack_response] ( identifier[self] . identifier[receive_message] ( literal[int] , identifier[request_id] ))
keyword[assert] identifier[response] [ literal[string] ]== literal[int]
identifier[result] = identifier[response] [ literal[string] ][ literal[int] ]
identifier[helpers] . identifier[_check_command_response] ( identifier[result] )
keyword[return] identifier[result] | def write_command(self, request_id, msg):
"""Send "insert" etc. command, returning response as a dict.
Can raise ConnectionFailure or OperationFailure.
:Parameters:
- `request_id`: an int.
- `msg`: bytes, the command message.
"""
self.send_message(msg, 0)
response = helpers._unpack_response(self.receive_message(1, request_id))
assert response['number_returned'] == 1
result = response['data'][0]
# Raises NotMasterError or OperationFailure.
helpers._check_command_response(result)
return result |
def dump(obj):
"Recursive convert a live GUI object to a resource list/dict"
from .spec import InitSpec, DimensionSpec, StyleSpec, InternalSpec
import decimal, datetime
from .font import Font
from .graphic import Bitmap, Color
from . import registry
ret = {'type': obj.__class__.__name__}
for (k, spec) in obj._meta.specs.items():
if k == "index": # index is really defined by creation order
continue # also, avoid infinite recursion
v = getattr(obj, k, "")
if (not isinstance(spec, InternalSpec)
and v != spec.default
and (k != 'id' or v > 0)
and isinstance(v,
(basestring, int, long, float, bool, dict, list,
decimal.Decimal,
datetime.datetime, datetime.date, datetime.time,
Font, Color))
and repr(v) != 'None'
and k != 'parent'
):
ret[k] = v
for ctl in obj:
if ret['type'] in registry.MENU:
ret.setdefault('items', []).append(dump(ctl))
else:
res = dump(ctl)
if 'menubar' in res:
ret.setdefault('menubar', []).append(res.pop('menubar'))
else:
ret.setdefault('components', []).append(res)
return ret | def function[dump, parameter[obj]]:
constant[Recursive convert a live GUI object to a resource list/dict]
from relative_module[spec] import module[InitSpec], module[DimensionSpec], module[StyleSpec], module[InternalSpec]
import module[decimal], module[datetime]
from relative_module[font] import module[Font]
from relative_module[graphic] import module[Bitmap], module[Color]
from relative_module[None] import module[registry]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da1b03bacb0>], [<ast.Attribute object at 0x7da1b03b9a50>]]
for taget[tuple[[<ast.Name object at 0x7da1b03b96f0>, <ast.Name object at 0x7da1b03b8dc0>]]] in starred[call[name[obj]._meta.specs.items, parameter[]]] begin[:]
if compare[name[k] equal[==] constant[index]] begin[:]
continue
variable[v] assign[=] call[name[getattr], parameter[name[obj], name[k], constant[]]]
if <ast.BoolOp object at 0x7da1b03ba290> begin[:]
call[name[ret]][name[k]] assign[=] name[v]
for taget[name[ctl]] in starred[name[obj]] begin[:]
if compare[call[name[ret]][constant[type]] in name[registry].MENU] begin[:]
call[call[name[ret].setdefault, parameter[constant[items], list[[]]]].append, parameter[call[name[dump], parameter[name[ctl]]]]]
return[name[ret]] | keyword[def] identifier[dump] ( identifier[obj] ):
literal[string]
keyword[from] . identifier[spec] keyword[import] identifier[InitSpec] , identifier[DimensionSpec] , identifier[StyleSpec] , identifier[InternalSpec]
keyword[import] identifier[decimal] , identifier[datetime]
keyword[from] . identifier[font] keyword[import] identifier[Font]
keyword[from] . identifier[graphic] keyword[import] identifier[Bitmap] , identifier[Color]
keyword[from] . keyword[import] identifier[registry]
identifier[ret] ={ literal[string] : identifier[obj] . identifier[__class__] . identifier[__name__] }
keyword[for] ( identifier[k] , identifier[spec] ) keyword[in] identifier[obj] . identifier[_meta] . identifier[specs] . identifier[items] ():
keyword[if] identifier[k] == literal[string] :
keyword[continue]
identifier[v] = identifier[getattr] ( identifier[obj] , identifier[k] , literal[string] )
keyword[if] ( keyword[not] identifier[isinstance] ( identifier[spec] , identifier[InternalSpec] )
keyword[and] identifier[v] != identifier[spec] . identifier[default]
keyword[and] ( identifier[k] != literal[string] keyword[or] identifier[v] > literal[int] )
keyword[and] identifier[isinstance] ( identifier[v] ,
( identifier[basestring] , identifier[int] , identifier[long] , identifier[float] , identifier[bool] , identifier[dict] , identifier[list] ,
identifier[decimal] . identifier[Decimal] ,
identifier[datetime] . identifier[datetime] , identifier[datetime] . identifier[date] , identifier[datetime] . identifier[time] ,
identifier[Font] , identifier[Color] ))
keyword[and] identifier[repr] ( identifier[v] )!= literal[string]
keyword[and] identifier[k] != literal[string]
):
identifier[ret] [ identifier[k] ]= identifier[v]
keyword[for] identifier[ctl] keyword[in] identifier[obj] :
keyword[if] identifier[ret] [ literal[string] ] keyword[in] identifier[registry] . identifier[MENU] :
identifier[ret] . identifier[setdefault] ( literal[string] ,[]). identifier[append] ( identifier[dump] ( identifier[ctl] ))
keyword[else] :
identifier[res] = identifier[dump] ( identifier[ctl] )
keyword[if] literal[string] keyword[in] identifier[res] :
identifier[ret] . identifier[setdefault] ( literal[string] ,[]). identifier[append] ( identifier[res] . identifier[pop] ( literal[string] ))
keyword[else] :
identifier[ret] . identifier[setdefault] ( literal[string] ,[]). identifier[append] ( identifier[res] )
keyword[return] identifier[ret] | def dump(obj):
"""Recursive convert a live GUI object to a resource list/dict"""
from .spec import InitSpec, DimensionSpec, StyleSpec, InternalSpec
import decimal, datetime
from .font import Font
from .graphic import Bitmap, Color
from . import registry
ret = {'type': obj.__class__.__name__}
for (k, spec) in obj._meta.specs.items():
if k == 'index': # index is really defined by creation order
continue # also, avoid infinite recursion # depends on [control=['if'], data=[]]
v = getattr(obj, k, '')
if not isinstance(spec, InternalSpec) and v != spec.default and (k != 'id' or v > 0) and isinstance(v, (basestring, int, long, float, bool, dict, list, decimal.Decimal, datetime.datetime, datetime.date, datetime.time, Font, Color)) and (repr(v) != 'None') and (k != 'parent'):
ret[k] = v # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
for ctl in obj:
if ret['type'] in registry.MENU:
ret.setdefault('items', []).append(dump(ctl)) # depends on [control=['if'], data=[]]
else:
res = dump(ctl)
if 'menubar' in res:
ret.setdefault('menubar', []).append(res.pop('menubar')) # depends on [control=['if'], data=['res']]
else:
ret.setdefault('components', []).append(res) # depends on [control=['for'], data=['ctl']]
return ret |
def append(self, station):
""" Append station to database.
Returns the index of the appended station.
"""
rec = station._pack(self)
with self:
_libtcd.add_tide_record(rec, self._header)
return self._header.number_of_records - 1 | def function[append, parameter[self, station]]:
constant[ Append station to database.
Returns the index of the appended station.
]
variable[rec] assign[=] call[name[station]._pack, parameter[name[self]]]
with name[self] begin[:]
call[name[_libtcd].add_tide_record, parameter[name[rec], name[self]._header]]
return[binary_operation[name[self]._header.number_of_records - constant[1]]] | keyword[def] identifier[append] ( identifier[self] , identifier[station] ):
literal[string]
identifier[rec] = identifier[station] . identifier[_pack] ( identifier[self] )
keyword[with] identifier[self] :
identifier[_libtcd] . identifier[add_tide_record] ( identifier[rec] , identifier[self] . identifier[_header] )
keyword[return] identifier[self] . identifier[_header] . identifier[number_of_records] - literal[int] | def append(self, station):
""" Append station to database.
Returns the index of the appended station.
"""
rec = station._pack(self)
with self:
_libtcd.add_tide_record(rec, self._header)
return self._header.number_of_records - 1 # depends on [control=['with'], data=[]] |
def get_sugg(self, keyword):
"""获取微信搜狗搜索关键词联想
Parameters
----------
keyword : str or unicode
关键词
Returns
-------
list[str]
联想关键词列表
Raises
------
WechatSogouRequestsException
"""
url = 'http://w.sugg.sogou.com/sugg/ajaj_json.jsp?key={}&type=wxpub&pr=web'.format(
quote(keyword.encode('utf-8')))
r = requests.get(url)
if not r.ok:
raise WechatSogouRequestsException('get_sugg', r)
sugg = re.findall(u'\["' + keyword + '",(.*?),\["', r.text)[0]
return json.loads(sugg) | def function[get_sugg, parameter[self, keyword]]:
constant[获取微信搜狗搜索关键词联想
Parameters
----------
keyword : str or unicode
关键词
Returns
-------
list[str]
联想关键词列表
Raises
------
WechatSogouRequestsException
]
variable[url] assign[=] call[constant[http://w.sugg.sogou.com/sugg/ajaj_json.jsp?key={}&type=wxpub&pr=web].format, parameter[call[name[quote], parameter[call[name[keyword].encode, parameter[constant[utf-8]]]]]]]
variable[r] assign[=] call[name[requests].get, parameter[name[url]]]
if <ast.UnaryOp object at 0x7da1b2046170> begin[:]
<ast.Raise object at 0x7da1b2046b00>
variable[sugg] assign[=] call[call[name[re].findall, parameter[binary_operation[binary_operation[constant[\["] + name[keyword]] + constant[",(.*?),\["]], name[r].text]]][constant[0]]
return[call[name[json].loads, parameter[name[sugg]]]] | keyword[def] identifier[get_sugg] ( identifier[self] , identifier[keyword] ):
literal[string]
identifier[url] = literal[string] . identifier[format] (
identifier[quote] ( identifier[keyword] . identifier[encode] ( literal[string] )))
identifier[r] = identifier[requests] . identifier[get] ( identifier[url] )
keyword[if] keyword[not] identifier[r] . identifier[ok] :
keyword[raise] identifier[WechatSogouRequestsException] ( literal[string] , identifier[r] )
identifier[sugg] = identifier[re] . identifier[findall] ( literal[string] + identifier[keyword] + literal[string] , identifier[r] . identifier[text] )[ literal[int] ]
keyword[return] identifier[json] . identifier[loads] ( identifier[sugg] ) | def get_sugg(self, keyword):
"""获取微信搜狗搜索关键词联想
Parameters
----------
keyword : str or unicode
关键词
Returns
-------
list[str]
联想关键词列表
Raises
------
WechatSogouRequestsException
"""
url = 'http://w.sugg.sogou.com/sugg/ajaj_json.jsp?key={}&type=wxpub&pr=web'.format(quote(keyword.encode('utf-8')))
r = requests.get(url)
if not r.ok:
raise WechatSogouRequestsException('get_sugg', r) # depends on [control=['if'], data=[]]
sugg = re.findall(u'\\["' + keyword + '",(.*?),\\["', r.text)[0]
return json.loads(sugg) |
def add(self, data, status_code, status_reason):
""" Add data to this response.
This method should be used to add data to the response. The data should
be all the data returned in one page from SpaceGDN.
If this method is called before, data will be appended to the existing
data with `+=`, this means dicts, for instance will not work with
multiple pages.
Arguments:
`data`
The data to add
`status_code`
The HTTP response code of the HTTP response containing the data
`status_reason`
The reason or description for the HTTP response code
"""
self.status_code = status_code
self.status_reason = status_reason
self.success = status_code == 200
if data:
if not self.data:
self.data = data
else:
self.data += data | def function[add, parameter[self, data, status_code, status_reason]]:
constant[ Add data to this response.
This method should be used to add data to the response. The data should
be all the data returned in one page from SpaceGDN.
If this method is called before, data will be appended to the existing
data with `+=`, this means dicts, for instance will not work with
multiple pages.
Arguments:
`data`
The data to add
`status_code`
The HTTP response code of the HTTP response containing the data
`status_reason`
The reason or description for the HTTP response code
]
name[self].status_code assign[=] name[status_code]
name[self].status_reason assign[=] name[status_reason]
name[self].success assign[=] compare[name[status_code] equal[==] constant[200]]
if name[data] begin[:]
if <ast.UnaryOp object at 0x7da1b133cd00> begin[:]
name[self].data assign[=] name[data] | keyword[def] identifier[add] ( identifier[self] , identifier[data] , identifier[status_code] , identifier[status_reason] ):
literal[string]
identifier[self] . identifier[status_code] = identifier[status_code]
identifier[self] . identifier[status_reason] = identifier[status_reason]
identifier[self] . identifier[success] = identifier[status_code] == literal[int]
keyword[if] identifier[data] :
keyword[if] keyword[not] identifier[self] . identifier[data] :
identifier[self] . identifier[data] = identifier[data]
keyword[else] :
identifier[self] . identifier[data] += identifier[data] | def add(self, data, status_code, status_reason):
""" Add data to this response.
This method should be used to add data to the response. The data should
be all the data returned in one page from SpaceGDN.
If this method is called before, data will be appended to the existing
data with `+=`, this means dicts, for instance will not work with
multiple pages.
Arguments:
`data`
The data to add
`status_code`
The HTTP response code of the HTTP response containing the data
`status_reason`
The reason or description for the HTTP response code
"""
self.status_code = status_code
self.status_reason = status_reason
self.success = status_code == 200
if data:
if not self.data:
self.data = data # depends on [control=['if'], data=[]]
else:
self.data += data # depends on [control=['if'], data=[]] |
def print_exc_plus(stream=sys.stdout):
'''print normal traceback information with some local arg values'''
# code of this mothod is mainly from <Python Cookbook>
write = stream.write # assert the mothod exists
flush = stream.flush
tp, value, tb = sys.exc_info()
while tb.tb_next:
tb = tb.tb_next
stack = list()
f = tb.tb_frame
while f:
stack.append(f)
f = f.f_back
stack.reverse()
try:
traceback.print_exc(None, stream)
except BaseException as e:
write(u("FAILED PRINTING TRACE\n\n"))
write(u(str(value)))
write(u('\n\n'))
finally:
flush()
write(u('Locals by frame, innermost last\n'))
for frame in stack:
write(u('\nFrame %s in %s at line %s\n' % (frame.f_code.co_name,
frame.f_code.co_filename,
frame.f_lineno)))
for key, value, in frame.f_locals.items():
write(u('\t%20s = ' % key))
try:
write(u('%s\n' % value))
except BaseException:
write(u('<ERROR WHILE PRINTING VALUE>\n'))
flush() | def function[print_exc_plus, parameter[stream]]:
constant[print normal traceback information with some local arg values]
variable[write] assign[=] name[stream].write
variable[flush] assign[=] name[stream].flush
<ast.Tuple object at 0x7da1b235ad70> assign[=] call[name[sys].exc_info, parameter[]]
while name[tb].tb_next begin[:]
variable[tb] assign[=] name[tb].tb_next
variable[stack] assign[=] call[name[list], parameter[]]
variable[f] assign[=] name[tb].tb_frame
while name[f] begin[:]
call[name[stack].append, parameter[name[f]]]
variable[f] assign[=] name[f].f_back
call[name[stack].reverse, parameter[]]
<ast.Try object at 0x7da1b2371e10>
call[name[write], parameter[call[name[u], parameter[constant[Locals by frame, innermost last
]]]]]
for taget[name[frame]] in starred[name[stack]] begin[:]
call[name[write], parameter[call[name[u], parameter[binary_operation[constant[
Frame %s in %s at line %s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b2504fa0>, <ast.Attribute object at 0x7da1b2506020>, <ast.Attribute object at 0x7da1b2505570>]]]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b2370850>, <ast.Name object at 0x7da1b2372bf0>]]] in starred[call[name[frame].f_locals.items, parameter[]]] begin[:]
call[name[write], parameter[call[name[u], parameter[binary_operation[constant[ %20s = ] <ast.Mod object at 0x7da2590d6920> name[key]]]]]]
<ast.Try object at 0x7da1b23728f0>
call[name[flush], parameter[]] | keyword[def] identifier[print_exc_plus] ( identifier[stream] = identifier[sys] . identifier[stdout] ):
literal[string]
identifier[write] = identifier[stream] . identifier[write]
identifier[flush] = identifier[stream] . identifier[flush]
identifier[tp] , identifier[value] , identifier[tb] = identifier[sys] . identifier[exc_info] ()
keyword[while] identifier[tb] . identifier[tb_next] :
identifier[tb] = identifier[tb] . identifier[tb_next]
identifier[stack] = identifier[list] ()
identifier[f] = identifier[tb] . identifier[tb_frame]
keyword[while] identifier[f] :
identifier[stack] . identifier[append] ( identifier[f] )
identifier[f] = identifier[f] . identifier[f_back]
identifier[stack] . identifier[reverse] ()
keyword[try] :
identifier[traceback] . identifier[print_exc] ( keyword[None] , identifier[stream] )
keyword[except] identifier[BaseException] keyword[as] identifier[e] :
identifier[write] ( identifier[u] ( literal[string] ))
identifier[write] ( identifier[u] ( identifier[str] ( identifier[value] )))
identifier[write] ( identifier[u] ( literal[string] ))
keyword[finally] :
identifier[flush] ()
identifier[write] ( identifier[u] ( literal[string] ))
keyword[for] identifier[frame] keyword[in] identifier[stack] :
identifier[write] ( identifier[u] ( literal[string] %( identifier[frame] . identifier[f_code] . identifier[co_name] ,
identifier[frame] . identifier[f_code] . identifier[co_filename] ,
identifier[frame] . identifier[f_lineno] )))
keyword[for] identifier[key] , identifier[value] , keyword[in] identifier[frame] . identifier[f_locals] . identifier[items] ():
identifier[write] ( identifier[u] ( literal[string] % identifier[key] ))
keyword[try] :
identifier[write] ( identifier[u] ( literal[string] % identifier[value] ))
keyword[except] identifier[BaseException] :
identifier[write] ( identifier[u] ( literal[string] ))
identifier[flush] () | def print_exc_plus(stream=sys.stdout):
"""print normal traceback information with some local arg values"""
# code of this mothod is mainly from <Python Cookbook>
write = stream.write # assert the mothod exists
flush = stream.flush
(tp, value, tb) = sys.exc_info()
while tb.tb_next:
tb = tb.tb_next # depends on [control=['while'], data=[]]
stack = list()
f = tb.tb_frame
while f:
stack.append(f)
f = f.f_back # depends on [control=['while'], data=[]]
stack.reverse()
try:
traceback.print_exc(None, stream) # depends on [control=['try'], data=[]]
except BaseException as e:
write(u('FAILED PRINTING TRACE\n\n'))
write(u(str(value)))
write(u('\n\n')) # depends on [control=['except'], data=[]]
finally:
flush()
write(u('Locals by frame, innermost last\n'))
for frame in stack:
write(u('\nFrame %s in %s at line %s\n' % (frame.f_code.co_name, frame.f_code.co_filename, frame.f_lineno))) # depends on [control=['for'], data=['frame']]
for (key, value) in frame.f_locals.items():
write(u('\t%20s = ' % key))
try:
write(u('%s\n' % value)) # depends on [control=['try'], data=[]]
except BaseException:
write(u('<ERROR WHILE PRINTING VALUE>\n')) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
flush() |
def python_version_matchers():
"""Return set of string representations of current python version"""
version = sys.version_info
patterns = [
"{0}",
"{0}{1}",
"{0}.{1}",
]
matchers = [
pattern.format(*version)
for pattern in patterns
] + [None]
return set(matchers) | def function[python_version_matchers, parameter[]]:
constant[Return set of string representations of current python version]
variable[version] assign[=] name[sys].version_info
variable[patterns] assign[=] list[[<ast.Constant object at 0x7da18fe92410>, <ast.Constant object at 0x7da18fe90760>, <ast.Constant object at 0x7da18fe93940>]]
variable[matchers] assign[=] binary_operation[<ast.ListComp object at 0x7da18fe92b90> + list[[<ast.Constant object at 0x7da18fe90850>]]]
return[call[name[set], parameter[name[matchers]]]] | keyword[def] identifier[python_version_matchers] ():
literal[string]
identifier[version] = identifier[sys] . identifier[version_info]
identifier[patterns] =[
literal[string] ,
literal[string] ,
literal[string] ,
]
identifier[matchers] =[
identifier[pattern] . identifier[format] (* identifier[version] )
keyword[for] identifier[pattern] keyword[in] identifier[patterns]
]+[ keyword[None] ]
keyword[return] identifier[set] ( identifier[matchers] ) | def python_version_matchers():
"""Return set of string representations of current python version"""
version = sys.version_info
patterns = ['{0}', '{0}{1}', '{0}.{1}']
matchers = [pattern.format(*version) for pattern in patterns] + [None]
return set(matchers) |
def gitmodule_report(self):
"""
Yields:
str: .gitmodules configuration lines for this repository
"""
fpath = self.relpath
if fpath == '.':
return
yield '[submodule "%s"]' % fpath.replace(os.path.sep, '_')
yield " path = %s" % fpath
yield " url = %s" % self.remote_url
yield "" | def function[gitmodule_report, parameter[self]]:
constant[
Yields:
str: .gitmodules configuration lines for this repository
]
variable[fpath] assign[=] name[self].relpath
if compare[name[fpath] equal[==] constant[.]] begin[:]
return[None]
<ast.Yield object at 0x7da20e955090>
<ast.Yield object at 0x7da1b0806c80>
<ast.Yield object at 0x7da1b0806110>
<ast.Yield object at 0x7da1b0807d00> | keyword[def] identifier[gitmodule_report] ( identifier[self] ):
literal[string]
identifier[fpath] = identifier[self] . identifier[relpath]
keyword[if] identifier[fpath] == literal[string] :
keyword[return]
keyword[yield] literal[string] % identifier[fpath] . identifier[replace] ( identifier[os] . identifier[path] . identifier[sep] , literal[string] )
keyword[yield] literal[string] % identifier[fpath]
keyword[yield] literal[string] % identifier[self] . identifier[remote_url]
keyword[yield] literal[string] | def gitmodule_report(self):
"""
Yields:
str: .gitmodules configuration lines for this repository
"""
fpath = self.relpath
if fpath == '.':
return # depends on [control=['if'], data=[]]
yield ('[submodule "%s"]' % fpath.replace(os.path.sep, '_'))
yield (' path = %s' % fpath)
yield (' url = %s' % self.remote_url)
yield '' |
def _choices(self):
"""
Generate a string of choices as key/value pairs
:return: string
"""
# Generate key/value strings
pairs = []
for key, value in self.choices.items():
pairs.append(str(value) + "=" + str(key))
# Assemble into overall string and escape
return GPTaskSpec.manifest_escape(";".join(pairs)) | def function[_choices, parameter[self]]:
constant[
Generate a string of choices as key/value pairs
:return: string
]
variable[pairs] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18eb55690>, <ast.Name object at 0x7da18eb54b50>]]] in starred[call[name[self].choices.items, parameter[]]] begin[:]
call[name[pairs].append, parameter[binary_operation[binary_operation[call[name[str], parameter[name[value]]] + constant[=]] + call[name[str], parameter[name[key]]]]]]
return[call[name[GPTaskSpec].manifest_escape, parameter[call[constant[;].join, parameter[name[pairs]]]]]] | keyword[def] identifier[_choices] ( identifier[self] ):
literal[string]
identifier[pairs] =[]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[self] . identifier[choices] . identifier[items] ():
identifier[pairs] . identifier[append] ( identifier[str] ( identifier[value] )+ literal[string] + identifier[str] ( identifier[key] ))
keyword[return] identifier[GPTaskSpec] . identifier[manifest_escape] ( literal[string] . identifier[join] ( identifier[pairs] )) | def _choices(self):
"""
Generate a string of choices as key/value pairs
:return: string
"""
# Generate key/value strings
pairs = []
for (key, value) in self.choices.items():
pairs.append(str(value) + '=' + str(key)) # depends on [control=['for'], data=[]]
# Assemble into overall string and escape
return GPTaskSpec.manifest_escape(';'.join(pairs)) |
def read_pot_status(self):
"""Read the status of the digital pot. Firmware v18+ only.
The return value is a dictionary containing the following as
unsigned 8-bit integers: FanON, LaserON, FanDACVal, LaserDACVal.
:rtype: dict
:Example:
>>> alpha.read_pot_status()
{
'LaserDACVal': 230,
'FanDACVal': 255,
'FanON': 0,
'LaserON': 0
}
"""
# Send the command byte and wait 10 ms
a = self.cnxn.xfer([0x13])[0]
sleep(10e-3)
# Build an array of the results
res = []
for i in range(4):
res.append(self.cnxn.xfer([0x00])[0])
sleep(0.1)
return {
'FanON': res[0],
'LaserON': res[1],
'FanDACVal': res[2],
'LaserDACVal': res[3]
} | def function[read_pot_status, parameter[self]]:
constant[Read the status of the digital pot. Firmware v18+ only.
The return value is a dictionary containing the following as
unsigned 8-bit integers: FanON, LaserON, FanDACVal, LaserDACVal.
:rtype: dict
:Example:
>>> alpha.read_pot_status()
{
'LaserDACVal': 230,
'FanDACVal': 255,
'FanON': 0,
'LaserON': 0
}
]
variable[a] assign[=] call[call[name[self].cnxn.xfer, parameter[list[[<ast.Constant object at 0x7da1afe6db10>]]]]][constant[0]]
call[name[sleep], parameter[constant[0.01]]]
variable[res] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[4]]]] begin[:]
call[name[res].append, parameter[call[call[name[self].cnxn.xfer, parameter[list[[<ast.Constant object at 0x7da1afe6e0b0>]]]]][constant[0]]]]
call[name[sleep], parameter[constant[0.1]]]
return[dictionary[[<ast.Constant object at 0x7da1afe6e2c0>, <ast.Constant object at 0x7da1afe6e1d0>, <ast.Constant object at 0x7da1afe6c9a0>, <ast.Constant object at 0x7da1afe6c130>], [<ast.Subscript object at 0x7da1afe6c0a0>, <ast.Subscript object at 0x7da1afe6fe50>, <ast.Subscript object at 0x7da1afe6ff40>, <ast.Subscript object at 0x7da1afe6fd60>]]] | keyword[def] identifier[read_pot_status] ( identifier[self] ):
literal[string]
identifier[a] = identifier[self] . identifier[cnxn] . identifier[xfer] ([ literal[int] ])[ literal[int] ]
identifier[sleep] ( literal[int] )
identifier[res] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ):
identifier[res] . identifier[append] ( identifier[self] . identifier[cnxn] . identifier[xfer] ([ literal[int] ])[ literal[int] ])
identifier[sleep] ( literal[int] )
keyword[return] {
literal[string] : identifier[res] [ literal[int] ],
literal[string] : identifier[res] [ literal[int] ],
literal[string] : identifier[res] [ literal[int] ],
literal[string] : identifier[res] [ literal[int] ]
} | def read_pot_status(self):
"""Read the status of the digital pot. Firmware v18+ only.
The return value is a dictionary containing the following as
unsigned 8-bit integers: FanON, LaserON, FanDACVal, LaserDACVal.
:rtype: dict
:Example:
>>> alpha.read_pot_status()
{
'LaserDACVal': 230,
'FanDACVal': 255,
'FanON': 0,
'LaserON': 0
}
"""
# Send the command byte and wait 10 ms
a = self.cnxn.xfer([19])[0]
sleep(0.01)
# Build an array of the results
res = []
for i in range(4):
res.append(self.cnxn.xfer([0])[0]) # depends on [control=['for'], data=[]]
sleep(0.1)
return {'FanON': res[0], 'LaserON': res[1], 'FanDACVal': res[2], 'LaserDACVal': res[3]} |
def status(self):
'''returns rates'''
counts = {}
for bucket in self.buckets:
for x in bucket:
if not x in counts:
counts[x] = 0
counts[x] += bucket[x]
ret = ""
mtypes = counts.keys()
mtypes.sort()
for mtype in mtypes:
ret += "%s: %0.1f/s\n" % (mtype,
counts[mtype]/float(len(self.buckets)))
return ret | def function[status, parameter[self]]:
constant[returns rates]
variable[counts] assign[=] dictionary[[], []]
for taget[name[bucket]] in starred[name[self].buckets] begin[:]
for taget[name[x]] in starred[name[bucket]] begin[:]
if <ast.UnaryOp object at 0x7da1b1720730> begin[:]
call[name[counts]][name[x]] assign[=] constant[0]
<ast.AugAssign object at 0x7da1b17213c0>
variable[ret] assign[=] constant[]
variable[mtypes] assign[=] call[name[counts].keys, parameter[]]
call[name[mtypes].sort, parameter[]]
for taget[name[mtype]] in starred[name[mtypes]] begin[:]
<ast.AugAssign object at 0x7da1b1609d20>
return[name[ret]] | keyword[def] identifier[status] ( identifier[self] ):
literal[string]
identifier[counts] ={}
keyword[for] identifier[bucket] keyword[in] identifier[self] . identifier[buckets] :
keyword[for] identifier[x] keyword[in] identifier[bucket] :
keyword[if] keyword[not] identifier[x] keyword[in] identifier[counts] :
identifier[counts] [ identifier[x] ]= literal[int]
identifier[counts] [ identifier[x] ]+= identifier[bucket] [ identifier[x] ]
identifier[ret] = literal[string]
identifier[mtypes] = identifier[counts] . identifier[keys] ()
identifier[mtypes] . identifier[sort] ()
keyword[for] identifier[mtype] keyword[in] identifier[mtypes] :
identifier[ret] += literal[string] %( identifier[mtype] ,
identifier[counts] [ identifier[mtype] ]/ identifier[float] ( identifier[len] ( identifier[self] . identifier[buckets] )))
keyword[return] identifier[ret] | def status(self):
"""returns rates"""
counts = {}
for bucket in self.buckets:
for x in bucket:
if not x in counts:
counts[x] = 0 # depends on [control=['if'], data=[]]
counts[x] += bucket[x] # depends on [control=['for'], data=['x']] # depends on [control=['for'], data=['bucket']]
ret = ''
mtypes = counts.keys()
mtypes.sort()
for mtype in mtypes:
ret += '%s: %0.1f/s\n' % (mtype, counts[mtype] / float(len(self.buckets))) # depends on [control=['for'], data=['mtype']]
return ret |
def get_current_venv():
"""
Returns the path to the current virtualenv
"""
if 'VIRTUAL_ENV' in os.environ:
venv = os.environ['VIRTUAL_ENV']
elif os.path.exists('.python-version'): # pragma: no cover
try:
subprocess.check_output(['pyenv', 'help'], stderr=subprocess.STDOUT)
except OSError:
print("This directory seems to have pyenv's local venv, "
"but pyenv executable was not found.")
with open('.python-version', 'r') as f:
# minor fix in how .python-version is read
# Related: https://github.com/Miserlou/Zappa/issues/921
env_name = f.readline().strip()
bin_path = subprocess.check_output(['pyenv', 'which', 'python']).decode('utf-8')
venv = bin_path[:bin_path.rfind(env_name)] + env_name
else: # pragma: no cover
return None
return venv | def function[get_current_venv, parameter[]]:
constant[
Returns the path to the current virtualenv
]
if compare[constant[VIRTUAL_ENV] in name[os].environ] begin[:]
variable[venv] assign[=] call[name[os].environ][constant[VIRTUAL_ENV]]
return[name[venv]] | keyword[def] identifier[get_current_venv] ():
literal[string]
keyword[if] literal[string] keyword[in] identifier[os] . identifier[environ] :
identifier[venv] = identifier[os] . identifier[environ] [ literal[string] ]
keyword[elif] identifier[os] . identifier[path] . identifier[exists] ( literal[string] ):
keyword[try] :
identifier[subprocess] . identifier[check_output] ([ literal[string] , literal[string] ], identifier[stderr] = identifier[subprocess] . identifier[STDOUT] )
keyword[except] identifier[OSError] :
identifier[print] ( literal[string]
literal[string] )
keyword[with] identifier[open] ( literal[string] , literal[string] ) keyword[as] identifier[f] :
identifier[env_name] = identifier[f] . identifier[readline] (). identifier[strip] ()
identifier[bin_path] = identifier[subprocess] . identifier[check_output] ([ literal[string] , literal[string] , literal[string] ]). identifier[decode] ( literal[string] )
identifier[venv] = identifier[bin_path] [: identifier[bin_path] . identifier[rfind] ( identifier[env_name] )]+ identifier[env_name]
keyword[else] :
keyword[return] keyword[None]
keyword[return] identifier[venv] | def get_current_venv():
"""
Returns the path to the current virtualenv
"""
if 'VIRTUAL_ENV' in os.environ:
venv = os.environ['VIRTUAL_ENV'] # depends on [control=['if'], data=[]]
elif os.path.exists('.python-version'): # pragma: no cover
try:
subprocess.check_output(['pyenv', 'help'], stderr=subprocess.STDOUT) # depends on [control=['try'], data=[]]
except OSError:
print("This directory seems to have pyenv's local venv, but pyenv executable was not found.") # depends on [control=['except'], data=[]]
with open('.python-version', 'r') as f:
# minor fix in how .python-version is read
# Related: https://github.com/Miserlou/Zappa/issues/921
env_name = f.readline().strip() # depends on [control=['with'], data=['f']]
bin_path = subprocess.check_output(['pyenv', 'which', 'python']).decode('utf-8')
venv = bin_path[:bin_path.rfind(env_name)] + env_name # depends on [control=['if'], data=[]]
else: # pragma: no cover
return None
return venv |
async def uv_protection_window(
self, low: float = 3.5, high: float = 3.5) -> dict:
"""Get data on when a UV protection window is."""
return await self.request(
'get', 'protection', params={
'from': str(low),
'to': str(high)
}) | <ast.AsyncFunctionDef object at 0x7da1b2596980> | keyword[async] keyword[def] identifier[uv_protection_window] (
identifier[self] , identifier[low] : identifier[float] = literal[int] , identifier[high] : identifier[float] = literal[int] )-> identifier[dict] :
literal[string]
keyword[return] keyword[await] identifier[self] . identifier[request] (
literal[string] , literal[string] , identifier[params] ={
literal[string] : identifier[str] ( identifier[low] ),
literal[string] : identifier[str] ( identifier[high] )
}) | async def uv_protection_window(self, low: float=3.5, high: float=3.5) -> dict:
"""Get data on when a UV protection window is."""
return await self.request('get', 'protection', params={'from': str(low), 'to': str(high)}) |
def mex_hat_dir(x, y, sigma):
r"""Directional Mexican hat
This method implements a directional Mexican hat (or Ricker) wavelet.
Parameters
----------
x : float
Input data point for Gaussian
y : float
Input data point for Mexican hat
sigma : float
Standard deviation (filter scale)
Returns
-------
float directional Mexican hat filtered data point
Examples
--------
>>> from modopt.signal.filter import mex_hat_dir
>>> mex_hat_dir(1, 2, 1)
0.17606952612856686
"""
x = check_float(x)
sigma = check_float(sigma)
return -0.5 * (x / sigma) ** 2 * mex_hat(y, sigma) | def function[mex_hat_dir, parameter[x, y, sigma]]:
constant[Directional Mexican hat
This method implements a directional Mexican hat (or Ricker) wavelet.
Parameters
----------
x : float
Input data point for Gaussian
y : float
Input data point for Mexican hat
sigma : float
Standard deviation (filter scale)
Returns
-------
float directional Mexican hat filtered data point
Examples
--------
>>> from modopt.signal.filter import mex_hat_dir
>>> mex_hat_dir(1, 2, 1)
0.17606952612856686
]
variable[x] assign[=] call[name[check_float], parameter[name[x]]]
variable[sigma] assign[=] call[name[check_float], parameter[name[sigma]]]
return[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b0efb430> * binary_operation[binary_operation[name[x] / name[sigma]] ** constant[2]]] * call[name[mex_hat], parameter[name[y], name[sigma]]]]] | keyword[def] identifier[mex_hat_dir] ( identifier[x] , identifier[y] , identifier[sigma] ):
literal[string]
identifier[x] = identifier[check_float] ( identifier[x] )
identifier[sigma] = identifier[check_float] ( identifier[sigma] )
keyword[return] - literal[int] *( identifier[x] / identifier[sigma] )** literal[int] * identifier[mex_hat] ( identifier[y] , identifier[sigma] ) | def mex_hat_dir(x, y, sigma):
"""Directional Mexican hat
This method implements a directional Mexican hat (or Ricker) wavelet.
Parameters
----------
x : float
Input data point for Gaussian
y : float
Input data point for Mexican hat
sigma : float
Standard deviation (filter scale)
Returns
-------
float directional Mexican hat filtered data point
Examples
--------
>>> from modopt.signal.filter import mex_hat_dir
>>> mex_hat_dir(1, 2, 1)
0.17606952612856686
"""
x = check_float(x)
sigma = check_float(sigma)
return -0.5 * (x / sigma) ** 2 * mex_hat(y, sigma) |
def parse_hh_mm(self):
"""Parses raw time
:return: Time parsed
"""
split_count = self.raw.count(":")
if split_count == 1: # hh:mm
return datetime.strptime(self.raw, "%H:%M").time()
return datetime.strptime(self.raw, "%M").time() | def function[parse_hh_mm, parameter[self]]:
constant[Parses raw time
:return: Time parsed
]
variable[split_count] assign[=] call[name[self].raw.count, parameter[constant[:]]]
if compare[name[split_count] equal[==] constant[1]] begin[:]
return[call[call[name[datetime].strptime, parameter[name[self].raw, constant[%H:%M]]].time, parameter[]]]
return[call[call[name[datetime].strptime, parameter[name[self].raw, constant[%M]]].time, parameter[]]] | keyword[def] identifier[parse_hh_mm] ( identifier[self] ):
literal[string]
identifier[split_count] = identifier[self] . identifier[raw] . identifier[count] ( literal[string] )
keyword[if] identifier[split_count] == literal[int] :
keyword[return] identifier[datetime] . identifier[strptime] ( identifier[self] . identifier[raw] , literal[string] ). identifier[time] ()
keyword[return] identifier[datetime] . identifier[strptime] ( identifier[self] . identifier[raw] , literal[string] ). identifier[time] () | def parse_hh_mm(self):
"""Parses raw time
:return: Time parsed
"""
split_count = self.raw.count(':')
if split_count == 1: # hh:mm
return datetime.strptime(self.raw, '%H:%M').time() # depends on [control=['if'], data=[]]
return datetime.strptime(self.raw, '%M').time() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.