code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def resolve_function(slither, contract_name, function_name):
    """
    Resolves a function instance, given a contract name and function.

    :param slither: Slither analysis object used to look up contracts by name.
    :param contract_name: The name of the contract the function is declared in.
    :param function_name: The name of the function to resolve.
    :return: Returns the resolved function, raises an exception otherwise.
    :raises ResolveFunctionException: If either the contract or the function
        cannot be found under the given names.
    """
    # Obtain the target contract
    contract = slither.get_contract_from_name(contract_name)
    # Verify the contract was resolved successfully
    if contract is None:
        raise ResolveFunctionException(f"Could not resolve target contract: {contract_name}")
    # Obtain the target function: first function on the contract whose name
    # matches, or the None sentinel when there is no match.
    target_function = next((function for function in contract.functions if function.name == function_name), None)
    # Verify we have resolved the function specified.
    if target_function is None:
        raise ResolveFunctionException(f"Could not resolve target function: {contract_name}.{function_name}")
    # Return the resolved function to the caller.
return target_function | def function[resolve_function, parameter[slither, contract_name, function_name]]:
constant[
Resolves a function instance, given a contract name and function.
:param contract_name: The name of the contract the function is declared in.
:param function_name: The name of the function to resolve.
:return: Returns the resolved function, raises an exception otherwise.
]
variable[contract] assign[=] call[name[slither].get_contract_from_name, parameter[name[contract_name]]]
if compare[name[contract] is constant[None]] begin[:]
<ast.Raise object at 0x7da20c6ab7c0>
variable[target_function] assign[=] call[name[next], parameter[<ast.GeneratorExp object at 0x7da20c6a9030>, constant[None]]]
if compare[name[target_function] is constant[None]] begin[:]
<ast.Raise object at 0x7da20c6ab250>
return[name[target_function]] | keyword[def] identifier[resolve_function] ( identifier[slither] , identifier[contract_name] , identifier[function_name] ):
literal[string]
identifier[contract] = identifier[slither] . identifier[get_contract_from_name] ( identifier[contract_name] )
keyword[if] identifier[contract] keyword[is] keyword[None] :
keyword[raise] identifier[ResolveFunctionException] ( literal[string] )
identifier[target_function] = identifier[next] (( identifier[function] keyword[for] identifier[function] keyword[in] identifier[contract] . identifier[functions] keyword[if] identifier[function] . identifier[name] == identifier[function_name] ), keyword[None] )
keyword[if] identifier[target_function] keyword[is] keyword[None] :
keyword[raise] identifier[ResolveFunctionException] ( literal[string] )
keyword[return] identifier[target_function] | def resolve_function(slither, contract_name, function_name):
"""
Resolves a function instance, given a contract name and function.
:param contract_name: The name of the contract the function is declared in.
:param function_name: The name of the function to resolve.
:return: Returns the resolved function, raises an exception otherwise.
"""
# Obtain the target contract
contract = slither.get_contract_from_name(contract_name)
# Verify the contract was resolved successfully
if contract is None:
raise ResolveFunctionException(f'Could not resolve target contract: {contract_name}') # depends on [control=['if'], data=[]]
# Obtain the target function
target_function = next((function for function in contract.functions if function.name == function_name), None)
# Verify we have resolved the function specified.
if target_function is None:
raise ResolveFunctionException(f'Could not resolve target function: {contract_name}.{function_name}') # depends on [control=['if'], data=[]]
# Add the resolved function to the new list.
return target_function |
def search_mappings(kb, key=None, value=None, match_type=None,
                    sortby=None, page=None, per_page=None):
    """Search tags for knowledge.

    Only knowledge bases of type ``written_as`` are searchable; for any
    other kbtype the function yields an empty result list.

    :param kb: KnwKB model instance whose mappings are queried.
    :param key: mapping key filter; falsy values are normalized to ''.
    :param value: mapping value filter; falsy values are normalized to ''.
    :param match_type: match mode forwarded to the query; defaults to 's'
        (presumably "substring" -- confirm against api.query_kb_mappings).
    :param sortby: sort column forwarded to the query; defaults to 'to'.
    :param page: 1-based page number; defaults to 1.
    :param per_page: page size; defaults to 10.
    :return: list of mapping items for the requested page.
    """
    if kb.kbtype == models.KnwKB.KNWKB_TYPES['written_as']:
        # Run the mapping query and paginate it; only the items of the
        # requested page are returned to the caller.
        return pagination.RestfulSQLAlchemyPagination(
            api.query_kb_mappings(
                kbid=kb.id,
                key=key or '',
                value=value or '',
                match_type=match_type or 's',
                sortby=sortby or 'to',
            ), page=page or 1, per_page=per_page or 10
        ).items
return [] | def function[search_mappings, parameter[kb, key, value, match_type, sortby, page, per_page]]:
constant[Search tags for knowledge.]
if compare[name[kb].kbtype equal[==] call[name[models].KnwKB.KNWKB_TYPES][constant[written_as]]] begin[:]
return[call[name[pagination].RestfulSQLAlchemyPagination, parameter[call[name[api].query_kb_mappings, parameter[]]]].items]
return[list[[]]] | keyword[def] identifier[search_mappings] ( identifier[kb] , identifier[key] = keyword[None] , identifier[value] = keyword[None] , identifier[match_type] = keyword[None] ,
identifier[sortby] = keyword[None] , identifier[page] = keyword[None] , identifier[per_page] = keyword[None] ):
literal[string]
keyword[if] identifier[kb] . identifier[kbtype] == identifier[models] . identifier[KnwKB] . identifier[KNWKB_TYPES] [ literal[string] ]:
keyword[return] identifier[pagination] . identifier[RestfulSQLAlchemyPagination] (
identifier[api] . identifier[query_kb_mappings] (
identifier[kbid] = identifier[kb] . identifier[id] ,
identifier[key] = identifier[key] keyword[or] literal[string] ,
identifier[value] = identifier[value] keyword[or] literal[string] ,
identifier[match_type] = identifier[match_type] keyword[or] literal[string] ,
identifier[sortby] = identifier[sortby] keyword[or] literal[string] ,
), identifier[page] = identifier[page] keyword[or] literal[int] , identifier[per_page] = identifier[per_page] keyword[or] literal[int]
). identifier[items]
keyword[return] [] | def search_mappings(kb, key=None, value=None, match_type=None, sortby=None, page=None, per_page=None):
"""Search tags for knowledge."""
if kb.kbtype == models.KnwKB.KNWKB_TYPES['written_as']:
return pagination.RestfulSQLAlchemyPagination(api.query_kb_mappings(kbid=kb.id, key=key or '', value=value or '', match_type=match_type or 's', sortby=sortby or 'to'), page=page or 1, per_page=per_page or 10).items # depends on [control=['if'], data=[]]
return [] |
def prepare_untran(feat_type, tgt_dir, untran_dir):
    """ Preprocesses untranscribed audio.

    Converts every file in `untran_dir` to a wav under
    <tgt_dir>/wav/untranscribed, splits each wav into 10-second chunks under
    <tgt_dir>/feat/untranscribed, records the chunk prefixes in
    <tgt_dir>/untranscribed_prefixes.txt, and finally runs feature
    extraction of the requested `feat_type` over the chunk directory.

    :param feat_type: feature type forwarded to feat_extract.from_dir.
    :param tgt_dir: target corpus directory (a pathlib.Path -- it is used
        with the `/` operator and `.open`).
    :param untran_dir: directory containing the raw untranscribed audio.
    """
    org_dir = str(untran_dir)
    wav_dir = os.path.join(str(tgt_dir), "wav", "untranscribed")
    feat_dir = os.path.join(str(tgt_dir), "feat", "untranscribed")
    # Create the output directories on first use.
    if not os.path.isdir(wav_dir):
        os.makedirs(wav_dir)
    if not os.path.isdir(feat_dir):
        os.makedirs(feat_dir)
    # Standardize into wav files
    for fn in os.listdir(org_dir):
        in_path = os.path.join(org_dir, fn)
        prefix, _ = os.path.splitext(fn)
        mono16k_wav_path = os.path.join(wav_dir, "%s.wav" % prefix)
        # Conversion is skipped when the target wav already exists.
        if not os.path.isfile(mono16k_wav_path):
            feat_extract.convert_wav(Path(in_path), Path(mono16k_wav_path))
    # Split up the wavs and write prefixes to prefix file.
    wav_fns = os.listdir(wav_dir)
    with (tgt_dir / "untranscribed_prefixes.txt").open("w") as prefix_f:
        for fn in wav_fns:
            in_fn = os.path.join(wav_dir, fn)
            prefix, _ = os.path.splitext(fn)
            # Split into sub-wavs and perform feat extraction.
            split_id = 0
            start, end = 0, 10 #in seconds
            length = utils.wav_length(in_fn)
            while True:
                # Chunk names are "<original prefix>.<chunk index>".
                sub_wav_prefix = "{}.{}".format(prefix, split_id)
                print(sub_wav_prefix, file=prefix_f)
                out_fn = os.path.join(feat_dir, "{}.wav".format(sub_wav_prefix))
                # `ureg` appears to be a pint unit registry; times are
                # converted to milliseconds for trim_wav_ms -- TODO confirm.
                start_time = start * ureg.seconds
                end_time = end * ureg.seconds
                # Trimming is skipped when the chunk file already exists.
                if not Path(out_fn).is_file():
                    wav.trim_wav_ms(Path(in_fn), Path(out_fn),
                                    start_time.to(ureg.milliseconds).magnitude,
                                    end_time.to(ureg.milliseconds).magnitude)
                # Stop after the chunk that reaches past the end of the file;
                # the final chunk may therefore be shorter than 10 seconds.
                if end > length:
                    break
                start += 10
                end += 10
                split_id += 1
    # Do feat extraction.
feat_extract.from_dir(Path(os.path.join(feat_dir)), feat_type=feat_type) | def function[prepare_untran, parameter[feat_type, tgt_dir, untran_dir]]:
constant[ Preprocesses untranscribed audio.]
variable[org_dir] assign[=] call[name[str], parameter[name[untran_dir]]]
variable[wav_dir] assign[=] call[name[os].path.join, parameter[call[name[str], parameter[name[tgt_dir]]], constant[wav], constant[untranscribed]]]
variable[feat_dir] assign[=] call[name[os].path.join, parameter[call[name[str], parameter[name[tgt_dir]]], constant[feat], constant[untranscribed]]]
if <ast.UnaryOp object at 0x7da1b1120490> begin[:]
call[name[os].makedirs, parameter[name[wav_dir]]]
if <ast.UnaryOp object at 0x7da1b11202e0> begin[:]
call[name[os].makedirs, parameter[name[feat_dir]]]
for taget[name[fn]] in starred[call[name[os].listdir, parameter[name[org_dir]]]] begin[:]
variable[in_path] assign[=] call[name[os].path.join, parameter[name[org_dir], name[fn]]]
<ast.Tuple object at 0x7da1b1120c10> assign[=] call[name[os].path.splitext, parameter[name[fn]]]
variable[mono16k_wav_path] assign[=] call[name[os].path.join, parameter[name[wav_dir], binary_operation[constant[%s.wav] <ast.Mod object at 0x7da2590d6920> name[prefix]]]]
if <ast.UnaryOp object at 0x7da1b113b160> begin[:]
call[name[feat_extract].convert_wav, parameter[call[name[Path], parameter[name[in_path]]], call[name[Path], parameter[name[mono16k_wav_path]]]]]
variable[wav_fns] assign[=] call[name[os].listdir, parameter[name[wav_dir]]]
with call[binary_operation[name[tgt_dir] / constant[untranscribed_prefixes.txt]].open, parameter[constant[w]]] begin[:]
for taget[name[fn]] in starred[name[wav_fns]] begin[:]
variable[in_fn] assign[=] call[name[os].path.join, parameter[name[wav_dir], name[fn]]]
<ast.Tuple object at 0x7da1b1139f00> assign[=] call[name[os].path.splitext, parameter[name[fn]]]
variable[split_id] assign[=] constant[0]
<ast.Tuple object at 0x7da1b11387f0> assign[=] tuple[[<ast.Constant object at 0x7da1b113abf0>, <ast.Constant object at 0x7da1b1138fa0>]]
variable[length] assign[=] call[name[utils].wav_length, parameter[name[in_fn]]]
while constant[True] begin[:]
variable[sub_wav_prefix] assign[=] call[constant[{}.{}].format, parameter[name[prefix], name[split_id]]]
call[name[print], parameter[name[sub_wav_prefix]]]
variable[out_fn] assign[=] call[name[os].path.join, parameter[name[feat_dir], call[constant[{}.wav].format, parameter[name[sub_wav_prefix]]]]]
variable[start_time] assign[=] binary_operation[name[start] * name[ureg].seconds]
variable[end_time] assign[=] binary_operation[name[end] * name[ureg].seconds]
if <ast.UnaryOp object at 0x7da1b11ba410> begin[:]
call[name[wav].trim_wav_ms, parameter[call[name[Path], parameter[name[in_fn]]], call[name[Path], parameter[name[out_fn]]], call[name[start_time].to, parameter[name[ureg].milliseconds]].magnitude, call[name[end_time].to, parameter[name[ureg].milliseconds]].magnitude]]
if compare[name[end] greater[>] name[length]] begin[:]
break
<ast.AugAssign object at 0x7da1b11b9d20>
<ast.AugAssign object at 0x7da1b11bbfa0>
<ast.AugAssign object at 0x7da1b11bb910>
call[name[feat_extract].from_dir, parameter[call[name[Path], parameter[call[name[os].path.join, parameter[name[feat_dir]]]]]]] | keyword[def] identifier[prepare_untran] ( identifier[feat_type] , identifier[tgt_dir] , identifier[untran_dir] ):
literal[string]
identifier[org_dir] = identifier[str] ( identifier[untran_dir] )
identifier[wav_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[str] ( identifier[tgt_dir] ), literal[string] , literal[string] )
identifier[feat_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[str] ( identifier[tgt_dir] ), literal[string] , literal[string] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[wav_dir] ):
identifier[os] . identifier[makedirs] ( identifier[wav_dir] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[feat_dir] ):
identifier[os] . identifier[makedirs] ( identifier[feat_dir] )
keyword[for] identifier[fn] keyword[in] identifier[os] . identifier[listdir] ( identifier[org_dir] ):
identifier[in_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[org_dir] , identifier[fn] )
identifier[prefix] , identifier[_] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[fn] )
identifier[mono16k_wav_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[wav_dir] , literal[string] % identifier[prefix] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[mono16k_wav_path] ):
identifier[feat_extract] . identifier[convert_wav] ( identifier[Path] ( identifier[in_path] ), identifier[Path] ( identifier[mono16k_wav_path] ))
identifier[wav_fns] = identifier[os] . identifier[listdir] ( identifier[wav_dir] )
keyword[with] ( identifier[tgt_dir] / literal[string] ). identifier[open] ( literal[string] ) keyword[as] identifier[prefix_f] :
keyword[for] identifier[fn] keyword[in] identifier[wav_fns] :
identifier[in_fn] = identifier[os] . identifier[path] . identifier[join] ( identifier[wav_dir] , identifier[fn] )
identifier[prefix] , identifier[_] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[fn] )
identifier[split_id] = literal[int]
identifier[start] , identifier[end] = literal[int] , literal[int]
identifier[length] = identifier[utils] . identifier[wav_length] ( identifier[in_fn] )
keyword[while] keyword[True] :
identifier[sub_wav_prefix] = literal[string] . identifier[format] ( identifier[prefix] , identifier[split_id] )
identifier[print] ( identifier[sub_wav_prefix] , identifier[file] = identifier[prefix_f] )
identifier[out_fn] = identifier[os] . identifier[path] . identifier[join] ( identifier[feat_dir] , literal[string] . identifier[format] ( identifier[sub_wav_prefix] ))
identifier[start_time] = identifier[start] * identifier[ureg] . identifier[seconds]
identifier[end_time] = identifier[end] * identifier[ureg] . identifier[seconds]
keyword[if] keyword[not] identifier[Path] ( identifier[out_fn] ). identifier[is_file] ():
identifier[wav] . identifier[trim_wav_ms] ( identifier[Path] ( identifier[in_fn] ), identifier[Path] ( identifier[out_fn] ),
identifier[start_time] . identifier[to] ( identifier[ureg] . identifier[milliseconds] ). identifier[magnitude] ,
identifier[end_time] . identifier[to] ( identifier[ureg] . identifier[milliseconds] ). identifier[magnitude] )
keyword[if] identifier[end] > identifier[length] :
keyword[break]
identifier[start] += literal[int]
identifier[end] += literal[int]
identifier[split_id] += literal[int]
identifier[feat_extract] . identifier[from_dir] ( identifier[Path] ( identifier[os] . identifier[path] . identifier[join] ( identifier[feat_dir] )), identifier[feat_type] = identifier[feat_type] ) | def prepare_untran(feat_type, tgt_dir, untran_dir):
""" Preprocesses untranscribed audio."""
org_dir = str(untran_dir)
wav_dir = os.path.join(str(tgt_dir), 'wav', 'untranscribed')
feat_dir = os.path.join(str(tgt_dir), 'feat', 'untranscribed')
if not os.path.isdir(wav_dir):
os.makedirs(wav_dir) # depends on [control=['if'], data=[]]
if not os.path.isdir(feat_dir):
os.makedirs(feat_dir) # depends on [control=['if'], data=[]]
# Standardize into wav files
for fn in os.listdir(org_dir):
in_path = os.path.join(org_dir, fn)
(prefix, _) = os.path.splitext(fn)
mono16k_wav_path = os.path.join(wav_dir, '%s.wav' % prefix)
if not os.path.isfile(mono16k_wav_path):
feat_extract.convert_wav(Path(in_path), Path(mono16k_wav_path)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fn']]
# Split up the wavs and write prefixes to prefix file.
wav_fns = os.listdir(wav_dir)
with (tgt_dir / 'untranscribed_prefixes.txt').open('w') as prefix_f:
for fn in wav_fns:
in_fn = os.path.join(wav_dir, fn)
(prefix, _) = os.path.splitext(fn)
# Split into sub-wavs and perform feat extraction.
split_id = 0
(start, end) = (0, 10) #in seconds
length = utils.wav_length(in_fn)
while True:
sub_wav_prefix = '{}.{}'.format(prefix, split_id)
print(sub_wav_prefix, file=prefix_f)
out_fn = os.path.join(feat_dir, '{}.wav'.format(sub_wav_prefix))
start_time = start * ureg.seconds
end_time = end * ureg.seconds
if not Path(out_fn).is_file():
wav.trim_wav_ms(Path(in_fn), Path(out_fn), start_time.to(ureg.milliseconds).magnitude, end_time.to(ureg.milliseconds).magnitude) # depends on [control=['if'], data=[]]
if end > length:
break # depends on [control=['if'], data=[]]
start += 10
end += 10
split_id += 1 # depends on [control=['while'], data=[]] # depends on [control=['for'], data=['fn']] # depends on [control=['with'], data=['prefix_f']]
# Do feat extraction.
feat_extract.from_dir(Path(os.path.join(feat_dir)), feat_type=feat_type) |
def annotations(obj: BioCCollection or BioCDocument or BioCPassage or BioCSentence,
                docid: str = None, level: int = PASSAGE) -> Generator[BioCAnnotation, None, None]:
    """
    Get all annotations in document id.

    Recursively walks the BioC hierarchy (collection -> document ->
    passage -> sentence) and yields the annotations stored at the
    requested level.

    Args:
        obj: BioCCollection, BioCDocument, BioCPassage, or BioCSentence
        docid: document id. If None, all documents
        level: one of DOCUMENT, PASSAGE, SENTENCE
    Yields:
        one annotation

    Raises:
        ValueError: if `level` is not reachable from the given object type.
        TypeError: if `obj` is not one of the four BioC container types.
    """
    # NOTE(review): the `or`-chained annotation on `obj` evaluates to just
    # BioCCollection at runtime; a typing.Union would express the intent.
    if isinstance(obj, BioCCollection):
        # Keep only the document with the matching id, or all documents
        # when no docid filter was given.
        for document in filter(lambda d: docid is None or docid == d.id, obj.documents):
            yield from annotations(document, level=level)
    elif isinstance(obj, BioCDocument):
        if level == DOCUMENT:
            yield from obj.annotations
        elif level in (PASSAGE, SENTENCE):
            # Recurse into passages to reach the requested level.
            for passage in obj.passages:
                yield from annotations(passage, level=level)
        else:
            raise ValueError('level must be DOCUMENT, PASSAGE, or SENTENCE')
    elif isinstance(obj, BioCPassage):
        if level == PASSAGE:
            yield from obj.annotations
        elif level == SENTENCE:
            # Recurse into sentences to reach the requested level.
            for sentence in obj.sentences:
                yield from annotations(sentence, level=level)
        else:
            raise ValueError('level must be PASSAGE or SENTENCE')
    elif isinstance(obj, BioCSentence):
        if level == SENTENCE:
            yield from obj.annotations
        else:
            raise ValueError('level must be SENTENCE')
    else:
        raise TypeError(f'Object of type {obj.__class__.__name__} must be BioCCollection, '
f'BioCDocument, BioCPassage, or BioCSentence') | def function[annotations, parameter[obj, docid, level]]:
constant[
Get all annotations in document id.
Args:
obj: BioCCollection, BioCDocument, BioCPassage, or BioCSentence
docid: document id. If None, all documents
level: one of DOCUMENT, PASSAGE, SENTENCE
Yields:
one annotation
]
if call[name[isinstance], parameter[name[obj], name[BioCCollection]]] begin[:]
for taget[name[document]] in starred[call[name[filter], parameter[<ast.Lambda object at 0x7da207f9b3a0>, name[obj].documents]]] begin[:]
<ast.YieldFrom object at 0x7da207f99540> | keyword[def] identifier[annotations] ( identifier[obj] : identifier[BioCCollection] keyword[or] identifier[BioCDocument] keyword[or] identifier[BioCPassage] keyword[or] identifier[BioCSentence] ,
identifier[docid] : identifier[str] = keyword[None] , identifier[level] : identifier[int] = identifier[PASSAGE] )-> identifier[Generator] [ identifier[BioCAnnotation] , keyword[None] , keyword[None] ]:
literal[string]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[BioCCollection] ):
keyword[for] identifier[document] keyword[in] identifier[filter] ( keyword[lambda] identifier[d] : identifier[docid] keyword[is] keyword[None] keyword[or] identifier[docid] == identifier[d] . identifier[id] , identifier[obj] . identifier[documents] ):
keyword[yield] keyword[from] identifier[annotations] ( identifier[document] , identifier[level] = identifier[level] )
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[BioCDocument] ):
keyword[if] identifier[level] == identifier[DOCUMENT] :
keyword[yield] keyword[from] identifier[obj] . identifier[annotations]
keyword[elif] identifier[level] keyword[in] ( identifier[PASSAGE] , identifier[SENTENCE] ):
keyword[for] identifier[passage] keyword[in] identifier[obj] . identifier[passages] :
keyword[yield] keyword[from] identifier[annotations] ( identifier[passage] , identifier[level] = identifier[level] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[BioCPassage] ):
keyword[if] identifier[level] == identifier[PASSAGE] :
keyword[yield] keyword[from] identifier[obj] . identifier[annotations]
keyword[elif] identifier[level] == identifier[SENTENCE] :
keyword[for] identifier[sentence] keyword[in] identifier[obj] . identifier[sentences] :
keyword[yield] keyword[from] identifier[annotations] ( identifier[sentence] , identifier[level] = identifier[level] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[BioCSentence] ):
keyword[if] identifier[level] == identifier[SENTENCE] :
keyword[yield] keyword[from] identifier[obj] . identifier[annotations]
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] ) | def annotations(obj: BioCCollection or BioCDocument or BioCPassage or BioCSentence, docid: str=None, level: int=PASSAGE) -> Generator[BioCAnnotation, None, None]:
"""
Get all annotations in document id.
Args:
obj: BioCCollection, BioCDocument, BioCPassage, or BioCSentence
docid: document id. If None, all documents
level: one of DOCUMENT, PASSAGE, SENTENCE
Yields:
one annotation
"""
if isinstance(obj, BioCCollection):
for document in filter(lambda d: docid is None or docid == d.id, obj.documents):
yield from annotations(document, level=level) # depends on [control=['for'], data=['document']] # depends on [control=['if'], data=[]]
elif isinstance(obj, BioCDocument):
if level == DOCUMENT:
yield from obj.annotations # depends on [control=['if'], data=[]]
elif level in (PASSAGE, SENTENCE):
for passage in obj.passages:
yield from annotations(passage, level=level) # depends on [control=['for'], data=['passage']] # depends on [control=['if'], data=['level']]
else:
raise ValueError('level must be DOCUMENT, PASSAGE, or SENTENCE') # depends on [control=['if'], data=[]]
elif isinstance(obj, BioCPassage):
if level == PASSAGE:
yield from obj.annotations # depends on [control=['if'], data=[]]
elif level == SENTENCE:
for sentence in obj.sentences:
yield from annotations(sentence, level=level) # depends on [control=['for'], data=['sentence']] # depends on [control=['if'], data=['level']]
else:
raise ValueError('level must be PASSAGE or SENTENCE') # depends on [control=['if'], data=[]]
elif isinstance(obj, BioCSentence):
if level == SENTENCE:
yield from obj.annotations # depends on [control=['if'], data=[]]
else:
raise ValueError('level must be SENTENCE') # depends on [control=['if'], data=[]]
else:
raise TypeError(f'Object of type {obj.__class__.__name__} must be BioCCollection, BioCDocument, BioCPassage, or BioCSentence') |
def loop():
    """Function that gets called again as soon as it finishes (forever).

    Demo drive sequence for a two-motor robot: drive straight for 2 s,
    spin clockwise for 2 s, spin counter-clockwise for 2 s, then stop.
    Presumably each motor's direction comes from its CTRL_1/CTRL_2 pin
    pair and its speed from the PWM duty cycle (245 of 255) -- confirm
    against the motor driver's documentation.
    """
    print("Straight")
    # Both motors driven the same way (CTRL_1 high / CTRL_2 low) at
    # near-full duty cycle.
    board.digital_write(L_CTRL_1, 1)
    board.digital_write(L_CTRL_2, 0)
    board.analog_write(PWM_L, 245)
    board.digital_write(R_CTRL_1, 1)
    board.digital_write(R_CTRL_2, 0)
    board.analog_write(PWM_R, 245)
    board.sleep(2.0)
    print("CW spin")
    # Right motor's control pins inverted relative to the left.
    board.digital_write(L_CTRL_1, 1)
    board.digital_write(L_CTRL_2, 0)
    board.analog_write(PWM_L, 245)
    board.digital_write(R_CTRL_1, 0)
    board.digital_write(R_CTRL_2, 1)
    board.analog_write(PWM_R, 245)
    board.sleep(2.0)
    print("CCW spin")
    # Left motor's control pins inverted relative to the right.
    board.digital_write(L_CTRL_1, 0)
    board.digital_write(L_CTRL_2, 1)
    board.analog_write(PWM_L, 245)
    board.digital_write(R_CTRL_1, 1)
    board.digital_write(R_CTRL_2, 0)
    board.analog_write(PWM_R, 245)
    board.sleep(2.0)
    print("Stop")
    # Direction pins kept in the "straight" state but duty cycle zeroed,
    # which stops both motors.
    board.digital_write(L_CTRL_1, 1)
    board.digital_write(L_CTRL_2, 0)
    board.analog_write(PWM_L, 0)
    board.digital_write(R_CTRL_1, 1)
    board.digital_write(R_CTRL_2, 0)
    board.analog_write(PWM_R, 0)
board.sleep(5.0) | def function[loop, parameter[]]:
constant[Function that gets called again as soon as it finishes (forever).]
call[name[print], parameter[constant[Straight]]]
call[name[board].digital_write, parameter[name[L_CTRL_1], constant[1]]]
call[name[board].digital_write, parameter[name[L_CTRL_2], constant[0]]]
call[name[board].analog_write, parameter[name[PWM_L], constant[245]]]
call[name[board].digital_write, parameter[name[R_CTRL_1], constant[1]]]
call[name[board].digital_write, parameter[name[R_CTRL_2], constant[0]]]
call[name[board].analog_write, parameter[name[PWM_R], constant[245]]]
call[name[board].sleep, parameter[constant[2.0]]]
call[name[print], parameter[constant[CW spin]]]
call[name[board].digital_write, parameter[name[L_CTRL_1], constant[1]]]
call[name[board].digital_write, parameter[name[L_CTRL_2], constant[0]]]
call[name[board].analog_write, parameter[name[PWM_L], constant[245]]]
call[name[board].digital_write, parameter[name[R_CTRL_1], constant[0]]]
call[name[board].digital_write, parameter[name[R_CTRL_2], constant[1]]]
call[name[board].analog_write, parameter[name[PWM_R], constant[245]]]
call[name[board].sleep, parameter[constant[2.0]]]
call[name[print], parameter[constant[CCW spin]]]
call[name[board].digital_write, parameter[name[L_CTRL_1], constant[0]]]
call[name[board].digital_write, parameter[name[L_CTRL_2], constant[1]]]
call[name[board].analog_write, parameter[name[PWM_L], constant[245]]]
call[name[board].digital_write, parameter[name[R_CTRL_1], constant[1]]]
call[name[board].digital_write, parameter[name[R_CTRL_2], constant[0]]]
call[name[board].analog_write, parameter[name[PWM_R], constant[245]]]
call[name[board].sleep, parameter[constant[2.0]]]
call[name[print], parameter[constant[Stop]]]
call[name[board].digital_write, parameter[name[L_CTRL_1], constant[1]]]
call[name[board].digital_write, parameter[name[L_CTRL_2], constant[0]]]
call[name[board].analog_write, parameter[name[PWM_L], constant[0]]]
call[name[board].digital_write, parameter[name[R_CTRL_1], constant[1]]]
call[name[board].digital_write, parameter[name[R_CTRL_2], constant[0]]]
call[name[board].analog_write, parameter[name[PWM_R], constant[0]]]
call[name[board].sleep, parameter[constant[5.0]]] | keyword[def] identifier[loop] ():
literal[string]
identifier[print] ( literal[string] )
identifier[board] . identifier[digital_write] ( identifier[L_CTRL_1] , literal[int] )
identifier[board] . identifier[digital_write] ( identifier[L_CTRL_2] , literal[int] )
identifier[board] . identifier[analog_write] ( identifier[PWM_L] , literal[int] )
identifier[board] . identifier[digital_write] ( identifier[R_CTRL_1] , literal[int] )
identifier[board] . identifier[digital_write] ( identifier[R_CTRL_2] , literal[int] )
identifier[board] . identifier[analog_write] ( identifier[PWM_R] , literal[int] )
identifier[board] . identifier[sleep] ( literal[int] )
identifier[print] ( literal[string] )
identifier[board] . identifier[digital_write] ( identifier[L_CTRL_1] , literal[int] )
identifier[board] . identifier[digital_write] ( identifier[L_CTRL_2] , literal[int] )
identifier[board] . identifier[analog_write] ( identifier[PWM_L] , literal[int] )
identifier[board] . identifier[digital_write] ( identifier[R_CTRL_1] , literal[int] )
identifier[board] . identifier[digital_write] ( identifier[R_CTRL_2] , literal[int] )
identifier[board] . identifier[analog_write] ( identifier[PWM_R] , literal[int] )
identifier[board] . identifier[sleep] ( literal[int] )
identifier[print] ( literal[string] )
identifier[board] . identifier[digital_write] ( identifier[L_CTRL_1] , literal[int] )
identifier[board] . identifier[digital_write] ( identifier[L_CTRL_2] , literal[int] )
identifier[board] . identifier[analog_write] ( identifier[PWM_L] , literal[int] )
identifier[board] . identifier[digital_write] ( identifier[R_CTRL_1] , literal[int] )
identifier[board] . identifier[digital_write] ( identifier[R_CTRL_2] , literal[int] )
identifier[board] . identifier[analog_write] ( identifier[PWM_R] , literal[int] )
identifier[board] . identifier[sleep] ( literal[int] )
identifier[print] ( literal[string] )
identifier[board] . identifier[digital_write] ( identifier[L_CTRL_1] , literal[int] )
identifier[board] . identifier[digital_write] ( identifier[L_CTRL_2] , literal[int] )
identifier[board] . identifier[analog_write] ( identifier[PWM_L] , literal[int] )
identifier[board] . identifier[digital_write] ( identifier[R_CTRL_1] , literal[int] )
identifier[board] . identifier[digital_write] ( identifier[R_CTRL_2] , literal[int] )
identifier[board] . identifier[analog_write] ( identifier[PWM_R] , literal[int] )
identifier[board] . identifier[sleep] ( literal[int] ) | def loop():
"""Function that gets called again as soon as it finishes (forever)."""
print('Straight')
board.digital_write(L_CTRL_1, 1)
board.digital_write(L_CTRL_2, 0)
board.analog_write(PWM_L, 245)
board.digital_write(R_CTRL_1, 1)
board.digital_write(R_CTRL_2, 0)
board.analog_write(PWM_R, 245)
board.sleep(2.0)
print('CW spin')
board.digital_write(L_CTRL_1, 1)
board.digital_write(L_CTRL_2, 0)
board.analog_write(PWM_L, 245)
board.digital_write(R_CTRL_1, 0)
board.digital_write(R_CTRL_2, 1)
board.analog_write(PWM_R, 245)
board.sleep(2.0)
print('CCW spin')
board.digital_write(L_CTRL_1, 0)
board.digital_write(L_CTRL_2, 1)
board.analog_write(PWM_L, 245)
board.digital_write(R_CTRL_1, 1)
board.digital_write(R_CTRL_2, 0)
board.analog_write(PWM_R, 245)
board.sleep(2.0)
print('Stop')
board.digital_write(L_CTRL_1, 1)
board.digital_write(L_CTRL_2, 0)
board.analog_write(PWM_L, 0)
board.digital_write(R_CTRL_1, 1)
board.digital_write(R_CTRL_2, 0)
board.analog_write(PWM_R, 0)
board.sleep(5.0) |
def batch_entrez(list_of_terms, db="nuccore", retmax=1, rettype="fasta",
                 batchsize=1, email=myEmail):
    """
    Retrieve multiple rather than a single record

    Generator over Entrez results: for each search term, searches ``db``
    and yields ``(id_string, batch_size, term, fetch_handle)`` where
    ``id_string`` is a comma-joined list of record ids and ``fetch_handle``
    is the open ``Entrez.efetch`` handle for that batch.

    NOTE(review): ``email=myEmail`` is evaluated once at import time from a
    module-level constant -- confirm this is the intended default.
    """
    for term in list_of_terms:
        logging.debug("Search term %s" % term)
        success = False
        ids = None
        # Skip empty/blank terms outright.
        if not term:
            continue
        # Retry the esearch indefinitely on transient network/parse errors,
        # backing off 5 seconds between attempts.
        while not success:
            try:
                search_handle = Entrez.esearch(db=db, retmax=retmax, term=term)
                rec = Entrez.read(search_handle)
                success = True
                ids = rec["IdList"]
            except (HTTPError, URLError,
                    RuntimeError, KeyError) as e:
                logging.error(e)
                logging.debug("wait 5 seconds to reconnect...")
                time.sleep(5)
        # No hits for this term: log and move on to the next one.
        if not ids:
            logging.error("term {0} not found".format(term))
            continue
        assert ids
        nids = len(ids)
        if nids > 1:
            logging.debug("A total of {0} results found.".format(nids))
        if batchsize != 1:
            logging.debug("Use a batch size of {0}.".format(batchsize))
        # Split the id list into fixed-size groups; grouper pads the last
        # group, so the padding is filtered out again below.
        ids = list(grouper(ids, batchsize))
        for id in ids:
            id = [x for x in id if x]  # drop grouper's fill padding
            size = len(id)
            id = ",".join(id)
            success = False
            # Same unbounded retry strategy for the efetch call.
            while not success:
                try:
                    fetch_handle = Entrez.efetch(db=db, id=id, rettype=rettype,
                                                 email=email)
                    success = True
                except (HTTPError, URLError,
                        RuntimeError) as e:
                    logging.error(e)
                    logging.debug("wait 5 seconds to reconnect...")
                    time.sleep(5)
            yield id, size, term, fetch_handle
constant[
Retrieve multiple rather than a single record
]
for taget[name[term]] in starred[name[list_of_terms]] begin[:]
call[name[logging].debug, parameter[binary_operation[constant[Search term %s] <ast.Mod object at 0x7da2590d6920> name[term]]]]
variable[success] assign[=] constant[False]
variable[ids] assign[=] constant[None]
if <ast.UnaryOp object at 0x7da1b09bdf60> begin[:]
continue
while <ast.UnaryOp object at 0x7da1b09bd2d0> begin[:]
<ast.Try object at 0x7da1b09bf310>
if <ast.UnaryOp object at 0x7da18fe91ab0> begin[:]
call[name[logging].error, parameter[call[constant[term {0} not found].format, parameter[name[term]]]]]
continue
assert[name[ids]]
variable[nids] assign[=] call[name[len], parameter[name[ids]]]
if compare[name[nids] greater[>] constant[1]] begin[:]
call[name[logging].debug, parameter[call[constant[A total of {0} results found.].format, parameter[name[nids]]]]]
if compare[name[batchsize] not_equal[!=] constant[1]] begin[:]
call[name[logging].debug, parameter[call[constant[Use a batch size of {0}.].format, parameter[name[batchsize]]]]]
variable[ids] assign[=] call[name[list], parameter[call[name[grouper], parameter[name[ids], name[batchsize]]]]]
for taget[name[id]] in starred[name[ids]] begin[:]
variable[id] assign[=] <ast.ListComp object at 0x7da2041db2e0>
variable[size] assign[=] call[name[len], parameter[name[id]]]
variable[id] assign[=] call[constant[,].join, parameter[name[id]]]
variable[success] assign[=] constant[False]
while <ast.UnaryOp object at 0x7da2041db640> begin[:]
<ast.Try object at 0x7da2041da440>
<ast.Yield object at 0x7da2041dbe80> | keyword[def] identifier[batch_entrez] ( identifier[list_of_terms] , identifier[db] = literal[string] , identifier[retmax] = literal[int] , identifier[rettype] = literal[string] ,
identifier[batchsize] = literal[int] , identifier[email] = identifier[myEmail] ):
literal[string]
keyword[for] identifier[term] keyword[in] identifier[list_of_terms] :
identifier[logging] . identifier[debug] ( literal[string] % identifier[term] )
identifier[success] = keyword[False]
identifier[ids] = keyword[None]
keyword[if] keyword[not] identifier[term] :
keyword[continue]
keyword[while] keyword[not] identifier[success] :
keyword[try] :
identifier[search_handle] = identifier[Entrez] . identifier[esearch] ( identifier[db] = identifier[db] , identifier[retmax] = identifier[retmax] , identifier[term] = identifier[term] )
identifier[rec] = identifier[Entrez] . identifier[read] ( identifier[search_handle] )
identifier[success] = keyword[True]
identifier[ids] = identifier[rec] [ literal[string] ]
keyword[except] ( identifier[HTTPError] , identifier[URLError] ,
identifier[RuntimeError] , identifier[KeyError] ) keyword[as] identifier[e] :
identifier[logging] . identifier[error] ( identifier[e] )
identifier[logging] . identifier[debug] ( literal[string] )
identifier[time] . identifier[sleep] ( literal[int] )
keyword[if] keyword[not] identifier[ids] :
identifier[logging] . identifier[error] ( literal[string] . identifier[format] ( identifier[term] ))
keyword[continue]
keyword[assert] identifier[ids]
identifier[nids] = identifier[len] ( identifier[ids] )
keyword[if] identifier[nids] > literal[int] :
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[nids] ))
keyword[if] identifier[batchsize] != literal[int] :
identifier[logging] . identifier[debug] ( literal[string] . identifier[format] ( identifier[batchsize] ))
identifier[ids] = identifier[list] ( identifier[grouper] ( identifier[ids] , identifier[batchsize] ))
keyword[for] identifier[id] keyword[in] identifier[ids] :
identifier[id] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[id] keyword[if] identifier[x] ]
identifier[size] = identifier[len] ( identifier[id] )
identifier[id] = literal[string] . identifier[join] ( identifier[id] )
identifier[success] = keyword[False]
keyword[while] keyword[not] identifier[success] :
keyword[try] :
identifier[fetch_handle] = identifier[Entrez] . identifier[efetch] ( identifier[db] = identifier[db] , identifier[id] = identifier[id] , identifier[rettype] = identifier[rettype] ,
identifier[email] = identifier[email] )
identifier[success] = keyword[True]
keyword[except] ( identifier[HTTPError] , identifier[URLError] ,
identifier[RuntimeError] ) keyword[as] identifier[e] :
identifier[logging] . identifier[error] ( identifier[e] )
identifier[logging] . identifier[debug] ( literal[string] )
identifier[time] . identifier[sleep] ( literal[int] )
keyword[yield] identifier[id] , identifier[size] , identifier[term] , identifier[fetch_handle] | def batch_entrez(list_of_terms, db='nuccore', retmax=1, rettype='fasta', batchsize=1, email=myEmail):
"""
Retrieve multiple rather than a single record
"""
for term in list_of_terms:
logging.debug('Search term %s' % term)
success = False
ids = None
if not term:
continue # depends on [control=['if'], data=[]]
while not success:
try:
search_handle = Entrez.esearch(db=db, retmax=retmax, term=term)
rec = Entrez.read(search_handle)
success = True
ids = rec['IdList'] # depends on [control=['try'], data=[]]
except (HTTPError, URLError, RuntimeError, KeyError) as e:
logging.error(e)
logging.debug('wait 5 seconds to reconnect...')
time.sleep(5) # depends on [control=['except'], data=['e']] # depends on [control=['while'], data=[]]
if not ids:
logging.error('term {0} not found'.format(term))
continue # depends on [control=['if'], data=[]]
assert ids
nids = len(ids)
if nids > 1:
logging.debug('A total of {0} results found.'.format(nids)) # depends on [control=['if'], data=['nids']]
if batchsize != 1:
logging.debug('Use a batch size of {0}.'.format(batchsize)) # depends on [control=['if'], data=['batchsize']]
ids = list(grouper(ids, batchsize))
for id in ids:
id = [x for x in id if x]
size = len(id)
id = ','.join(id)
success = False
while not success:
try:
fetch_handle = Entrez.efetch(db=db, id=id, rettype=rettype, email=email)
success = True # depends on [control=['try'], data=[]]
except (HTTPError, URLError, RuntimeError) as e:
logging.error(e)
logging.debug('wait 5 seconds to reconnect...')
time.sleep(5) # depends on [control=['except'], data=['e']] # depends on [control=['while'], data=[]]
yield (id, size, term, fetch_handle) # depends on [control=['for'], data=['id']] # depends on [control=['for'], data=['term']] |
def delete_os_in_nwk(self, tenant_id, fw_dict, is_fw_virt=False):
    """Deletes the Openstack In network and update the DB. """
    tenant_name = fw_dict.get('tenant_name')
    try:
        succeeded = self._delete_os_nwk(tenant_id, tenant_name, "in",
                                        is_fw_virt=is_fw_virt)
    except Exception as exc:
        LOG.error("Deletion of In Openstack Network failed tenant "
                  "%(tenant)s Exception %(exc)s",
                  {'tenant': tenant_id, 'exc': str(exc)})
        succeeded = False
    # Record the outcome of the network removal in the FW DB.
    res = (fw_const.OS_IN_NETWORK_DEL_SUCCESS if succeeded
           else fw_const.OS_IN_NETWORK_DEL_FAIL)
    self.update_fw_db_result(tenant_id, os_status=res)
    return succeeded
constant[Deletes the Openstack In network and update the DB. ]
variable[ret] assign[=] constant[True]
variable[tenant_name] assign[=] call[name[fw_dict].get, parameter[constant[tenant_name]]]
<ast.Try object at 0x7da18bc721d0>
if name[ret] begin[:]
variable[res] assign[=] name[fw_const].OS_IN_NETWORK_DEL_SUCCESS
call[name[self].update_fw_db_result, parameter[name[tenant_id]]]
return[name[ret]] | keyword[def] identifier[delete_os_in_nwk] ( identifier[self] , identifier[tenant_id] , identifier[fw_dict] , identifier[is_fw_virt] = keyword[False] ):
literal[string]
identifier[ret] = keyword[True]
identifier[tenant_name] = identifier[fw_dict] . identifier[get] ( literal[string] )
keyword[try] :
identifier[ret] = identifier[self] . identifier[_delete_os_nwk] ( identifier[tenant_id] , identifier[tenant_name] , literal[string] ,
identifier[is_fw_virt] = identifier[is_fw_virt] )
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[LOG] . identifier[error] ( literal[string]
literal[string] ,
{ literal[string] : identifier[tenant_id] , literal[string] : identifier[str] ( identifier[exc] )})
identifier[ret] = keyword[False]
keyword[if] identifier[ret] :
identifier[res] = identifier[fw_const] . identifier[OS_IN_NETWORK_DEL_SUCCESS]
keyword[else] :
identifier[res] = identifier[fw_const] . identifier[OS_IN_NETWORK_DEL_FAIL]
identifier[self] . identifier[update_fw_db_result] ( identifier[tenant_id] , identifier[os_status] = identifier[res] )
keyword[return] identifier[ret] | def delete_os_in_nwk(self, tenant_id, fw_dict, is_fw_virt=False):
"""Deletes the Openstack In network and update the DB. """
ret = True
tenant_name = fw_dict.get('tenant_name')
try:
ret = self._delete_os_nwk(tenant_id, tenant_name, 'in', is_fw_virt=is_fw_virt) # depends on [control=['try'], data=[]]
except Exception as exc:
LOG.error('Deletion of In Openstack Network failed tenant %(tenant)s Exception %(exc)s', {'tenant': tenant_id, 'exc': str(exc)})
ret = False # depends on [control=['except'], data=['exc']]
# Updating the FW DB
if ret:
res = fw_const.OS_IN_NETWORK_DEL_SUCCESS # depends on [control=['if'], data=[]]
else:
res = fw_const.OS_IN_NETWORK_DEL_FAIL
self.update_fw_db_result(tenant_id, os_status=res)
return ret |
def n_bifurcation_points(neurites, neurite_type=NeuriteType.all):
    '''Number of bifurcation points in a collection of neurites.'''
    return n_sections(neurites,
                      neurite_type=neurite_type,
                      iterator_type=Tree.ibifurcation_point)
constant[number of bifurcation points in a collection of neurites]
return[call[name[n_sections], parameter[name[neurites]]]] | keyword[def] identifier[n_bifurcation_points] ( identifier[neurites] , identifier[neurite_type] = identifier[NeuriteType] . identifier[all] ):
literal[string]
keyword[return] identifier[n_sections] ( identifier[neurites] , identifier[neurite_type] = identifier[neurite_type] , identifier[iterator_type] = identifier[Tree] . identifier[ibifurcation_point] ) | def n_bifurcation_points(neurites, neurite_type=NeuriteType.all):
"""number of bifurcation points in a collection of neurites"""
return n_sections(neurites, neurite_type=neurite_type, iterator_type=Tree.ibifurcation_point) |
def load_config(cls):
    """ Load global and local configuration files and update if needed."""
    global_path = os.path.expanduser(cls.home_config)
    # Read both configuration layers; only the global one may need updating.
    global_conf = cls.load(global_path, 'global')
    cls.load(cls.local_config, 'local')
    cls.update_config(global_path, global_conf)
constant[ Load global and local configuration files and update if needed.]
variable[config_file] assign[=] call[name[os].path.expanduser, parameter[name[cls].home_config]]
variable[global_conf] assign[=] call[name[cls].load, parameter[name[config_file], constant[global]]]
call[name[cls].load, parameter[name[cls].local_config, constant[local]]]
call[name[cls].update_config, parameter[name[config_file], name[global_conf]]] | keyword[def] identifier[load_config] ( identifier[cls] ):
literal[string]
identifier[config_file] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[cls] . identifier[home_config] )
identifier[global_conf] = identifier[cls] . identifier[load] ( identifier[config_file] , literal[string] )
identifier[cls] . identifier[load] ( identifier[cls] . identifier[local_config] , literal[string] )
identifier[cls] . identifier[update_config] ( identifier[config_file] , identifier[global_conf] ) | def load_config(cls):
""" Load global and local configuration files and update if needed."""
config_file = os.path.expanduser(cls.home_config)
global_conf = cls.load(config_file, 'global')
cls.load(cls.local_config, 'local')
# update global configuration if needed
cls.update_config(config_file, global_conf) |
def load_pert(self):
    """
    Load perturbation files to ``self.callpert``
    Returns
    -------
    None
    """
    system = self.system
    if system.files.pert:
        try:
            # Make the directory containing the pert file importable.
            # NOTE(review): this appends on every call, so sys.path grows
            # if load_pert() runs more than once -- confirm acceptable.
            sys.path.append(system.files.path)
            # [:-3] strips the trailing ".py" to obtain the module name;
            # assumes the pert file name always ends in ".py" -- TODO confirm.
            module = importlib.import_module(system.files.pert[:-3])
            self.callpert = getattr(module, 'pert')
        except ImportError:
            # Best-effort: a broken pert file disables the callback
            # instead of aborting system setup.
            logger.warning('Pert file is discarded due to import errors.')
            self.callpert = None
constant[
Load perturbation files to ``self.callpert``
Returns
-------
None
]
variable[system] assign[=] name[self].system
if name[system].files.pert begin[:]
<ast.Try object at 0x7da1b23467a0> | keyword[def] identifier[load_pert] ( identifier[self] ):
literal[string]
identifier[system] = identifier[self] . identifier[system]
keyword[if] identifier[system] . identifier[files] . identifier[pert] :
keyword[try] :
identifier[sys] . identifier[path] . identifier[append] ( identifier[system] . identifier[files] . identifier[path] )
identifier[module] = identifier[importlib] . identifier[import_module] ( identifier[system] . identifier[files] . identifier[pert] [:- literal[int] ])
identifier[self] . identifier[callpert] = identifier[getattr] ( identifier[module] , literal[string] )
keyword[except] identifier[ImportError] :
identifier[logger] . identifier[warning] ( literal[string] )
identifier[self] . identifier[callpert] = keyword[None] | def load_pert(self):
"""
Load perturbation files to ``self.callpert``
Returns
-------
None
"""
system = self.system
if system.files.pert:
try:
sys.path.append(system.files.path)
module = importlib.import_module(system.files.pert[:-3])
self.callpert = getattr(module, 'pert') # depends on [control=['try'], data=[]]
except ImportError:
logger.warning('Pert file is discarded due to import errors.')
self.callpert = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] |
def _get_bin_dir(self):
"""
Normaly we have a ...env/bin/ dir.
But under Windows we have ...env/Scripts/
But not PyPy2 under Windows, see:
https://bitbucket.org/pypy/pypy/issues/2125/tcl-doesnt-work-inside-a-virtualenv-on#comment-21247266
So just try to test via os.path.isdir()
"""
for subdir in ("bin", "Scripts"):
bin_dir = os.path.join(self.abs_home_dir, subdir)
if os.path.isdir(bin_dir):
print("bin dir: %r" % bin_dir)
return bin_dir
raise RuntimeError("Can't find 'bin/Scripts' dir in: %r" % self.abs_home_dir) | def function[_get_bin_dir, parameter[self]]:
constant[
Normaly we have a ...env/bin/ dir.
But under Windows we have ...env/Scripts/
But not PyPy2 under Windows, see:
https://bitbucket.org/pypy/pypy/issues/2125/tcl-doesnt-work-inside-a-virtualenv-on#comment-21247266
So just try to test via os.path.isdir()
]
for taget[name[subdir]] in starred[tuple[[<ast.Constant object at 0x7da1b05fd5d0>, <ast.Constant object at 0x7da1b05fd6f0>]]] begin[:]
variable[bin_dir] assign[=] call[name[os].path.join, parameter[name[self].abs_home_dir, name[subdir]]]
if call[name[os].path.isdir, parameter[name[bin_dir]]] begin[:]
call[name[print], parameter[binary_operation[constant[bin dir: %r] <ast.Mod object at 0x7da2590d6920> name[bin_dir]]]]
return[name[bin_dir]]
<ast.Raise object at 0x7da1b04d10f0> | keyword[def] identifier[_get_bin_dir] ( identifier[self] ):
literal[string]
keyword[for] identifier[subdir] keyword[in] ( literal[string] , literal[string] ):
identifier[bin_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[abs_home_dir] , identifier[subdir] )
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[bin_dir] ):
identifier[print] ( literal[string] % identifier[bin_dir] )
keyword[return] identifier[bin_dir]
keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[self] . identifier[abs_home_dir] ) | def _get_bin_dir(self):
"""
Normaly we have a ...env/bin/ dir.
But under Windows we have ...env/Scripts/
But not PyPy2 under Windows, see:
https://bitbucket.org/pypy/pypy/issues/2125/tcl-doesnt-work-inside-a-virtualenv-on#comment-21247266
So just try to test via os.path.isdir()
"""
for subdir in ('bin', 'Scripts'):
bin_dir = os.path.join(self.abs_home_dir, subdir)
if os.path.isdir(bin_dir):
print('bin dir: %r' % bin_dir)
return bin_dir # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['subdir']]
raise RuntimeError("Can't find 'bin/Scripts' dir in: %r" % self.abs_home_dir) |
def _tracing_information():
    """Gets B3 distributed tracing information, if available.
    This is returned as a list, ready to be formatted into Spring Cloud Sleuth compatible format.

    Returns None implicitly when no B3 trace id has been collected.
    """
    # We'll collate trace information if the B3 headers have been collected:
    values = b3.values()
    if values[b3.b3_trace_id]:
        # Trace information would normally be sent to Zipkin if either of sampled or debug ("flags") is set to 1
        # However we're not currently using Zipkin, so it's always false
        # exported = "true" if values[b3.b3_sampled] == '1' or values[b3.b3_flags] == '1' else "false"
        # Order: [app name, trace id, span id, exported] -- presumably the
        # Sleuth log pattern "[app,trace,span,export]"; confirm against the
        # log formatter that consumes this list.
        return [
            current_app.name if current_app.name else " - ",
            values[b3.b3_trace_id],
            values[b3.b3_span_id],
            "false",
        ]
constant[Gets B3 distributed tracing information, if available.
This is returned as a list, ready to be formatted into Spring Cloud Sleuth compatible format.
]
variable[values] assign[=] call[name[b3].values, parameter[]]
if call[name[values]][name[b3].b3_trace_id] begin[:]
return[list[[<ast.IfExp object at 0x7da18f8121d0>, <ast.Subscript object at 0x7da18f8135b0>, <ast.Subscript object at 0x7da18f812260>, <ast.Constant object at 0x7da18f811450>]]] | keyword[def] identifier[_tracing_information] ():
literal[string]
identifier[values] = identifier[b3] . identifier[values] ()
keyword[if] identifier[values] [ identifier[b3] . identifier[b3_trace_id] ]:
keyword[return] [
identifier[current_app] . identifier[name] keyword[if] identifier[current_app] . identifier[name] keyword[else] literal[string] ,
identifier[values] [ identifier[b3] . identifier[b3_trace_id] ],
identifier[values] [ identifier[b3] . identifier[b3_span_id] ],
literal[string] ,
] | def _tracing_information():
"""Gets B3 distributed tracing information, if available.
This is returned as a list, ready to be formatted into Spring Cloud Sleuth compatible format.
"""
# We'll collate trace information if the B3 headers have been collected:
values = b3.values()
if values[b3.b3_trace_id]:
# Trace information would normally be sent to Zipkin if either of sampled or debug ("flags") is set to 1
# However we're not currently using Zipkin, so it's always false
# exported = "true" if values[b3.b3_sampled] == '1' or values[b3.b3_flags] == '1' else "false"
return [current_app.name if current_app.name else ' - ', values[b3.b3_trace_id], values[b3.b3_span_id], 'false'] # depends on [control=['if'], data=[]] |
def extend_embedder_vocab(self, embedding_sources_mapping: Dict[str, str] = None) -> None:
    """
    Walk every sub-module of the model and let each embedding module
    extend itself to cover the (possibly extended) vocabulary.
    Needed for fine-tuning / transfer learning, where the model was
    trained on the original vocabulary but must now embed the union of
    the original and the new-data vocabulary.
    Parameters
    ----------
    embedding_sources_mapping : Dict[str, str], (optional, default=None)
        Maps a model path (attribute path from the model down to the
        embedding module, e.g. "_text_field_embedder.token_embedder_tokens")
        to the pretrained-file path for that module.  Supply this when the
        pretrained file used at initialization time is no longer available.
    """
    # named_modules() yields every nested child with a dotted path,
    # e.g. "parent_module_name.child_module_name".
    sources = embedding_sources_mapping or {}
    for path, submodule in self.named_modules():
        if not hasattr(submodule, 'extend_vocab'):
            continue
        submodule.extend_vocab(self.vocab,
                               extension_pretrained_file=sources.get(path),
                               model_path=path)
constant[
Iterates through all embedding modules in the model and assures it can embed
with the extended vocab. This is required in fine-tuning or transfer learning
scenarios where model was trained with original vocabulary but during
fine-tuning/tranfer-learning, it will have it work with extended vocabulary
(original + new-data vocabulary).
Parameters
----------
embedding_sources_mapping : Dict[str, str], (optional, default=None)
Mapping from model_path to pretrained-file path of the embedding
modules. If pretrained-file used at time of embedding initialization
isn't available now, user should pass this mapping. Model path is
path traversing the model attributes upto this embedding module.
Eg. "_text_field_embedder.token_embedder_tokens".
]
variable[embedding_sources_mapping] assign[=] <ast.BoolOp object at 0x7da18f58e020>
for taget[tuple[[<ast.Name object at 0x7da18f58d3f0>, <ast.Name object at 0x7da18f58c610>]]] in starred[call[name[self].named_modules, parameter[]]] begin[:]
if call[name[hasattr], parameter[name[module], constant[extend_vocab]]] begin[:]
variable[pretrained_file] assign[=] call[name[embedding_sources_mapping].get, parameter[name[model_path], constant[None]]]
call[name[module].extend_vocab, parameter[name[self].vocab]] | keyword[def] identifier[extend_embedder_vocab] ( identifier[self] , identifier[embedding_sources_mapping] : identifier[Dict] [ identifier[str] , identifier[str] ]= keyword[None] )-> keyword[None] :
literal[string]
identifier[embedding_sources_mapping] = identifier[embedding_sources_mapping] keyword[or] {}
keyword[for] identifier[model_path] , identifier[module] keyword[in] identifier[self] . identifier[named_modules] ():
keyword[if] identifier[hasattr] ( identifier[module] , literal[string] ):
identifier[pretrained_file] = identifier[embedding_sources_mapping] . identifier[get] ( identifier[model_path] , keyword[None] )
identifier[module] . identifier[extend_vocab] ( identifier[self] . identifier[vocab] ,
identifier[extension_pretrained_file] = identifier[pretrained_file] ,
identifier[model_path] = identifier[model_path] ) | def extend_embedder_vocab(self, embedding_sources_mapping: Dict[str, str]=None) -> None:
"""
Iterates through all embedding modules in the model and assures it can embed
with the extended vocab. This is required in fine-tuning or transfer learning
scenarios where model was trained with original vocabulary but during
fine-tuning/tranfer-learning, it will have it work with extended vocabulary
(original + new-data vocabulary).
Parameters
----------
embedding_sources_mapping : Dict[str, str], (optional, default=None)
Mapping from model_path to pretrained-file path of the embedding
modules. If pretrained-file used at time of embedding initialization
isn't available now, user should pass this mapping. Model path is
path traversing the model attributes upto this embedding module.
Eg. "_text_field_embedder.token_embedder_tokens".
"""
# self.named_modules() gives all sub-modules (including nested children)
# The path nesting is already separated by ".": eg. parent_module_name.child_module_name
embedding_sources_mapping = embedding_sources_mapping or {}
for (model_path, module) in self.named_modules():
if hasattr(module, 'extend_vocab'):
pretrained_file = embedding_sources_mapping.get(model_path, None)
module.extend_vocab(self.vocab, extension_pretrained_file=pretrained_file, model_path=model_path) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def prepare_spec(spec, data=None):
    """Prepare a Vega-Lite spec for sending to the frontend.

    Data may either already live inside the spec, or be supplied
    separately as a pandas DataFrame (or anything convertible to one).
    When supplied separately, ``spec`` is modified in-place.
    """
    import pandas as pd
    if data is None:
        # Assume the data is embedded somewhere in the spec (possibly
        # deeply nested rather than at the top level); nothing to merge.
        return spec
    if not isinstance(data, pd.DataFrame):
        # Last resort: coerce whatever we were given into a DataFrame.
        data = pd.DataFrame(data)
    data = sanitize_dataframe(data)
    spec['data'] = {'values': data.to_dict(orient='records')}
    return spec
constant[Prepare a Vega-Lite spec for sending to the frontend.
This allows data to be passed in either as part of the spec
or separately. If separately, the data is assumed to be a
pandas DataFrame or object that can be converted to to a DataFrame.
Note that if data is not None, this modifies spec in-place
]
import module[pandas] as alias[pd]
if call[name[isinstance], parameter[name[data], name[pd].DataFrame]] begin[:]
variable[data] assign[=] call[name[sanitize_dataframe], parameter[name[data]]]
call[name[spec]][constant[data]] assign[=] dictionary[[<ast.Constant object at 0x7da20e962ef0>], [<ast.Call object at 0x7da20e961420>]]
return[name[spec]] | keyword[def] identifier[prepare_spec] ( identifier[spec] , identifier[data] = keyword[None] ):
literal[string]
keyword[import] identifier[pandas] keyword[as] identifier[pd]
keyword[if] identifier[isinstance] ( identifier[data] , identifier[pd] . identifier[DataFrame] ):
identifier[data] = identifier[sanitize_dataframe] ( identifier[data] )
identifier[spec] [ literal[string] ]={ literal[string] : identifier[data] . identifier[to_dict] ( identifier[orient] = literal[string] )}
keyword[elif] identifier[data] keyword[is] keyword[None] :
keyword[pass]
keyword[else] :
identifier[data] = identifier[pd] . identifier[DataFrame] ( identifier[data] )
identifier[data] = identifier[sanitize_dataframe] ( identifier[data] )
identifier[spec] [ literal[string] ]={ literal[string] : identifier[data] . identifier[to_dict] ( identifier[orient] = literal[string] )}
keyword[return] identifier[spec] | def prepare_spec(spec, data=None):
"""Prepare a Vega-Lite spec for sending to the frontend.
This allows data to be passed in either as part of the spec
or separately. If separately, the data is assumed to be a
pandas DataFrame or object that can be converted to to a DataFrame.
Note that if data is not None, this modifies spec in-place
"""
import pandas as pd
if isinstance(data, pd.DataFrame):
# We have to do the isinstance test first because we can't
# compare a DataFrame to None.
data = sanitize_dataframe(data)
spec['data'] = {'values': data.to_dict(orient='records')} # depends on [control=['if'], data=[]]
elif data is None:
# Assume data is within spec & do nothing
# It may be deep in the spec rather than at the top level
pass # depends on [control=['if'], data=[]]
else:
# As a last resort try to pass the data to a DataFrame and use it
data = pd.DataFrame(data)
data = sanitize_dataframe(data)
spec['data'] = {'values': data.to_dict(orient='records')}
return spec |
def _traverse_nodes(self):
""" Debugging function (exposes cython nodes as dummy nodes) """
node = self.root
stack = []
while stack or node is not None:
if node is not None:
stack.append(node)
node = node.left
else:
node = stack.pop()
yield node
node = node.right | def function[_traverse_nodes, parameter[self]]:
constant[ Debugging function (exposes cython nodes as dummy nodes) ]
variable[node] assign[=] name[self].root
variable[stack] assign[=] list[[]]
while <ast.BoolOp object at 0x7da1b24ea1d0> begin[:]
if compare[name[node] is_not constant[None]] begin[:]
call[name[stack].append, parameter[name[node]]]
variable[node] assign[=] name[node].left | keyword[def] identifier[_traverse_nodes] ( identifier[self] ):
literal[string]
identifier[node] = identifier[self] . identifier[root]
identifier[stack] =[]
keyword[while] identifier[stack] keyword[or] identifier[node] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[node] keyword[is] keyword[not] keyword[None] :
identifier[stack] . identifier[append] ( identifier[node] )
identifier[node] = identifier[node] . identifier[left]
keyword[else] :
identifier[node] = identifier[stack] . identifier[pop] ()
keyword[yield] identifier[node]
identifier[node] = identifier[node] . identifier[right] | def _traverse_nodes(self):
""" Debugging function (exposes cython nodes as dummy nodes) """
node = self.root
stack = []
while stack or node is not None:
if node is not None:
stack.append(node)
node = node.left # depends on [control=['if'], data=['node']]
else:
node = stack.pop()
yield node
node = node.right # depends on [control=['while'], data=[]] |
def save(self, file_path):
    """
    Write the database to a file. If a file with the specified name already
    exists it is backed up first (see ``get_backup_file_name``).
    :param file_path: destination file path
    :raises XonoticDBException: if ``file_path`` exists but is not a
        regular file (e.g. a directory)
    """
    if os.path.isfile(file_path):
        # Preserve the previous contents before overwriting.
        with open(self.get_backup_file_name(file_path), 'w') as d:
            with open(file_path, 'r') as o:
                d.write(o.read())
    elif os.path.exists(file_path):
        # Bug fix: the path was previously passed as a second constructor
        # argument, so the '%s' placeholder was never interpolated into
        # the exception message.
        raise XonoticDBException(
            '%s exists and is not a file. Cannot write to it.' % file_path)
    # Serialize into hash buckets: first line is the bucket count, then one
    # line per bucket holding concatenated \key\url-quoted-value pairs.
    lines = [''] * self.db_buckets
    for key, value in self.items():
        lines[self.hashfunc(key) % self.db_buckets] += \
            r'\%s\%s' % (key, urllib.parse.quote(value))
    with open(file_path, 'w') as f:
        f.write('%d\n' % self.db_buckets)
        for line in lines:
            f.write(line + '\n')
constant[
Write database to a file. If a file with the specified name exists it's backed up
:param file_path: file path
]
if call[name[os].path.isfile, parameter[name[file_path]]] begin[:]
with call[name[open], parameter[call[name[self].get_backup_file_name, parameter[name[file_path]]], constant[w]]] begin[:]
with call[name[open], parameter[name[file_path], constant[r]]] begin[:]
call[name[d].write, parameter[call[name[o].read, parameter[]]]]
variable[lines] assign[=] binary_operation[list[[<ast.Constant object at 0x7da20c76de10>]] * name[self].db_buckets]
for taget[tuple[[<ast.Name object at 0x7da20c76db40>, <ast.Name object at 0x7da20c76f670>]]] in starred[call[name[self].items, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da20c76ef50>
with call[name[open], parameter[name[file_path], constant[w]]] begin[:]
call[name[f].write, parameter[binary_operation[constant[%d
] <ast.Mod object at 0x7da2590d6920> name[self].db_buckets]]]
for taget[name[i]] in starred[name[lines]] begin[:]
call[name[f].write, parameter[binary_operation[name[i] + constant[
]]]] | keyword[def] identifier[save] ( identifier[self] , identifier[file_path] ):
literal[string]
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[file_path] ):
keyword[with] identifier[open] ( identifier[self] . identifier[get_backup_file_name] ( identifier[file_path] ), literal[string] ) keyword[as] identifier[d] :
keyword[with] identifier[open] ( identifier[file_path] , literal[string] ) keyword[as] identifier[o] :
identifier[d] . identifier[write] ( identifier[o] . identifier[read] ())
keyword[elif] identifier[os] . identifier[path] . identifier[exists] ( identifier[file_path] ):
keyword[raise] identifier[XonoticDBException] ( literal[string] , identifier[file_path] )
identifier[lines] =[ literal[string] ]* identifier[self] . identifier[db_buckets]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[self] . identifier[items] ():
identifier[lines] [ identifier[self] . identifier[hashfunc] ( identifier[key] )% identifier[self] . identifier[db_buckets] ]+= literal[string] %( identifier[key] , identifier[urllib] . identifier[parse] . identifier[quote] ( identifier[value] ))
keyword[with] identifier[open] ( identifier[file_path] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( literal[string] % identifier[self] . identifier[db_buckets] )
keyword[for] identifier[i] keyword[in] identifier[lines] :
identifier[f] . identifier[write] ( identifier[i] + literal[string] ) | def save(self, file_path):
"""
Write database to a file. If a file with the specified name exists it's backed up
:param file_path: file path
"""
if os.path.isfile(file_path):
with open(self.get_backup_file_name(file_path), 'w') as d:
with open(file_path, 'r') as o:
d.write(o.read()) # depends on [control=['with'], data=['o']] # depends on [control=['with'], data=['open', 'd']] # depends on [control=['if'], data=[]]
elif os.path.exists(file_path):
raise XonoticDBException('%s exists and is not a file. Cannot write to it.', file_path) # depends on [control=['if'], data=[]]
lines = [''] * self.db_buckets
for (key, value) in self.items():
lines[self.hashfunc(key) % self.db_buckets] += '\\%s\\%s' % (key, urllib.parse.quote(value)) # depends on [control=['for'], data=[]]
with open(file_path, 'w') as f:
f.write('%d\n' % self.db_buckets)
for i in lines:
f.write(i + '\n') # depends on [control=['for'], data=['i']] # depends on [control=['with'], data=['f']] |
def utime(self, path, times=None, ns=None,
          dir_fd=None, follow_symlinks=None):
    """Set the access and modification times of a file.

    Args:
        path: (str) Path to the file.
        times: 2-tuple (atime, mtime) of int/float seconds used to set the
            access and modification times; if None, the current time is used.
        ns: 2-tuple (atime, mtime) of int nanoseconds used to set the
            access and modification times; if None, the current time is used.
            New in Python 3.3.
        dir_fd: If not None, a directory file descriptor; `path` is then
            interpreted relative to that directory. New in Python 3.3.
        follow_symlinks: (bool) When False and `path` is a symlink, operate
            on the link itself rather than its target. New in Python 3.3.

    Raises:
        TypeError: if a Python-3.3-only keyword is used on an older
            interpreter, or if the passed `times`/`ns` tuple is malformed
            (wrong element types or length != 2).
    """
    if follow_symlinks is None:
        follow_symlinks = True
    else:
        # follow_symlinks was explicitly supplied; reject it on pre-3.3.
        if sys.version_info < (3, 3):
            raise TypeError(
                "utime() got an unexpected keyword argument 'follow_symlinks'")
    real_path = self._path_with_dir_fd(path, self.utime, dir_fd)
    # ns is likewise only available from Python 3.3 onwards.
    if sys.version_info < (3, 3) and ns is not None:
        raise TypeError("utime() got an unexpected keyword argument 'ns'")
    self.filesystem.utime(real_path, times, ns, follow_symlinks)
constant[Change the access and modified times of a file.
Args:
path: (str) Path to the file.
times: 2-tuple of int or float numbers, of the form (atime, mtime)
which is used to set the access and modified times in seconds.
If None, both times are set to the current time.
ns: 2-tuple of int numbers, of the form (atime, mtime) which is
used to set the access and modified times in nanoseconds.
If None, both times are set to the current time.
New in Python 3.3.
dir_fd: If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
New in Python 3.3.
follow_symlinks: (bool) If `False` and `path` points to a symlink,
the link itself is queried instead of the linked object.
New in Python 3.3.
Raises:
TypeError: If anything other than the expected types is
specified in the passed `times` or `ns` tuple,
or if the tuple length is not equal to 2.
ValueError: If both times and ns are specified.
]
if compare[name[follow_symlinks] is constant[None]] begin[:]
variable[follow_symlinks] assign[=] constant[True]
variable[path] assign[=] call[name[self]._path_with_dir_fd, parameter[name[path], name[self].utime, name[dir_fd]]]
if <ast.BoolOp object at 0x7da18dc066e0> begin[:]
<ast.Raise object at 0x7da18dc06230>
call[name[self].filesystem.utime, parameter[name[path], name[times], name[ns], name[follow_symlinks]]] | keyword[def] identifier[utime] ( identifier[self] , identifier[path] , identifier[times] = keyword[None] , identifier[ns] = keyword[None] ,
identifier[dir_fd] = keyword[None] , identifier[follow_symlinks] = keyword[None] ):
literal[string]
keyword[if] identifier[follow_symlinks] keyword[is] keyword[None] :
identifier[follow_symlinks] = keyword[True]
keyword[elif] identifier[sys] . identifier[version_info] <( literal[int] , literal[int] ):
keyword[raise] identifier[TypeError] (
literal[string] )
identifier[path] = identifier[self] . identifier[_path_with_dir_fd] ( identifier[path] , identifier[self] . identifier[utime] , identifier[dir_fd] )
keyword[if] identifier[ns] keyword[is] keyword[not] keyword[None] keyword[and] identifier[sys] . identifier[version_info] <( literal[int] , literal[int] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[self] . identifier[filesystem] . identifier[utime] ( identifier[path] , identifier[times] , identifier[ns] , identifier[follow_symlinks] ) | def utime(self, path, times=None, ns=None, dir_fd=None, follow_symlinks=None):
"""Change the access and modified times of a file.
Args:
path: (str) Path to the file.
times: 2-tuple of int or float numbers, of the form (atime, mtime)
which is used to set the access and modified times in seconds.
If None, both times are set to the current time.
ns: 2-tuple of int numbers, of the form (atime, mtime) which is
used to set the access and modified times in nanoseconds.
If None, both times are set to the current time.
New in Python 3.3.
dir_fd: If not `None`, the file descriptor of a directory,
with `path` being relative to this directory.
New in Python 3.3.
follow_symlinks: (bool) If `False` and `path` points to a symlink,
the link itself is queried instead of the linked object.
New in Python 3.3.
Raises:
TypeError: If anything other than the expected types is
specified in the passed `times` or `ns` tuple,
or if the tuple length is not equal to 2.
ValueError: If both times and ns are specified.
"""
if follow_symlinks is None:
follow_symlinks = True # depends on [control=['if'], data=['follow_symlinks']]
elif sys.version_info < (3, 3):
raise TypeError("utime() got an unexpected keyword argument 'follow_symlinks'") # depends on [control=['if'], data=[]]
path = self._path_with_dir_fd(path, self.utime, dir_fd)
if ns is not None and sys.version_info < (3, 3):
raise TypeError("utime() got an unexpected keyword argument 'ns'") # depends on [control=['if'], data=[]]
self.filesystem.utime(path, times, ns, follow_symlinks) |
def process_large_file(self, local_file, parent):
    """
    Upload a single file, using multiple processes so several chunks
    transfer concurrently. On completion, stores the new remote id on
    local_file.
    :param local_file: LocalFile: file we are uploading
    :param parent: LocalFolder/LocalProject: parent of the file
    """
    settings = self.settings
    uploader = FileUploader(settings.config, settings.data_service,
                            local_file, settings.watcher,
                            settings.file_upload_post_processor)
    new_remote_id = uploader.upload(settings.project_id,
                                    parent.kind, parent.remote_id)
    local_file.set_remote_id_after_send(new_remote_id)
constant[
Upload a single file using multiple processes to upload multiple chunks at the same time.
Updates local_file with it's remote_id when done.
:param local_file: LocalFile: file we are uploading
:param parent: LocalFolder/LocalProject: parent of the file
]
variable[file_content_sender] assign[=] call[name[FileUploader], parameter[name[self].settings.config, name[self].settings.data_service, name[local_file], name[self].settings.watcher, name[self].settings.file_upload_post_processor]]
variable[remote_id] assign[=] call[name[file_content_sender].upload, parameter[name[self].settings.project_id, name[parent].kind, name[parent].remote_id]]
call[name[local_file].set_remote_id_after_send, parameter[name[remote_id]]] | keyword[def] identifier[process_large_file] ( identifier[self] , identifier[local_file] , identifier[parent] ):
literal[string]
identifier[file_content_sender] = identifier[FileUploader] ( identifier[self] . identifier[settings] . identifier[config] , identifier[self] . identifier[settings] . identifier[data_service] , identifier[local_file] ,
identifier[self] . identifier[settings] . identifier[watcher] , identifier[self] . identifier[settings] . identifier[file_upload_post_processor] )
identifier[remote_id] = identifier[file_content_sender] . identifier[upload] ( identifier[self] . identifier[settings] . identifier[project_id] , identifier[parent] . identifier[kind] , identifier[parent] . identifier[remote_id] )
identifier[local_file] . identifier[set_remote_id_after_send] ( identifier[remote_id] ) | def process_large_file(self, local_file, parent):
"""
Upload a single file using multiple processes to upload multiple chunks at the same time.
Updates local_file with it's remote_id when done.
:param local_file: LocalFile: file we are uploading
:param parent: LocalFolder/LocalProject: parent of the file
"""
file_content_sender = FileUploader(self.settings.config, self.settings.data_service, local_file, self.settings.watcher, self.settings.file_upload_post_processor)
remote_id = file_content_sender.upload(self.settings.project_id, parent.kind, parent.remote_id)
local_file.set_remote_id_after_send(remote_id) |
def capacity_salgado_2008(sl, fd, h_l=0, h_b=0, vertical_load=1, verbose=0, **kwargs):
    """
    calculates the capacity according to
    THe Engineering of Foundations textbook by Salgado
    ISBN: 0072500581
    :param sl: Soil object (requires phi_r, cohesion, unit_dry_weight)
    :param fd: Foundation object (requires length, width, depth)
    :param h_l: Horizontal load parallel to length
    :param h_b: Horizontal load parallel to width
    :param vertical_load: Vertical load
    :param verbose: verbosity
    :keyword h_eff_b: lever arm of the horizontal load parallel to width (default 0)
    :keyword h_eff_l: lever arm of the horizontal load parallel to length (default 0)
    :keyword loc_v_b: position of the vertical load across the width (default: centre)
    :keyword loc_v_l: position of the vertical load along the length (default: centre)
    :keyword disable_requires: if True, skip validation of required soil/foundation attributes
    :return: ultimate bearing stress (also stored on fd.q_ult)
    :raises DesignError: if the effective width violates the eccentricity
        limit of para 3.4.1 (resultant outside the middle third)
    """
    # Need to make adjustments if sand has DR<40% or
    # clay has liquidity indices greater than 0.7
    if not kwargs.get("disable_requires", False):
        models.check_required(sl, ["phi_r", "cohesion", "unit_dry_weight"])
        models.check_required(fd, ["length", "width", "depth"])
    h_eff_b = kwargs.get("h_eff_b", 0)
    h_eff_l = kwargs.get("h_eff_l", 0)
    loc_v_l = kwargs.get("loc_v_l", fd.length / 2)
    loc_v_b = kwargs.get("loc_v_b", fd.width / 2)
    # Load eccentricities: moment (horizontal load x lever arm) over vertical load.
    ecc_b = h_b * h_eff_b / vertical_load
    ecc_l = h_l * h_eff_l / vertical_load
    # Effective footing dimensions, reduced for eccentric loading.
    width_eff = min(fd.width, 2 * (loc_v_b + ecc_b), 2 * (fd.width - loc_v_b - ecc_b))
    length_eff = min(fd.length, 2 * (loc_v_l + ecc_l), 2 * (fd.length - loc_v_l - ecc_l))
    # check para 3.4.1
    if width_eff / 2 < fd.width / 6:
        # BUG FIX: the exception was previously instantiated but never raised,
        # so the eccentricity limit was silently ignored.
        raise DesignError("failed on eccentricity")
    # LOAD FACTORS:
    fd.nq_factor = np.exp(np.pi * np.tan(sl.phi_r)) * (1 + np.sin(sl.phi_r)) / (1 - np.sin(sl.phi_r))
    fd.ng_factor = 1.5 * (fd.nq_factor - 1) * np.tan(sl.phi_r)
    # fd.ng_factor = (fd.nq_factor - 1) * np.tan(1.32 * sl.phi_r)
    if sl.phi_r == 0:
        # Undrained (phi = 0) case; avoids division by tan(0) below.
        fd.nc_factor = 5.14
    else:
        fd.nc_factor = (fd.nq_factor - 1) / np.tan(sl.phi_r)
    # shape factors:
    s_q = 1 + (width_eff / length_eff) * np.tan(sl.phi_r)
    s_g = max(1 - 0.4 * width_eff / length_eff, 0.6)
    s_c = 1.0
    # depth factors:
    d_q = 1 + 2 * np.tan(sl.phi_r) * (1 - np.sin(sl.phi_r)) ** 2 * fd.depth / width_eff
    d_g = 1.0
    d_c = 1.0
    # stress at footing base (surcharge from overburden):
    q_d = sl.unit_dry_weight * fd.depth
    if verbose:
        log("width_eff: ", width_eff)
        log("length_eff: ", length_eff)
        log("Nc: ", fd.nc_factor)
        log("Nq: ", fd.nq_factor)
        log("Ng: ", fd.ng_factor)
        log("s_c: ", s_c)
        log("s_q: ", s_q)
        log("s_g: ", s_g)
        log("d_c: ", d_c)
        log("d_q: ", d_q)
        log("d_g: ", d_g)
        log("q_d: ", q_d)
    # Capacity: cohesion term + surcharge term + self-weight term.
    fd.q_ult = (sl.cohesion * fd.nc_factor * s_c * d_c +
                q_d * fd.nq_factor * s_q * d_q +
                0.5 * width_eff * sl.unit_dry_weight *
                fd.ng_factor * s_g * d_g)
    if verbose:
        log("qult: ", fd.q_ult)
    return fd.q_ult
constant[
calculates the capacity according to
THe Engineering of Foundations textbook by Salgado
ISBN: 0072500581
:param sl: Soil object
:param fd: Foundation object
:param h_l: Horizontal load parallel to length
:param h_b: Horizontal load parallel to width
:param vertical_load: Vertical load
:param verbose: verbosity
:return: ultimate bearing stress
]
if <ast.UnaryOp object at 0x7da1b031bc40> begin[:]
call[name[models].check_required, parameter[name[sl], list[[<ast.Constant object at 0x7da1b031ba00>, <ast.Constant object at 0x7da1b031b9d0>, <ast.Constant object at 0x7da1b031b9a0>]]]]
call[name[models].check_required, parameter[name[fd], list[[<ast.Constant object at 0x7da1b031b820>, <ast.Constant object at 0x7da1b031b7f0>, <ast.Constant object at 0x7da1b031b7c0>]]]]
variable[h_eff_b] assign[=] call[name[kwargs].get, parameter[constant[h_eff_b], constant[0]]]
variable[h_eff_l] assign[=] call[name[kwargs].get, parameter[constant[h_eff_l], constant[0]]]
variable[loc_v_l] assign[=] call[name[kwargs].get, parameter[constant[loc_v_l], binary_operation[name[fd].length / constant[2]]]]
variable[loc_v_b] assign[=] call[name[kwargs].get, parameter[constant[loc_v_b], binary_operation[name[fd].width / constant[2]]]]
variable[ecc_b] assign[=] binary_operation[binary_operation[name[h_b] * name[h_eff_b]] / name[vertical_load]]
variable[ecc_l] assign[=] binary_operation[binary_operation[name[h_l] * name[h_eff_l]] / name[vertical_load]]
variable[width_eff] assign[=] call[name[min], parameter[name[fd].width, binary_operation[constant[2] * binary_operation[name[loc_v_b] + name[ecc_b]]], binary_operation[constant[2] * binary_operation[binary_operation[name[fd].width - name[loc_v_b]] - name[ecc_b]]]]]
variable[length_eff] assign[=] call[name[min], parameter[name[fd].length, binary_operation[constant[2] * binary_operation[name[loc_v_l] + name[ecc_l]]], binary_operation[constant[2] * binary_operation[binary_operation[name[fd].length - name[loc_v_l]] - name[ecc_l]]]]]
if compare[binary_operation[name[width_eff] / constant[2]] less[<] binary_operation[name[fd].width / constant[6]]] begin[:]
call[name[DesignError], parameter[constant[failed on eccentricity]]]
name[fd].nq_factor assign[=] binary_operation[binary_operation[call[name[np].exp, parameter[binary_operation[name[np].pi * call[name[np].tan, parameter[name[sl].phi_r]]]]] * binary_operation[constant[1] + call[name[np].sin, parameter[name[sl].phi_r]]]] / binary_operation[constant[1] - call[name[np].sin, parameter[name[sl].phi_r]]]]
name[fd].ng_factor assign[=] binary_operation[binary_operation[constant[1.5] * binary_operation[name[fd].nq_factor - constant[1]]] * call[name[np].tan, parameter[name[sl].phi_r]]]
if compare[name[sl].phi_r equal[==] constant[0]] begin[:]
name[fd].nc_factor assign[=] constant[5.14]
variable[s_q] assign[=] binary_operation[constant[1] + binary_operation[binary_operation[name[width_eff] / name[length_eff]] * call[name[np].tan, parameter[name[sl].phi_r]]]]
variable[s_g] assign[=] call[name[max], parameter[binary_operation[constant[1] - binary_operation[binary_operation[constant[0.4] * name[width_eff]] / name[length_eff]]], constant[0.6]]]
variable[s_c] assign[=] constant[1.0]
variable[d_q] assign[=] binary_operation[constant[1] + binary_operation[binary_operation[binary_operation[binary_operation[constant[2] * call[name[np].tan, parameter[name[sl].phi_r]]] * binary_operation[binary_operation[constant[1] - call[name[np].sin, parameter[name[sl].phi_r]]] ** constant[2]]] * name[fd].depth] / name[width_eff]]]
variable[d_g] assign[=] constant[1.0]
variable[d_c] assign[=] constant[1.0]
variable[q_d] assign[=] binary_operation[name[sl].unit_dry_weight * name[fd].depth]
if name[verbose] begin[:]
call[name[log], parameter[constant[width_eff: ], name[width_eff]]]
call[name[log], parameter[constant[length_eff: ], name[length_eff]]]
call[name[log], parameter[constant[Nc: ], name[fd].nc_factor]]
call[name[log], parameter[constant[Nq: ], name[fd].nq_factor]]
call[name[log], parameter[constant[Ng: ], name[fd].ng_factor]]
call[name[log], parameter[constant[s_c: ], name[s_c]]]
call[name[log], parameter[constant[s_q: ], name[s_q]]]
call[name[log], parameter[constant[s_g: ], name[s_g]]]
call[name[log], parameter[constant[d_c: ], name[d_c]]]
call[name[log], parameter[constant[d_q: ], name[d_q]]]
call[name[log], parameter[constant[d_g: ], name[d_g]]]
call[name[log], parameter[constant[q_d: ], name[q_d]]]
name[fd].q_ult assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[sl].cohesion * name[fd].nc_factor] * name[s_c]] * name[d_c]] + binary_operation[binary_operation[binary_operation[name[q_d] * name[fd].nq_factor] * name[s_q]] * name[d_q]]] + binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[constant[0.5] * name[width_eff]] * name[sl].unit_dry_weight] * name[fd].ng_factor] * name[s_g]] * name[d_g]]]
if name[verbose] begin[:]
call[name[log], parameter[constant[qult: ], name[fd].q_ult]]
return[name[fd].q_ult] | keyword[def] identifier[capacity_salgado_2008] ( identifier[sl] , identifier[fd] , identifier[h_l] = literal[int] , identifier[h_b] = literal[int] , identifier[vertical_load] = literal[int] , identifier[verbose] = literal[int] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[kwargs] . identifier[get] ( literal[string] , keyword[False] ):
identifier[models] . identifier[check_required] ( identifier[sl] ,[ literal[string] , literal[string] , literal[string] ])
identifier[models] . identifier[check_required] ( identifier[fd] ,[ literal[string] , literal[string] , literal[string] ])
identifier[h_eff_b] = identifier[kwargs] . identifier[get] ( literal[string] , literal[int] )
identifier[h_eff_l] = identifier[kwargs] . identifier[get] ( literal[string] , literal[int] )
identifier[loc_v_l] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[fd] . identifier[length] / literal[int] )
identifier[loc_v_b] = identifier[kwargs] . identifier[get] ( literal[string] , identifier[fd] . identifier[width] / literal[int] )
identifier[ecc_b] = identifier[h_b] * identifier[h_eff_b] / identifier[vertical_load]
identifier[ecc_l] = identifier[h_l] * identifier[h_eff_l] / identifier[vertical_load]
identifier[width_eff] = identifier[min] ( identifier[fd] . identifier[width] , literal[int] *( identifier[loc_v_b] + identifier[ecc_b] ), literal[int] *( identifier[fd] . identifier[width] - identifier[loc_v_b] - identifier[ecc_b] ))
identifier[length_eff] = identifier[min] ( identifier[fd] . identifier[length] , literal[int] *( identifier[loc_v_l] + identifier[ecc_l] ), literal[int] *( identifier[fd] . identifier[length] - identifier[loc_v_l] - identifier[ecc_l] ))
keyword[if] identifier[width_eff] / literal[int] < identifier[fd] . identifier[width] / literal[int] :
identifier[DesignError] ( literal[string] )
identifier[fd] . identifier[nq_factor] = identifier[np] . identifier[exp] ( identifier[np] . identifier[pi] * identifier[np] . identifier[tan] ( identifier[sl] . identifier[phi_r] ))*( literal[int] + identifier[np] . identifier[sin] ( identifier[sl] . identifier[phi_r] ))/( literal[int] - identifier[np] . identifier[sin] ( identifier[sl] . identifier[phi_r] ))
identifier[fd] . identifier[ng_factor] = literal[int] *( identifier[fd] . identifier[nq_factor] - literal[int] )* identifier[np] . identifier[tan] ( identifier[sl] . identifier[phi_r] )
keyword[if] identifier[sl] . identifier[phi_r] == literal[int] :
identifier[fd] . identifier[nc_factor] = literal[int]
keyword[else] :
identifier[fd] . identifier[nc_factor] =( identifier[fd] . identifier[nq_factor] - literal[int] )/ identifier[np] . identifier[tan] ( identifier[sl] . identifier[phi_r] )
identifier[s_q] = literal[int] +( identifier[width_eff] / identifier[length_eff] )* identifier[np] . identifier[tan] ( identifier[sl] . identifier[phi_r] )
identifier[s_g] = identifier[max] ( literal[int] - literal[int] * identifier[width_eff] / identifier[length_eff] , literal[int] )
identifier[s_c] = literal[int]
identifier[d_q] = literal[int] + literal[int] * identifier[np] . identifier[tan] ( identifier[sl] . identifier[phi_r] )*( literal[int] - identifier[np] . identifier[sin] ( identifier[sl] . identifier[phi_r] ))** literal[int] * identifier[fd] . identifier[depth] / identifier[width_eff]
identifier[d_g] = literal[int]
identifier[d_c] = literal[int]
identifier[q_d] = identifier[sl] . identifier[unit_dry_weight] * identifier[fd] . identifier[depth]
keyword[if] identifier[verbose] :
identifier[log] ( literal[string] , identifier[width_eff] )
identifier[log] ( literal[string] , identifier[length_eff] )
identifier[log] ( literal[string] , identifier[fd] . identifier[nc_factor] )
identifier[log] ( literal[string] , identifier[fd] . identifier[nq_factor] )
identifier[log] ( literal[string] , identifier[fd] . identifier[ng_factor] )
identifier[log] ( literal[string] , identifier[s_c] )
identifier[log] ( literal[string] , identifier[s_q] )
identifier[log] ( literal[string] , identifier[s_g] )
identifier[log] ( literal[string] , identifier[d_c] )
identifier[log] ( literal[string] , identifier[d_q] )
identifier[log] ( literal[string] , identifier[d_g] )
identifier[log] ( literal[string] , identifier[q_d] )
identifier[fd] . identifier[q_ult] =( identifier[sl] . identifier[cohesion] * identifier[fd] . identifier[nc_factor] * identifier[s_c] * identifier[d_c] +
identifier[q_d] * identifier[fd] . identifier[nq_factor] * identifier[s_q] * identifier[d_q] +
literal[int] * identifier[width_eff] * identifier[sl] . identifier[unit_dry_weight] *
identifier[fd] . identifier[ng_factor] * identifier[s_g] * identifier[d_g] )
keyword[if] identifier[verbose] :
identifier[log] ( literal[string] , identifier[fd] . identifier[q_ult] )
keyword[return] identifier[fd] . identifier[q_ult] | def capacity_salgado_2008(sl, fd, h_l=0, h_b=0, vertical_load=1, verbose=0, **kwargs):
"""
calculates the capacity according to
THe Engineering of Foundations textbook by Salgado
ISBN: 0072500581
:param sl: Soil object
:param fd: Foundation object
:param h_l: Horizontal load parallel to length
:param h_b: Horizontal load parallel to width
:param vertical_load: Vertical load
:param verbose: verbosity
:return: ultimate bearing stress
"""
# Need to make adjustments if sand has DR<40% or
# clay has liquidity indices greater than 0.7
if not kwargs.get('disable_requires', False):
models.check_required(sl, ['phi_r', 'cohesion', 'unit_dry_weight'])
models.check_required(fd, ['length', 'width', 'depth']) # depends on [control=['if'], data=[]]
h_eff_b = kwargs.get('h_eff_b', 0)
h_eff_l = kwargs.get('h_eff_l', 0)
loc_v_l = kwargs.get('loc_v_l', fd.length / 2)
loc_v_b = kwargs.get('loc_v_b', fd.width / 2)
ecc_b = h_b * h_eff_b / vertical_load
ecc_l = h_l * h_eff_l / vertical_load
width_eff = min(fd.width, 2 * (loc_v_b + ecc_b), 2 * (fd.width - loc_v_b - ecc_b))
length_eff = min(fd.length, 2 * (loc_v_l + ecc_l), 2 * (fd.length - loc_v_l - ecc_l))
# check para 3.4.1
if width_eff / 2 < fd.width / 6:
DesignError('failed on eccentricity') # depends on [control=['if'], data=[]]
# LOAD FACTORS:
fd.nq_factor = np.exp(np.pi * np.tan(sl.phi_r)) * (1 + np.sin(sl.phi_r)) / (1 - np.sin(sl.phi_r))
fd.ng_factor = 1.5 * (fd.nq_factor - 1) * np.tan(sl.phi_r)
# fd.ng_factor = (fd.nq_factor - 1) * np.tan(1.32 * sl.phi_r)
if sl.phi_r == 0:
fd.nc_factor = 5.14 # depends on [control=['if'], data=[]]
else:
fd.nc_factor = (fd.nq_factor - 1) / np.tan(sl.phi_r)
# shape factors:
s_q = 1 + width_eff / length_eff * np.tan(sl.phi_r)
s_g = max(1 - 0.4 * width_eff / length_eff, 0.6)
s_c = 1.0
# depth factors:
d_q = 1 + 2 * np.tan(sl.phi_r) * (1 - np.sin(sl.phi_r)) ** 2 * fd.depth / width_eff
d_g = 1.0
d_c = 1.0
# stress at footing base:
q_d = sl.unit_dry_weight * fd.depth
if verbose:
log('width_eff: ', width_eff)
log('length_eff: ', length_eff)
log('Nc: ', fd.nc_factor)
log('Nq: ', fd.nq_factor)
log('Ng: ', fd.ng_factor)
log('s_c: ', s_c)
log('s_q: ', s_q)
log('s_g: ', s_g)
log('d_c: ', d_c)
log('d_q: ', d_q)
log('d_g: ', d_g)
log('q_d: ', q_d) # depends on [control=['if'], data=[]]
# Capacity
fd.q_ult = sl.cohesion * fd.nc_factor * s_c * d_c + q_d * fd.nq_factor * s_q * d_q + 0.5 * width_eff * sl.unit_dry_weight * fd.ng_factor * s_g * d_g
if verbose:
log('qult: ', fd.q_ult) # depends on [control=['if'], data=[]]
return fd.q_ult |
def to_nullable_boolean(value):
    """
    Converts value into boolean or returns None when conversion is not possible.
    :param value: the value to convert.
    :return: boolean value or None when conversion is not supported.
    """
    # Shortcuts
    if value is None:
        return None
    if isinstance(value, bool):
        return value
    str_value = str(value).lower()
    # All true values
    if str_value in ['1', 'true', 't', 'yes', 'y']:
        return True
    # All false values
    # BUG FIX: 'frue' was a typo for 'false', so the string "false"
    # fell through and converted to None instead of False.
    if str_value in ['0', 'false', 'f', 'no', 'n']:
        return False
    # Everything else:
    return None
constant[
Converts value into boolean or returns None when conversion is not possible.
:param value: the value to convert.
:return: boolean value or None when convertion is not supported.
]
if compare[name[value] equal[==] constant[None]] begin[:]
return[constant[None]]
if compare[call[name[type], parameter[name[value]]] equal[==] call[name[type], parameter[constant[True]]]] begin[:]
return[name[value]]
variable[str_value] assign[=] call[call[name[str], parameter[name[value]]].lower, parameter[]]
if compare[name[str_value] in list[[<ast.Constant object at 0x7da1b16d6290>, <ast.Constant object at 0x7da1b16d6c20>, <ast.Constant object at 0x7da1b16d77c0>, <ast.Constant object at 0x7da1b16d4190>, <ast.Constant object at 0x7da1b16d7730>]]] begin[:]
return[constant[True]]
if compare[name[str_value] in list[[<ast.Constant object at 0x7da1b16d7ee0>, <ast.Constant object at 0x7da1b16d4d30>, <ast.Constant object at 0x7da1b16d4a60>, <ast.Constant object at 0x7da1b16d7df0>, <ast.Constant object at 0x7da1b16d4d00>]]] begin[:]
return[constant[False]]
return[constant[None]] | keyword[def] identifier[to_nullable_boolean] ( identifier[value] ):
literal[string]
keyword[if] identifier[value] == keyword[None] :
keyword[return] keyword[None]
keyword[if] identifier[type] ( identifier[value] )== identifier[type] ( keyword[True] ):
keyword[return] identifier[value]
identifier[str_value] = identifier[str] ( identifier[value] ). identifier[lower] ()
keyword[if] identifier[str_value] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
keyword[return] keyword[True]
keyword[if] identifier[str_value] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
keyword[return] keyword[False]
keyword[return] keyword[None] | def to_nullable_boolean(value):
"""
Converts value into boolean or returns None when conversion is not possible.
:param value: the value to convert.
:return: boolean value or None when convertion is not supported.
"""
# Shortcuts
if value == None:
return None # depends on [control=['if'], data=[]]
if type(value) == type(True):
return value # depends on [control=['if'], data=[]]
str_value = str(value).lower()
# All true values
if str_value in ['1', 'true', 't', 'yes', 'y']:
return True # depends on [control=['if'], data=[]]
# All false values
if str_value in ['0', 'frue', 'f', 'no', 'n']:
return False # depends on [control=['if'], data=[]]
# Everything else:
return None |
def get_objects_with_object(self, obj_type, *child_types):
    """
    :param obj_type: requested object type.
    :param child_types: requested child types.
    :return: all children of the requested type that have the requested child types.
    """
    matching = []
    for candidate in self.get_objects_by_type(obj_type):
        # Keep only candidates that themselves contain the requested child types.
        if candidate.get_objects_by_type(*child_types):
            matching.append(candidate)
    return matching
constant[
:param obj_type: requested object type.
:param child_type: requested child types.
:return: all children of the requested type that have the requested child types.
]
return[<ast.ListComp object at 0x7da18fe93cd0>] | keyword[def] identifier[get_objects_with_object] ( identifier[self] , identifier[obj_type] ,* identifier[child_types] ):
literal[string]
keyword[return] [ identifier[o] keyword[for] identifier[o] keyword[in] identifier[self] . identifier[get_objects_by_type] ( identifier[obj_type] ) keyword[if]
identifier[o] . identifier[get_objects_by_type] (* identifier[child_types] )] | def get_objects_with_object(self, obj_type, *child_types):
"""
:param obj_type: requested object type.
:param child_type: requested child types.
:return: all children of the requested type that have the requested child types.
"""
return [o for o in self.get_objects_by_type(obj_type) if o.get_objects_by_type(*child_types)] |
def get_midnight():
    """Return the most recent midnight in localtime as datetime.
    @return: Midnight datetime
    """
    # With USE_TZ the aware "now" must be shifted into local time first,
    # so the replace() below truncates against the local calendar day.
    current = localtime(now()) if settings.USE_TZ else now()
    return current.replace(hour=0, minute=0, second=0, microsecond=0)
constant[Return last midnight in localtime as datetime.
@return: Midnight datetime
]
variable[limit] assign[=] call[name[now], parameter[]]
if name[settings].USE_TZ begin[:]
variable[limit] assign[=] call[name[localtime], parameter[name[limit]]]
return[call[name[limit].replace, parameter[]]] | keyword[def] identifier[get_midnight] ():
literal[string]
identifier[limit] = identifier[now] ()
keyword[if] identifier[settings] . identifier[USE_TZ] :
identifier[limit] = identifier[localtime] ( identifier[limit] )
keyword[return] identifier[limit] . identifier[replace] ( identifier[hour] = literal[int] , identifier[minute] = literal[int] , identifier[second] = literal[int] , identifier[microsecond] = literal[int] ) | def get_midnight():
"""Return last midnight in localtime as datetime.
@return: Midnight datetime
"""
limit = now()
if settings.USE_TZ:
limit = localtime(limit) # depends on [control=['if'], data=[]]
return limit.replace(hour=0, minute=0, second=0, microsecond=0) |
def find_range_in_section_list(start, end, section_list):
    """Return the sections belonging to the given range.

    The list is interpreted as the start points of consecutive sections,
    with the final entry being the end point of the last section. For
    example, [5, 8, 30, 31] describes the sections [5-8), [8-30), [30-31].
    Thus this function returns [5, 8] for the range (7, 9) and [5, 8, 30]
    for (7, 30).

    Parameters
    ---------
    start : float
        The start of the desired range.
    end : float
        The end of the desired range.
    section_list : sortedcontainers.SortedList
        A list of start points of consecutive sections.

    Returns
    -------
    iterable
        The starting points of all sections belonging to the given range.

    Example
    -------
    >>> from sortedcontainers import SortedList
    >>> seclist = SortedList([5, 8, 30, 31])
    >>> find_range_in_section_list(3, 4, seclist)
    []
    >>> find_range_in_section_list(6, 7, seclist)
    [5]
    >>> find_range_in_section_list(7, 9, seclist)
    [5, 8]
    >>> find_range_in_section_list(7, 30, seclist)
    [5, 8, 30]
    >>> find_range_in_section_list(7, 321, seclist)
    [5, 8, 30]
    >>> find_range_in_section_list(4, 321, seclist)
    [5, 8, 30]
    """
    # Delegate index resolution, then slice the matching section starts.
    bounds = find_range_ix_in_section_list(start, end, section_list)
    first_ix, last_ix = bounds[0], bounds[1]
    return section_list[first_ix:last_ix]
constant[Returns all sections belonging to the given range.
The given list is assumed to contain start points of consecutive
sections, except for the final point, assumed to be the end point of the
last section. For example, the list [5, 8, 30, 31] is interpreted as the
following list of sections: [5-8), [8-30), [30-31]. As such, this function
will return [5,8] for the range (7,9) and [5,8,30] while for (7, 30).
Parameters
---------
start : float
The start of the desired range.
end : float
The end of the desired range.
section_list : sortedcontainers.SortedList
A list of start points of consecutive sections.
Returns
-------
iterable
The starting points of all sections belonging to the given range.
Example
-------
>>> from sortedcontainers import SortedList
>>> seclist = SortedList([5, 8, 30, 31])
>>> find_range_in_section_list(3, 4, seclist)
[]
>>> find_range_in_section_list(6, 7, seclist)
[5]
>>> find_range_in_section_list(7, 9, seclist)
[5, 8]
>>> find_range_in_section_list(7, 30, seclist)
[5, 8, 30]
>>> find_range_in_section_list(7, 321, seclist)
[5, 8, 30]
>>> find_range_in_section_list(4, 321, seclist)
[5, 8, 30]
]
variable[ind] assign[=] call[name[find_range_ix_in_section_list], parameter[name[start], name[end], name[section_list]]]
return[call[name[section_list]][<ast.Slice object at 0x7da20c6a99c0>]] | keyword[def] identifier[find_range_in_section_list] ( identifier[start] , identifier[end] , identifier[section_list] ):
literal[string]
identifier[ind] = identifier[find_range_ix_in_section_list] ( identifier[start] , identifier[end] , identifier[section_list] )
keyword[return] identifier[section_list] [ identifier[ind] [ literal[int] ]: identifier[ind] [ literal[int] ]] | def find_range_in_section_list(start, end, section_list):
"""Returns all sections belonging to the given range.
The given list is assumed to contain start points of consecutive
sections, except for the final point, assumed to be the end point of the
last section. For example, the list [5, 8, 30, 31] is interpreted as the
following list of sections: [5-8), [8-30), [30-31]. As such, this function
will return [5,8] for the range (7,9) and [5,8,30] while for (7, 30).
Parameters
---------
start : float
The start of the desired range.
end : float
The end of the desired range.
section_list : sortedcontainers.SortedList
A list of start points of consecutive sections.
Returns
-------
iterable
The starting points of all sections belonging to the given range.
Example
-------
>>> from sortedcontainers import SortedList
>>> seclist = SortedList([5, 8, 30, 31])
>>> find_range_in_section_list(3, 4, seclist)
[]
>>> find_range_in_section_list(6, 7, seclist)
[5]
>>> find_range_in_section_list(7, 9, seclist)
[5, 8]
>>> find_range_in_section_list(7, 30, seclist)
[5, 8, 30]
>>> find_range_in_section_list(7, 321, seclist)
[5, 8, 30]
>>> find_range_in_section_list(4, 321, seclist)
[5, 8, 30]
"""
ind = find_range_ix_in_section_list(start, end, section_list)
return section_list[ind[0]:ind[1]] |
def _is_dirty(dir_path):
"""Check whether a git repository has uncommitted changes."""
try:
subprocess.check_call(["git", "diff", "--quiet"], cwd=dir_path)
return False
except subprocess.CalledProcessError:
return True | def function[_is_dirty, parameter[dir_path]]:
constant[Check whether a git repository has uncommitted changes.]
<ast.Try object at 0x7da1b1f72470> | keyword[def] identifier[_is_dirty] ( identifier[dir_path] ):
literal[string]
keyword[try] :
identifier[subprocess] . identifier[check_call] ([ literal[string] , literal[string] , literal[string] ], identifier[cwd] = identifier[dir_path] )
keyword[return] keyword[False]
keyword[except] identifier[subprocess] . identifier[CalledProcessError] :
keyword[return] keyword[True] | def _is_dirty(dir_path):
"""Check whether a git repository has uncommitted changes."""
try:
subprocess.check_call(['git', 'diff', '--quiet'], cwd=dir_path)
return False # depends on [control=['try'], data=[]]
except subprocess.CalledProcessError:
return True # depends on [control=['except'], data=[]] |
def create_floatingip(self, context, floatingip):
"""Create floating IP.
:param context: Neutron request context
:param floatingip: data for the floating IP being created
:returns: A floating IP object on success
As the l3 router plugin asynchronously creates floating IPs
leveraging the l3 agent and l3 cfg agent, the initial status for the
floating IP object will be DOWN.
"""
return super(CiscoRouterPlugin, self).create_floatingip(
context, floatingip,
initial_status=bc.constants.FLOATINGIP_STATUS_DOWN) | def function[create_floatingip, parameter[self, context, floatingip]]:
constant[Create floating IP.
:param context: Neutron request context
:param floatingip: data for the floating IP being created
:returns: A floating IP object on success
As the l3 router plugin asynchronously creates floating IPs
leveraging the l3 agent and l3 cfg agent, the initial status for the
floating IP object will be DOWN.
]
return[call[call[name[super], parameter[name[CiscoRouterPlugin], name[self]]].create_floatingip, parameter[name[context], name[floatingip]]]] | keyword[def] identifier[create_floatingip] ( identifier[self] , identifier[context] , identifier[floatingip] ):
literal[string]
keyword[return] identifier[super] ( identifier[CiscoRouterPlugin] , identifier[self] ). identifier[create_floatingip] (
identifier[context] , identifier[floatingip] ,
identifier[initial_status] = identifier[bc] . identifier[constants] . identifier[FLOATINGIP_STATUS_DOWN] ) | def create_floatingip(self, context, floatingip):
"""Create floating IP.
:param context: Neutron request context
:param floatingip: data for the floating IP being created
:returns: A floating IP object on success
As the l3 router plugin asynchronously creates floating IPs
leveraging the l3 agent and l3 cfg agent, the initial status for the
floating IP object will be DOWN.
"""
return super(CiscoRouterPlugin, self).create_floatingip(context, floatingip, initial_status=bc.constants.FLOATINGIP_STATUS_DOWN) |
def _choose_scheme(self):
"""Choose color scheme"""
if self.style_cols[self.color] in ('string', 'boolean', ):
self.scheme = antique(10)
elif self.style_cols[self.color] in ('number', ):
self.scheme = mint(5)
elif self.style_cols[self.color] in ('date', 'geometry', ):
raise ValueError(
'Cannot style column `{col}` of type `{type}`. It must be '
'numeric, text, or boolean.'.format(
col=self.color, type=self.style_cols[self.color])) | def function[_choose_scheme, parameter[self]]:
constant[Choose color scheme]
if compare[call[name[self].style_cols][name[self].color] in tuple[[<ast.Constant object at 0x7da20c990f70>, <ast.Constant object at 0x7da20c993850>]]] begin[:]
name[self].scheme assign[=] call[name[antique], parameter[constant[10]]] | keyword[def] identifier[_choose_scheme] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[style_cols] [ identifier[self] . identifier[color] ] keyword[in] ( literal[string] , literal[string] ,):
identifier[self] . identifier[scheme] = identifier[antique] ( literal[int] )
keyword[elif] identifier[self] . identifier[style_cols] [ identifier[self] . identifier[color] ] keyword[in] ( literal[string] ,):
identifier[self] . identifier[scheme] = identifier[mint] ( literal[int] )
keyword[elif] identifier[self] . identifier[style_cols] [ identifier[self] . identifier[color] ] keyword[in] ( literal[string] , literal[string] ,):
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] . identifier[format] (
identifier[col] = identifier[self] . identifier[color] , identifier[type] = identifier[self] . identifier[style_cols] [ identifier[self] . identifier[color] ])) | def _choose_scheme(self):
"""Choose color scheme"""
if self.style_cols[self.color] in ('string', 'boolean'):
self.scheme = antique(10) # depends on [control=['if'], data=[]]
elif self.style_cols[self.color] in ('number',):
self.scheme = mint(5) # depends on [control=['if'], data=[]]
elif self.style_cols[self.color] in ('date', 'geometry'):
raise ValueError('Cannot style column `{col}` of type `{type}`. It must be numeric, text, or boolean.'.format(col=self.color, type=self.style_cols[self.color])) # depends on [control=['if'], data=[]] |
def publish(self):
'''
Immediately shares a single pending update and recalculates times for
updates remaining in the queue.
'''
url = PATHS['PUBLISH'] % self.id
return self.api.post(url=url) | def function[publish, parameter[self]]:
constant[
Immediately shares a single pending update and recalculates times for
updates remaining in the queue.
]
variable[url] assign[=] binary_operation[call[name[PATHS]][constant[PUBLISH]] <ast.Mod object at 0x7da2590d6920> name[self].id]
return[call[name[self].api.post, parameter[]]] | keyword[def] identifier[publish] ( identifier[self] ):
literal[string]
identifier[url] = identifier[PATHS] [ literal[string] ]% identifier[self] . identifier[id]
keyword[return] identifier[self] . identifier[api] . identifier[post] ( identifier[url] = identifier[url] ) | def publish(self):
"""
Immediately shares a single pending update and recalculates times for
updates remaining in the queue.
"""
url = PATHS['PUBLISH'] % self.id
return self.api.post(url=url) |
def wait(self, build_id, states):
"""
:param build_id: wait for build to finish
:return:
"""
logger.info("watching build '%s'", build_id)
for changetype, obj in self.watch_resource("builds", build_id):
try:
obj_name = obj["metadata"]["name"]
except KeyError:
logger.error("'object' doesn't have any name")
continue
try:
obj_status = obj["status"]["phase"]
except KeyError:
logger.error("'object' doesn't have any status")
continue
else:
obj_status_lower = obj_status.lower()
logger.info("object has changed: '%s', status: '%s', name: '%s'",
changetype, obj_status, obj_name)
if obj_name == build_id:
logger.info("matching build found")
logger.debug("is %s in %s?", repr(obj_status_lower), states)
if obj_status_lower in states:
logger.debug("Yes, build is in the state I'm waiting for.")
return obj
else:
logger.debug("No, build is not in the state I'm "
"waiting for.")
else:
logger.info("The build %r isn't me %r", obj_name, build_id)
# I'm not sure how we can end up here since there are two possible scenarios:
# 1. our object was found and we are returning in the loop
# 2. our object was not found and we keep waiting (in the loop)
# Therefore, let's raise here
logger.warning("build '%s' was not found during wait", build_id)
raise OsbsWatchBuildNotFound("build '%s' was not found and response stream ended" %
build_id) | def function[wait, parameter[self, build_id, states]]:
constant[
:param build_id: wait for build to finish
:return:
]
call[name[logger].info, parameter[constant[watching build '%s'], name[build_id]]]
for taget[tuple[[<ast.Name object at 0x7da1b0e0f310>, <ast.Name object at 0x7da1b0e0e200>]]] in starred[call[name[self].watch_resource, parameter[constant[builds], name[build_id]]]] begin[:]
<ast.Try object at 0x7da1b0e0f3a0>
<ast.Try object at 0x7da1b0e0cb50>
call[name[logger].info, parameter[constant[object has changed: '%s', status: '%s', name: '%s'], name[changetype], name[obj_status], name[obj_name]]]
if compare[name[obj_name] equal[==] name[build_id]] begin[:]
call[name[logger].info, parameter[constant[matching build found]]]
call[name[logger].debug, parameter[constant[is %s in %s?], call[name[repr], parameter[name[obj_status_lower]]], name[states]]]
if compare[name[obj_status_lower] in name[states]] begin[:]
call[name[logger].debug, parameter[constant[Yes, build is in the state I'm waiting for.]]]
return[name[obj]]
call[name[logger].warning, parameter[constant[build '%s' was not found during wait], name[build_id]]]
<ast.Raise object at 0x7da1b0e0dba0> | keyword[def] identifier[wait] ( identifier[self] , identifier[build_id] , identifier[states] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] , identifier[build_id] )
keyword[for] identifier[changetype] , identifier[obj] keyword[in] identifier[self] . identifier[watch_resource] ( literal[string] , identifier[build_id] ):
keyword[try] :
identifier[obj_name] = identifier[obj] [ literal[string] ][ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[continue]
keyword[try] :
identifier[obj_status] = identifier[obj] [ literal[string] ][ literal[string] ]
keyword[except] identifier[KeyError] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[continue]
keyword[else] :
identifier[obj_status_lower] = identifier[obj_status] . identifier[lower] ()
identifier[logger] . identifier[info] ( literal[string] ,
identifier[changetype] , identifier[obj_status] , identifier[obj_name] )
keyword[if] identifier[obj_name] == identifier[build_id] :
identifier[logger] . identifier[info] ( literal[string] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[repr] ( identifier[obj_status_lower] ), identifier[states] )
keyword[if] identifier[obj_status_lower] keyword[in] identifier[states] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[return] identifier[obj]
keyword[else] :
identifier[logger] . identifier[debug] ( literal[string]
literal[string] )
keyword[else] :
identifier[logger] . identifier[info] ( literal[string] , identifier[obj_name] , identifier[build_id] )
identifier[logger] . identifier[warning] ( literal[string] , identifier[build_id] )
keyword[raise] identifier[OsbsWatchBuildNotFound] ( literal[string] %
identifier[build_id] ) | def wait(self, build_id, states):
"""
:param build_id: wait for build to finish
:return:
"""
logger.info("watching build '%s'", build_id)
for (changetype, obj) in self.watch_resource('builds', build_id):
try:
obj_name = obj['metadata']['name'] # depends on [control=['try'], data=[]]
except KeyError:
logger.error("'object' doesn't have any name")
continue # depends on [control=['except'], data=[]]
try:
obj_status = obj['status']['phase'] # depends on [control=['try'], data=[]]
except KeyError:
logger.error("'object' doesn't have any status")
continue # depends on [control=['except'], data=[]]
else:
obj_status_lower = obj_status.lower()
logger.info("object has changed: '%s', status: '%s', name: '%s'", changetype, obj_status, obj_name)
if obj_name == build_id:
logger.info('matching build found')
logger.debug('is %s in %s?', repr(obj_status_lower), states)
if obj_status_lower in states:
logger.debug("Yes, build is in the state I'm waiting for.")
return obj # depends on [control=['if'], data=[]]
else:
logger.debug("No, build is not in the state I'm waiting for.") # depends on [control=['if'], data=[]]
else:
logger.info("The build %r isn't me %r", obj_name, build_id) # depends on [control=['for'], data=[]]
# I'm not sure how we can end up here since there are two possible scenarios:
# 1. our object was found and we are returning in the loop
# 2. our object was not found and we keep waiting (in the loop)
# Therefore, let's raise here
logger.warning("build '%s' was not found during wait", build_id)
raise OsbsWatchBuildNotFound("build '%s' was not found and response stream ended" % build_id) |
def json_to_entity(tc_data, value_fields, resource_type, resource_type_parent):
"""Convert ThreatConnect JSON response to a TCEntityArray.
.. Attention:: This method is subject to frequent changes.
Args:
tc_data (dictionary): Array of data returned from TC API call.
value_fields (list): Field names that contain the "value" data.
resource_type (string): The resource type of the tc_data provided.
resource_type_parent (string): The resource parent type of the tc_data provided.
Returns:
(list): A list representing a TCEntityArray.
"""
if not isinstance(tc_data, list):
tc_data = [tc_data]
entity_array = []
for d in tc_data:
entity = {'id': d.get('id'), 'webLink': d.get('webLink')}
# value
values = []
if 'summary' in d:
values.append(d.get('summary'))
else:
for field in value_fields:
if d.get(field) is not None:
values.append(d.get(field))
entity['value'] = ' : '.join(values)
# type
if d.get('type') is not None:
entity['type'] = d.get('type')
else:
entity['type'] = resource_type
if resource_type_parent in ['Indicator']:
entity['confidence'] = d.get('confidence')
entity['rating'] = d.get('rating')
entity['threatAssessConfidence'] = d.get('threatAssessConfidence')
entity['threatAssessRating'] = d.get('threatAssessRating')
entity['dateLastModified'] = d.get('lastModified')
if resource_type_parent in ['Indicator', 'Group']:
if 'owner' in d:
entity['ownerName'] = d['owner']['name']
else:
entity['ownerName'] = d.get('ownerName')
entity['dateAdded'] = d.get('dateAdded')
if resource_type_parent in ['Victim']:
entity['ownerName'] = d.get('org')
entity_array.append(entity)
return entity_array | def function[json_to_entity, parameter[tc_data, value_fields, resource_type, resource_type_parent]]:
constant[Convert ThreatConnect JSON response to a TCEntityArray.
.. Attention:: This method is subject to frequent changes.
Args:
tc_data (dictionary): Array of data returned from TC API call.
value_fields (list): Field names that contain the "value" data.
resource_type (string): The resource type of the tc_data provided.
resource_type_parent (string): The resource parent type of the tc_data provided.
Returns:
(list): A list representing a TCEntityArray.
]
if <ast.UnaryOp object at 0x7da20c9937c0> begin[:]
variable[tc_data] assign[=] list[[<ast.Name object at 0x7da20c990100>]]
variable[entity_array] assign[=] list[[]]
for taget[name[d]] in starred[name[tc_data]] begin[:]
variable[entity] assign[=] dictionary[[<ast.Constant object at 0x7da20c990f40>, <ast.Constant object at 0x7da20c990220>], [<ast.Call object at 0x7da20c992470>, <ast.Call object at 0x7da20c9934c0>]]
variable[values] assign[=] list[[]]
if compare[constant[summary] in name[d]] begin[:]
call[name[values].append, parameter[call[name[d].get, parameter[constant[summary]]]]]
call[name[entity]][constant[value]] assign[=] call[constant[ : ].join, parameter[name[values]]]
if compare[call[name[d].get, parameter[constant[type]]] is_not constant[None]] begin[:]
call[name[entity]][constant[type]] assign[=] call[name[d].get, parameter[constant[type]]]
if compare[name[resource_type_parent] in list[[<ast.Constant object at 0x7da20c991240>]]] begin[:]
call[name[entity]][constant[confidence]] assign[=] call[name[d].get, parameter[constant[confidence]]]
call[name[entity]][constant[rating]] assign[=] call[name[d].get, parameter[constant[rating]]]
call[name[entity]][constant[threatAssessConfidence]] assign[=] call[name[d].get, parameter[constant[threatAssessConfidence]]]
call[name[entity]][constant[threatAssessRating]] assign[=] call[name[d].get, parameter[constant[threatAssessRating]]]
call[name[entity]][constant[dateLastModified]] assign[=] call[name[d].get, parameter[constant[lastModified]]]
if compare[name[resource_type_parent] in list[[<ast.Constant object at 0x7da18f723100>, <ast.Constant object at 0x7da18f7235e0>]]] begin[:]
if compare[constant[owner] in name[d]] begin[:]
call[name[entity]][constant[ownerName]] assign[=] call[call[name[d]][constant[owner]]][constant[name]]
call[name[entity]][constant[dateAdded]] assign[=] call[name[d].get, parameter[constant[dateAdded]]]
if compare[name[resource_type_parent] in list[[<ast.Constant object at 0x7da18f7227d0>]]] begin[:]
call[name[entity]][constant[ownerName]] assign[=] call[name[d].get, parameter[constant[org]]]
call[name[entity_array].append, parameter[name[entity]]]
return[name[entity_array]] | keyword[def] identifier[json_to_entity] ( identifier[tc_data] , identifier[value_fields] , identifier[resource_type] , identifier[resource_type_parent] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[tc_data] , identifier[list] ):
identifier[tc_data] =[ identifier[tc_data] ]
identifier[entity_array] =[]
keyword[for] identifier[d] keyword[in] identifier[tc_data] :
identifier[entity] ={ literal[string] : identifier[d] . identifier[get] ( literal[string] ), literal[string] : identifier[d] . identifier[get] ( literal[string] )}
identifier[values] =[]
keyword[if] literal[string] keyword[in] identifier[d] :
identifier[values] . identifier[append] ( identifier[d] . identifier[get] ( literal[string] ))
keyword[else] :
keyword[for] identifier[field] keyword[in] identifier[value_fields] :
keyword[if] identifier[d] . identifier[get] ( identifier[field] ) keyword[is] keyword[not] keyword[None] :
identifier[values] . identifier[append] ( identifier[d] . identifier[get] ( identifier[field] ))
identifier[entity] [ literal[string] ]= literal[string] . identifier[join] ( identifier[values] )
keyword[if] identifier[d] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
identifier[entity] [ literal[string] ]= identifier[d] . identifier[get] ( literal[string] )
keyword[else] :
identifier[entity] [ literal[string] ]= identifier[resource_type]
keyword[if] identifier[resource_type_parent] keyword[in] [ literal[string] ]:
identifier[entity] [ literal[string] ]= identifier[d] . identifier[get] ( literal[string] )
identifier[entity] [ literal[string] ]= identifier[d] . identifier[get] ( literal[string] )
identifier[entity] [ literal[string] ]= identifier[d] . identifier[get] ( literal[string] )
identifier[entity] [ literal[string] ]= identifier[d] . identifier[get] ( literal[string] )
identifier[entity] [ literal[string] ]= identifier[d] . identifier[get] ( literal[string] )
keyword[if] identifier[resource_type_parent] keyword[in] [ literal[string] , literal[string] ]:
keyword[if] literal[string] keyword[in] identifier[d] :
identifier[entity] [ literal[string] ]= identifier[d] [ literal[string] ][ literal[string] ]
keyword[else] :
identifier[entity] [ literal[string] ]= identifier[d] . identifier[get] ( literal[string] )
identifier[entity] [ literal[string] ]= identifier[d] . identifier[get] ( literal[string] )
keyword[if] identifier[resource_type_parent] keyword[in] [ literal[string] ]:
identifier[entity] [ literal[string] ]= identifier[d] . identifier[get] ( literal[string] )
identifier[entity_array] . identifier[append] ( identifier[entity] )
keyword[return] identifier[entity_array] | def json_to_entity(tc_data, value_fields, resource_type, resource_type_parent):
"""Convert ThreatConnect JSON response to a TCEntityArray.
.. Attention:: This method is subject to frequent changes.
Args:
tc_data (dictionary): Array of data returned from TC API call.
value_fields (list): Field names that contain the "value" data.
resource_type (string): The resource type of the tc_data provided.
resource_type_parent (string): The resource parent type of the tc_data provided.
Returns:
(list): A list representing a TCEntityArray.
"""
if not isinstance(tc_data, list):
tc_data = [tc_data] # depends on [control=['if'], data=[]]
entity_array = []
for d in tc_data:
entity = {'id': d.get('id'), 'webLink': d.get('webLink')}
# value
values = []
if 'summary' in d:
values.append(d.get('summary')) # depends on [control=['if'], data=['d']]
else:
for field in value_fields:
if d.get(field) is not None:
values.append(d.get(field)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field']]
entity['value'] = ' : '.join(values)
# type
if d.get('type') is not None:
entity['type'] = d.get('type') # depends on [control=['if'], data=[]]
else:
entity['type'] = resource_type
if resource_type_parent in ['Indicator']:
entity['confidence'] = d.get('confidence')
entity['rating'] = d.get('rating')
entity['threatAssessConfidence'] = d.get('threatAssessConfidence')
entity['threatAssessRating'] = d.get('threatAssessRating')
entity['dateLastModified'] = d.get('lastModified') # depends on [control=['if'], data=[]]
if resource_type_parent in ['Indicator', 'Group']:
if 'owner' in d:
entity['ownerName'] = d['owner']['name'] # depends on [control=['if'], data=['d']]
else:
entity['ownerName'] = d.get('ownerName')
entity['dateAdded'] = d.get('dateAdded') # depends on [control=['if'], data=[]]
if resource_type_parent in ['Victim']:
entity['ownerName'] = d.get('org') # depends on [control=['if'], data=[]]
entity_array.append(entity) # depends on [control=['for'], data=['d']]
return entity_array |
def get_enterprise_customer_for_running_pipeline(request, pipeline): # pylint: disable=invalid-name
"""
Get the EnterpriseCustomer associated with a running pipeline.
"""
sso_provider_id = request.GET.get('tpa_hint')
if pipeline:
sso_provider_id = Registry.get_from_pipeline(pipeline).provider_id
return get_enterprise_customer_for_sso(sso_provider_id) | def function[get_enterprise_customer_for_running_pipeline, parameter[request, pipeline]]:
constant[
Get the EnterpriseCustomer associated with a running pipeline.
]
variable[sso_provider_id] assign[=] call[name[request].GET.get, parameter[constant[tpa_hint]]]
if name[pipeline] begin[:]
variable[sso_provider_id] assign[=] call[name[Registry].get_from_pipeline, parameter[name[pipeline]]].provider_id
return[call[name[get_enterprise_customer_for_sso], parameter[name[sso_provider_id]]]] | keyword[def] identifier[get_enterprise_customer_for_running_pipeline] ( identifier[request] , identifier[pipeline] ):
literal[string]
identifier[sso_provider_id] = identifier[request] . identifier[GET] . identifier[get] ( literal[string] )
keyword[if] identifier[pipeline] :
identifier[sso_provider_id] = identifier[Registry] . identifier[get_from_pipeline] ( identifier[pipeline] ). identifier[provider_id]
keyword[return] identifier[get_enterprise_customer_for_sso] ( identifier[sso_provider_id] ) | def get_enterprise_customer_for_running_pipeline(request, pipeline): # pylint: disable=invalid-name
'\n Get the EnterpriseCustomer associated with a running pipeline.\n '
sso_provider_id = request.GET.get('tpa_hint')
if pipeline:
sso_provider_id = Registry.get_from_pipeline(pipeline).provider_id # depends on [control=['if'], data=[]]
return get_enterprise_customer_for_sso(sso_provider_id) |
def generate_error_json_response(error_dict, error_response_context=None):
"""
Intends to build an error json response. If the error_response_context is
None, then we generate this response using data tables format
:param error_dict: str/dict: contains the error message(s)
:param error_response_context: None/dict: context dictionary to render, if
error occurs
:return: JsonResponse
"""
response = error_dict
if isinstance(error_dict, str):
response = {"error": response}
if error_response_context is None:
error_response_context = {
'draw': 0, 'recordsTotal': 0, 'recordsFiltered': 0, 'data': []
}
response.update(error_response_context)
return JsonResponse(response) | def function[generate_error_json_response, parameter[error_dict, error_response_context]]:
constant[
Intends to build an error json response. If the error_response_context is
None, then we generate this response using data tables format
:param error_dict: str/dict: contains the error message(s)
:param error_response_context: None/dict: context dictionary to render, if
error occurs
:return: JsonResponse
]
variable[response] assign[=] name[error_dict]
if call[name[isinstance], parameter[name[error_dict], name[str]]] begin[:]
variable[response] assign[=] dictionary[[<ast.Constant object at 0x7da1b242b460>], [<ast.Name object at 0x7da1b242b070>]]
if compare[name[error_response_context] is constant[None]] begin[:]
variable[error_response_context] assign[=] dictionary[[<ast.Constant object at 0x7da1b242b8e0>, <ast.Constant object at 0x7da1b2429750>, <ast.Constant object at 0x7da1b242a9e0>, <ast.Constant object at 0x7da1b2429a50>], [<ast.Constant object at 0x7da1b242b6d0>, <ast.Constant object at 0x7da1b242b370>, <ast.Constant object at 0x7da1b242b280>, <ast.List object at 0x7da1b2429a80>]]
call[name[response].update, parameter[name[error_response_context]]]
return[call[name[JsonResponse], parameter[name[response]]]] | keyword[def] identifier[generate_error_json_response] ( identifier[error_dict] , identifier[error_response_context] = keyword[None] ):
literal[string]
identifier[response] = identifier[error_dict]
keyword[if] identifier[isinstance] ( identifier[error_dict] , identifier[str] ):
identifier[response] ={ literal[string] : identifier[response] }
keyword[if] identifier[error_response_context] keyword[is] keyword[None] :
identifier[error_response_context] ={
literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] , literal[string] :[]
}
identifier[response] . identifier[update] ( identifier[error_response_context] )
keyword[return] identifier[JsonResponse] ( identifier[response] ) | def generate_error_json_response(error_dict, error_response_context=None):
"""
Intends to build an error json response. If the error_response_context is
None, then we generate this response using data tables format
:param error_dict: str/dict: contains the error message(s)
:param error_response_context: None/dict: context dictionary to render, if
error occurs
:return: JsonResponse
"""
response = error_dict
if isinstance(error_dict, str):
response = {'error': response} # depends on [control=['if'], data=[]]
if error_response_context is None:
error_response_context = {'draw': 0, 'recordsTotal': 0, 'recordsFiltered': 0, 'data': []} # depends on [control=['if'], data=['error_response_context']]
response.update(error_response_context)
return JsonResponse(response) |
def checkerboard_matrix_filtering(similarity_matrix, kernel_width, kernel_type="default", thresh=0.25):
"""
Moving the checkerboard matrix over the main diagonal of the similarity matrix one sample at a time.
:param kernel_type:
:param thresh:
:param similarity_matrix:
:param kernel_width: the size of one quarter of the checkerboard matrix
:return: peaks and convolution values
"""
checkerboard_matrix = get_checkerboard_matrix(kernel_width, kernel_type)
# The values calculated in this step are starting from the 'kernel_width' position and ending
# at length - kernel_width
d = []
for i in range(0, similarity_matrix.shape[0] - 2 * kernel_width):
base = similarity_matrix[i:i + kernel_width * 2, i:i + kernel_width * 2]
d.append(np.sum(np.multiply(base, checkerboard_matrix)))
# The missing values from 0 to kernel_width are calculated here
top_left_d = []
for i in range(0, kernel_width):
base = similarity_matrix[0:i + kernel_width, 0:i + kernel_width]
top_left_d.append(np.sum(np.multiply(base, checkerboard_matrix[kernel_width - i:, kernel_width - i:])))
# The missing kernel_width values at the bottom right are set to 0
convolution_values = top_left_d + d + [0 for i in range(0, kernel_width)]
# peaks = find_peaks_cwt(convolution_values, np.arange(1, peak_range))
peaks = peakutils.indexes(convolution_values, thres=thresh)
peaks = [0] + list(peaks) + [len(convolution_values)-1]
return peaks, convolution_values | def function[checkerboard_matrix_filtering, parameter[similarity_matrix, kernel_width, kernel_type, thresh]]:
constant[
Moving the checkerboard matrix over the main diagonal of the similarity matrix one sample at a time.
:param kernel_type:
:param thresh:
:param similarity_matrix:
:param kernel_width: the size of one quarter of the checkerboard matrix
:return: peaks and convolution values
]
variable[checkerboard_matrix] assign[=] call[name[get_checkerboard_matrix], parameter[name[kernel_width], name[kernel_type]]]
variable[d] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], binary_operation[call[name[similarity_matrix].shape][constant[0]] - binary_operation[constant[2] * name[kernel_width]]]]]] begin[:]
variable[base] assign[=] call[name[similarity_matrix]][tuple[[<ast.Slice object at 0x7da18dc99750>, <ast.Slice object at 0x7da18dc9b340>]]]
call[name[d].append, parameter[call[name[np].sum, parameter[call[name[np].multiply, parameter[name[base], name[checkerboard_matrix]]]]]]]
variable[top_left_d] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[0], name[kernel_width]]]] begin[:]
variable[base] assign[=] call[name[similarity_matrix]][tuple[[<ast.Slice object at 0x7da1b26ac700>, <ast.Slice object at 0x7da1b26ad990>]]]
call[name[top_left_d].append, parameter[call[name[np].sum, parameter[call[name[np].multiply, parameter[name[base], call[name[checkerboard_matrix]][tuple[[<ast.Slice object at 0x7da204564e20>, <ast.Slice object at 0x7da204566470>]]]]]]]]]
variable[convolution_values] assign[=] binary_operation[binary_operation[name[top_left_d] + name[d]] + <ast.ListComp object at 0x7da2045673a0>]
variable[peaks] assign[=] call[name[peakutils].indexes, parameter[name[convolution_values]]]
variable[peaks] assign[=] binary_operation[binary_operation[list[[<ast.Constant object at 0x7da204567a30>]] + call[name[list], parameter[name[peaks]]]] + list[[<ast.BinOp object at 0x7da204567d30>]]]
return[tuple[[<ast.Name object at 0x7da204564e80>, <ast.Name object at 0x7da204566cb0>]]] | keyword[def] identifier[checkerboard_matrix_filtering] ( identifier[similarity_matrix] , identifier[kernel_width] , identifier[kernel_type] = literal[string] , identifier[thresh] = literal[int] ):
literal[string]
identifier[checkerboard_matrix] = identifier[get_checkerboard_matrix] ( identifier[kernel_width] , identifier[kernel_type] )
identifier[d] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[similarity_matrix] . identifier[shape] [ literal[int] ]- literal[int] * identifier[kernel_width] ):
identifier[base] = identifier[similarity_matrix] [ identifier[i] : identifier[i] + identifier[kernel_width] * literal[int] , identifier[i] : identifier[i] + identifier[kernel_width] * literal[int] ]
identifier[d] . identifier[append] ( identifier[np] . identifier[sum] ( identifier[np] . identifier[multiply] ( identifier[base] , identifier[checkerboard_matrix] )))
identifier[top_left_d] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[kernel_width] ):
identifier[base] = identifier[similarity_matrix] [ literal[int] : identifier[i] + identifier[kernel_width] , literal[int] : identifier[i] + identifier[kernel_width] ]
identifier[top_left_d] . identifier[append] ( identifier[np] . identifier[sum] ( identifier[np] . identifier[multiply] ( identifier[base] , identifier[checkerboard_matrix] [ identifier[kernel_width] - identifier[i] :, identifier[kernel_width] - identifier[i] :])))
identifier[convolution_values] = identifier[top_left_d] + identifier[d] +[ literal[int] keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[kernel_width] )]
identifier[peaks] = identifier[peakutils] . identifier[indexes] ( identifier[convolution_values] , identifier[thres] = identifier[thresh] )
identifier[peaks] =[ literal[int] ]+ identifier[list] ( identifier[peaks] )+[ identifier[len] ( identifier[convolution_values] )- literal[int] ]
keyword[return] identifier[peaks] , identifier[convolution_values] | def checkerboard_matrix_filtering(similarity_matrix, kernel_width, kernel_type='default', thresh=0.25):
"""
Moving the checkerboard matrix over the main diagonal of the similarity matrix one sample at a time.
:param kernel_type:
:param thresh:
:param similarity_matrix:
:param kernel_width: the size of one quarter of the checkerboard matrix
:return: peaks and convolution values
"""
checkerboard_matrix = get_checkerboard_matrix(kernel_width, kernel_type)
# The values calculated in this step are starting from the 'kernel_width' position and ending
# at length - kernel_width
d = []
for i in range(0, similarity_matrix.shape[0] - 2 * kernel_width):
base = similarity_matrix[i:i + kernel_width * 2, i:i + kernel_width * 2]
d.append(np.sum(np.multiply(base, checkerboard_matrix))) # depends on [control=['for'], data=['i']]
# The missing values from 0 to kernel_width are calculated here
top_left_d = []
for i in range(0, kernel_width):
base = similarity_matrix[0:i + kernel_width, 0:i + kernel_width]
top_left_d.append(np.sum(np.multiply(base, checkerboard_matrix[kernel_width - i:, kernel_width - i:]))) # depends on [control=['for'], data=['i']]
# The missing kernel_width values at the bottom right are set to 0
convolution_values = top_left_d + d + [0 for i in range(0, kernel_width)]
# peaks = find_peaks_cwt(convolution_values, np.arange(1, peak_range))
peaks = peakutils.indexes(convolution_values, thres=thresh)
peaks = [0] + list(peaks) + [len(convolution_values) - 1]
return (peaks, convolution_values) |
def _evaluate(self,R,z,phi=0.,t=0.):
    """
    NAME:
       _evaluate
    PURPOSE:
       evaluate the potential at cylindrical coordinates (R,z,phi)
    INPUT:
       R - Cylindrical Galactocentric radius
       z - vertical height
       phi - azimuth (defaulted to 0 for axisymmetric potentials)
       t - time (unused here)
    OUTPUT:
       potential at (R,z, phi)
    HISTORY:
       2016-05-17 - Written - Aladdin
    """
    # Axisymmetric potentials may legitimately be called without an
    # azimuth; substitute a default of zero in that case.
    if phi is None and not self.isNonAxi:
        phi = 0.
    return self._computeArray(self._phiTilde, R, z, phi)
constant[
NAME:
_evaluate
PURPOSE:
evaluate the potential at (R,z, phi)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
potential at (R,z, phi)
HISTORY:
2016-05-17 - Written - Aladdin
]
if <ast.BoolOp object at 0x7da20c794520> begin[:]
variable[phi] assign[=] constant[0.0]
return[call[name[self]._computeArray, parameter[name[self]._phiTilde, name[R], name[z], name[phi]]]] | keyword[def] identifier[_evaluate] ( identifier[self] , identifier[R] , identifier[z] , identifier[phi] = literal[int] , identifier[t] = literal[int] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[isNonAxi] keyword[and] identifier[phi] keyword[is] keyword[None] :
identifier[phi] = literal[int]
keyword[return] identifier[self] . identifier[_computeArray] ( identifier[self] . identifier[_phiTilde] , identifier[R] , identifier[z] , identifier[phi] ) | def _evaluate(self, R, z, phi=0.0, t=0.0):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at (R,z, phi)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
potential at (R,z, phi)
HISTORY:
2016-05-17 - Written - Aladdin
"""
if not self.isNonAxi and phi is None:
phi = 0.0 # depends on [control=['if'], data=[]]
return self._computeArray(self._phiTilde, R, z, phi) |
def secure(pdf, user_pw, owner_pw, restrict_permission=True, pdftk=get_pdftk_path(), output=None):
    """
    Encrypt a PDF file and optionally restrict permissions to print only.

    Utilizes the pdftk command line tool.

    :param pdf: Path to PDF file
    :param user_pw: Password required to open and view the PDF
    :param owner_pw: Password required to change permissions
    :param restrict_permission: If True, only printing is permitted
    :param pdftk: Path to pdftk binary
    :param output: Output path (defaults to the input path with a
        'secured' suffix)
    :return: Output path, the original path when the PDF is already
        encrypted, or None when the pdftk binary cannot be located
    """
    import subprocess  # local import; only needed when pdftk is available

    if not pdftk:
        print('Unable to locate pdftk binary')
        return None

    # Skip files that are already encrypted; re-encrypting would fail.
    with open(pdf, 'rb') as f:
        reader = PdfFileReader(f)
        if reader.isEncrypted:
            print('PDF is already encrypted')
            return pdf

    # Create output filename if not already set
    if not output:
        output = add_suffix(pdf, 'secured')

    # Build the pdftk invocation as an argument list and run it without a
    # shell: this handles spaces and shell metacharacters in paths safely,
    # unlike the previous string-concatenated os.system() command with
    # hand-rolled backslash escaping.
    command = [pdftk, pdf, 'output', output,
               'owner_pw', owner_pw, 'user_pw', user_pw]
    if restrict_permission:
        # pdftk grants no permissions by default; explicitly allow printing.
        command += ['allow', 'printing']
    # Best-effort execution (exit status intentionally not checked, matching
    # the original os.system() behaviour).
    subprocess.run(command)
    print('Secured PDF saved to...', output)
    return output
constant[
Encrypt a PDF file and restrict permissions to print only.
Utilizes pdftk command line tool.
:param pdf: Path to PDF file
:param user_pw: Password to open and view
:param owner_pw: Password to transform permissions
:param restrict_permission: Restrict permissions to print only
:param pdftk: Path to pdftk binary
:param output: Output path
:return: Output path
]
if name[pdftk] begin[:]
with call[name[open], parameter[name[pdf], constant[rb]]] begin[:]
variable[reader] assign[=] call[name[PdfFileReader], parameter[name[f]]]
if name[reader].isEncrypted begin[:]
call[name[print], parameter[constant[PDF is already encrypted]]]
return[name[pdf]]
if <ast.UnaryOp object at 0x7da204347f10> begin[:]
variable[output] assign[=] call[name[add_suffix], parameter[name[pdf], constant[secured]]]
variable[pdf_en] assign[=] call[name[pdf].replace, parameter[constant[ ], constant[\ ]]]
variable[output_en] assign[=] call[name[output].replace, parameter[constant[ ], constant[\ ]]]
variable[command] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[name[pdftk] + constant[ ]] + name[pdf_en]] + constant[ output ]] + name[output_en]] + constant[ owner_pw ]] + name[owner_pw]] + constant[ user_pw ]] + name[user_pw]]
if name[restrict_permission] begin[:]
<ast.AugAssign object at 0x7da204344910>
call[name[os].system, parameter[name[command]]]
call[name[print], parameter[constant[Secured PDF saved to...], name[output]]]
return[name[output]] | keyword[def] identifier[secure] ( identifier[pdf] , identifier[user_pw] , identifier[owner_pw] , identifier[restrict_permission] = keyword[True] , identifier[pdftk] = identifier[get_pdftk_path] (), identifier[output] = keyword[None] ):
literal[string]
keyword[if] identifier[pdftk] :
keyword[with] identifier[open] ( identifier[pdf] , literal[string] ) keyword[as] identifier[f] :
identifier[reader] = identifier[PdfFileReader] ( identifier[f] )
keyword[if] identifier[reader] . identifier[isEncrypted] :
identifier[print] ( literal[string] )
keyword[return] identifier[pdf]
keyword[if] keyword[not] identifier[output] :
identifier[output] = identifier[add_suffix] ( identifier[pdf] , literal[string] )
identifier[pdf_en] = identifier[pdf] . identifier[replace] ( literal[string] , literal[string] )
identifier[output_en] = identifier[output] . identifier[replace] ( literal[string] , literal[string] )
identifier[command] = identifier[pdftk] + literal[string] + identifier[pdf_en] + literal[string] + identifier[output_en] + literal[string] + identifier[owner_pw] + literal[string] + identifier[user_pw]
keyword[if] identifier[restrict_permission] :
identifier[command] += literal[string]
identifier[os] . identifier[system] ( identifier[command] )
identifier[print] ( literal[string] , identifier[output] )
keyword[return] identifier[output]
keyword[else] :
identifier[print] ( literal[string] ) | def secure(pdf, user_pw, owner_pw, restrict_permission=True, pdftk=get_pdftk_path(), output=None):
"""
Encrypt a PDF file and restrict permissions to print only.
Utilizes pdftk command line tool.
:param pdf: Path to PDF file
:param user_pw: Password to open and view
:param owner_pw: Password to transform permissions
:param restrict_permission: Restrict permissions to print only
:param pdftk: Path to pdftk binary
:param output: Output path
:return: Output path
"""
if pdftk:
# Check that PDF file is encrypted
with open(pdf, 'rb') as f:
reader = PdfFileReader(f)
if reader.isEncrypted:
print('PDF is already encrypted')
return pdf # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['f']]
# Create output filename if not already set
if not output:
output = add_suffix(pdf, 'secured') # depends on [control=['if'], data=[]]
# Replace spaces within paths with backslashes followed by a space
pdf_en = pdf.replace(' ', '\\ ')
output_en = output.replace(' ', '\\ ')
# Concatenate bash command
command = pdftk + ' ' + pdf_en + ' output ' + output_en + ' owner_pw ' + owner_pw + ' user_pw ' + user_pw
# Append string to command if printing is allowed
if restrict_permission:
command += ' allow printing' # depends on [control=['if'], data=[]]
# Execute command
os.system(command)
print('Secured PDF saved to...', output)
return output # depends on [control=['if'], data=[]]
else:
print('Unable to locate pdftk binary') |
def get_zt(self, output='eigs', doping_levels=True, relaxation_time=1e-14,
           kl=1.0):
    """
    Gives the ZT coefficient (S^2*cond*T/thermal cond) in either a full
    3x3 tensor form,
    as 3 eigenvalues, or as the average value (trace/3.0) If
    doping_levels=True, the results are given at
    different p and n doping levels (given by self.doping), otherwise it
    is given as a series of
    electron chemical potential values. We assume a constant relaxation
    time and a constant
    lattice thermal conductivity

    Args:
        output (string): the type of output. 'tensor' give the full 3x3
        tensor, 'eigs' its 3 eigenvalues and
            'average' the average of the three eigenvalues
        doping_levels (boolean): True for the results to be given at
        different doping levels, False for results
            at different electron chemical potentials
        relaxation_time (float): constant relaxation time in secs
        k_l (float): lattice thermal cond in W/(m*K)

    Returns:
        If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The
        'p' links to ZT
        at p-type doping and 'n' to the ZT at n-type doping. Otherwise,
        returns a {temp:[]} dictionary. The result contains either the
        sorted three eigenvalues of the symmetric
        ZT tensor (format='eigs') or a full tensor (3x3 array) (
        output='tensor') or as an average
        (output='average').
        The result includes a given constant relaxation time and lattice
        thermal conductivity
    """
    result = None
    result_doping = None
    if doping_levels:
        # One empty list per (doping type, temperature) pair, mirroring
        # the layout of self._seebeck_doping.
        result_doping = {doping: {t: [] for t in
                                  self._seebeck_doping[doping]} for
                         doping in self._seebeck_doping}
        for doping in result_doping:
            for t in result_doping[doping]:
                for i in range(len(self.doping[doping])):
                    # Power-factor tensor: cond . (S . S) at this doping
                    # level and temperature.
                    pf_tensor = np.dot(self._cond_doping[doping][t][i],
                                       np.dot(
                                           self._seebeck_doping[doping][t][
                                               i],
                                           self._seebeck_doping[doping][t][
                                               i]))
                    # Electronic thermal conductivity (kappa minus the
                    # Peltier-related S^2.cond.T term), scaled by the
                    # constant relaxation time.
                    thermal_conduct = (self._kappa_doping[doping][t][i]
                                       - pf_tensor * t) * relaxation_time
                    # ZT = PF*tau*T . (kappa_el + kappa_lattice)^-1, with
                    # the lattice contribution kl added as an isotropic
                    # (kl * identity) tensor.
                    result_doping[doping][t].append(
                        np.dot(pf_tensor * relaxation_time * t,
                               np.linalg.inv(
                                   thermal_conduct + kl * np.eye(3, 3))))
    else:
        # Same computation, but indexed by chemical potential steps
        # instead of doping levels.
        result = {t: [] for t in self._seebeck}
        for t in result:
            for i in range(len(self.mu_steps)):
                pf_tensor = np.dot(self._cond[t][i],
                                   np.dot(self._seebeck[t][i],
                                          self._seebeck[t][i]))
                thermal_conduct = (self._kappa[t][i]
                                   - pf_tensor * t) * relaxation_time
                result[t].append(np.dot(pf_tensor * relaxation_time * t,
                                        np.linalg.inv(
                                            thermal_conduct + kl *
                                            np.eye(3, 3))))
    # Shared formatter turns the raw tensors into 'tensor'/'eigs'/'average'
    # output and picks the doping vs. chemical-potential layout.
    return BoltztrapAnalyzer._format_to_output(result, result_doping,
                                               output, doping_levels)
constant[
Gives the ZT coefficient (S^2*cond*T/thermal cond) in either a full
3x3 tensor form,
as 3 eigenvalues, or as the average value (trace/3.0) If
doping_levels=True, the results are given at
different p and n doping levels (given by self.doping), otherwise it
is given as a series of
electron chemical potential values. We assume a constant relaxation
time and a constant
lattice thermal conductivity
Args:
output (string): the type of output. 'tensor' give the full 3x3
tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
relaxation_time (float): constant relaxation time in secs
k_l (float): lattice thermal cond in W/(m*K)
Returns:
If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The
'p' links to ZT
at p-type doping and 'n' to the ZT at n-type doping. Otherwise,
returns a {temp:[]} dictionary. The result contains either the
sorted three eigenvalues of the symmetric
ZT tensor (format='eigs') or a full tensor (3x3 array) (
output='tensor') or as an average
(output='average').
The result includes a given constant relaxation time and lattice
thermal conductivity
]
variable[result] assign[=] constant[None]
variable[result_doping] assign[=] constant[None]
if name[doping_levels] begin[:]
variable[result_doping] assign[=] <ast.DictComp object at 0x7da20e9552a0>
for taget[name[doping]] in starred[name[result_doping]] begin[:]
for taget[name[t]] in starred[call[name[result_doping]][name[doping]]] begin[:]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[call[name[self].doping][name[doping]]]]]]] begin[:]
variable[pf_tensor] assign[=] call[name[np].dot, parameter[call[call[call[name[self]._cond_doping][name[doping]]][name[t]]][name[i]], call[name[np].dot, parameter[call[call[call[name[self]._seebeck_doping][name[doping]]][name[t]]][name[i]], call[call[call[name[self]._seebeck_doping][name[doping]]][name[t]]][name[i]]]]]]
variable[thermal_conduct] assign[=] binary_operation[binary_operation[call[call[call[name[self]._kappa_doping][name[doping]]][name[t]]][name[i]] - binary_operation[name[pf_tensor] * name[t]]] * name[relaxation_time]]
call[call[call[name[result_doping]][name[doping]]][name[t]].append, parameter[call[name[np].dot, parameter[binary_operation[binary_operation[name[pf_tensor] * name[relaxation_time]] * name[t]], call[name[np].linalg.inv, parameter[binary_operation[name[thermal_conduct] + binary_operation[name[kl] * call[name[np].eye, parameter[constant[3], constant[3]]]]]]]]]]]
return[call[name[BoltztrapAnalyzer]._format_to_output, parameter[name[result], name[result_doping], name[output], name[doping_levels]]]] | keyword[def] identifier[get_zt] ( identifier[self] , identifier[output] = literal[string] , identifier[doping_levels] = keyword[True] , identifier[relaxation_time] = literal[int] ,
identifier[kl] = literal[int] ):
literal[string]
identifier[result] = keyword[None]
identifier[result_doping] = keyword[None]
keyword[if] identifier[doping_levels] :
identifier[result_doping] ={ identifier[doping] :{ identifier[t] :[] keyword[for] identifier[t] keyword[in]
identifier[self] . identifier[_seebeck_doping] [ identifier[doping] ]} keyword[for]
identifier[doping] keyword[in] identifier[self] . identifier[_seebeck_doping] }
keyword[for] identifier[doping] keyword[in] identifier[result_doping] :
keyword[for] identifier[t] keyword[in] identifier[result_doping] [ identifier[doping] ]:
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[doping] [ identifier[doping] ])):
identifier[pf_tensor] = identifier[np] . identifier[dot] ( identifier[self] . identifier[_cond_doping] [ identifier[doping] ][ identifier[t] ][ identifier[i] ],
identifier[np] . identifier[dot] (
identifier[self] . identifier[_seebeck_doping] [ identifier[doping] ][ identifier[t] ][
identifier[i] ],
identifier[self] . identifier[_seebeck_doping] [ identifier[doping] ][ identifier[t] ][
identifier[i] ]))
identifier[thermal_conduct] =( identifier[self] . identifier[_kappa_doping] [ identifier[doping] ][ identifier[t] ][ identifier[i] ]
- identifier[pf_tensor] * identifier[t] )* identifier[relaxation_time]
identifier[result_doping] [ identifier[doping] ][ identifier[t] ]. identifier[append] (
identifier[np] . identifier[dot] ( identifier[pf_tensor] * identifier[relaxation_time] * identifier[t] ,
identifier[np] . identifier[linalg] . identifier[inv] (
identifier[thermal_conduct] + identifier[kl] * identifier[np] . identifier[eye] ( literal[int] , literal[int] ))))
keyword[else] :
identifier[result] ={ identifier[t] :[] keyword[for] identifier[t] keyword[in] identifier[self] . identifier[_seebeck] }
keyword[for] identifier[t] keyword[in] identifier[result] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[self] . identifier[mu_steps] )):
identifier[pf_tensor] = identifier[np] . identifier[dot] ( identifier[self] . identifier[_cond] [ identifier[t] ][ identifier[i] ],
identifier[np] . identifier[dot] ( identifier[self] . identifier[_seebeck] [ identifier[t] ][ identifier[i] ],
identifier[self] . identifier[_seebeck] [ identifier[t] ][ identifier[i] ]))
identifier[thermal_conduct] =( identifier[self] . identifier[_kappa] [ identifier[t] ][ identifier[i] ]
- identifier[pf_tensor] * identifier[t] )* identifier[relaxation_time]
identifier[result] [ identifier[t] ]. identifier[append] ( identifier[np] . identifier[dot] ( identifier[pf_tensor] * identifier[relaxation_time] * identifier[t] ,
identifier[np] . identifier[linalg] . identifier[inv] (
identifier[thermal_conduct] + identifier[kl] *
identifier[np] . identifier[eye] ( literal[int] , literal[int] ))))
keyword[return] identifier[BoltztrapAnalyzer] . identifier[_format_to_output] ( identifier[result] , identifier[result_doping] ,
identifier[output] , identifier[doping_levels] ) | def get_zt(self, output='eigs', doping_levels=True, relaxation_time=1e-14, kl=1.0):
"""
Gives the ZT coefficient (S^2*cond*T/thermal cond) in either a full
3x3 tensor form,
as 3 eigenvalues, or as the average value (trace/3.0) If
doping_levels=True, the results are given at
different p and n doping levels (given by self.doping), otherwise it
is given as a series of
electron chemical potential values. We assume a constant relaxation
time and a constant
lattice thermal conductivity
Args:
output (string): the type of output. 'tensor' give the full 3x3
tensor, 'eigs' its 3 eigenvalues and
'average' the average of the three eigenvalues
doping_levels (boolean): True for the results to be given at
different doping levels, False for results
at different electron chemical potentials
relaxation_time (float): constant relaxation time in secs
k_l (float): lattice thermal cond in W/(m*K)
Returns:
If doping_levels=True, a dictionary {temp:{'p':[],'n':[]}}. The
'p' links to ZT
at p-type doping and 'n' to the ZT at n-type doping. Otherwise,
returns a {temp:[]} dictionary. The result contains either the
sorted three eigenvalues of the symmetric
ZT tensor (format='eigs') or a full tensor (3x3 array) (
output='tensor') or as an average
(output='average').
The result includes a given constant relaxation time and lattice
thermal conductivity
"""
result = None
result_doping = None
if doping_levels:
result_doping = {doping: {t: [] for t in self._seebeck_doping[doping]} for doping in self._seebeck_doping}
for doping in result_doping:
for t in result_doping[doping]:
for i in range(len(self.doping[doping])):
pf_tensor = np.dot(self._cond_doping[doping][t][i], np.dot(self._seebeck_doping[doping][t][i], self._seebeck_doping[doping][t][i]))
thermal_conduct = (self._kappa_doping[doping][t][i] - pf_tensor * t) * relaxation_time
result_doping[doping][t].append(np.dot(pf_tensor * relaxation_time * t, np.linalg.inv(thermal_conduct + kl * np.eye(3, 3)))) # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['t']] # depends on [control=['for'], data=['doping']] # depends on [control=['if'], data=[]]
else:
result = {t: [] for t in self._seebeck}
for t in result:
for i in range(len(self.mu_steps)):
pf_tensor = np.dot(self._cond[t][i], np.dot(self._seebeck[t][i], self._seebeck[t][i]))
thermal_conduct = (self._kappa[t][i] - pf_tensor * t) * relaxation_time
result[t].append(np.dot(pf_tensor * relaxation_time * t, np.linalg.inv(thermal_conduct + kl * np.eye(3, 3)))) # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['t']]
return BoltztrapAnalyzer._format_to_output(result, result_doping, output, doping_levels) |
def _find_export(self, func):
# type: (Callable[[Tuple[Any, EndpointDescription]], bool]) -> Optional[Tuple[Any, EndpointDescription]]
"""
Look for an export using the given lookup method
The lookup method must accept a single parameter, which is a tuple
containing a service instance and endpoint description.
:param func: A function to look for the excepted export
:return: The found tuple or None
"""
with self._exported_instances_lock:
for val in self._exported_services.values():
if func(val):
return val
return None | def function[_find_export, parameter[self, func]]:
constant[
Look for an export using the given lookup method
The lookup method must accept a single parameter, which is a tuple
containing a service instance and endpoint description.
:param func: A function to look for the excepted export
:return: The found tuple or None
]
with name[self]._exported_instances_lock begin[:]
for taget[name[val]] in starred[call[name[self]._exported_services.values, parameter[]]] begin[:]
if call[name[func], parameter[name[val]]] begin[:]
return[name[val]]
return[constant[None]] | keyword[def] identifier[_find_export] ( identifier[self] , identifier[func] ):
literal[string]
keyword[with] identifier[self] . identifier[_exported_instances_lock] :
keyword[for] identifier[val] keyword[in] identifier[self] . identifier[_exported_services] . identifier[values] ():
keyword[if] identifier[func] ( identifier[val] ):
keyword[return] identifier[val]
keyword[return] keyword[None] | def _find_export(self, func):
# type: (Callable[[Tuple[Any, EndpointDescription]], bool]) -> Optional[Tuple[Any, EndpointDescription]]
'\n Look for an export using the given lookup method\n\n The lookup method must accept a single parameter, which is a tuple\n containing a service instance and endpoint description.\n\n :param func: A function to look for the excepted export\n :return: The found tuple or None\n '
with self._exported_instances_lock:
for val in self._exported_services.values():
if func(val):
return val # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['val']]
return None # depends on [control=['with'], data=[]] |
def _parse_aot(self, first, name_first):  # type: (Table, str) -> AoT
    """
    Collects the table ``first`` together with all directly-following
    sibling tables of the same name into a single AoT.
    """
    tables = [first]

    self._aot_stack.append(name_first)
    while not self.end():
        is_aot_next, name_next = self._peek_table()
        # Stop as soon as the upcoming element is not another member
        # of this same array-of-tables.
        if not (is_aot_next and name_next == name_first):
            break
        _, sibling = self._parse_table(name_first)
        tables.append(sibling)
    self._aot_stack.pop()

    return AoT(tables, parsed=True)
constant[
Parses all siblings of the provided table first and bundles them into
an AoT.
]
variable[payload] assign[=] list[[<ast.Name object at 0x7da1b207fe20>]]
call[name[self]._aot_stack.append, parameter[name[name_first]]]
while <ast.UnaryOp object at 0x7da1b207feb0> begin[:]
<ast.Tuple object at 0x7da1b2040c70> assign[=] call[name[self]._peek_table, parameter[]]
if <ast.BoolOp object at 0x7da1b2040280> begin[:]
<ast.Tuple object at 0x7da1b20411b0> assign[=] call[name[self]._parse_table, parameter[name[name_first]]]
call[name[payload].append, parameter[name[table]]]
call[name[self]._aot_stack.pop, parameter[]]
return[call[name[AoT], parameter[name[payload]]]] | keyword[def] identifier[_parse_aot] ( identifier[self] , identifier[first] , identifier[name_first] ):
literal[string]
identifier[payload] =[ identifier[first] ]
identifier[self] . identifier[_aot_stack] . identifier[append] ( identifier[name_first] )
keyword[while] keyword[not] identifier[self] . identifier[end] ():
identifier[is_aot_next] , identifier[name_next] = identifier[self] . identifier[_peek_table] ()
keyword[if] identifier[is_aot_next] keyword[and] identifier[name_next] == identifier[name_first] :
identifier[_] , identifier[table] = identifier[self] . identifier[_parse_table] ( identifier[name_first] )
identifier[payload] . identifier[append] ( identifier[table] )
keyword[else] :
keyword[break]
identifier[self] . identifier[_aot_stack] . identifier[pop] ()
keyword[return] identifier[AoT] ( identifier[payload] , identifier[parsed] = keyword[True] ) | def _parse_aot(self, first, name_first): # type: (Table, str) -> AoT
'\n Parses all siblings of the provided table first and bundles them into\n an AoT.\n '
payload = [first]
self._aot_stack.append(name_first)
while not self.end():
(is_aot_next, name_next) = self._peek_table()
if is_aot_next and name_next == name_first:
(_, table) = self._parse_table(name_first)
payload.append(table) # depends on [control=['if'], data=[]]
else:
break # depends on [control=['while'], data=[]]
self._aot_stack.pop()
return AoT(payload, parsed=True) |
def bulkCmd(snmpDispatcher, authData, transportTarget,
            nonRepeaters, maxRepetitions, *varBinds, **options):
    """Creates a generator to perform one or more SNMP GETBULK queries.

    On each iteration, new SNMP GETBULK request is send
    (:RFC:`1905#section-4.2.3`). The iterator blocks waiting for response
    to arrive or error to occur.

    Parameters
    ----------
    snmpDispatcher : :py:class:`~pysnmp.hlapi.snmpDispatcher`
        Class instance representing SNMP engine.

    authData : :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData`
        Class instance representing SNMP credentials.

    transportTarget : :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget`
        Class instance representing transport type along with SNMP peer address.

    nonRepeaters : int
        One MIB variable is requested in response for the first
        `nonRepeaters` MIB variables in request.

    maxRepetitions : int
        `maxRepetitions` MIB variables are requested in response for each
        of the remaining MIB variables in the request (e.g. excluding
        `nonRepeaters`). Remote SNMP engine may choose lesser value than
        requested.

    \*varBinds : :py:class:`~pysnmp.smi.rfc1902.ObjectType`
        One or more class instances representing MIB variables to place
        into SNMP request.

    Other Parameters
    ----------------
    \*\*options :
        Request options:

            * `lookupMib` - load MIB and resolve response MIB variables at
              the cost of slightly reduced performance. Default is `True`.
              Default is `True`.
            * `lexicographicMode` - walk SNMP agent's MIB till the end (if `True`),
              otherwise (if `False`) stop iteration when all response MIB
              variables leave the scope of initial MIB variables in
              `varBinds`. Default is `True`.
            * `ignoreNonIncreasingOid` - continue iteration even if response
              MIB variables (OIDs) are not greater then request MIB variables.
              Be aware that setting it to `True` may cause infinite loop between
              SNMP management and agent applications. Default is `False`.
            * `maxRows` - stop iteration once this generator instance processed
              `maxRows` of SNMP conceptual table. Default is `0` (no limit).
            * `maxCalls` - stop iteration once this generator instance processed
              `maxCalls` responses. Default is 0 (no limit).

    Yields
    ------
    errorIndication : str
        True value indicates SNMP engine error.

    errorStatus : str
        True value indicates SNMP PDU error.

    errorIndex : int
        Non-zero value refers to \*varBinds[errorIndex-1]

    varBinds: tuple
        A sequence of :py:class:`~pysnmp.smi.rfc1902.ObjectType` class
        instances representing MIB variables returned in SNMP response.

    Raises
    ------
    PySnmpError
        Or its derivative indicating that an error occurred while
        performing SNMP operation.

    Notes
    -----
    The `bulkCmd` generator will be exhausted on any of the following
    conditions:

    * SNMP engine error occurs thus `errorIndication` is `True`
    * SNMP PDU `errorStatus` is reported as `True`
    * SNMP :py:class:`~pysnmp.proto.rfc1905.EndOfMibView` values
      (also known as *SNMP exception values*) are reported for all
      MIB variables in `varBinds`
    * *lexicographicMode* option is `True` and SNMP agent reports
      end-of-mib or *lexicographicMode* is `False` and all
      response MIB variables leave the scope of `varBinds`

    At any moment a new sequence of `varBinds` could be send back into
    running generator (supported since Python 2.6).

    Setting `maxRepetitions` value to 15..50 might significantly improve
    system performance, as many MIB variables get packed into a single
    response message at once.

    Examples
    --------
    >>> from pysnmp.hlapi.v1arch import *
    >>>
    >>> g = bulkCmd(snmpDispatcher(),
    >>>             CommunityData('public'),
    >>>             UdpTransportTarget(('demo.snmplabs.com', 161)),
    >>>             0, 25,
    >>>             ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr')))
    >>> next(g)
    (None, 0, 0, [[ObjectType(ObjectIdentity(ObjectName('1.3.6.1.2.1.1.1.0')), DisplayString('SunOS zeus.snmplabs.com 4.1.3_U1 1 sun4m'))]])
    >>> g.send([ObjectType(ObjectIdentity('IF-MIB', 'ifInOctets'))])
    (None, 0, 0, [[(ObjectName('1.3.6.1.2.1.2.2.1.10.1'), Counter32(284817787))]])
    """

    # Callback invoked by the dispatcher; stores the outcome into the
    # enclosing `response` list (in-place, so the closure sees it).
    def cbFun(*args, **kwargs):
        response[:] = args + (kwargs.get('nextVarBinds', ()),)

    options['cbFun'] = cbFun

    # Iteration-control options are consumed here and not passed on.
    lexicographicMode = options.pop('lexicographicMode', True)
    maxRows = options.pop('maxRows', 0)
    maxCalls = options.pop('maxCalls', 0)

    initialVarBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, varBinds)

    # One flag per requested column; True once that column hit end-of-mib
    # (or left scope in non-lexicographic mode).
    nullVarBinds = [False] * len(initialVarBinds)

    totalRows = totalCalls = 0

    errorIndication, errorStatus, errorIndex, varBindTable = None, 0, 0, ()

    response = []

    stopFlag = False

    while not stopFlag:
        if not varBinds:
            yield (errorIndication, errorStatus, errorIndex, varBinds)
            return

        # Shrink the repetition count so we do not fetch past maxRows.
        if maxRows and totalRows < maxRows:
            maxRepetitions = min(maxRepetitions, maxRows - totalRows)

        # Request values for the current OIDs; Null('') strips any values
        # carried over from the previous response.
        cmdgen.bulkCmd(snmpDispatcher, authData, transportTarget,
                       nonRepeaters, maxRepetitions,
                       *[(x[0], Null('')) for x in varBinds], **options)

        snmpDispatcher.transportDispatcher.runDispatcher()

        # Filled in by cbFun during runDispatcher().
        errorIndication, errorStatus, errorIndex, varBindTable, varBinds = response

        if errorIndication:
            yield (errorIndication, errorStatus, errorIndex, ())
            return

        elif errorStatus:
            if errorStatus == 2:
                # Hide SNMPv1 noSuchName error which leaks in here
                # from SNMPv1 Agent through internal pysnmp proxy.
                errorStatus = errorStatus.clone(0)
                errorIndex = errorIndex.clone(0)

            yield (errorIndication, errorStatus, errorIndex, varBindTable and varBindTable[0] or [])
            return

        else:
            for rowIdx, varBindRow in enumerate(varBindTable):
                stopFlag = True

                # A short row signals end of data; truncate and stop.
                # NOTE(review): slicing to rowIdx - 1 also drops the last
                # complete row before the short one -- confirm intended.
                if len(varBindRow) != len(initialVarBinds):
                    varBindTable = rowIdx and varBindTable[:rowIdx - 1] or []
                    break

                for colIdx, varBind in enumerate(varBindRow):
                    name, val = varBind
                    # Columns already finished keep yielding endOfMibView.
                    if nullVarBinds[colIdx]:
                        varBindRow[colIdx] = name, endOfMibView
                        continue

                    stopFlag = False

                    if isinstance(val, Null):
                        nullVarBinds[colIdx] = True

                    # Out-of-scope OID in non-lexicographic mode ends the
                    # column as well.
                    elif not lexicographicMode and not initialVarBinds[colIdx][0].isPrefixOf(name):
                        varBindRow[colIdx] = name, endOfMibView
                        nullVarBinds[colIdx] = True

                # All columns exhausted in this row -> iteration is over.
                if stopFlag:
                    varBindTable = rowIdx and varBindTable[:rowIdx - 1] or []
                    break

            totalRows += len(varBindTable)
            totalCalls += 1

            # Enforce user-requested row/call limits.
            if maxRows and totalRows >= maxRows:
                if totalRows > maxRows:
                    varBindTable = varBindTable[:-(totalRows - maxRows)]
                stopFlag = True

            if maxCalls and totalCalls >= maxCalls:
                stopFlag = True

            # Yield row by row; the caller may send() a fresh set of
            # varBinds to redirect the walk mid-flight.
            for varBindRow in varBindTable:
                nextVarBinds = (yield errorIndication, errorStatus, errorIndex, varBindRow)

                if nextVarBinds:
                    initialVarBinds = varBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, nextVarBinds)
constant[Creates a generator to perform one or more SNMP GETBULK queries.
On each iteration, new SNMP GETBULK request is send
(:RFC:`1905#section-4.2.3`). The iterator blocks waiting for response
to arrive or error to occur.
Parameters
----------
snmpDispatcher : :py:class:`~pysnmp.hlapi.snmpDispatcher`
Class instance representing SNMP engine.
authData : :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData`
Class instance representing SNMP credentials.
transportTarget : :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget`
Class instance representing transport type along with SNMP peer address.
nonRepeaters : int
One MIB variable is requested in response for the first
`nonRepeaters` MIB variables in request.
maxRepetitions : int
`maxRepetitions` MIB variables are requested in response for each
of the remaining MIB variables in the request (e.g. excluding
`nonRepeaters`). Remote SNMP engine may choose lesser value than
requested.
\*varBinds : :py:class:`~pysnmp.smi.rfc1902.ObjectType`
One or more class instances representing MIB variables to place
into SNMP request.
Other Parameters
----------------
\*\*options :
Request options:
* `lookupMib` - load MIB and resolve response MIB variables at
the cost of slightly reduced performance. Default is `True`.
Default is `True`.
* `lexicographicMode` - walk SNMP agent's MIB till the end (if `True`),
otherwise (if `False`) stop iteration when all response MIB
variables leave the scope of initial MIB variables in
`varBinds`. Default is `True`.
* `ignoreNonIncreasingOid` - continue iteration even if response
MIB variables (OIDs) are not greater then request MIB variables.
Be aware that setting it to `True` may cause infinite loop between
SNMP management and agent applications. Default is `False`.
* `maxRows` - stop iteration once this generator instance processed
`maxRows` of SNMP conceptual table. Default is `0` (no limit).
* `maxCalls` - stop iteration once this generator instance processed
`maxCalls` responses. Default is 0 (no limit).
Yields
------
errorIndication : str
True value indicates SNMP engine error.
errorStatus : str
True value indicates SNMP PDU error.
errorIndex : int
Non-zero value refers to \*varBinds[errorIndex-1]
varBinds: tuple
A sequence of :py:class:`~pysnmp.smi.rfc1902.ObjectType` class
instances representing MIB variables returned in SNMP response.
Raises
------
PySnmpError
Or its derivative indicating that an error occurred while
performing SNMP operation.
Notes
-----
The `bulkCmd` generator will be exhausted on any of the following
conditions:
* SNMP engine error occurs thus `errorIndication` is `True`
* SNMP PDU `errorStatus` is reported as `True`
* SNMP :py:class:`~pysnmp.proto.rfc1905.EndOfMibView` values
(also known as *SNMP exception values*) are reported for all
MIB variables in `varBinds`
* *lexicographicMode* option is `True` and SNMP agent reports
end-of-mib or *lexicographicMode* is `False` and all
response MIB variables leave the scope of `varBinds`
At any moment a new sequence of `varBinds` could be send back into
running generator (supported since Python 2.6).
Setting `maxRepetitions` value to 15..50 might significantly improve
system performance, as many MIB variables get packed into a single
response message at once.
Examples
--------
>>> from pysnmp.hlapi.v1arch import *
>>>
>>> g = bulkCmd(snmpDispatcher(),
>>> CommunityData('public'),
>>> UdpTransportTarget(('demo.snmplabs.com', 161)),
>>> 0, 25,
>>> ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr')))
>>> next(g)
(None, 0, 0, [[ObjectType(ObjectIdentity(ObjectName('1.3.6.1.2.1.1.1.0')), DisplayString('SunOS zeus.snmplabs.com 4.1.3_U1 1 sun4m'))]])
>>> g.send([ObjectType(ObjectIdentity('IF-MIB', 'ifInOctets'))])
(None, 0, 0, [[(ObjectName('1.3.6.1.2.1.2.2.1.10.1'), Counter32(284817787))]])
]
def function[cbFun, parameter[]]:
call[name[response]][<ast.Slice object at 0x7da1b1519090>] assign[=] binary_operation[name[args] + tuple[[<ast.Call object at 0x7da1b1518bb0>]]]
call[name[options]][constant[cbFun]] assign[=] name[cbFun]
variable[lexicographicMode] assign[=] call[name[options].pop, parameter[constant[lexicographicMode], constant[True]]]
variable[maxRows] assign[=] call[name[options].pop, parameter[constant[maxRows], constant[0]]]
variable[maxCalls] assign[=] call[name[options].pop, parameter[constant[maxCalls], constant[0]]]
variable[initialVarBinds] assign[=] call[name[VB_PROCESSOR].makeVarBinds, parameter[name[snmpDispatcher].cache, name[varBinds]]]
variable[nullVarBinds] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b151a950>]] * call[name[len], parameter[name[initialVarBinds]]]]
variable[totalRows] assign[=] constant[0]
<ast.Tuple object at 0x7da1b1518e80> assign[=] tuple[[<ast.Constant object at 0x7da1b1519450>, <ast.Constant object at 0x7da1b1519810>, <ast.Constant object at 0x7da1b151a710>, <ast.Tuple object at 0x7da1b151a8f0>]]
variable[response] assign[=] list[[]]
variable[stopFlag] assign[=] constant[False]
while <ast.UnaryOp object at 0x7da2043458d0> begin[:]
if <ast.UnaryOp object at 0x7da204347370> begin[:]
<ast.Yield object at 0x7da204347130>
return[None]
if <ast.BoolOp object at 0x7da204347400> begin[:]
variable[maxRepetitions] assign[=] call[name[min], parameter[name[maxRepetitions], binary_operation[name[maxRows] - name[totalRows]]]]
call[name[cmdgen].bulkCmd, parameter[name[snmpDispatcher], name[authData], name[transportTarget], name[nonRepeaters], name[maxRepetitions], <ast.Starred object at 0x7da204345090>]]
call[name[snmpDispatcher].transportDispatcher.runDispatcher, parameter[]]
<ast.Tuple object at 0x7da204345840> assign[=] name[response]
if name[errorIndication] begin[:]
<ast.Yield object at 0x7da2043456c0>
return[None] | keyword[def] identifier[bulkCmd] ( identifier[snmpDispatcher] , identifier[authData] , identifier[transportTarget] ,
identifier[nonRepeaters] , identifier[maxRepetitions] ,* identifier[varBinds] ,** identifier[options] ):
literal[string]
keyword[def] identifier[cbFun] (* identifier[args] ,** identifier[kwargs] ):
identifier[response] [:]= identifier[args] +( identifier[kwargs] . identifier[get] ( literal[string] ,()),)
identifier[options] [ literal[string] ]= identifier[cbFun]
identifier[lexicographicMode] = identifier[options] . identifier[pop] ( literal[string] , keyword[True] )
identifier[maxRows] = identifier[options] . identifier[pop] ( literal[string] , literal[int] )
identifier[maxCalls] = identifier[options] . identifier[pop] ( literal[string] , literal[int] )
identifier[initialVarBinds] = identifier[VB_PROCESSOR] . identifier[makeVarBinds] ( identifier[snmpDispatcher] . identifier[cache] , identifier[varBinds] )
identifier[nullVarBinds] =[ keyword[False] ]* identifier[len] ( identifier[initialVarBinds] )
identifier[totalRows] = identifier[totalCalls] = literal[int]
identifier[errorIndication] , identifier[errorStatus] , identifier[errorIndex] , identifier[varBindTable] = keyword[None] , literal[int] , literal[int] ,()
identifier[response] =[]
identifier[stopFlag] = keyword[False]
keyword[while] keyword[not] identifier[stopFlag] :
keyword[if] keyword[not] identifier[varBinds] :
keyword[yield] ( identifier[errorIndication] , identifier[errorStatus] , identifier[errorIndex] , identifier[varBinds] )
keyword[return]
keyword[if] identifier[maxRows] keyword[and] identifier[totalRows] < identifier[maxRows] :
identifier[maxRepetitions] = identifier[min] ( identifier[maxRepetitions] , identifier[maxRows] - identifier[totalRows] )
identifier[cmdgen] . identifier[bulkCmd] ( identifier[snmpDispatcher] , identifier[authData] , identifier[transportTarget] ,
identifier[nonRepeaters] , identifier[maxRepetitions] ,
*[( identifier[x] [ literal[int] ], identifier[Null] ( literal[string] )) keyword[for] identifier[x] keyword[in] identifier[varBinds] ],** identifier[options] )
identifier[snmpDispatcher] . identifier[transportDispatcher] . identifier[runDispatcher] ()
identifier[errorIndication] , identifier[errorStatus] , identifier[errorIndex] , identifier[varBindTable] , identifier[varBinds] = identifier[response]
keyword[if] identifier[errorIndication] :
keyword[yield] ( identifier[errorIndication] , identifier[errorStatus] , identifier[errorIndex] ,())
keyword[return]
keyword[elif] identifier[errorStatus] :
keyword[if] identifier[errorStatus] == literal[int] :
identifier[errorStatus] = identifier[errorStatus] . identifier[clone] ( literal[int] )
identifier[errorIndex] = identifier[errorIndex] . identifier[clone] ( literal[int] )
keyword[yield] ( identifier[errorIndication] , identifier[errorStatus] , identifier[errorIndex] , identifier[varBindTable] keyword[and] identifier[varBindTable] [ literal[int] ] keyword[or] [])
keyword[return]
keyword[else] :
keyword[for] identifier[rowIdx] , identifier[varBindRow] keyword[in] identifier[enumerate] ( identifier[varBindTable] ):
identifier[stopFlag] = keyword[True]
keyword[if] identifier[len] ( identifier[varBindRow] )!= identifier[len] ( identifier[initialVarBinds] ):
identifier[varBindTable] = identifier[rowIdx] keyword[and] identifier[varBindTable] [: identifier[rowIdx] - literal[int] ] keyword[or] []
keyword[break]
keyword[for] identifier[colIdx] , identifier[varBind] keyword[in] identifier[enumerate] ( identifier[varBindRow] ):
identifier[name] , identifier[val] = identifier[varBind]
keyword[if] identifier[nullVarBinds] [ identifier[colIdx] ]:
identifier[varBindRow] [ identifier[colIdx] ]= identifier[name] , identifier[endOfMibView]
keyword[continue]
identifier[stopFlag] = keyword[False]
keyword[if] identifier[isinstance] ( identifier[val] , identifier[Null] ):
identifier[nullVarBinds] [ identifier[colIdx] ]= keyword[True]
keyword[elif] keyword[not] identifier[lexicographicMode] keyword[and] keyword[not] identifier[initialVarBinds] [ identifier[colIdx] ][ literal[int] ]. identifier[isPrefixOf] ( identifier[name] ):
identifier[varBindRow] [ identifier[colIdx] ]= identifier[name] , identifier[endOfMibView]
identifier[nullVarBinds] [ identifier[colIdx] ]= keyword[True]
keyword[if] identifier[stopFlag] :
identifier[varBindTable] = identifier[rowIdx] keyword[and] identifier[varBindTable] [: identifier[rowIdx] - literal[int] ] keyword[or] []
keyword[break]
identifier[totalRows] += identifier[len] ( identifier[varBindTable] )
identifier[totalCalls] += literal[int]
keyword[if] identifier[maxRows] keyword[and] identifier[totalRows] >= identifier[maxRows] :
keyword[if] identifier[totalRows] > identifier[maxRows] :
identifier[varBindTable] = identifier[varBindTable] [:-( identifier[totalRows] - identifier[maxRows] )]
identifier[stopFlag] = keyword[True]
keyword[if] identifier[maxCalls] keyword[and] identifier[totalCalls] >= identifier[maxCalls] :
identifier[stopFlag] = keyword[True]
keyword[for] identifier[varBindRow] keyword[in] identifier[varBindTable] :
identifier[nextVarBinds] =( keyword[yield] identifier[errorIndication] , identifier[errorStatus] , identifier[errorIndex] , identifier[varBindRow] )
keyword[if] identifier[nextVarBinds] :
identifier[initialVarBinds] = identifier[varBinds] = identifier[VB_PROCESSOR] . identifier[makeVarBinds] ( identifier[snmpDispatcher] . identifier[cache] , identifier[nextVarBinds] ) | def bulkCmd(snmpDispatcher, authData, transportTarget, nonRepeaters, maxRepetitions, *varBinds, **options):
"""Creates a generator to perform one or more SNMP GETBULK queries.
On each iteration, new SNMP GETBULK request is send
(:RFC:`1905#section-4.2.3`). The iterator blocks waiting for response
to arrive or error to occur.
Parameters
----------
snmpDispatcher : :py:class:`~pysnmp.hlapi.snmpDispatcher`
Class instance representing SNMP engine.
authData : :py:class:`~pysnmp.hlapi.CommunityData` or :py:class:`~pysnmp.hlapi.UsmUserData`
Class instance representing SNMP credentials.
transportTarget : :py:class:`~pysnmp.hlapi.asyncore.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.asyncore.Udp6TransportTarget`
Class instance representing transport type along with SNMP peer address.
nonRepeaters : int
One MIB variable is requested in response for the first
`nonRepeaters` MIB variables in request.
maxRepetitions : int
`maxRepetitions` MIB variables are requested in response for each
of the remaining MIB variables in the request (e.g. excluding
`nonRepeaters`). Remote SNMP engine may choose lesser value than
requested.
\\*varBinds : :py:class:`~pysnmp.smi.rfc1902.ObjectType`
One or more class instances representing MIB variables to place
into SNMP request.
Other Parameters
----------------
\\*\\*options :
Request options:
* `lookupMib` - load MIB and resolve response MIB variables at
the cost of slightly reduced performance. Default is `True`.
Default is `True`.
* `lexicographicMode` - walk SNMP agent's MIB till the end (if `True`),
otherwise (if `False`) stop iteration when all response MIB
variables leave the scope of initial MIB variables in
`varBinds`. Default is `True`.
* `ignoreNonIncreasingOid` - continue iteration even if response
MIB variables (OIDs) are not greater then request MIB variables.
Be aware that setting it to `True` may cause infinite loop between
SNMP management and agent applications. Default is `False`.
* `maxRows` - stop iteration once this generator instance processed
`maxRows` of SNMP conceptual table. Default is `0` (no limit).
* `maxCalls` - stop iteration once this generator instance processed
`maxCalls` responses. Default is 0 (no limit).
Yields
------
errorIndication : str
True value indicates SNMP engine error.
errorStatus : str
True value indicates SNMP PDU error.
errorIndex : int
Non-zero value refers to \\*varBinds[errorIndex-1]
varBinds: tuple
A sequence of :py:class:`~pysnmp.smi.rfc1902.ObjectType` class
instances representing MIB variables returned in SNMP response.
Raises
------
PySnmpError
Or its derivative indicating that an error occurred while
performing SNMP operation.
Notes
-----
The `bulkCmd` generator will be exhausted on any of the following
conditions:
* SNMP engine error occurs thus `errorIndication` is `True`
* SNMP PDU `errorStatus` is reported as `True`
* SNMP :py:class:`~pysnmp.proto.rfc1905.EndOfMibView` values
(also known as *SNMP exception values*) are reported for all
MIB variables in `varBinds`
* *lexicographicMode* option is `True` and SNMP agent reports
end-of-mib or *lexicographicMode* is `False` and all
response MIB variables leave the scope of `varBinds`
At any moment a new sequence of `varBinds` could be send back into
running generator (supported since Python 2.6).
Setting `maxRepetitions` value to 15..50 might significantly improve
system performance, as many MIB variables get packed into a single
response message at once.
Examples
--------
>>> from pysnmp.hlapi.v1arch import *
>>>
>>> g = bulkCmd(snmpDispatcher(),
>>> CommunityData('public'),
>>> UdpTransportTarget(('demo.snmplabs.com', 161)),
>>> 0, 25,
>>> ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr')))
>>> next(g)
(None, 0, 0, [[ObjectType(ObjectIdentity(ObjectName('1.3.6.1.2.1.1.1.0')), DisplayString('SunOS zeus.snmplabs.com 4.1.3_U1 1 sun4m'))]])
>>> g.send([ObjectType(ObjectIdentity('IF-MIB', 'ifInOctets'))])
(None, 0, 0, [[(ObjectName('1.3.6.1.2.1.2.2.1.10.1'), Counter32(284817787))]])
"""
def cbFun(*args, **kwargs):
response[:] = args + (kwargs.get('nextVarBinds', ()),)
options['cbFun'] = cbFun
lexicographicMode = options.pop('lexicographicMode', True)
maxRows = options.pop('maxRows', 0)
maxCalls = options.pop('maxCalls', 0)
initialVarBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, varBinds)
nullVarBinds = [False] * len(initialVarBinds)
totalRows = totalCalls = 0
(errorIndication, errorStatus, errorIndex, varBindTable) = (None, 0, 0, ())
response = []
stopFlag = False
while not stopFlag:
if not varBinds:
yield (errorIndication, errorStatus, errorIndex, varBinds)
return # depends on [control=['if'], data=[]]
if maxRows and totalRows < maxRows:
maxRepetitions = min(maxRepetitions, maxRows - totalRows) # depends on [control=['if'], data=[]]
cmdgen.bulkCmd(snmpDispatcher, authData, transportTarget, nonRepeaters, maxRepetitions, *[(x[0], Null('')) for x in varBinds], **options)
snmpDispatcher.transportDispatcher.runDispatcher()
(errorIndication, errorStatus, errorIndex, varBindTable, varBinds) = response
if errorIndication:
yield (errorIndication, errorStatus, errorIndex, ())
return # depends on [control=['if'], data=[]]
elif errorStatus:
if errorStatus == 2:
# Hide SNMPv1 noSuchName error which leaks in here
# from SNMPv1 Agent through internal pysnmp proxy.
errorStatus = errorStatus.clone(0)
errorIndex = errorIndex.clone(0) # depends on [control=['if'], data=['errorStatus']]
yield (errorIndication, errorStatus, errorIndex, varBindTable and varBindTable[0] or [])
return # depends on [control=['if'], data=[]]
else:
for (rowIdx, varBindRow) in enumerate(varBindTable):
stopFlag = True
if len(varBindRow) != len(initialVarBinds):
varBindTable = rowIdx and varBindTable[:rowIdx - 1] or []
break # depends on [control=['if'], data=[]]
for (colIdx, varBind) in enumerate(varBindRow):
(name, val) = varBind
if nullVarBinds[colIdx]:
varBindRow[colIdx] = (name, endOfMibView)
continue # depends on [control=['if'], data=[]]
stopFlag = False
if isinstance(val, Null):
nullVarBinds[colIdx] = True # depends on [control=['if'], data=[]]
elif not lexicographicMode and (not initialVarBinds[colIdx][0].isPrefixOf(name)):
varBindRow[colIdx] = (name, endOfMibView)
nullVarBinds[colIdx] = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if stopFlag:
varBindTable = rowIdx and varBindTable[:rowIdx - 1] or []
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
totalRows += len(varBindTable)
totalCalls += 1
if maxRows and totalRows >= maxRows:
if totalRows > maxRows:
varBindTable = varBindTable[:-(totalRows - maxRows)] # depends on [control=['if'], data=['totalRows', 'maxRows']]
stopFlag = True # depends on [control=['if'], data=[]]
if maxCalls and totalCalls >= maxCalls:
stopFlag = True # depends on [control=['if'], data=[]]
for varBindRow in varBindTable:
nextVarBinds = (yield (errorIndication, errorStatus, errorIndex, varBindRow))
if nextVarBinds:
initialVarBinds = varBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, nextVarBinds) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['varBindRow']] # depends on [control=['while'], data=[]] |
def write_temp_file(self, content, filename=None, mode='w'):
"""Write content to a temporary file.
Args:
content (bytes|str): The file content. If passing binary data the mode needs to be set
to 'wb'.
filename (str, optional): The filename to use when writing the file.
mode (str, optional): The file write mode which could be either 'w' or 'wb'.
Returns:
str: Fully qualified path name for the file.
"""
if filename is None:
filename = str(uuid.uuid4())
fqpn = os.path.join(self.tcex.default_args.tc_temp_path, filename)
with open(fqpn, mode) as fh:
fh.write(content)
return fqpn | def function[write_temp_file, parameter[self, content, filename, mode]]:
constant[Write content to a temporary file.
Args:
content (bytes|str): The file content. If passing binary data the mode needs to be set
to 'wb'.
filename (str, optional): The filename to use when writing the file.
mode (str, optional): The file write mode which could be either 'w' or 'wb'.
Returns:
str: Fully qualified path name for the file.
]
if compare[name[filename] is constant[None]] begin[:]
variable[filename] assign[=] call[name[str], parameter[call[name[uuid].uuid4, parameter[]]]]
variable[fqpn] assign[=] call[name[os].path.join, parameter[name[self].tcex.default_args.tc_temp_path, name[filename]]]
with call[name[open], parameter[name[fqpn], name[mode]]] begin[:]
call[name[fh].write, parameter[name[content]]]
return[name[fqpn]] | keyword[def] identifier[write_temp_file] ( identifier[self] , identifier[content] , identifier[filename] = keyword[None] , identifier[mode] = literal[string] ):
literal[string]
keyword[if] identifier[filename] keyword[is] keyword[None] :
identifier[filename] = identifier[str] ( identifier[uuid] . identifier[uuid4] ())
identifier[fqpn] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[tcex] . identifier[default_args] . identifier[tc_temp_path] , identifier[filename] )
keyword[with] identifier[open] ( identifier[fqpn] , identifier[mode] ) keyword[as] identifier[fh] :
identifier[fh] . identifier[write] ( identifier[content] )
keyword[return] identifier[fqpn] | def write_temp_file(self, content, filename=None, mode='w'):
"""Write content to a temporary file.
Args:
content (bytes|str): The file content. If passing binary data the mode needs to be set
to 'wb'.
filename (str, optional): The filename to use when writing the file.
mode (str, optional): The file write mode which could be either 'w' or 'wb'.
Returns:
str: Fully qualified path name for the file.
"""
if filename is None:
filename = str(uuid.uuid4()) # depends on [control=['if'], data=['filename']]
fqpn = os.path.join(self.tcex.default_args.tc_temp_path, filename)
with open(fqpn, mode) as fh:
fh.write(content) # depends on [control=['with'], data=['fh']]
return fqpn |
def escape_velocity(M,R):
"""
escape velocity.
Parameters
----------
M : float
Mass in solar masses.
R : float
Radius in solar radiu.
Returns
-------
v_escape
in km/s.
"""
ve = np.sqrt(2.*grav_const*M*msun_g/(R*rsun_cm))
ve = ve*1.e-5
return ve | def function[escape_velocity, parameter[M, R]]:
constant[
escape velocity.
Parameters
----------
M : float
Mass in solar masses.
R : float
Radius in solar radiu.
Returns
-------
v_escape
in km/s.
]
variable[ve] assign[=] call[name[np].sqrt, parameter[binary_operation[binary_operation[binary_operation[binary_operation[constant[2.0] * name[grav_const]] * name[M]] * name[msun_g]] / binary_operation[name[R] * name[rsun_cm]]]]]
variable[ve] assign[=] binary_operation[name[ve] * constant[1e-05]]
return[name[ve]] | keyword[def] identifier[escape_velocity] ( identifier[M] , identifier[R] ):
literal[string]
identifier[ve] = identifier[np] . identifier[sqrt] ( literal[int] * identifier[grav_const] * identifier[M] * identifier[msun_g] /( identifier[R] * identifier[rsun_cm] ))
identifier[ve] = identifier[ve] * literal[int]
keyword[return] identifier[ve] | def escape_velocity(M, R):
"""
escape velocity.
Parameters
----------
M : float
Mass in solar masses.
R : float
Radius in solar radiu.
Returns
-------
v_escape
in km/s.
"""
ve = np.sqrt(2.0 * grav_const * M * msun_g / (R * rsun_cm))
ve = ve * 1e-05
return ve |
def to_unicode_string(string):
"""
Return a Unicode string out of the given string.
On Python 2, it calls ``unicode`` with ``utf-8`` encoding.
On Python 3, it just returns the given string.
Return ``None`` if ``string`` is ``None``.
:param str string: the string to convert to Unicode
:rtype: (Unicode) str
"""
if string is None:
return None
if is_unicode_string(string):
return string
# if reached here, string is a byte string
if PY2:
return unicode(string, encoding="utf-8")
return string.decode(encoding="utf-8") | def function[to_unicode_string, parameter[string]]:
constant[
Return a Unicode string out of the given string.
On Python 2, it calls ``unicode`` with ``utf-8`` encoding.
On Python 3, it just returns the given string.
Return ``None`` if ``string`` is ``None``.
:param str string: the string to convert to Unicode
:rtype: (Unicode) str
]
if compare[name[string] is constant[None]] begin[:]
return[constant[None]]
if call[name[is_unicode_string], parameter[name[string]]] begin[:]
return[name[string]]
if name[PY2] begin[:]
return[call[name[unicode], parameter[name[string]]]]
return[call[name[string].decode, parameter[]]] | keyword[def] identifier[to_unicode_string] ( identifier[string] ):
literal[string]
keyword[if] identifier[string] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[if] identifier[is_unicode_string] ( identifier[string] ):
keyword[return] identifier[string]
keyword[if] identifier[PY2] :
keyword[return] identifier[unicode] ( identifier[string] , identifier[encoding] = literal[string] )
keyword[return] identifier[string] . identifier[decode] ( identifier[encoding] = literal[string] ) | def to_unicode_string(string):
"""
Return a Unicode string out of the given string.
On Python 2, it calls ``unicode`` with ``utf-8`` encoding.
On Python 3, it just returns the given string.
Return ``None`` if ``string`` is ``None``.
:param str string: the string to convert to Unicode
:rtype: (Unicode) str
"""
if string is None:
return None # depends on [control=['if'], data=[]]
if is_unicode_string(string):
return string # depends on [control=['if'], data=[]] # if reached here, string is a byte string
if PY2:
return unicode(string, encoding='utf-8') # depends on [control=['if'], data=[]]
return string.decode(encoding='utf-8') |
def get_dataset(ds,dataDir,removecompressed=1):
"""
A function which attempts downloads and uncompresses the latest version of an openfmri.fmri dataset.
PARAMETERS
:ds: dataset number of the openfMRI.org dataset (integer) without zero padding. I.e. can just be 212 (doesn't need to be 000212).
:dataDir: where to save the data. Will get saved in 'dataDir/openfmri/ds000XXX'
:removecompressed: delete compressed data once unzipped. 1=yes. 0=no.
NOTES
There is no "default" way to download data from openfMRI so this solution is a little hacky. It may not be a universal functoin and it is best to verify that all necessary data has been downloaded.
"""
#Convert input ds to string incase it is put in via function
ds = str(ds)
#The final character of the dataset can be a letter
lettersuffix=''
if re.search('[A-Za-z]$',ds):
lettersuffix = ds[-1]
ds = ds[:-1]
openfMRI_dataset_string = '{0:06d}'.format(int(ds)) + lettersuffix
#Some datasets include
try:
os.mkdir(dataDir)
except:
pass
datasetDir = os.path.join(dataDir, 'openfmri/')
try:
os.mkdir(datasetDir)
except:
pass
openfMRI_url = 'https://openfmri.org/dataset/ds' + openfMRI_dataset_string + '/'
r = urlopen(openfMRI_url).read()
soup = BeautifulSoup(r,'lxml')
#Isolate only the links from the latest revision. The text "data associated with revision". If the website changes its static text, this needs to be changed
unformatted_soup=soup.prettify()
firstOccurance=unformatted_soup.find('Data Associated with Revision')
secondOccurancce=unformatted_soup[firstOccurance+1:].find('Data Associated with Revision')
#If there is only one "Data Associated..." (i.e. only one revision) this returns -1. This should be kept. Otherwise add on the firstOccurance index
if secondOccurancce != -1:
secondOccurancce+=firstOccurance
#The latest links are confined within this part of the text
soup_latestversion = BeautifulSoup(unformatted_soup[firstOccurance:secondOccurancce],'lxml')
# Loop through all links and dowload files
filelist = []
for a in soup_latestversion.find_all('a', href=True):
#This assumes that all files include ds....
if re.search('ds[A-Za-z_0-9.-]*$',a['href']):
filename_start=re.search('ds[A-Za-z_0-9.-]*$',a['href']).start()
filelist.append(a['href'][filename_start:])
print('Downloading: ' + a['href'][filename_start:])
urlretrieve(a['href'],datasetDir + a['href'][filename_start:])
print('--- Download complete ---')
for f in filelist:
untar_or_unzip(datasetDir,f)
print('--- Uncompressing complete ---')
if removecompressed==1:
for f in filelist:
print('Clean up. Deleting: ' + f)
os.remove(datasetDir+f)
print('--- Clean up complete ---')
print('NOTE: It is best to verify manually that all the correct data has been downloaded and uncompressed correctly. \n If data is used in any publication, see openfmri.org about how to appropriately cite/credit the data.')
print('--- Script complete ---') | def function[get_dataset, parameter[ds, dataDir, removecompressed]]:
constant[
A function which attempts downloads and uncompresses the latest version of an openfmri.fmri dataset.
PARAMETERS
:ds: dataset number of the openfMRI.org dataset (integer) without zero padding. I.e. can just be 212 (doesn't need to be 000212).
:dataDir: where to save the data. Will get saved in 'dataDir/openfmri/ds000XXX'
:removecompressed: delete compressed data once unzipped. 1=yes. 0=no.
NOTES
There is no "default" way to download data from openfMRI so this solution is a little hacky. It may not be a universal functoin and it is best to verify that all necessary data has been downloaded.
]
variable[ds] assign[=] call[name[str], parameter[name[ds]]]
variable[lettersuffix] assign[=] constant[]
if call[name[re].search, parameter[constant[[A-Za-z]$], name[ds]]] begin[:]
variable[lettersuffix] assign[=] call[name[ds]][<ast.UnaryOp object at 0x7da1b253cc70>]
variable[ds] assign[=] call[name[ds]][<ast.Slice object at 0x7da1b253c280>]
variable[openfMRI_dataset_string] assign[=] binary_operation[call[constant[{0:06d}].format, parameter[call[name[int], parameter[name[ds]]]]] + name[lettersuffix]]
<ast.Try object at 0x7da1b255cdc0>
variable[datasetDir] assign[=] call[name[os].path.join, parameter[name[dataDir], constant[openfmri/]]]
<ast.Try object at 0x7da1b255c520>
variable[openfMRI_url] assign[=] binary_operation[binary_operation[constant[https://openfmri.org/dataset/ds] + name[openfMRI_dataset_string]] + constant[/]]
variable[r] assign[=] call[call[name[urlopen], parameter[name[openfMRI_url]]].read, parameter[]]
variable[soup] assign[=] call[name[BeautifulSoup], parameter[name[r], constant[lxml]]]
variable[unformatted_soup] assign[=] call[name[soup].prettify, parameter[]]
variable[firstOccurance] assign[=] call[name[unformatted_soup].find, parameter[constant[Data Associated with Revision]]]
variable[secondOccurancce] assign[=] call[call[name[unformatted_soup]][<ast.Slice object at 0x7da1b255f670>].find, parameter[constant[Data Associated with Revision]]]
if compare[name[secondOccurancce] not_equal[!=] <ast.UnaryOp object at 0x7da1b255efe0>] begin[:]
<ast.AugAssign object at 0x7da1b255f280>
variable[soup_latestversion] assign[=] call[name[BeautifulSoup], parameter[call[name[unformatted_soup]][<ast.Slice object at 0x7da1b255ded0>], constant[lxml]]]
variable[filelist] assign[=] list[[]]
for taget[name[a]] in starred[call[name[soup_latestversion].find_all, parameter[constant[a]]]] begin[:]
if call[name[re].search, parameter[constant[ds[A-Za-z_0-9.-]*$], call[name[a]][constant[href]]]] begin[:]
variable[filename_start] assign[=] call[call[name[re].search, parameter[constant[ds[A-Za-z_0-9.-]*$], call[name[a]][constant[href]]]].start, parameter[]]
call[name[filelist].append, parameter[call[call[name[a]][constant[href]]][<ast.Slice object at 0x7da1b2631bd0>]]]
call[name[print], parameter[binary_operation[constant[Downloading: ] + call[call[name[a]][constant[href]]][<ast.Slice object at 0x7da1b2631360>]]]]
call[name[urlretrieve], parameter[call[name[a]][constant[href]], binary_operation[name[datasetDir] + call[call[name[a]][constant[href]]][<ast.Slice object at 0x7da1b255e470>]]]]
call[name[print], parameter[constant[--- Download complete ---]]]
for taget[name[f]] in starred[name[filelist]] begin[:]
call[name[untar_or_unzip], parameter[name[datasetDir], name[f]]]
call[name[print], parameter[constant[--- Uncompressing complete ---]]]
if compare[name[removecompressed] equal[==] constant[1]] begin[:]
for taget[name[f]] in starred[name[filelist]] begin[:]
call[name[print], parameter[binary_operation[constant[Clean up. Deleting: ] + name[f]]]]
call[name[os].remove, parameter[binary_operation[name[datasetDir] + name[f]]]]
call[name[print], parameter[constant[--- Clean up complete ---]]]
call[name[print], parameter[constant[NOTE: It is best to verify manually that all the correct data has been downloaded and uncompressed correctly.
If data is used in any publication, see openfmri.org about how to appropriately cite/credit the data.]]]
call[name[print], parameter[constant[--- Script complete ---]]] | keyword[def] identifier[get_dataset] ( identifier[ds] , identifier[dataDir] , identifier[removecompressed] = literal[int] ):
literal[string]
identifier[ds] = identifier[str] ( identifier[ds] )
identifier[lettersuffix] = literal[string]
keyword[if] identifier[re] . identifier[search] ( literal[string] , identifier[ds] ):
identifier[lettersuffix] = identifier[ds] [- literal[int] ]
identifier[ds] = identifier[ds] [:- literal[int] ]
identifier[openfMRI_dataset_string] = literal[string] . identifier[format] ( identifier[int] ( identifier[ds] ))+ identifier[lettersuffix]
keyword[try] :
identifier[os] . identifier[mkdir] ( identifier[dataDir] )
keyword[except] :
keyword[pass]
identifier[datasetDir] = identifier[os] . identifier[path] . identifier[join] ( identifier[dataDir] , literal[string] )
keyword[try] :
identifier[os] . identifier[mkdir] ( identifier[datasetDir] )
keyword[except] :
keyword[pass]
identifier[openfMRI_url] = literal[string] + identifier[openfMRI_dataset_string] + literal[string]
identifier[r] = identifier[urlopen] ( identifier[openfMRI_url] ). identifier[read] ()
identifier[soup] = identifier[BeautifulSoup] ( identifier[r] , literal[string] )
identifier[unformatted_soup] = identifier[soup] . identifier[prettify] ()
identifier[firstOccurance] = identifier[unformatted_soup] . identifier[find] ( literal[string] )
identifier[secondOccurancce] = identifier[unformatted_soup] [ identifier[firstOccurance] + literal[int] :]. identifier[find] ( literal[string] )
keyword[if] identifier[secondOccurancce] !=- literal[int] :
identifier[secondOccurancce] += identifier[firstOccurance]
identifier[soup_latestversion] = identifier[BeautifulSoup] ( identifier[unformatted_soup] [ identifier[firstOccurance] : identifier[secondOccurancce] ], literal[string] )
identifier[filelist] =[]
keyword[for] identifier[a] keyword[in] identifier[soup_latestversion] . identifier[find_all] ( literal[string] , identifier[href] = keyword[True] ):
keyword[if] identifier[re] . identifier[search] ( literal[string] , identifier[a] [ literal[string] ]):
identifier[filename_start] = identifier[re] . identifier[search] ( literal[string] , identifier[a] [ literal[string] ]). identifier[start] ()
identifier[filelist] . identifier[append] ( identifier[a] [ literal[string] ][ identifier[filename_start] :])
identifier[print] ( literal[string] + identifier[a] [ literal[string] ][ identifier[filename_start] :])
identifier[urlretrieve] ( identifier[a] [ literal[string] ], identifier[datasetDir] + identifier[a] [ literal[string] ][ identifier[filename_start] :])
identifier[print] ( literal[string] )
keyword[for] identifier[f] keyword[in] identifier[filelist] :
identifier[untar_or_unzip] ( identifier[datasetDir] , identifier[f] )
identifier[print] ( literal[string] )
keyword[if] identifier[removecompressed] == literal[int] :
keyword[for] identifier[f] keyword[in] identifier[filelist] :
identifier[print] ( literal[string] + identifier[f] )
identifier[os] . identifier[remove] ( identifier[datasetDir] + identifier[f] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
identifier[print] ( literal[string] ) | def get_dataset(ds, dataDir, removecompressed=1):
"""
A function which attempts downloads and uncompresses the latest version of an openfmri.fmri dataset.
PARAMETERS
:ds: dataset number of the openfMRI.org dataset (integer) without zero padding. I.e. can just be 212 (doesn't need to be 000212).
:dataDir: where to save the data. Will get saved in 'dataDir/openfmri/ds000XXX'
:removecompressed: delete compressed data once unzipped. 1=yes. 0=no.
NOTES
There is no "default" way to download data from openfMRI so this solution is a little hacky. It may not be a universal functoin and it is best to verify that all necessary data has been downloaded.
"""
#Convert input ds to string incase it is put in via function
ds = str(ds)
#The final character of the dataset can be a letter
lettersuffix = ''
if re.search('[A-Za-z]$', ds):
lettersuffix = ds[-1]
ds = ds[:-1] # depends on [control=['if'], data=[]]
openfMRI_dataset_string = '{0:06d}'.format(int(ds)) + lettersuffix
#Some datasets include
try:
os.mkdir(dataDir) # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
datasetDir = os.path.join(dataDir, 'openfmri/')
try:
os.mkdir(datasetDir) # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]]
openfMRI_url = 'https://openfmri.org/dataset/ds' + openfMRI_dataset_string + '/'
r = urlopen(openfMRI_url).read()
soup = BeautifulSoup(r, 'lxml')
#Isolate only the links from the latest revision. The text "data associated with revision". If the website changes its static text, this needs to be changed
unformatted_soup = soup.prettify()
firstOccurance = unformatted_soup.find('Data Associated with Revision')
secondOccurancce = unformatted_soup[firstOccurance + 1:].find('Data Associated with Revision')
#If there is only one "Data Associated..." (i.e. only one revision) this returns -1. This should be kept. Otherwise add on the firstOccurance index
if secondOccurancce != -1:
secondOccurancce += firstOccurance # depends on [control=['if'], data=['secondOccurancce']]
#The latest links are confined within this part of the text
soup_latestversion = BeautifulSoup(unformatted_soup[firstOccurance:secondOccurancce], 'lxml')
# Loop through all links and dowload files
filelist = []
for a in soup_latestversion.find_all('a', href=True):
#This assumes that all files include ds....
if re.search('ds[A-Za-z_0-9.-]*$', a['href']):
filename_start = re.search('ds[A-Za-z_0-9.-]*$', a['href']).start()
filelist.append(a['href'][filename_start:])
print('Downloading: ' + a['href'][filename_start:])
urlretrieve(a['href'], datasetDir + a['href'][filename_start:]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['a']]
print('--- Download complete ---')
for f in filelist:
untar_or_unzip(datasetDir, f) # depends on [control=['for'], data=['f']]
print('--- Uncompressing complete ---')
if removecompressed == 1:
for f in filelist:
print('Clean up. Deleting: ' + f)
os.remove(datasetDir + f) # depends on [control=['for'], data=['f']]
print('--- Clean up complete ---') # depends on [control=['if'], data=[]]
print('NOTE: It is best to verify manually that all the correct data has been downloaded and uncompressed correctly. \n If data is used in any publication, see openfmri.org about how to appropriately cite/credit the data.')
print('--- Script complete ---') |
def get_element_by_id(self, ident):
"""Get a TocElement element identified by index number from the
container."""
for group in list(self.toc.keys()):
for name in list(self.toc[group].keys()):
if self.toc[group][name].ident == ident:
return self.toc[group][name]
return None | def function[get_element_by_id, parameter[self, ident]]:
constant[Get a TocElement element identified by index number from the
container.]
for taget[name[group]] in starred[call[name[list], parameter[call[name[self].toc.keys, parameter[]]]]] begin[:]
for taget[name[name]] in starred[call[name[list], parameter[call[call[name[self].toc][name[group]].keys, parameter[]]]]] begin[:]
if compare[call[call[name[self].toc][name[group]]][name[name]].ident equal[==] name[ident]] begin[:]
return[call[call[name[self].toc][name[group]]][name[name]]]
return[constant[None]] | keyword[def] identifier[get_element_by_id] ( identifier[self] , identifier[ident] ):
literal[string]
keyword[for] identifier[group] keyword[in] identifier[list] ( identifier[self] . identifier[toc] . identifier[keys] ()):
keyword[for] identifier[name] keyword[in] identifier[list] ( identifier[self] . identifier[toc] [ identifier[group] ]. identifier[keys] ()):
keyword[if] identifier[self] . identifier[toc] [ identifier[group] ][ identifier[name] ]. identifier[ident] == identifier[ident] :
keyword[return] identifier[self] . identifier[toc] [ identifier[group] ][ identifier[name] ]
keyword[return] keyword[None] | def get_element_by_id(self, ident):
"""Get a TocElement element identified by index number from the
container."""
for group in list(self.toc.keys()):
for name in list(self.toc[group].keys()):
if self.toc[group][name].ident == ident:
return self.toc[group][name] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['name']] # depends on [control=['for'], data=['group']]
return None |
def create_uo(self, configuration=None, tpl=None, keys=None, obj_type=None):
"""
Create a new UserObject from the given template.
:param configuration: EB configuration to use
:param tpl: CreateUserObject template, contain misc settings
:param keys: dictionary of keys, create_uo.KeyTypes. Communication keys, application key (if applicable).
:param obj_type: optional field for easy object type entry - required flags are computed from keys dict and tpl.
:return: UO - user object ready to use
"""
if configuration is not None:
self.configuration = configuration
if tpl is not None:
self.tpl = tpl
if keys is not None:
self.keys = keys
if self.keys is None:
self.keys = dict()
# generate comm keys if not present
TemplateProcessor.generate_comm_keys_if_not_present(self.keys)
# obj_type infer
if obj_type is not None:
tpl_type = CreateUO.get_uo_type(obj_type, KeyTypes.COMM_ENC in self.keys, KeyTypes.APP_KEY in self.keys)
self.tpl = CreateUO.set_type(self.tpl if self.tpl is not None else dict(), tpl_type)
# Create template specifications, using local config and defaults.
spec = CreateUO.get_template_request_spec(self.configuration)
if self.tpl is not None:
if isinstance(self.tpl, dict):
spec = EBUtils.update(spec, self.tpl)
else:
raise ValueError('Unknown tpl format')
# Fetch template for new UO.
tpl_resp = CreateUO.template_request(self.configuration, spec)
# Process the template, fill in the keys, do the crypto
tpl_processor = TemplateProcessor(configuration=self.configuration, keys=self.keys, tpl_response=tpl_resp)
tpl_req = tpl_processor.process()
# Import the initialized UO
self.import_resp = CreateUO.import_object(configuration=self.configuration, tpl=tpl_req)
# Build UO
uo = CreateUO.build_imported_object(configuration=self.configuration, tpl_import_req=tpl_req,
import_resp=self.import_resp)
return uo | def function[create_uo, parameter[self, configuration, tpl, keys, obj_type]]:
constant[
Create a new UserObject from the given template.
:param configuration: EB configuration to use
:param tpl: CreateUserObject template, contain misc settings
:param keys: dictionary of keys, create_uo.KeyTypes. Communication keys, application key (if applicable).
:param obj_type: optional field for easy object type entry - required flags are computed from keys dict and tpl.
:return: UO - user object ready to use
]
if compare[name[configuration] is_not constant[None]] begin[:]
name[self].configuration assign[=] name[configuration]
if compare[name[tpl] is_not constant[None]] begin[:]
name[self].tpl assign[=] name[tpl]
if compare[name[keys] is_not constant[None]] begin[:]
name[self].keys assign[=] name[keys]
if compare[name[self].keys is constant[None]] begin[:]
name[self].keys assign[=] call[name[dict], parameter[]]
call[name[TemplateProcessor].generate_comm_keys_if_not_present, parameter[name[self].keys]]
if compare[name[obj_type] is_not constant[None]] begin[:]
variable[tpl_type] assign[=] call[name[CreateUO].get_uo_type, parameter[name[obj_type], compare[name[KeyTypes].COMM_ENC in name[self].keys], compare[name[KeyTypes].APP_KEY in name[self].keys]]]
name[self].tpl assign[=] call[name[CreateUO].set_type, parameter[<ast.IfExp object at 0x7da1b2347160>, name[tpl_type]]]
variable[spec] assign[=] call[name[CreateUO].get_template_request_spec, parameter[name[self].configuration]]
if compare[name[self].tpl is_not constant[None]] begin[:]
if call[name[isinstance], parameter[name[self].tpl, name[dict]]] begin[:]
variable[spec] assign[=] call[name[EBUtils].update, parameter[name[spec], name[self].tpl]]
variable[tpl_resp] assign[=] call[name[CreateUO].template_request, parameter[name[self].configuration, name[spec]]]
variable[tpl_processor] assign[=] call[name[TemplateProcessor], parameter[]]
variable[tpl_req] assign[=] call[name[tpl_processor].process, parameter[]]
name[self].import_resp assign[=] call[name[CreateUO].import_object, parameter[]]
variable[uo] assign[=] call[name[CreateUO].build_imported_object, parameter[]]
return[name[uo]] | keyword[def] identifier[create_uo] ( identifier[self] , identifier[configuration] = keyword[None] , identifier[tpl] = keyword[None] , identifier[keys] = keyword[None] , identifier[obj_type] = keyword[None] ):
literal[string]
keyword[if] identifier[configuration] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[configuration] = identifier[configuration]
keyword[if] identifier[tpl] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[tpl] = identifier[tpl]
keyword[if] identifier[keys] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[keys] = identifier[keys]
keyword[if] identifier[self] . identifier[keys] keyword[is] keyword[None] :
identifier[self] . identifier[keys] = identifier[dict] ()
identifier[TemplateProcessor] . identifier[generate_comm_keys_if_not_present] ( identifier[self] . identifier[keys] )
keyword[if] identifier[obj_type] keyword[is] keyword[not] keyword[None] :
identifier[tpl_type] = identifier[CreateUO] . identifier[get_uo_type] ( identifier[obj_type] , identifier[KeyTypes] . identifier[COMM_ENC] keyword[in] identifier[self] . identifier[keys] , identifier[KeyTypes] . identifier[APP_KEY] keyword[in] identifier[self] . identifier[keys] )
identifier[self] . identifier[tpl] = identifier[CreateUO] . identifier[set_type] ( identifier[self] . identifier[tpl] keyword[if] identifier[self] . identifier[tpl] keyword[is] keyword[not] keyword[None] keyword[else] identifier[dict] (), identifier[tpl_type] )
identifier[spec] = identifier[CreateUO] . identifier[get_template_request_spec] ( identifier[self] . identifier[configuration] )
keyword[if] identifier[self] . identifier[tpl] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[self] . identifier[tpl] , identifier[dict] ):
identifier[spec] = identifier[EBUtils] . identifier[update] ( identifier[spec] , identifier[self] . identifier[tpl] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[tpl_resp] = identifier[CreateUO] . identifier[template_request] ( identifier[self] . identifier[configuration] , identifier[spec] )
identifier[tpl_processor] = identifier[TemplateProcessor] ( identifier[configuration] = identifier[self] . identifier[configuration] , identifier[keys] = identifier[self] . identifier[keys] , identifier[tpl_response] = identifier[tpl_resp] )
identifier[tpl_req] = identifier[tpl_processor] . identifier[process] ()
identifier[self] . identifier[import_resp] = identifier[CreateUO] . identifier[import_object] ( identifier[configuration] = identifier[self] . identifier[configuration] , identifier[tpl] = identifier[tpl_req] )
identifier[uo] = identifier[CreateUO] . identifier[build_imported_object] ( identifier[configuration] = identifier[self] . identifier[configuration] , identifier[tpl_import_req] = identifier[tpl_req] ,
identifier[import_resp] = identifier[self] . identifier[import_resp] )
keyword[return] identifier[uo] | def create_uo(self, configuration=None, tpl=None, keys=None, obj_type=None):
"""
Create a new UserObject from the given template.
:param configuration: EB configuration to use
:param tpl: CreateUserObject template, contain misc settings
:param keys: dictionary of keys, create_uo.KeyTypes. Communication keys, application key (if applicable).
:param obj_type: optional field for easy object type entry - required flags are computed from keys dict and tpl.
:return: UO - user object ready to use
"""
if configuration is not None:
self.configuration = configuration # depends on [control=['if'], data=['configuration']]
if tpl is not None:
self.tpl = tpl # depends on [control=['if'], data=['tpl']]
if keys is not None:
self.keys = keys # depends on [control=['if'], data=['keys']]
if self.keys is None:
self.keys = dict() # depends on [control=['if'], data=[]]
# generate comm keys if not present
TemplateProcessor.generate_comm_keys_if_not_present(self.keys)
# obj_type infer
if obj_type is not None:
tpl_type = CreateUO.get_uo_type(obj_type, KeyTypes.COMM_ENC in self.keys, KeyTypes.APP_KEY in self.keys)
self.tpl = CreateUO.set_type(self.tpl if self.tpl is not None else dict(), tpl_type) # depends on [control=['if'], data=['obj_type']]
# Create template specifications, using local config and defaults.
spec = CreateUO.get_template_request_spec(self.configuration)
if self.tpl is not None:
if isinstance(self.tpl, dict):
spec = EBUtils.update(spec, self.tpl) # depends on [control=['if'], data=[]]
else:
raise ValueError('Unknown tpl format') # depends on [control=['if'], data=[]]
# Fetch template for new UO.
tpl_resp = CreateUO.template_request(self.configuration, spec)
# Process the template, fill in the keys, do the crypto
tpl_processor = TemplateProcessor(configuration=self.configuration, keys=self.keys, tpl_response=tpl_resp)
tpl_req = tpl_processor.process()
# Import the initialized UO
self.import_resp = CreateUO.import_object(configuration=self.configuration, tpl=tpl_req)
# Build UO
uo = CreateUO.build_imported_object(configuration=self.configuration, tpl_import_req=tpl_req, import_resp=self.import_resp)
return uo |
def endpoint_name(self, endpoint_name):
"""
Sets the endpoint_name of this PreSharedKey.
The unique endpoint identifier that this pre-shared key applies to. 16-64 [printable](https://en.wikipedia.org/wiki/ASCII#Printable_characters) (non-control) ASCII characters.
:param endpoint_name: The endpoint_name of this PreSharedKey.
:type: str
"""
if endpoint_name is None:
raise ValueError("Invalid value for `endpoint_name`, must not be `None`")
if endpoint_name is not None and not re.search('^[ -~]{16,64}$', endpoint_name):
raise ValueError("Invalid value for `endpoint_name`, must be a follow pattern or equal to `/^[ -~]{16,64}$/`")
self._endpoint_name = endpoint_name | def function[endpoint_name, parameter[self, endpoint_name]]:
constant[
Sets the endpoint_name of this PreSharedKey.
The unique endpoint identifier that this pre-shared key applies to. 16-64 [printable](https://en.wikipedia.org/wiki/ASCII#Printable_characters) (non-control) ASCII characters.
:param endpoint_name: The endpoint_name of this PreSharedKey.
:type: str
]
if compare[name[endpoint_name] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b04a70d0>
if <ast.BoolOp object at 0x7da20c990280> begin[:]
<ast.Raise object at 0x7da20c9923e0>
name[self]._endpoint_name assign[=] name[endpoint_name] | keyword[def] identifier[endpoint_name] ( identifier[self] , identifier[endpoint_name] ):
literal[string]
keyword[if] identifier[endpoint_name] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[endpoint_name] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[re] . identifier[search] ( literal[string] , identifier[endpoint_name] ):
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[_endpoint_name] = identifier[endpoint_name] | def endpoint_name(self, endpoint_name):
"""
Sets the endpoint_name of this PreSharedKey.
The unique endpoint identifier that this pre-shared key applies to. 16-64 [printable](https://en.wikipedia.org/wiki/ASCII#Printable_characters) (non-control) ASCII characters.
:param endpoint_name: The endpoint_name of this PreSharedKey.
:type: str
"""
if endpoint_name is None:
raise ValueError('Invalid value for `endpoint_name`, must not be `None`') # depends on [control=['if'], data=[]]
if endpoint_name is not None and (not re.search('^[ -~]{16,64}$', endpoint_name)):
raise ValueError('Invalid value for `endpoint_name`, must be a follow pattern or equal to `/^[ -~]{16,64}$/`') # depends on [control=['if'], data=[]]
self._endpoint_name = endpoint_name |
def _addProteinIdsToGroupMapping(self, proteinIds, groupId):
"""Add a groupId to one or multiple entries of the internal
proteinToGroupId mapping.
:param proteinIds: a proteinId or a list of proteinIds, a proteinId
must be a string.
:param groupId: str, a groupId
"""
for proteinId in AUX.toList(proteinIds):
self._proteinToGroupIds[proteinId].add(groupId) | def function[_addProteinIdsToGroupMapping, parameter[self, proteinIds, groupId]]:
constant[Add a groupId to one or multiple entries of the internal
proteinToGroupId mapping.
:param proteinIds: a proteinId or a list of proteinIds, a proteinId
must be a string.
:param groupId: str, a groupId
]
for taget[name[proteinId]] in starred[call[name[AUX].toList, parameter[name[proteinIds]]]] begin[:]
call[call[name[self]._proteinToGroupIds][name[proteinId]].add, parameter[name[groupId]]] | keyword[def] identifier[_addProteinIdsToGroupMapping] ( identifier[self] , identifier[proteinIds] , identifier[groupId] ):
literal[string]
keyword[for] identifier[proteinId] keyword[in] identifier[AUX] . identifier[toList] ( identifier[proteinIds] ):
identifier[self] . identifier[_proteinToGroupIds] [ identifier[proteinId] ]. identifier[add] ( identifier[groupId] ) | def _addProteinIdsToGroupMapping(self, proteinIds, groupId):
"""Add a groupId to one or multiple entries of the internal
proteinToGroupId mapping.
:param proteinIds: a proteinId or a list of proteinIds, a proteinId
must be a string.
:param groupId: str, a groupId
"""
for proteinId in AUX.toList(proteinIds):
self._proteinToGroupIds[proteinId].add(groupId) # depends on [control=['for'], data=['proteinId']] |
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
potential at (R,z)
HISTORY:
2012-12-26 - Written - Bovy (IAS)
"""
if self._new:
#if R > 6.: return self._kp(R,z)
if nu.fabs(z) < 10.**-6.:
y= 0.5*self._alpha*R
return -nu.pi*R*(special.i0(y)*special.k1(y)-special.i1(y)*special.k0(y))
kalphamax= 10.
ks= kalphamax*0.5*(self._glx+1.)
weights= kalphamax*self._glw
sqrtp= nu.sqrt(z**2.+(ks+R)**2.)
sqrtm= nu.sqrt(z**2.+(ks-R)**2.)
evalInt= nu.arcsin(2.*ks/(sqrtp+sqrtm))*ks*special.k0(self._alpha*ks)
return -2.*self._alpha*nu.sum(weights*evalInt)
raise NotImplementedError("Not new=True not implemented for RazorThinExponentialDiskPotential") | def function[_evaluate, parameter[self, R, z, phi, t]]:
constant[
NAME:
_evaluate
PURPOSE:
evaluate the potential at (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
potential at (R,z)
HISTORY:
2012-12-26 - Written - Bovy (IAS)
]
if name[self]._new begin[:]
if compare[call[name[nu].fabs, parameter[name[z]]] less[<] binary_operation[constant[10.0] ** <ast.UnaryOp object at 0x7da1b0da2c50>]] begin[:]
variable[y] assign[=] binary_operation[binary_operation[constant[0.5] * name[self]._alpha] * name[R]]
return[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b0c97040> * name[R]] * binary_operation[binary_operation[call[name[special].i0, parameter[name[y]]] * call[name[special].k1, parameter[name[y]]]] - binary_operation[call[name[special].i1, parameter[name[y]]] * call[name[special].k0, parameter[name[y]]]]]]]
variable[kalphamax] assign[=] constant[10.0]
variable[ks] assign[=] binary_operation[binary_operation[name[kalphamax] * constant[0.5]] * binary_operation[name[self]._glx + constant[1.0]]]
variable[weights] assign[=] binary_operation[name[kalphamax] * name[self]._glw]
variable[sqrtp] assign[=] call[name[nu].sqrt, parameter[binary_operation[binary_operation[name[z] ** constant[2.0]] + binary_operation[binary_operation[name[ks] + name[R]] ** constant[2.0]]]]]
variable[sqrtm] assign[=] call[name[nu].sqrt, parameter[binary_operation[binary_operation[name[z] ** constant[2.0]] + binary_operation[binary_operation[name[ks] - name[R]] ** constant[2.0]]]]]
variable[evalInt] assign[=] binary_operation[binary_operation[call[name[nu].arcsin, parameter[binary_operation[binary_operation[constant[2.0] * name[ks]] / binary_operation[name[sqrtp] + name[sqrtm]]]]] * name[ks]] * call[name[special].k0, parameter[binary_operation[name[self]._alpha * name[ks]]]]]
return[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b0cb5150> * name[self]._alpha] * call[name[nu].sum, parameter[binary_operation[name[weights] * name[evalInt]]]]]]
<ast.Raise object at 0x7da1b0cb42e0> | keyword[def] identifier[_evaluate] ( identifier[self] , identifier[R] , identifier[z] , identifier[phi] = literal[int] , identifier[t] = literal[int] ):
literal[string]
keyword[if] identifier[self] . identifier[_new] :
keyword[if] identifier[nu] . identifier[fabs] ( identifier[z] )< literal[int] **- literal[int] :
identifier[y] = literal[int] * identifier[self] . identifier[_alpha] * identifier[R]
keyword[return] - identifier[nu] . identifier[pi] * identifier[R] *( identifier[special] . identifier[i0] ( identifier[y] )* identifier[special] . identifier[k1] ( identifier[y] )- identifier[special] . identifier[i1] ( identifier[y] )* identifier[special] . identifier[k0] ( identifier[y] ))
identifier[kalphamax] = literal[int]
identifier[ks] = identifier[kalphamax] * literal[int] *( identifier[self] . identifier[_glx] + literal[int] )
identifier[weights] = identifier[kalphamax] * identifier[self] . identifier[_glw]
identifier[sqrtp] = identifier[nu] . identifier[sqrt] ( identifier[z] ** literal[int] +( identifier[ks] + identifier[R] )** literal[int] )
identifier[sqrtm] = identifier[nu] . identifier[sqrt] ( identifier[z] ** literal[int] +( identifier[ks] - identifier[R] )** literal[int] )
identifier[evalInt] = identifier[nu] . identifier[arcsin] ( literal[int] * identifier[ks] /( identifier[sqrtp] + identifier[sqrtm] ))* identifier[ks] * identifier[special] . identifier[k0] ( identifier[self] . identifier[_alpha] * identifier[ks] )
keyword[return] - literal[int] * identifier[self] . identifier[_alpha] * identifier[nu] . identifier[sum] ( identifier[weights] * identifier[evalInt] )
keyword[raise] identifier[NotImplementedError] ( literal[string] ) | def _evaluate(self, R, z, phi=0.0, t=0.0):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at (R,z)
INPUT:
R - Cylindrical Galactocentric radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
potential at (R,z)
HISTORY:
2012-12-26 - Written - Bovy (IAS)
"""
if self._new:
#if R > 6.: return self._kp(R,z)
if nu.fabs(z) < 10.0 ** (-6.0):
y = 0.5 * self._alpha * R
return -nu.pi * R * (special.i0(y) * special.k1(y) - special.i1(y) * special.k0(y)) # depends on [control=['if'], data=[]]
kalphamax = 10.0
ks = kalphamax * 0.5 * (self._glx + 1.0)
weights = kalphamax * self._glw
sqrtp = nu.sqrt(z ** 2.0 + (ks + R) ** 2.0)
sqrtm = nu.sqrt(z ** 2.0 + (ks - R) ** 2.0)
evalInt = nu.arcsin(2.0 * ks / (sqrtp + sqrtm)) * ks * special.k0(self._alpha * ks)
return -2.0 * self._alpha * nu.sum(weights * evalInt) # depends on [control=['if'], data=[]]
raise NotImplementedError('Not new=True not implemented for RazorThinExponentialDiskPotential') |
def conditional_probability_alive(self, frequency, recency, T):
"""
Conditional probability alive.
Compute the probability that a customer with history
(frequency, recency, T) is currently alive.
From paper:
http://brucehardie.com/notes/009/pareto_nbd_derivations_2005-11-05.pdf
Parameters
----------
frequency: float
historical frequency of customer.
recency: float
historical recency of customer.
T: float
age of the customer.
Returns
-------
float
value representing a probability
"""
x, t_x = frequency, recency
r, alpha, s, beta = self._unload_params("r", "alpha", "s", "beta")
A_0 = self._log_A_0([r, alpha, s, beta], x, t_x, T)
return 1.0 / (1.0 + exp(log(s) - log(r + s + x) + (r + x) * log(alpha + T) + s * log(beta + T) + A_0)) | def function[conditional_probability_alive, parameter[self, frequency, recency, T]]:
constant[
Conditional probability alive.
Compute the probability that a customer with history
(frequency, recency, T) is currently alive.
From paper:
http://brucehardie.com/notes/009/pareto_nbd_derivations_2005-11-05.pdf
Parameters
----------
frequency: float
historical frequency of customer.
recency: float
historical recency of customer.
T: float
age of the customer.
Returns
-------
float
value representing a probability
]
<ast.Tuple object at 0x7da1b1d5c550> assign[=] tuple[[<ast.Name object at 0x7da1b1d5dc00>, <ast.Name object at 0x7da1b1d5d420>]]
<ast.Tuple object at 0x7da1b1d5df90> assign[=] call[name[self]._unload_params, parameter[constant[r], constant[alpha], constant[s], constant[beta]]]
variable[A_0] assign[=] call[name[self]._log_A_0, parameter[list[[<ast.Name object at 0x7da1b22ad480>, <ast.Name object at 0x7da1b22ae6e0>, <ast.Name object at 0x7da1b22ade70>, <ast.Name object at 0x7da1b22ac4f0>]], name[x], name[t_x], name[T]]]
return[binary_operation[constant[1.0] / binary_operation[constant[1.0] + call[name[exp], parameter[binary_operation[binary_operation[binary_operation[binary_operation[call[name[log], parameter[name[s]]] - call[name[log], parameter[binary_operation[binary_operation[name[r] + name[s]] + name[x]]]]] + binary_operation[binary_operation[name[r] + name[x]] * call[name[log], parameter[binary_operation[name[alpha] + name[T]]]]]] + binary_operation[name[s] * call[name[log], parameter[binary_operation[name[beta] + name[T]]]]]] + name[A_0]]]]]]] | keyword[def] identifier[conditional_probability_alive] ( identifier[self] , identifier[frequency] , identifier[recency] , identifier[T] ):
literal[string]
identifier[x] , identifier[t_x] = identifier[frequency] , identifier[recency]
identifier[r] , identifier[alpha] , identifier[s] , identifier[beta] = identifier[self] . identifier[_unload_params] ( literal[string] , literal[string] , literal[string] , literal[string] )
identifier[A_0] = identifier[self] . identifier[_log_A_0] ([ identifier[r] , identifier[alpha] , identifier[s] , identifier[beta] ], identifier[x] , identifier[t_x] , identifier[T] )
keyword[return] literal[int] /( literal[int] + identifier[exp] ( identifier[log] ( identifier[s] )- identifier[log] ( identifier[r] + identifier[s] + identifier[x] )+( identifier[r] + identifier[x] )* identifier[log] ( identifier[alpha] + identifier[T] )+ identifier[s] * identifier[log] ( identifier[beta] + identifier[T] )+ identifier[A_0] )) | def conditional_probability_alive(self, frequency, recency, T):
"""
Conditional probability alive.
Compute the probability that a customer with history
(frequency, recency, T) is currently alive.
From paper:
http://brucehardie.com/notes/009/pareto_nbd_derivations_2005-11-05.pdf
Parameters
----------
frequency: float
historical frequency of customer.
recency: float
historical recency of customer.
T: float
age of the customer.
Returns
-------
float
value representing a probability
"""
(x, t_x) = (frequency, recency)
(r, alpha, s, beta) = self._unload_params('r', 'alpha', 's', 'beta')
A_0 = self._log_A_0([r, alpha, s, beta], x, t_x, T)
return 1.0 / (1.0 + exp(log(s) - log(r + s + x) + (r + x) * log(alpha + T) + s * log(beta + T) + A_0)) |
def get_heteroatoms(self, elements=None):
"""
Identify non-H, non-C atoms in the MoleculeGraph, returning a list of
their node indices.
:param elements: List of elements to identify (if only certain
functional groups are of interest).
:return: set of ints representing node indices
"""
heteroatoms = set()
for node in self.molgraph.graph.nodes():
if elements is not None:
if str(self.species[node]) in elements:
heteroatoms.add(node)
else:
if str(self.species[node]) not in ["C", "H"]:
heteroatoms.add(node)
return heteroatoms | def function[get_heteroatoms, parameter[self, elements]]:
constant[
Identify non-H, non-C atoms in the MoleculeGraph, returning a list of
their node indices.
:param elements: List of elements to identify (if only certain
functional groups are of interest).
:return: set of ints representing node indices
]
variable[heteroatoms] assign[=] call[name[set], parameter[]]
for taget[name[node]] in starred[call[name[self].molgraph.graph.nodes, parameter[]]] begin[:]
if compare[name[elements] is_not constant[None]] begin[:]
if compare[call[name[str], parameter[call[name[self].species][name[node]]]] in name[elements]] begin[:]
call[name[heteroatoms].add, parameter[name[node]]]
return[name[heteroatoms]] | keyword[def] identifier[get_heteroatoms] ( identifier[self] , identifier[elements] = keyword[None] ):
literal[string]
identifier[heteroatoms] = identifier[set] ()
keyword[for] identifier[node] keyword[in] identifier[self] . identifier[molgraph] . identifier[graph] . identifier[nodes] ():
keyword[if] identifier[elements] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[str] ( identifier[self] . identifier[species] [ identifier[node] ]) keyword[in] identifier[elements] :
identifier[heteroatoms] . identifier[add] ( identifier[node] )
keyword[else] :
keyword[if] identifier[str] ( identifier[self] . identifier[species] [ identifier[node] ]) keyword[not] keyword[in] [ literal[string] , literal[string] ]:
identifier[heteroatoms] . identifier[add] ( identifier[node] )
keyword[return] identifier[heteroatoms] | def get_heteroatoms(self, elements=None):
"""
Identify non-H, non-C atoms in the MoleculeGraph, returning a list of
their node indices.
:param elements: List of elements to identify (if only certain
functional groups are of interest).
:return: set of ints representing node indices
"""
heteroatoms = set()
for node in self.molgraph.graph.nodes():
if elements is not None:
if str(self.species[node]) in elements:
heteroatoms.add(node) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['elements']]
elif str(self.species[node]) not in ['C', 'H']:
heteroatoms.add(node) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']]
return heteroatoms |
def setInstrumentParameters(self, instrpars):
""" This method overrides the superclass to set default values into
the parameter dictionary, in case empty entries are provided.
"""
pri_header = self._image[0].header
self.proc_unit = instrpars['proc_unit']
if self._isNotValid (instrpars['gain'], instrpars['gnkeyword']):
instrpars['gnkeyword'] = 'ADCGAIN' #gain has been hardcoded below
if self._isNotValid (instrpars['rdnoise'], instrpars['rnkeyword']):
instrpars['rnkeyword'] = None
if self._isNotValid (instrpars['exptime'], instrpars['expkeyword']):
instrpars['expkeyword'] = 'EXPTIME'
for chip in self.returnAllChips(extname=self.scienceExt):
chip._gain= 5.4 #measured gain
chip._rdnoise = self.getInstrParameter(
instrpars['rdnoise'], pri_header, instrpars['rnkeyword']
)
chip._exptime = self.getInstrParameter(
instrpars['exptime'], pri_header, instrpars['expkeyword']
)
if chip._gain is None or self._exptime is None:
print('ERROR: invalid instrument task parameter')
raise ValueError
# We need to treat Read Noise as a special case since it is
# not populated in the NICMOS primary header
if chip._rdnoise is None:
chip._rdnoise = self._getDefaultReadnoise()
chip._darkrate=self._getDarkRate()
chip.darkcurrent = self.getdarkcurrent()
chip._effGain = chip._gain
# this is used in the static mask, static mask name also defined
# here, must be done after outputNames
self._assignSignature(chip._chip)
# Convert the science data to electrons if specified by the user.
self.doUnitConversions() | def function[setInstrumentParameters, parameter[self, instrpars]]:
constant[ This method overrides the superclass to set default values into
the parameter dictionary, in case empty entries are provided.
]
variable[pri_header] assign[=] call[name[self]._image][constant[0]].header
name[self].proc_unit assign[=] call[name[instrpars]][constant[proc_unit]]
if call[name[self]._isNotValid, parameter[call[name[instrpars]][constant[gain]], call[name[instrpars]][constant[gnkeyword]]]] begin[:]
call[name[instrpars]][constant[gnkeyword]] assign[=] constant[ADCGAIN]
if call[name[self]._isNotValid, parameter[call[name[instrpars]][constant[rdnoise]], call[name[instrpars]][constant[rnkeyword]]]] begin[:]
call[name[instrpars]][constant[rnkeyword]] assign[=] constant[None]
if call[name[self]._isNotValid, parameter[call[name[instrpars]][constant[exptime]], call[name[instrpars]][constant[expkeyword]]]] begin[:]
call[name[instrpars]][constant[expkeyword]] assign[=] constant[EXPTIME]
for taget[name[chip]] in starred[call[name[self].returnAllChips, parameter[]]] begin[:]
name[chip]._gain assign[=] constant[5.4]
name[chip]._rdnoise assign[=] call[name[self].getInstrParameter, parameter[call[name[instrpars]][constant[rdnoise]], name[pri_header], call[name[instrpars]][constant[rnkeyword]]]]
name[chip]._exptime assign[=] call[name[self].getInstrParameter, parameter[call[name[instrpars]][constant[exptime]], name[pri_header], call[name[instrpars]][constant[expkeyword]]]]
if <ast.BoolOp object at 0x7da1b1b63e80> begin[:]
call[name[print], parameter[constant[ERROR: invalid instrument task parameter]]]
<ast.Raise object at 0x7da1b1b63850>
if compare[name[chip]._rdnoise is constant[None]] begin[:]
name[chip]._rdnoise assign[=] call[name[self]._getDefaultReadnoise, parameter[]]
name[chip]._darkrate assign[=] call[name[self]._getDarkRate, parameter[]]
name[chip].darkcurrent assign[=] call[name[self].getdarkcurrent, parameter[]]
name[chip]._effGain assign[=] name[chip]._gain
call[name[self]._assignSignature, parameter[name[chip]._chip]]
call[name[self].doUnitConversions, parameter[]] | keyword[def] identifier[setInstrumentParameters] ( identifier[self] , identifier[instrpars] ):
literal[string]
identifier[pri_header] = identifier[self] . identifier[_image] [ literal[int] ]. identifier[header]
identifier[self] . identifier[proc_unit] = identifier[instrpars] [ literal[string] ]
keyword[if] identifier[self] . identifier[_isNotValid] ( identifier[instrpars] [ literal[string] ], identifier[instrpars] [ literal[string] ]):
identifier[instrpars] [ literal[string] ]= literal[string]
keyword[if] identifier[self] . identifier[_isNotValid] ( identifier[instrpars] [ literal[string] ], identifier[instrpars] [ literal[string] ]):
identifier[instrpars] [ literal[string] ]= keyword[None]
keyword[if] identifier[self] . identifier[_isNotValid] ( identifier[instrpars] [ literal[string] ], identifier[instrpars] [ literal[string] ]):
identifier[instrpars] [ literal[string] ]= literal[string]
keyword[for] identifier[chip] keyword[in] identifier[self] . identifier[returnAllChips] ( identifier[extname] = identifier[self] . identifier[scienceExt] ):
identifier[chip] . identifier[_gain] = literal[int]
identifier[chip] . identifier[_rdnoise] = identifier[self] . identifier[getInstrParameter] (
identifier[instrpars] [ literal[string] ], identifier[pri_header] , identifier[instrpars] [ literal[string] ]
)
identifier[chip] . identifier[_exptime] = identifier[self] . identifier[getInstrParameter] (
identifier[instrpars] [ literal[string] ], identifier[pri_header] , identifier[instrpars] [ literal[string] ]
)
keyword[if] identifier[chip] . identifier[_gain] keyword[is] keyword[None] keyword[or] identifier[self] . identifier[_exptime] keyword[is] keyword[None] :
identifier[print] ( literal[string] )
keyword[raise] identifier[ValueError]
keyword[if] identifier[chip] . identifier[_rdnoise] keyword[is] keyword[None] :
identifier[chip] . identifier[_rdnoise] = identifier[self] . identifier[_getDefaultReadnoise] ()
identifier[chip] . identifier[_darkrate] = identifier[self] . identifier[_getDarkRate] ()
identifier[chip] . identifier[darkcurrent] = identifier[self] . identifier[getdarkcurrent] ()
identifier[chip] . identifier[_effGain] = identifier[chip] . identifier[_gain]
identifier[self] . identifier[_assignSignature] ( identifier[chip] . identifier[_chip] )
identifier[self] . identifier[doUnitConversions] () | def setInstrumentParameters(self, instrpars):
""" This method overrides the superclass to set default values into
the parameter dictionary, in case empty entries are provided.
"""
pri_header = self._image[0].header
self.proc_unit = instrpars['proc_unit']
if self._isNotValid(instrpars['gain'], instrpars['gnkeyword']):
instrpars['gnkeyword'] = 'ADCGAIN' #gain has been hardcoded below # depends on [control=['if'], data=[]]
if self._isNotValid(instrpars['rdnoise'], instrpars['rnkeyword']):
instrpars['rnkeyword'] = None # depends on [control=['if'], data=[]]
if self._isNotValid(instrpars['exptime'], instrpars['expkeyword']):
instrpars['expkeyword'] = 'EXPTIME' # depends on [control=['if'], data=[]]
for chip in self.returnAllChips(extname=self.scienceExt):
chip._gain = 5.4 #measured gain
chip._rdnoise = self.getInstrParameter(instrpars['rdnoise'], pri_header, instrpars['rnkeyword'])
chip._exptime = self.getInstrParameter(instrpars['exptime'], pri_header, instrpars['expkeyword'])
if chip._gain is None or self._exptime is None:
print('ERROR: invalid instrument task parameter')
raise ValueError # depends on [control=['if'], data=[]]
# We need to treat Read Noise as a special case since it is
# not populated in the NICMOS primary header
if chip._rdnoise is None:
chip._rdnoise = self._getDefaultReadnoise() # depends on [control=['if'], data=[]]
chip._darkrate = self._getDarkRate()
chip.darkcurrent = self.getdarkcurrent()
chip._effGain = chip._gain
# this is used in the static mask, static mask name also defined
# here, must be done after outputNames
self._assignSignature(chip._chip) # depends on [control=['for'], data=['chip']]
# Convert the science data to electrons if specified by the user.
self.doUnitConversions() |
def composition(mol):
"""Molecular composition in dict format
(ex. Glucose {'C': 6, 'H': 12, 'O': 6}).
"""
mol.require("Valence")
c = Counter()
for _, a in mol.atoms_iter():
c += a.composition()
return c | def function[composition, parameter[mol]]:
constant[Molecular composition in dict format
(ex. Glucose {'C': 6, 'H': 12, 'O': 6}).
]
call[name[mol].require, parameter[constant[Valence]]]
variable[c] assign[=] call[name[Counter], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b24e2dd0>, <ast.Name object at 0x7da1b24e37c0>]]] in starred[call[name[mol].atoms_iter, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da1b24e24a0>
return[name[c]] | keyword[def] identifier[composition] ( identifier[mol] ):
literal[string]
identifier[mol] . identifier[require] ( literal[string] )
identifier[c] = identifier[Counter] ()
keyword[for] identifier[_] , identifier[a] keyword[in] identifier[mol] . identifier[atoms_iter] ():
identifier[c] += identifier[a] . identifier[composition] ()
keyword[return] identifier[c] | def composition(mol):
"""Molecular composition in dict format
(ex. Glucose {'C': 6, 'H': 12, 'O': 6}).
"""
mol.require('Valence')
c = Counter()
for (_, a) in mol.atoms_iter():
c += a.composition() # depends on [control=['for'], data=[]]
return c |
def plot4(self, num):
"""
Plots the abundances of H-1, He-4, C-12 and O-16.
"""
self.plot_prof_1(num,'H-1',0.,5.,-5,0.)
self.plot_prof_1(num,'He-4',0.,5.,-5,0.)
self.plot_prof_1(num,'C-12',0.,5.,-5,0.)
self.plot_prof_1(num,'O-16',0.,5.,-5,0.)
pyl.legend(loc=3) | def function[plot4, parameter[self, num]]:
constant[
Plots the abundances of H-1, He-4, C-12 and O-16.
]
call[name[self].plot_prof_1, parameter[name[num], constant[H-1], constant[0.0], constant[5.0], <ast.UnaryOp object at 0x7da20c6aa800>, constant[0.0]]]
call[name[self].plot_prof_1, parameter[name[num], constant[He-4], constant[0.0], constant[5.0], <ast.UnaryOp object at 0x7da20c794730>, constant[0.0]]]
call[name[self].plot_prof_1, parameter[name[num], constant[C-12], constant[0.0], constant[5.0], <ast.UnaryOp object at 0x7da20c794c70>, constant[0.0]]]
call[name[self].plot_prof_1, parameter[name[num], constant[O-16], constant[0.0], constant[5.0], <ast.UnaryOp object at 0x7da20c7968f0>, constant[0.0]]]
call[name[pyl].legend, parameter[]] | keyword[def] identifier[plot4] ( identifier[self] , identifier[num] ):
literal[string]
identifier[self] . identifier[plot_prof_1] ( identifier[num] , literal[string] , literal[int] , literal[int] ,- literal[int] , literal[int] )
identifier[self] . identifier[plot_prof_1] ( identifier[num] , literal[string] , literal[int] , literal[int] ,- literal[int] , literal[int] )
identifier[self] . identifier[plot_prof_1] ( identifier[num] , literal[string] , literal[int] , literal[int] ,- literal[int] , literal[int] )
identifier[self] . identifier[plot_prof_1] ( identifier[num] , literal[string] , literal[int] , literal[int] ,- literal[int] , literal[int] )
identifier[pyl] . identifier[legend] ( identifier[loc] = literal[int] ) | def plot4(self, num):
"""
Plots the abundances of H-1, He-4, C-12 and O-16.
"""
self.plot_prof_1(num, 'H-1', 0.0, 5.0, -5, 0.0)
self.plot_prof_1(num, 'He-4', 0.0, 5.0, -5, 0.0)
self.plot_prof_1(num, 'C-12', 0.0, 5.0, -5, 0.0)
self.plot_prof_1(num, 'O-16', 0.0, 5.0, -5, 0.0)
pyl.legend(loc=3) |
def define_sub_network_cycle_constraints( subnetwork, snapshots, passive_branch_p, attribute):
""" Constructs cycle_constraints for a particular subnetwork
"""
sub_network_cycle_constraints = {}
sub_network_cycle_index = []
matrix = subnetwork.C.tocsc()
branches = subnetwork.branches()
for col_j in range( matrix.shape[1] ):
cycle_is = matrix.getcol(col_j).nonzero()[0]
if len(cycle_is) == 0: continue
sub_network_cycle_index.append((subnetwork.name, col_j))
branch_idx_attributes = []
for cycle_i in cycle_is:
branch_idx = branches.index[cycle_i]
attribute_value = 1e5 * branches.at[ branch_idx, attribute] * subnetwork.C[ cycle_i, col_j]
branch_idx_attributes.append( (branch_idx, attribute_value))
for snapshot in snapshots:
expression_list = [ (attribute_value,
passive_branch_p[branch_idx[0], branch_idx[1], snapshot]) for (branch_idx, attribute_value) in branch_idx_attributes]
lhs = LExpression(expression_list)
sub_network_cycle_constraints[subnetwork.name,col_j,snapshot] = LConstraint(lhs,"==",LExpression())
return( sub_network_cycle_index, sub_network_cycle_constraints) | def function[define_sub_network_cycle_constraints, parameter[subnetwork, snapshots, passive_branch_p, attribute]]:
constant[ Constructs cycle_constraints for a particular subnetwork
]
variable[sub_network_cycle_constraints] assign[=] dictionary[[], []]
variable[sub_network_cycle_index] assign[=] list[[]]
variable[matrix] assign[=] call[name[subnetwork].C.tocsc, parameter[]]
variable[branches] assign[=] call[name[subnetwork].branches, parameter[]]
for taget[name[col_j]] in starred[call[name[range], parameter[call[name[matrix].shape][constant[1]]]]] begin[:]
variable[cycle_is] assign[=] call[call[call[name[matrix].getcol, parameter[name[col_j]]].nonzero, parameter[]]][constant[0]]
if compare[call[name[len], parameter[name[cycle_is]]] equal[==] constant[0]] begin[:]
continue
call[name[sub_network_cycle_index].append, parameter[tuple[[<ast.Attribute object at 0x7da18c4cd690>, <ast.Name object at 0x7da18c4cd5d0>]]]]
variable[branch_idx_attributes] assign[=] list[[]]
for taget[name[cycle_i]] in starred[name[cycle_is]] begin[:]
variable[branch_idx] assign[=] call[name[branches].index][name[cycle_i]]
variable[attribute_value] assign[=] binary_operation[binary_operation[constant[100000.0] * call[name[branches].at][tuple[[<ast.Name object at 0x7da18c4ccc10>, <ast.Name object at 0x7da18c4cd8d0>]]]] * call[name[subnetwork].C][tuple[[<ast.Name object at 0x7da18c4cf340>, <ast.Name object at 0x7da18c4cd180>]]]]
call[name[branch_idx_attributes].append, parameter[tuple[[<ast.Name object at 0x7da18c4ceb30>, <ast.Name object at 0x7da18c4cc2b0>]]]]
for taget[name[snapshot]] in starred[name[snapshots]] begin[:]
variable[expression_list] assign[=] <ast.ListComp object at 0x7da18c4cd9f0>
variable[lhs] assign[=] call[name[LExpression], parameter[name[expression_list]]]
call[name[sub_network_cycle_constraints]][tuple[[<ast.Attribute object at 0x7da18c4cdc30>, <ast.Name object at 0x7da18c4cc5e0>, <ast.Name object at 0x7da18c4cffd0>]]] assign[=] call[name[LConstraint], parameter[name[lhs], constant[==], call[name[LExpression], parameter[]]]]
return[tuple[[<ast.Name object at 0x7da18c4cc0a0>, <ast.Name object at 0x7da18c4ce0b0>]]] | keyword[def] identifier[define_sub_network_cycle_constraints] ( identifier[subnetwork] , identifier[snapshots] , identifier[passive_branch_p] , identifier[attribute] ):
literal[string]
identifier[sub_network_cycle_constraints] ={}
identifier[sub_network_cycle_index] =[]
identifier[matrix] = identifier[subnetwork] . identifier[C] . identifier[tocsc] ()
identifier[branches] = identifier[subnetwork] . identifier[branches] ()
keyword[for] identifier[col_j] keyword[in] identifier[range] ( identifier[matrix] . identifier[shape] [ literal[int] ]):
identifier[cycle_is] = identifier[matrix] . identifier[getcol] ( identifier[col_j] ). identifier[nonzero] ()[ literal[int] ]
keyword[if] identifier[len] ( identifier[cycle_is] )== literal[int] : keyword[continue]
identifier[sub_network_cycle_index] . identifier[append] (( identifier[subnetwork] . identifier[name] , identifier[col_j] ))
identifier[branch_idx_attributes] =[]
keyword[for] identifier[cycle_i] keyword[in] identifier[cycle_is] :
identifier[branch_idx] = identifier[branches] . identifier[index] [ identifier[cycle_i] ]
identifier[attribute_value] = literal[int] * identifier[branches] . identifier[at] [ identifier[branch_idx] , identifier[attribute] ]* identifier[subnetwork] . identifier[C] [ identifier[cycle_i] , identifier[col_j] ]
identifier[branch_idx_attributes] . identifier[append] (( identifier[branch_idx] , identifier[attribute_value] ))
keyword[for] identifier[snapshot] keyword[in] identifier[snapshots] :
identifier[expression_list] =[( identifier[attribute_value] ,
identifier[passive_branch_p] [ identifier[branch_idx] [ literal[int] ], identifier[branch_idx] [ literal[int] ], identifier[snapshot] ]) keyword[for] ( identifier[branch_idx] , identifier[attribute_value] ) keyword[in] identifier[branch_idx_attributes] ]
identifier[lhs] = identifier[LExpression] ( identifier[expression_list] )
identifier[sub_network_cycle_constraints] [ identifier[subnetwork] . identifier[name] , identifier[col_j] , identifier[snapshot] ]= identifier[LConstraint] ( identifier[lhs] , literal[string] , identifier[LExpression] ())
keyword[return] ( identifier[sub_network_cycle_index] , identifier[sub_network_cycle_constraints] ) | def define_sub_network_cycle_constraints(subnetwork, snapshots, passive_branch_p, attribute):
""" Constructs cycle_constraints for a particular subnetwork
"""
sub_network_cycle_constraints = {}
sub_network_cycle_index = []
matrix = subnetwork.C.tocsc()
branches = subnetwork.branches()
for col_j in range(matrix.shape[1]):
cycle_is = matrix.getcol(col_j).nonzero()[0]
if len(cycle_is) == 0:
continue # depends on [control=['if'], data=[]]
sub_network_cycle_index.append((subnetwork.name, col_j))
branch_idx_attributes = []
for cycle_i in cycle_is:
branch_idx = branches.index[cycle_i]
attribute_value = 100000.0 * branches.at[branch_idx, attribute] * subnetwork.C[cycle_i, col_j]
branch_idx_attributes.append((branch_idx, attribute_value)) # depends on [control=['for'], data=['cycle_i']]
for snapshot in snapshots:
expression_list = [(attribute_value, passive_branch_p[branch_idx[0], branch_idx[1], snapshot]) for (branch_idx, attribute_value) in branch_idx_attributes]
lhs = LExpression(expression_list)
sub_network_cycle_constraints[subnetwork.name, col_j, snapshot] = LConstraint(lhs, '==', LExpression()) # depends on [control=['for'], data=['snapshot']] # depends on [control=['for'], data=['col_j']]
return (sub_network_cycle_index, sub_network_cycle_constraints) |
def clear_vdp_vsi(self, port_uuid):
"""Stores the vNIC specific info for VDP Refresh.
:param uuid: vNIC UUID
"""
try:
LOG.debug("Clearing VDP VSI MAC %(mac)s UUID %(uuid)s",
{'mac': self.vdp_vif_map[port_uuid].get('mac'),
'uuid': self.vdp_vif_map[port_uuid].get('vsiid')})
del self.vdp_vif_map[port_uuid]
except Exception:
LOG.error("VSI does not exist")
self.clear_oui(port_uuid) | def function[clear_vdp_vsi, parameter[self, port_uuid]]:
constant[Stores the vNIC specific info for VDP Refresh.
:param uuid: vNIC UUID
]
<ast.Try object at 0x7da2041dbeb0>
call[name[self].clear_oui, parameter[name[port_uuid]]] | keyword[def] identifier[clear_vdp_vsi] ( identifier[self] , identifier[port_uuid] ):
literal[string]
keyword[try] :
identifier[LOG] . identifier[debug] ( literal[string] ,
{ literal[string] : identifier[self] . identifier[vdp_vif_map] [ identifier[port_uuid] ]. identifier[get] ( literal[string] ),
literal[string] : identifier[self] . identifier[vdp_vif_map] [ identifier[port_uuid] ]. identifier[get] ( literal[string] )})
keyword[del] identifier[self] . identifier[vdp_vif_map] [ identifier[port_uuid] ]
keyword[except] identifier[Exception] :
identifier[LOG] . identifier[error] ( literal[string] )
identifier[self] . identifier[clear_oui] ( identifier[port_uuid] ) | def clear_vdp_vsi(self, port_uuid):
"""Stores the vNIC specific info for VDP Refresh.
:param uuid: vNIC UUID
"""
try:
LOG.debug('Clearing VDP VSI MAC %(mac)s UUID %(uuid)s', {'mac': self.vdp_vif_map[port_uuid].get('mac'), 'uuid': self.vdp_vif_map[port_uuid].get('vsiid')})
del self.vdp_vif_map[port_uuid] # depends on [control=['try'], data=[]]
except Exception:
LOG.error('VSI does not exist') # depends on [control=['except'], data=[]]
self.clear_oui(port_uuid) |
def _validate_type_scalar(self, value):
""" Is not a list or a dict """
if isinstance(
value, _int_types + (_str_type, float, date, datetime, bool)
):
return True | def function[_validate_type_scalar, parameter[self, value]]:
constant[ Is not a list or a dict ]
if call[name[isinstance], parameter[name[value], binary_operation[name[_int_types] + tuple[[<ast.Name object at 0x7da1b1712260>, <ast.Name object at 0x7da1b17128f0>, <ast.Name object at 0x7da1b1712200>, <ast.Name object at 0x7da1b1713370>, <ast.Name object at 0x7da1b17132b0>]]]]] begin[:]
return[constant[True]] | keyword[def] identifier[_validate_type_scalar] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] (
identifier[value] , identifier[_int_types] +( identifier[_str_type] , identifier[float] , identifier[date] , identifier[datetime] , identifier[bool] )
):
keyword[return] keyword[True] | def _validate_type_scalar(self, value):
""" Is not a list or a dict """
if isinstance(value, _int_types + (_str_type, float, date, datetime, bool)):
return True # depends on [control=['if'], data=[]] |
def save_model(self, net):
"""Save the model.
This function saves some or all of the following:
- model parameters;
- optimizer state;
- training history;
- entire model object.
"""
if self.f_params is not None:
f = self._format_target(net, self.f_params, -1)
self._save_params(f, net, "f_params", "model parameters")
if self.f_optimizer is not None:
f = self._format_target(net, self.f_optimizer, -1)
self._save_params(f, net, "f_optimizer", "optimizer state")
if self.f_history is not None:
f = self.f_history_
self._save_params(f, net, "f_history", "history")
if self.f_pickle:
f_pickle = self._format_target(net, self.f_pickle, -1)
with open_file_like(f_pickle, 'wb') as f:
pickle.dump(net, f) | def function[save_model, parameter[self, net]]:
constant[Save the model.
This function saves some or all of the following:
- model parameters;
- optimizer state;
- training history;
- entire model object.
]
if compare[name[self].f_params is_not constant[None]] begin[:]
variable[f] assign[=] call[name[self]._format_target, parameter[name[net], name[self].f_params, <ast.UnaryOp object at 0x7da18eb572b0>]]
call[name[self]._save_params, parameter[name[f], name[net], constant[f_params], constant[model parameters]]]
if compare[name[self].f_optimizer is_not constant[None]] begin[:]
variable[f] assign[=] call[name[self]._format_target, parameter[name[net], name[self].f_optimizer, <ast.UnaryOp object at 0x7da18eb54640>]]
call[name[self]._save_params, parameter[name[f], name[net], constant[f_optimizer], constant[optimizer state]]]
if compare[name[self].f_history is_not constant[None]] begin[:]
variable[f] assign[=] name[self].f_history_
call[name[self]._save_params, parameter[name[f], name[net], constant[f_history], constant[history]]]
if name[self].f_pickle begin[:]
variable[f_pickle] assign[=] call[name[self]._format_target, parameter[name[net], name[self].f_pickle, <ast.UnaryOp object at 0x7da18eb54f10>]]
with call[name[open_file_like], parameter[name[f_pickle], constant[wb]]] begin[:]
call[name[pickle].dump, parameter[name[net], name[f]]] | keyword[def] identifier[save_model] ( identifier[self] , identifier[net] ):
literal[string]
keyword[if] identifier[self] . identifier[f_params] keyword[is] keyword[not] keyword[None] :
identifier[f] = identifier[self] . identifier[_format_target] ( identifier[net] , identifier[self] . identifier[f_params] ,- literal[int] )
identifier[self] . identifier[_save_params] ( identifier[f] , identifier[net] , literal[string] , literal[string] )
keyword[if] identifier[self] . identifier[f_optimizer] keyword[is] keyword[not] keyword[None] :
identifier[f] = identifier[self] . identifier[_format_target] ( identifier[net] , identifier[self] . identifier[f_optimizer] ,- literal[int] )
identifier[self] . identifier[_save_params] ( identifier[f] , identifier[net] , literal[string] , literal[string] )
keyword[if] identifier[self] . identifier[f_history] keyword[is] keyword[not] keyword[None] :
identifier[f] = identifier[self] . identifier[f_history_]
identifier[self] . identifier[_save_params] ( identifier[f] , identifier[net] , literal[string] , literal[string] )
keyword[if] identifier[self] . identifier[f_pickle] :
identifier[f_pickle] = identifier[self] . identifier[_format_target] ( identifier[net] , identifier[self] . identifier[f_pickle] ,- literal[int] )
keyword[with] identifier[open_file_like] ( identifier[f_pickle] , literal[string] ) keyword[as] identifier[f] :
identifier[pickle] . identifier[dump] ( identifier[net] , identifier[f] ) | def save_model(self, net):
"""Save the model.
This function saves some or all of the following:
- model parameters;
- optimizer state;
- training history;
- entire model object.
"""
if self.f_params is not None:
f = self._format_target(net, self.f_params, -1)
self._save_params(f, net, 'f_params', 'model parameters') # depends on [control=['if'], data=[]]
if self.f_optimizer is not None:
f = self._format_target(net, self.f_optimizer, -1)
self._save_params(f, net, 'f_optimizer', 'optimizer state') # depends on [control=['if'], data=[]]
if self.f_history is not None:
f = self.f_history_
self._save_params(f, net, 'f_history', 'history') # depends on [control=['if'], data=[]]
if self.f_pickle:
f_pickle = self._format_target(net, self.f_pickle, -1)
with open_file_like(f_pickle, 'wb') as f:
pickle.dump(net, f) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]] |
def prepare(self, variables):
"""Initialize all steps in this recipe using their parameters.
Args:
variables (dict): A dictionary of global variable definitions
that may be used to replace or augment the parameters given
to each step.
Returns:
list of RecipeActionObject like instances: The list of instantiated
steps that can be used to execute this recipe.
"""
initializedsteps = []
if variables is None:
variables = dict()
for step, params, _resources, _files in self.steps:
new_params = _complete_parameters(params, variables)
initializedsteps.append(step(new_params))
return initializedsteps | def function[prepare, parameter[self, variables]]:
constant[Initialize all steps in this recipe using their parameters.
Args:
variables (dict): A dictionary of global variable definitions
that may be used to replace or augment the parameters given
to each step.
Returns:
list of RecipeActionObject like instances: The list of instantiated
steps that can be used to execute this recipe.
]
variable[initializedsteps] assign[=] list[[]]
if compare[name[variables] is constant[None]] begin[:]
variable[variables] assign[=] call[name[dict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20c992c20>, <ast.Name object at 0x7da20c990130>, <ast.Name object at 0x7da20c990760>, <ast.Name object at 0x7da20c992ec0>]]] in starred[name[self].steps] begin[:]
variable[new_params] assign[=] call[name[_complete_parameters], parameter[name[params], name[variables]]]
call[name[initializedsteps].append, parameter[call[name[step], parameter[name[new_params]]]]]
return[name[initializedsteps]] | keyword[def] identifier[prepare] ( identifier[self] , identifier[variables] ):
literal[string]
identifier[initializedsteps] =[]
keyword[if] identifier[variables] keyword[is] keyword[None] :
identifier[variables] = identifier[dict] ()
keyword[for] identifier[step] , identifier[params] , identifier[_resources] , identifier[_files] keyword[in] identifier[self] . identifier[steps] :
identifier[new_params] = identifier[_complete_parameters] ( identifier[params] , identifier[variables] )
identifier[initializedsteps] . identifier[append] ( identifier[step] ( identifier[new_params] ))
keyword[return] identifier[initializedsteps] | def prepare(self, variables):
"""Initialize all steps in this recipe using their parameters.
Args:
variables (dict): A dictionary of global variable definitions
that may be used to replace or augment the parameters given
to each step.
Returns:
list of RecipeActionObject like instances: The list of instantiated
steps that can be used to execute this recipe.
"""
initializedsteps = []
if variables is None:
variables = dict() # depends on [control=['if'], data=['variables']]
for (step, params, _resources, _files) in self.steps:
new_params = _complete_parameters(params, variables)
initializedsteps.append(step(new_params)) # depends on [control=['for'], data=[]]
return initializedsteps |
def get_mnist(sc, data_type="train", location="/tmp/mnist"):
"""
Get mnist dataset and parallelize into RDDs.
Data would be downloaded automatically if it doesn't present at the specific location.
:param sc: SparkContext.
:param data_type: "train" for training data and "test" for testing data.
:param location: Location to store mnist dataset.
:return: RDD of (features: ndarray, label: ndarray).
"""
(images, labels) = mnist.read_data_sets(location, data_type)
images = sc.parallelize(images)
labels = sc.parallelize(labels + 1) # Target start from 1 in BigDL
record = images.zip(labels)
return record | def function[get_mnist, parameter[sc, data_type, location]]:
constant[
Get mnist dataset and parallelize into RDDs.
Data would be downloaded automatically if it doesn't present at the specific location.
:param sc: SparkContext.
:param data_type: "train" for training data and "test" for testing data.
:param location: Location to store mnist dataset.
:return: RDD of (features: ndarray, label: ndarray).
]
<ast.Tuple object at 0x7da2054a5630> assign[=] call[name[mnist].read_data_sets, parameter[name[location], name[data_type]]]
variable[images] assign[=] call[name[sc].parallelize, parameter[name[images]]]
variable[labels] assign[=] call[name[sc].parallelize, parameter[binary_operation[name[labels] + constant[1]]]]
variable[record] assign[=] call[name[images].zip, parameter[name[labels]]]
return[name[record]] | keyword[def] identifier[get_mnist] ( identifier[sc] , identifier[data_type] = literal[string] , identifier[location] = literal[string] ):
literal[string]
( identifier[images] , identifier[labels] )= identifier[mnist] . identifier[read_data_sets] ( identifier[location] , identifier[data_type] )
identifier[images] = identifier[sc] . identifier[parallelize] ( identifier[images] )
identifier[labels] = identifier[sc] . identifier[parallelize] ( identifier[labels] + literal[int] )
identifier[record] = identifier[images] . identifier[zip] ( identifier[labels] )
keyword[return] identifier[record] | def get_mnist(sc, data_type='train', location='/tmp/mnist'):
"""
Get mnist dataset and parallelize into RDDs.
Data would be downloaded automatically if it doesn't present at the specific location.
:param sc: SparkContext.
:param data_type: "train" for training data and "test" for testing data.
:param location: Location to store mnist dataset.
:return: RDD of (features: ndarray, label: ndarray).
"""
(images, labels) = mnist.read_data_sets(location, data_type)
images = sc.parallelize(images)
labels = sc.parallelize(labels + 1) # Target start from 1 in BigDL
record = images.zip(labels)
return record |
def hacking_python3x_except_compatible(logical_line, noqa):
r"""Check for except statements to be Python 3.x compatible
As of Python 3.x, the construct 'except x,y:' has been removed.
Use 'except x as y:' instead.
Okay: try:\n pass\nexcept Exception:\n pass
Okay: try:\n pass\nexcept (Exception, AttributeError):\n pass
H231: try:\n pass\nexcept AttributeError, e:\n pass
Okay: try:\n pass\nexcept AttributeError, e: # noqa\n pass
"""
if noqa:
return
def is_old_style_except(logical_line):
return (',' in logical_line and
')' not in logical_line.rpartition(',')[2])
if (logical_line.startswith("except ") and
logical_line.endswith(':') and
is_old_style_except(logical_line)):
yield 0, "H231: Python 3.x incompatible 'except x,y:' construct" | def function[hacking_python3x_except_compatible, parameter[logical_line, noqa]]:
constant[Check for except statements to be Python 3.x compatible
As of Python 3.x, the construct 'except x,y:' has been removed.
Use 'except x as y:' instead.
Okay: try:\n pass\nexcept Exception:\n pass
Okay: try:\n pass\nexcept (Exception, AttributeError):\n pass
H231: try:\n pass\nexcept AttributeError, e:\n pass
Okay: try:\n pass\nexcept AttributeError, e: # noqa\n pass
]
if name[noqa] begin[:]
return[None]
def function[is_old_style_except, parameter[logical_line]]:
return[<ast.BoolOp object at 0x7da18ede4bb0>]
if <ast.BoolOp object at 0x7da1b04b6680> begin[:]
<ast.Yield object at 0x7da1b04b4a90> | keyword[def] identifier[hacking_python3x_except_compatible] ( identifier[logical_line] , identifier[noqa] ):
literal[string]
keyword[if] identifier[noqa] :
keyword[return]
keyword[def] identifier[is_old_style_except] ( identifier[logical_line] ):
keyword[return] ( literal[string] keyword[in] identifier[logical_line] keyword[and]
literal[string] keyword[not] keyword[in] identifier[logical_line] . identifier[rpartition] ( literal[string] )[ literal[int] ])
keyword[if] ( identifier[logical_line] . identifier[startswith] ( literal[string] ) keyword[and]
identifier[logical_line] . identifier[endswith] ( literal[string] ) keyword[and]
identifier[is_old_style_except] ( identifier[logical_line] )):
keyword[yield] literal[int] , literal[string] | def hacking_python3x_except_compatible(logical_line, noqa):
"""Check for except statements to be Python 3.x compatible
As of Python 3.x, the construct 'except x,y:' has been removed.
Use 'except x as y:' instead.
Okay: try:\\n pass\\nexcept Exception:\\n pass
Okay: try:\\n pass\\nexcept (Exception, AttributeError):\\n pass
H231: try:\\n pass\\nexcept AttributeError, e:\\n pass
Okay: try:\\n pass\\nexcept AttributeError, e: # noqa\\n pass
"""
if noqa:
return # depends on [control=['if'], data=[]]
def is_old_style_except(logical_line):
return ',' in logical_line and ')' not in logical_line.rpartition(',')[2]
if logical_line.startswith('except ') and logical_line.endswith(':') and is_old_style_except(logical_line):
yield (0, "H231: Python 3.x incompatible 'except x,y:' construct") # depends on [control=['if'], data=[]] |
def community_topic_create(self, data, **kwargs):
    """Create a Help Center community topic.

    API reference:
    https://developer.zendesk.com/rest_api/docs/help_center/topics#create-topic
    """
    endpoint = "/api/v2/community/topics.json"
    return self.call(endpoint, method="POST", data=data, **kwargs)
constant[https://developer.zendesk.com/rest_api/docs/help_center/topics#create-topic]
variable[api_path] assign[=] constant[/api/v2/community/topics.json]
return[call[name[self].call, parameter[name[api_path]]]] | keyword[def] identifier[community_topic_create] ( identifier[self] , identifier[data] ,** identifier[kwargs] ):
literal[string]
identifier[api_path] = literal[string]
keyword[return] identifier[self] . identifier[call] ( identifier[api_path] , identifier[method] = literal[string] , identifier[data] = identifier[data] ,** identifier[kwargs] ) | def community_topic_create(self, data, **kwargs):
"""https://developer.zendesk.com/rest_api/docs/help_center/topics#create-topic"""
api_path = '/api/v2/community/topics.json'
return self.call(api_path, method='POST', data=data, **kwargs) |
def get_path(self, prefix=None, filename=None):
    """Compose data location path.

    Joins *prefix* (defaulting to the executor's DATA_DIR setting) with
    this object's ``subpath`` and, when given, a trailing *filename*.
    """
    base = prefix if prefix else settings.FLOW_EXECUTOR['DATA_DIR']
    parts = [base, self.subpath]
    if filename:
        parts.append(filename)
    return os.path.join(*parts)
constant[Compose data location path.]
variable[prefix] assign[=] <ast.BoolOp object at 0x7da18c4cdf30>
variable[path] assign[=] call[name[os].path.join, parameter[name[prefix], name[self].subpath]]
if name[filename] begin[:]
variable[path] assign[=] call[name[os].path.join, parameter[name[path], name[filename]]]
return[name[path]] | keyword[def] identifier[get_path] ( identifier[self] , identifier[prefix] = keyword[None] , identifier[filename] = keyword[None] ):
literal[string]
identifier[prefix] = identifier[prefix] keyword[or] identifier[settings] . identifier[FLOW_EXECUTOR] [ literal[string] ]
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[prefix] , identifier[self] . identifier[subpath] )
keyword[if] identifier[filename] :
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[filename] )
keyword[return] identifier[path] | def get_path(self, prefix=None, filename=None):
"""Compose data location path."""
prefix = prefix or settings.FLOW_EXECUTOR['DATA_DIR']
path = os.path.join(prefix, self.subpath)
if filename:
path = os.path.join(path, filename) # depends on [control=['if'], data=[]]
return path |
def pickFilepath( self ):
    """
    Prompts the user to select a filepath from the system based on the \
    current filepath mode.

    Depending on filepathMode(), this opens a save-file, open-file,
    open-multiple-files, or existing-directory dialog.  A single selection
    is applied via setFilepath(); a multiple selection via setFilepaths().
    """
    mode = self.filepathMode()
    filepath = ''
    filepaths = []
    # Start browsing from the currently entered path; fall back to the
    # process working directory when the field is empty.
    curr_dir = nativestring(self._filepathEdit.text())
    if ( not curr_dir ):
        curr_dir = QDir.currentPath()
    if mode == XFilepathEdit.Mode.SaveFile:
        filepath = QFileDialog.getSaveFileName( self,
                                                self.windowTitle(),
                                                curr_dir,
                                                self.filepathTypes() )
    elif mode == XFilepathEdit.Mode.OpenFile:
        filepath = QFileDialog.getOpenFileName( self,
                                                self.windowTitle(),
                                                curr_dir,
                                                self.filepathTypes() )
    elif mode == XFilepathEdit.Mode.OpenFiles:
        # Multi-file selection populates 'filepaths' instead of 'filepath'.
        filepaths = QFileDialog.getOpenFileNames( self,
                                                  self.windowTitle(),
                                                  curr_dir,
                                                  self.filepathTypes() )
    else:
        # Any other mode: prompt for an existing directory.
        filepath = QFileDialog.getExistingDirectory( self,
                                                     self.windowTitle(),
                                                     curr_dir )
    if filepath:
        # Some Qt bindings (PyQt5/PySide2) return (path, selected_filter)
        # tuples from the file dialogs; unwrap to just the path.
        if type(filepath) == tuple:
            filepath = filepath[0]
        self.setFilepath(nativestring(filepath))
    elif filepaths:
        # NOTE(review): on Python 3, map() returns an iterator, not a
        # list -- confirm setFilepaths() accepts a non-list iterable.
        self.setFilepaths(map(str, filepaths))
constant[
Prompts the user to select a filepath from the system based on the current filepath mode.
]
variable[mode] assign[=] call[name[self].filepathMode, parameter[]]
variable[filepath] assign[=] constant[]
variable[filepaths] assign[=] list[[]]
variable[curr_dir] assign[=] call[name[nativestring], parameter[call[name[self]._filepathEdit.text, parameter[]]]]
if <ast.UnaryOp object at 0x7da18fe91c00> begin[:]
variable[curr_dir] assign[=] call[name[QDir].currentPath, parameter[]]
if compare[name[mode] equal[==] name[XFilepathEdit].Mode.SaveFile] begin[:]
variable[filepath] assign[=] call[name[QFileDialog].getSaveFileName, parameter[name[self], call[name[self].windowTitle, parameter[]], name[curr_dir], call[name[self].filepathTypes, parameter[]]]]
if name[filepath] begin[:]
if compare[call[name[type], parameter[name[filepath]]] equal[==] name[tuple]] begin[:]
variable[filepath] assign[=] call[name[filepath]][constant[0]]
call[name[self].setFilepath, parameter[call[name[nativestring], parameter[name[filepath]]]]] | keyword[def] identifier[pickFilepath] ( identifier[self] ):
literal[string]
identifier[mode] = identifier[self] . identifier[filepathMode] ()
identifier[filepath] = literal[string]
identifier[filepaths] =[]
identifier[curr_dir] = identifier[nativestring] ( identifier[self] . identifier[_filepathEdit] . identifier[text] ())
keyword[if] ( keyword[not] identifier[curr_dir] ):
identifier[curr_dir] = identifier[QDir] . identifier[currentPath] ()
keyword[if] identifier[mode] == identifier[XFilepathEdit] . identifier[Mode] . identifier[SaveFile] :
identifier[filepath] = identifier[QFileDialog] . identifier[getSaveFileName] ( identifier[self] ,
identifier[self] . identifier[windowTitle] (),
identifier[curr_dir] ,
identifier[self] . identifier[filepathTypes] ())
keyword[elif] identifier[mode] == identifier[XFilepathEdit] . identifier[Mode] . identifier[OpenFile] :
identifier[filepath] = identifier[QFileDialog] . identifier[getOpenFileName] ( identifier[self] ,
identifier[self] . identifier[windowTitle] (),
identifier[curr_dir] ,
identifier[self] . identifier[filepathTypes] ())
keyword[elif] identifier[mode] == identifier[XFilepathEdit] . identifier[Mode] . identifier[OpenFiles] :
identifier[filepaths] = identifier[QFileDialog] . identifier[getOpenFileNames] ( identifier[self] ,
identifier[self] . identifier[windowTitle] (),
identifier[curr_dir] ,
identifier[self] . identifier[filepathTypes] ())
keyword[else] :
identifier[filepath] = identifier[QFileDialog] . identifier[getExistingDirectory] ( identifier[self] ,
identifier[self] . identifier[windowTitle] (),
identifier[curr_dir] )
keyword[if] identifier[filepath] :
keyword[if] identifier[type] ( identifier[filepath] )== identifier[tuple] :
identifier[filepath] = identifier[filepath] [ literal[int] ]
identifier[self] . identifier[setFilepath] ( identifier[nativestring] ( identifier[filepath] ))
keyword[elif] identifier[filepaths] :
identifier[self] . identifier[setFilepaths] ( identifier[map] ( identifier[str] , identifier[filepaths] )) | def pickFilepath(self):
"""
Prompts the user to select a filepath from the system based on the current filepath mode.
"""
mode = self.filepathMode()
filepath = ''
filepaths = []
curr_dir = nativestring(self._filepathEdit.text())
if not curr_dir:
curr_dir = QDir.currentPath() # depends on [control=['if'], data=[]]
if mode == XFilepathEdit.Mode.SaveFile:
filepath = QFileDialog.getSaveFileName(self, self.windowTitle(), curr_dir, self.filepathTypes()) # depends on [control=['if'], data=[]]
elif mode == XFilepathEdit.Mode.OpenFile:
filepath = QFileDialog.getOpenFileName(self, self.windowTitle(), curr_dir, self.filepathTypes()) # depends on [control=['if'], data=[]]
elif mode == XFilepathEdit.Mode.OpenFiles:
filepaths = QFileDialog.getOpenFileNames(self, self.windowTitle(), curr_dir, self.filepathTypes()) # depends on [control=['if'], data=[]]
else:
filepath = QFileDialog.getExistingDirectory(self, self.windowTitle(), curr_dir)
if filepath:
if type(filepath) == tuple:
filepath = filepath[0] # depends on [control=['if'], data=[]]
self.setFilepath(nativestring(filepath)) # depends on [control=['if'], data=[]]
elif filepaths:
self.setFilepaths(map(str, filepaths)) # depends on [control=['if'], data=[]] |
def exact_cftime_datetime_difference(a, b):
    """Compute ``b - a`` exactly, avoiding floating-point rounding error.

    Each date is split into a whole-second part and a microsecond
    remainder.  The whole-second difference is, by construction, a round
    number of seconds, so it can be computed with ordinary
    cftime.datetime arithmetic and rounded to the nearest second.  The
    microsecond remainders are then differenced separately and added back
    into the resulting timedelta.

    Parameters
    ----------
    a : cftime.datetime
        Start datetime.
    b : cftime.datetime
        End datetime.

    Returns
    -------
    datetime.timedelta
        The exact difference ``b - a``.
    """
    whole_second_delta = b.replace(microsecond=0) - a.replace(microsecond=0)
    whole_seconds = int(round(whole_second_delta.total_seconds()))
    micro_remainder = b.microsecond - a.microsecond
    return datetime.timedelta(seconds=whole_seconds,
                              microseconds=micro_remainder)
constant[Exact computation of b - a
Assumes:
a = a_0 + a_m
b = b_0 + b_m
Here a_0, and b_0 represent the input dates rounded
down to the nearest second, and a_m, and b_m represent
the remaining microseconds associated with date a and
date b.
We can then express the value of b - a as:
b - a = (b_0 + b_m) - (a_0 + a_m) = b_0 - a_0 + b_m - a_m
By construction, we know that b_0 - a_0 must be a round number
of seconds. Therefore we can take the result of b_0 - a_0 using
ordinary cftime.datetime arithmetic and round to the nearest
second. b_m - a_m is the remainder, in microseconds, and we
can simply add this to the rounded timedelta.
Parameters
----------
a : cftime.datetime
Input datetime
b : cftime.datetime
Input datetime
Returns
-------
datetime.timedelta
]
variable[seconds] assign[=] binary_operation[call[name[b].replace, parameter[]] - call[name[a].replace, parameter[]]]
variable[seconds] assign[=] call[name[int], parameter[call[name[round], parameter[call[name[seconds].total_seconds, parameter[]]]]]]
variable[microseconds] assign[=] binary_operation[name[b].microsecond - name[a].microsecond]
return[call[name[datetime].timedelta, parameter[]]] | keyword[def] identifier[exact_cftime_datetime_difference] ( identifier[a] , identifier[b] ):
literal[string]
identifier[seconds] = identifier[b] . identifier[replace] ( identifier[microsecond] = literal[int] )- identifier[a] . identifier[replace] ( identifier[microsecond] = literal[int] )
identifier[seconds] = identifier[int] ( identifier[round] ( identifier[seconds] . identifier[total_seconds] ()))
identifier[microseconds] = identifier[b] . identifier[microsecond] - identifier[a] . identifier[microsecond]
keyword[return] identifier[datetime] . identifier[timedelta] ( identifier[seconds] = identifier[seconds] , identifier[microseconds] = identifier[microseconds] ) | def exact_cftime_datetime_difference(a, b):
"""Exact computation of b - a
Assumes:
a = a_0 + a_m
b = b_0 + b_m
Here a_0, and b_0 represent the input dates rounded
down to the nearest second, and a_m, and b_m represent
the remaining microseconds associated with date a and
date b.
We can then express the value of b - a as:
b - a = (b_0 + b_m) - (a_0 + a_m) = b_0 - a_0 + b_m - a_m
By construction, we know that b_0 - a_0 must be a round number
of seconds. Therefore we can take the result of b_0 - a_0 using
ordinary cftime.datetime arithmetic and round to the nearest
second. b_m - a_m is the remainder, in microseconds, and we
can simply add this to the rounded timedelta.
Parameters
----------
a : cftime.datetime
Input datetime
b : cftime.datetime
Input datetime
Returns
-------
datetime.timedelta
"""
seconds = b.replace(microsecond=0) - a.replace(microsecond=0)
seconds = int(round(seconds.total_seconds()))
microseconds = b.microsecond - a.microsecond
return datetime.timedelta(seconds=seconds, microseconds=microseconds) |
def set_primary_keys_auto(self, tables=None):
    """
    Create primary keys for every table in the connected database.

    Each table is checked for an existing primary key.  Tables without
    one are resolved by ``set_primary_key_auto``, which either promotes a
    unique column or creates a new auto-incrementing 'ID' column.

    :param tables: Optional iterable of table names; defaults to every
        table in the database.
    :return: List of (table, primary_key) tuples.
    """
    # Fall back to all known tables when none were supplied.
    targets = tables or self.tables
    return [(table, self.set_primary_key_auto(table)) for table in targets]
constant[
Create primary keys for every table in the connected database.
Checks that each table has a primary key. If a table does not have a key
then each column is analyzed to determine if it contains only unique values.
If no columns exist containing only unique values then a new 'ID' column
is created to serve as a auto_incrementing primary key.
]
variable[tables] assign[=] <ast.IfExp object at 0x7da1b0bd0940>
return[<ast.ListComp object at 0x7da1b0bd2650>] | keyword[def] identifier[set_primary_keys_auto] ( identifier[self] , identifier[tables] = keyword[None] ):
literal[string]
identifier[tables] = identifier[tables] keyword[if] identifier[tables] keyword[else] identifier[self] . identifier[tables]
keyword[return] [( identifier[table] , identifier[self] . identifier[set_primary_key_auto] ( identifier[table] )) keyword[for] identifier[table] keyword[in] identifier[tables] ] | def set_primary_keys_auto(self, tables=None):
"""
Create primary keys for every table in the connected database.
Checks that each table has a primary key. If a table does not have a key
then each column is analyzed to determine if it contains only unique values.
If no columns exist containing only unique values then a new 'ID' column
is created to serve as a auto_incrementing primary key.
"""
# Retrieve list of tables if not provided
tables = tables if tables else self.tables
# Resolve primary keys and return list of table, primary_key tuples
return [(table, self.set_primary_key_auto(table)) for table in tables] |
def row_dict(self, row):
    """Return a dictionary version of *row* using keys from ``self.field_map``.

    :param row: A sequence/record that ``self.field_value`` knows how to
        index by field name.
    :return: Mapping of field name -> value for every field in
        ``self.field_map``.
    """
    # Only the field names are needed (the original loop unpacked and
    # ignored the index values), so iterate the mapping's keys directly
    # and build the result with a dict comprehension.
    return {field_name: self.field_value(row, field_name)
            for field_name in self.field_map}
constant[returns dictionary version of row using keys from self.field_map]
variable[d] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b0fc71f0>, <ast.Name object at 0x7da1b0fc5210>]]] in starred[call[name[self].field_map.items, parameter[]]] begin[:]
call[name[d]][name[field_name]] assign[=] call[name[self].field_value, parameter[name[row], name[field_name]]]
return[name[d]] | keyword[def] identifier[row_dict] ( identifier[self] , identifier[row] ):
literal[string]
identifier[d] ={}
keyword[for] identifier[field_name] , identifier[index] keyword[in] identifier[self] . identifier[field_map] . identifier[items] ():
identifier[d] [ identifier[field_name] ]= identifier[self] . identifier[field_value] ( identifier[row] , identifier[field_name] )
keyword[return] identifier[d] | def row_dict(self, row):
"""returns dictionary version of row using keys from self.field_map"""
d = {}
for (field_name, index) in self.field_map.items():
d[field_name] = self.field_value(row, field_name) # depends on [control=['for'], data=[]]
return d |
def _write(self, session, openFile, replaceParamFile=None):
    """
    ProjectFileEvent Write to File Method

    Serializes every event attached to this project file to YAML and
    writes the result to the already-open file handle.

    :param session: Database session (not referenced in this writer; kept
        for the common _write interface).
    :param openFile: Open, writable file object receiving the YAML text.
    :param replaceParamFile: Optional replacement-parameter file (not
        referenced in this writer; kept for interface compatibility).
    """
    # Events are emitted in a deterministic order (name, then subfolder)
    # so repeated writes of the same project produce identical files.
    openFile.write(
        text(
            yaml.dump([evt.as_yml() for evt in
                       self.events.order_by(ProjectFileEvent.name,
                                            ProjectFileEvent.subfolder)]
                      )
        )
    )
constant[
ProjectFileEvent Write to File Method
]
call[name[openFile].write, parameter[call[name[text], parameter[call[name[yaml].dump, parameter[<ast.ListComp object at 0x7da18f09d4b0>]]]]]] | keyword[def] identifier[_write] ( identifier[self] , identifier[session] , identifier[openFile] , identifier[replaceParamFile] = keyword[None] ):
literal[string]
identifier[openFile] . identifier[write] (
identifier[text] (
identifier[yaml] . identifier[dump] ([ identifier[evt] . identifier[as_yml] () keyword[for] identifier[evt] keyword[in]
identifier[self] . identifier[events] . identifier[order_by] ( identifier[ProjectFileEvent] . identifier[name] ,
identifier[ProjectFileEvent] . identifier[subfolder] )]
)
)
) | def _write(self, session, openFile, replaceParamFile=None):
"""
ProjectFileEvent Write to File Method
"""
openFile.write(text(yaml.dump([evt.as_yml() for evt in self.events.order_by(ProjectFileEvent.name, ProjectFileEvent.subfolder)]))) |
def find_expectations(self,
                      expectation_type=None,
                      column=None,
                      expectation_kwargs=None,
                      discard_result_format_kwargs=True,
                      discard_include_configs_kwargs=True,
                      discard_catch_exceptions_kwargs=True,
                      ):
    """Find matching expectations within _expectation_config.

    Args:
        expectation_type=None : The name of the expectation type to be matched.
        column=None : The name of the column to be matched.
        expectation_kwargs=None : A dictionary of kwargs to match against.
        discard_result_format_kwargs=True : In returned expectation object(s), suppress the `result_format` parameter.
        discard_include_configs_kwargs=True : In returned expectation object(s), suppress the `include_configs` parameter.
        discard_catch_exceptions_kwargs=True : In returned expectation object(s), suppress the `catch_exceptions` parameter.

    Returns:
        A list of matching expectation objects (empty when nothing matches).
    """
    # Locate matching expectations first, then hand the index list off to
    # the copy/clean-up helper which applies the discard flags.
    matches = self.find_expectation_indexes(
        expectation_type,
        column,
        expectation_kwargs,
    )
    return self._copy_and_clean_up_expectations_from_indexes(
        matches,
        discard_result_format_kwargs,
        discard_include_configs_kwargs,
        discard_catch_exceptions_kwargs,
    )
constant[Find matching expectations within _expectation_config.
Args:
expectation_type=None : The name of the expectation type to be matched.
column=None : The name of the column to be matched.
expectation_kwargs=None : A dictionary of kwargs to match against.
discard_result_format_kwargs=True : In returned expectation object(s), suppress the `result_format` parameter.
discard_include_configs_kwargs=True : In returned expectation object(s), suppress the `include_configs` parameter.
discard_catch_exceptions_kwargs=True : In returned expectation object(s), suppress the `catch_exceptions` parameter.
Returns:
A list of matching expectation objects.
If there are no matches, the list will be empty.
]
variable[match_indexes] assign[=] call[name[self].find_expectation_indexes, parameter[name[expectation_type], name[column], name[expectation_kwargs]]]
return[call[name[self]._copy_and_clean_up_expectations_from_indexes, parameter[name[match_indexes], name[discard_result_format_kwargs], name[discard_include_configs_kwargs], name[discard_catch_exceptions_kwargs]]]] | keyword[def] identifier[find_expectations] ( identifier[self] ,
identifier[expectation_type] = keyword[None] ,
identifier[column] = keyword[None] ,
identifier[expectation_kwargs] = keyword[None] ,
identifier[discard_result_format_kwargs] = keyword[True] ,
identifier[discard_include_configs_kwargs] = keyword[True] ,
identifier[discard_catch_exceptions_kwargs] = keyword[True] ,
):
literal[string]
identifier[match_indexes] = identifier[self] . identifier[find_expectation_indexes] (
identifier[expectation_type] ,
identifier[column] ,
identifier[expectation_kwargs] ,
)
keyword[return] identifier[self] . identifier[_copy_and_clean_up_expectations_from_indexes] (
identifier[match_indexes] ,
identifier[discard_result_format_kwargs] ,
identifier[discard_include_configs_kwargs] ,
identifier[discard_catch_exceptions_kwargs] ,
) | def find_expectations(self, expectation_type=None, column=None, expectation_kwargs=None, discard_result_format_kwargs=True, discard_include_configs_kwargs=True, discard_catch_exceptions_kwargs=True):
"""Find matching expectations within _expectation_config.
Args:
expectation_type=None : The name of the expectation type to be matched.
column=None : The name of the column to be matched.
expectation_kwargs=None : A dictionary of kwargs to match against.
discard_result_format_kwargs=True : In returned expectation object(s), suppress the `result_format` parameter.
discard_include_configs_kwargs=True : In returned expectation object(s), suppress the `include_configs` parameter.
discard_catch_exceptions_kwargs=True : In returned expectation object(s), suppress the `catch_exceptions` parameter.
Returns:
A list of matching expectation objects.
If there are no matches, the list will be empty.
"""
match_indexes = self.find_expectation_indexes(expectation_type, column, expectation_kwargs)
return self._copy_and_clean_up_expectations_from_indexes(match_indexes, discard_result_format_kwargs, discard_include_configs_kwargs, discard_catch_exceptions_kwargs) |
def convert_date(value, parameter):
    """
    Converts to datetime.date:
        '', '-', None convert to parameter default
        The first matching format in settings.DATE_INPUT_FORMATS converts
        to a date; otherwise a ValueError is raised.
    """
    value = _check_default(value, parameter, ( '', '-', None ))
    # Pass through values that are already resolved (defaults may be None
    # or a concrete date).
    if value is None:
        return value
    if isinstance(value, datetime.date):
        return value
    # Try each configured input format in order; first match wins.
    for date_format in settings.DATE_INPUT_FORMATS:
        try:
            parsed = datetime.datetime.strptime(value, date_format)
        except (ValueError, TypeError):
            continue
        return parsed.date()
    raise ValueError("`{}` does not match a format in settings.DATE_INPUT_FORMATS".format(value))
constant[
Converts to datetime.date:
'', '-', None convert to parameter default
The first matching format in settings.DATE_INPUT_FORMATS converts to datetime
]
variable[value] assign[=] call[name[_check_default], parameter[name[value], name[parameter], tuple[[<ast.Constant object at 0x7da20c6c6bf0>, <ast.Constant object at 0x7da20c6c45b0>, <ast.Constant object at 0x7da20c6c5ae0>]]]]
if <ast.BoolOp object at 0x7da20c6c62c0> begin[:]
return[name[value]]
for taget[name[fmt]] in starred[name[settings].DATE_INPUT_FORMATS] begin[:]
<ast.Try object at 0x7da20c6c7790>
<ast.Raise object at 0x7da1b115ea40> | keyword[def] identifier[convert_date] ( identifier[value] , identifier[parameter] ):
literal[string]
identifier[value] = identifier[_check_default] ( identifier[value] , identifier[parameter] ,( literal[string] , literal[string] , keyword[None] ))
keyword[if] identifier[value] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[value] , identifier[datetime] . identifier[date] ):
keyword[return] identifier[value]
keyword[for] identifier[fmt] keyword[in] identifier[settings] . identifier[DATE_INPUT_FORMATS] :
keyword[try] :
keyword[return] identifier[datetime] . identifier[datetime] . identifier[strptime] ( identifier[value] , identifier[fmt] ). identifier[date] ()
keyword[except] ( identifier[ValueError] , identifier[TypeError] ):
keyword[continue]
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[value] )) | def convert_date(value, parameter):
"""
Converts to datetime.date:
'', '-', None convert to parameter default
The first matching format in settings.DATE_INPUT_FORMATS converts to datetime
"""
value = _check_default(value, parameter, ('', '-', None))
if value is None or isinstance(value, datetime.date):
return value # depends on [control=['if'], data=[]]
for fmt in settings.DATE_INPUT_FORMATS:
try:
return datetime.datetime.strptime(value, fmt).date() # depends on [control=['try'], data=[]]
except (ValueError, TypeError):
continue # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['fmt']]
raise ValueError('`{}` does not match a format in settings.DATE_INPUT_FORMATS'.format(value)) |
def json(self):
    """Custom JSON encoder: return a serializable dict for this secret."""
    payload = dict(
        type=self.type,
        filename=self.filename,
        line_number=self.lineno,
        hashed_secret=self.secret_hash,
    )
    # 'is_secret' is tri-state; only include it when it has been set
    # (False is meaningful and must survive, so test against None).
    if self.is_secret is not None:
        payload['is_secret'] = self.is_secret
    return payload
constant[Custom JSON encoder]
variable[attributes] assign[=] dictionary[[<ast.Constant object at 0x7da20c6e7ca0>, <ast.Constant object at 0x7da20c6e7130>, <ast.Constant object at 0x7da20c6e66b0>, <ast.Constant object at 0x7da20c6e5ea0>], [<ast.Attribute object at 0x7da20c6e63b0>, <ast.Attribute object at 0x7da20c6e4190>, <ast.Attribute object at 0x7da20c6e63e0>, <ast.Attribute object at 0x7da20c6e72b0>]]
if compare[name[self].is_secret is_not constant[None]] begin[:]
call[name[attributes]][constant[is_secret]] assign[=] name[self].is_secret
return[name[attributes]] | keyword[def] identifier[json] ( identifier[self] ):
literal[string]
identifier[attributes] ={
literal[string] : identifier[self] . identifier[type] ,
literal[string] : identifier[self] . identifier[filename] ,
literal[string] : identifier[self] . identifier[lineno] ,
literal[string] : identifier[self] . identifier[secret_hash] ,
}
keyword[if] identifier[self] . identifier[is_secret] keyword[is] keyword[not] keyword[None] :
identifier[attributes] [ literal[string] ]= identifier[self] . identifier[is_secret]
keyword[return] identifier[attributes] | def json(self):
"""Custom JSON encoder"""
attributes = {'type': self.type, 'filename': self.filename, 'line_number': self.lineno, 'hashed_secret': self.secret_hash}
if self.is_secret is not None:
attributes['is_secret'] = self.is_secret # depends on [control=['if'], data=[]]
return attributes |
def read_oplog_progress(self):
    """Reads oplog progress from file provided by user.

    This method is only called once before any threads are spawned.

    The progress file is JSON holding either ``[name, timestamp]`` for a
    replica set or a list of such pairs for a sharded cluster.  On
    success, ``self.oplog_progress.dict`` is replaced with a mapping of
    name -> timestamp (converted via ``util.long_to_bson_ts``); on any
    failure the method returns None and leaves the in-memory progress
    state untouched.
    """
    # No checkpoint file configured -- nothing to restore.
    if self.oplog_checkpoint is None:
        return None
    # Check for empty file
    try:
        if os.stat(self.oplog_checkpoint).st_size == 0:
            LOG.info("MongoConnector: Empty oplog progress file.")
            return None
    except OSError:
        # File missing or unreadable: treat as "no saved progress".
        return None
    with open(self.oplog_checkpoint, "r") as progress_file:
        try:
            data = json.load(progress_file)
        except ValueError:
            # Corrupt or legacy-format file: log detailed recovery
            # instructions and bail out without touching progress state.
            LOG.exception(
                'Cannot read oplog progress file "%s". '
                "It may be corrupt after Mongo Connector was shut down"
                "uncleanly. You can try to recover from a backup file "
                '(may be called "%s.backup") or create a new progress file '
                "starting at the current moment in time by running "
                "mongo-connector --no-dump <other options>. "
                "You may also be trying to read an oplog progress file "
                "created with the old format for sharded clusters. "
                "See https://github.com/10gen-labs/mongo-connector/wiki"
                "/Oplog-Progress-File for complete documentation."
                % (self.oplog_checkpoint, self.oplog_checkpoint)
            )
            return
        # data format:
        # [name, timestamp] = replica set
        # [[name, timestamp], [name, timestamp], ...] = sharded cluster
        if not isinstance(data[0], list):
            # Normalize the single replica-set pair to list-of-pairs form.
            data = [data]
        # self.oplog_progress is used as a context manager here --
        # presumably a lock guarding its dict; hold it while swapping in
        # the restored mapping.
        with self.oplog_progress:
            self.oplog_progress.dict = dict(
                (name, util.long_to_bson_ts(timestamp)) for name, timestamp in data
            )
constant[Reads oplog progress from file provided by user.
This method is only called once before any threads are spanwed.
]
if compare[name[self].oplog_checkpoint is constant[None]] begin[:]
return[constant[None]]
<ast.Try object at 0x7da1b1da3160>
with call[name[open], parameter[name[self].oplog_checkpoint, constant[r]]] begin[:]
<ast.Try object at 0x7da1b1da1960>
if <ast.UnaryOp object at 0x7da1b1da3520> begin[:]
variable[data] assign[=] list[[<ast.Name object at 0x7da1b1edbfd0>]]
with name[self].oplog_progress begin[:]
name[self].oplog_progress.dict assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b1edaf20>]] | keyword[def] identifier[read_oplog_progress] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[oplog_checkpoint] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[try] :
keyword[if] identifier[os] . identifier[stat] ( identifier[self] . identifier[oplog_checkpoint] ). identifier[st_size] == literal[int] :
identifier[LOG] . identifier[info] ( literal[string] )
keyword[return] keyword[None]
keyword[except] identifier[OSError] :
keyword[return] keyword[None]
keyword[with] identifier[open] ( identifier[self] . identifier[oplog_checkpoint] , literal[string] ) keyword[as] identifier[progress_file] :
keyword[try] :
identifier[data] = identifier[json] . identifier[load] ( identifier[progress_file] )
keyword[except] identifier[ValueError] :
identifier[LOG] . identifier[exception] (
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
%( identifier[self] . identifier[oplog_checkpoint] , identifier[self] . identifier[oplog_checkpoint] )
)
keyword[return]
keyword[if] keyword[not] identifier[isinstance] ( identifier[data] [ literal[int] ], identifier[list] ):
identifier[data] =[ identifier[data] ]
keyword[with] identifier[self] . identifier[oplog_progress] :
identifier[self] . identifier[oplog_progress] . identifier[dict] = identifier[dict] (
( identifier[name] , identifier[util] . identifier[long_to_bson_ts] ( identifier[timestamp] )) keyword[for] identifier[name] , identifier[timestamp] keyword[in] identifier[data]
) | def read_oplog_progress(self):
"""Reads oplog progress from file provided by user.
This method is only called once before any threads are spanwed.
"""
if self.oplog_checkpoint is None:
return None # depends on [control=['if'], data=[]]
# Check for empty file
try:
if os.stat(self.oplog_checkpoint).st_size == 0:
LOG.info('MongoConnector: Empty oplog progress file.')
return None # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except OSError:
return None # depends on [control=['except'], data=[]]
with open(self.oplog_checkpoint, 'r') as progress_file:
try:
data = json.load(progress_file) # depends on [control=['try'], data=[]]
except ValueError:
LOG.exception('Cannot read oplog progress file "%s". It may be corrupt after Mongo Connector was shut downuncleanly. You can try to recover from a backup file (may be called "%s.backup") or create a new progress file starting at the current moment in time by running mongo-connector --no-dump <other options>. You may also be trying to read an oplog progress file created with the old format for sharded clusters. See https://github.com/10gen-labs/mongo-connector/wiki/Oplog-Progress-File for complete documentation.' % (self.oplog_checkpoint, self.oplog_checkpoint))
return # depends on [control=['except'], data=[]]
# data format:
# [name, timestamp] = replica set
# [[name, timestamp], [name, timestamp], ...] = sharded cluster
if not isinstance(data[0], list):
data = [data] # depends on [control=['if'], data=[]]
with self.oplog_progress:
self.oplog_progress.dict = dict(((name, util.long_to_bson_ts(timestamp)) for (name, timestamp) in data)) # depends on [control=['with'], data=[]] # depends on [control=['with'], data=['progress_file']] |
def postProcess(self, images_data, new_format, new_size):
    """Convert image binary data to a target format and/or size, and return the processed bytes.

    Args:
        images_data: list of encoded image byte strings. A single entry is
            decoded as-is; multiple entries (count must be a perfect square,
            enforced by the assert below) are tiled into one square grid.
        new_format: target format (an enum-like object with a ``name``
            attribute), or None to keep ``self.format``.
        new_size: target width/height in pixels (output is square), or None
            to keep the current dimensions.

    Returns:
        The re-encoded image data as bytes.
    """
    if len(images_data) == 1:
        in_bytes = io.BytesIO(images_data[0])
        img = PIL.Image.open(in_bytes)
        if img.mode != "RGB":
            # normalize palette/greyscale/CMYK inputs so the save below behaves uniformly
            img = img.convert("RGB")
    else:
        # images need to be joined before further processing
        logging.getLogger("Cover").info("Joining %u images..." % (len(images_data)))
        # TODO find a way to do this losslessly for JPEG
        new_img = PIL.Image.new("RGB", self.size)
        assert(is_square(len(images_data)))
        sq = int(math.sqrt(len(images_data)))
        images_data_it = iter(images_data)
        # remembers each tile's (width, height) so later tiles can compute their offset
        img_sizes = {}
        for x in range(sq):
            for y in range(sq):
                current_image_data = next(images_data_it)
                img_stream = io.BytesIO(current_image_data)
                img = PIL.Image.open(img_stream)
                img_sizes[(x, y)] = img.size
                # top-left corner of this tile: sum of the sizes of the tiles
                # already placed before it along each axis
                box = [0, 0]
                if x > 0:
                    for px in range(x):
                        box[0] += img_sizes[(px, y)][0]
                if y > 0:
                    for py in range(y):
                        box[1] += img_sizes[(x, py)][1]
                # extend to a 4-tuple (left, upper, right, lower) as paste() expects
                box.extend((box[0] + img.size[0], box[1] + img.size[1]))
                new_img.paste(img, box=tuple(box))
        img = new_img
    out_bytes = io.BytesIO()
    if new_size is not None:
        logging.getLogger("Cover").info("Resizing from %ux%u to %ux%u..." % (self.size[0], self.size[1], new_size, new_size))
        img = img.resize((new_size, new_size), PIL.Image.LANCZOS)
        # apply unsharp filter to remove resize blur (equivalent to (images/graphics)magick -unsharp 1.5x1+0.7+0.02)
        # we don't use PIL.ImageFilter.SHARPEN or PIL.ImageEnhance.Sharpness because we want precise control over
        # parameters
        unsharper = PIL.ImageFilter.UnsharpMask(radius=1.5, percent=70, threshold=5)
        img = img.filter(unsharper)
    if new_format is not None:
        logging.getLogger("Cover").info("Converting to %s..." % (new_format.name.upper()))
        target_format = new_format
    else:
        target_format = self.format
    img.save(out_bytes,
             format=target_format.name,
             quality=90,
             optimize=True)
    return out_bytes.getvalue()
constant[ Convert image binary data to a target format and/or size (None if no conversion needed), and return the processed data. ]
if compare[call[name[len], parameter[name[images_data]]] equal[==] constant[1]] begin[:]
variable[in_bytes] assign[=] call[name[io].BytesIO, parameter[call[name[images_data]][constant[0]]]]
variable[img] assign[=] call[name[PIL].Image.open, parameter[name[in_bytes]]]
if compare[name[img].mode not_equal[!=] constant[RGB]] begin[:]
variable[img] assign[=] call[name[img].convert, parameter[constant[RGB]]]
variable[out_bytes] assign[=] call[name[io].BytesIO, parameter[]]
if compare[name[new_size] is_not constant[None]] begin[:]
call[call[name[logging].getLogger, parameter[constant[Cover]]].info, parameter[binary_operation[constant[Resizing from %ux%u to %ux%u...] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b06cfc70>, <ast.Subscript object at 0x7da1b06ce500>, <ast.Name object at 0x7da1b06ce560>, <ast.Name object at 0x7da1b06cd7b0>]]]]]
variable[img] assign[=] call[name[img].resize, parameter[tuple[[<ast.Name object at 0x7da1b06cc100>, <ast.Name object at 0x7da1b06cfe20>]], name[PIL].Image.LANCZOS]]
variable[unsharper] assign[=] call[name[PIL].ImageFilter.UnsharpMask, parameter[]]
variable[img] assign[=] call[name[img].filter, parameter[name[unsharper]]]
if compare[name[new_format] is_not constant[None]] begin[:]
call[call[name[logging].getLogger, parameter[constant[Cover]]].info, parameter[binary_operation[constant[Converting to %s...] <ast.Mod object at 0x7da2590d6920> call[name[new_format].name.upper, parameter[]]]]]
variable[target_format] assign[=] name[new_format]
call[name[img].save, parameter[name[out_bytes]]]
return[call[name[out_bytes].getvalue, parameter[]]] | keyword[def] identifier[postProcess] ( identifier[self] , identifier[images_data] , identifier[new_format] , identifier[new_size] ):
literal[string]
keyword[if] identifier[len] ( identifier[images_data] )== literal[int] :
identifier[in_bytes] = identifier[io] . identifier[BytesIO] ( identifier[images_data] [ literal[int] ])
identifier[img] = identifier[PIL] . identifier[Image] . identifier[open] ( identifier[in_bytes] )
keyword[if] identifier[img] . identifier[mode] != literal[string] :
identifier[img] = identifier[img] . identifier[convert] ( literal[string] )
keyword[else] :
identifier[logging] . identifier[getLogger] ( literal[string] ). identifier[info] ( literal[string] %( identifier[len] ( identifier[images_data] )))
identifier[new_img] = identifier[PIL] . identifier[Image] . identifier[new] ( literal[string] , identifier[self] . identifier[size] )
keyword[assert] ( identifier[is_square] ( identifier[len] ( identifier[images_data] )))
identifier[sq] = identifier[int] ( identifier[math] . identifier[sqrt] ( identifier[len] ( identifier[images_data] )))
identifier[images_data_it] = identifier[iter] ( identifier[images_data] )
identifier[img_sizes] ={}
keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[sq] ):
keyword[for] identifier[y] keyword[in] identifier[range] ( identifier[sq] ):
identifier[current_image_data] = identifier[next] ( identifier[images_data_it] )
identifier[img_stream] = identifier[io] . identifier[BytesIO] ( identifier[current_image_data] )
identifier[img] = identifier[PIL] . identifier[Image] . identifier[open] ( identifier[img_stream] )
identifier[img_sizes] [( identifier[x] , identifier[y] )]= identifier[img] . identifier[size]
identifier[box] =[ literal[int] , literal[int] ]
keyword[if] identifier[x] > literal[int] :
keyword[for] identifier[px] keyword[in] identifier[range] ( identifier[x] ):
identifier[box] [ literal[int] ]+= identifier[img_sizes] [( identifier[px] , identifier[y] )][ literal[int] ]
keyword[if] identifier[y] > literal[int] :
keyword[for] identifier[py] keyword[in] identifier[range] ( identifier[y] ):
identifier[box] [ literal[int] ]+= identifier[img_sizes] [( identifier[x] , identifier[py] )][ literal[int] ]
identifier[box] . identifier[extend] (( identifier[box] [ literal[int] ]+ identifier[img] . identifier[size] [ literal[int] ], identifier[box] [ literal[int] ]+ identifier[img] . identifier[size] [ literal[int] ]))
identifier[new_img] . identifier[paste] ( identifier[img] , identifier[box] = identifier[tuple] ( identifier[box] ))
identifier[img] = identifier[new_img]
identifier[out_bytes] = identifier[io] . identifier[BytesIO] ()
keyword[if] identifier[new_size] keyword[is] keyword[not] keyword[None] :
identifier[logging] . identifier[getLogger] ( literal[string] ). identifier[info] ( literal[string] %( identifier[self] . identifier[size] [ literal[int] ], identifier[self] . identifier[size] [ literal[int] ], identifier[new_size] , identifier[new_size] ))
identifier[img] = identifier[img] . identifier[resize] (( identifier[new_size] , identifier[new_size] ), identifier[PIL] . identifier[Image] . identifier[LANCZOS] )
identifier[unsharper] = identifier[PIL] . identifier[ImageFilter] . identifier[UnsharpMask] ( identifier[radius] = literal[int] , identifier[percent] = literal[int] , identifier[threshold] = literal[int] )
identifier[img] = identifier[img] . identifier[filter] ( identifier[unsharper] )
keyword[if] identifier[new_format] keyword[is] keyword[not] keyword[None] :
identifier[logging] . identifier[getLogger] ( literal[string] ). identifier[info] ( literal[string] %( identifier[new_format] . identifier[name] . identifier[upper] ()))
identifier[target_format] = identifier[new_format]
keyword[else] :
identifier[target_format] = identifier[self] . identifier[format]
identifier[img] . identifier[save] ( identifier[out_bytes] ,
identifier[format] = identifier[target_format] . identifier[name] ,
identifier[quality] = literal[int] ,
identifier[optimize] = keyword[True] )
keyword[return] identifier[out_bytes] . identifier[getvalue] () | def postProcess(self, images_data, new_format, new_size):
""" Convert image binary data to a target format and/or size (None if no conversion needed), and return the processed data. """
if len(images_data) == 1:
in_bytes = io.BytesIO(images_data[0])
img = PIL.Image.open(in_bytes)
if img.mode != 'RGB':
img = img.convert('RGB') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# images need to be joined before further processing
logging.getLogger('Cover').info('Joining %u images...' % len(images_data))
# TODO find a way to do this losslessly for JPEG
new_img = PIL.Image.new('RGB', self.size)
assert is_square(len(images_data))
sq = int(math.sqrt(len(images_data)))
images_data_it = iter(images_data)
img_sizes = {}
for x in range(sq):
for y in range(sq):
current_image_data = next(images_data_it)
img_stream = io.BytesIO(current_image_data)
img = PIL.Image.open(img_stream)
img_sizes[x, y] = img.size
box = [0, 0]
if x > 0:
for px in range(x):
box[0] += img_sizes[px, y][0] # depends on [control=['for'], data=['px']] # depends on [control=['if'], data=['x']]
if y > 0:
for py in range(y):
box[1] += img_sizes[x, py][1] # depends on [control=['for'], data=['py']] # depends on [control=['if'], data=['y']]
box.extend((box[0] + img.size[0], box[1] + img.size[1]))
new_img.paste(img, box=tuple(box)) # depends on [control=['for'], data=['y']] # depends on [control=['for'], data=['x']]
img = new_img
out_bytes = io.BytesIO()
if new_size is not None:
logging.getLogger('Cover').info('Resizing from %ux%u to %ux%u...' % (self.size[0], self.size[1], new_size, new_size))
img = img.resize((new_size, new_size), PIL.Image.LANCZOS)
# apply unsharp filter to remove resize blur (equivalent to (images/graphics)magick -unsharp 1.5x1+0.7+0.02)
# we don't use PIL.ImageFilter.SHARPEN or PIL.ImageEnhance.Sharpness because we want precise control over
# parameters
unsharper = PIL.ImageFilter.UnsharpMask(radius=1.5, percent=70, threshold=5)
img = img.filter(unsharper) # depends on [control=['if'], data=['new_size']]
if new_format is not None:
logging.getLogger('Cover').info('Converting to %s...' % new_format.name.upper())
target_format = new_format # depends on [control=['if'], data=['new_format']]
else:
target_format = self.format
img.save(out_bytes, format=target_format.name, quality=90, optimize=True)
return out_bytes.getvalue() |
def on_result(self, task, result):
    '''Called on every crawl result: persist non-empty results to the result DB.

    Args:
        task: task dict; must contain 'taskid', 'project' and 'url' for the
            result to be saved, otherwise a warning is logged instead.
        result: the extracted result object; falsy results are silently ignored.

    Returns:
        The return value of ``self.resultdb.save`` when the result is saved,
        otherwise None.
    '''
    if not result:
        # nothing to store
        return
    if 'taskid' in task and 'project' in task and 'url' in task:
        logger.info('result %s:%s %s -> %.30r' % (
            task['project'], task['taskid'], task['url'], result))
        return self.resultdb.save(
            project=task['project'],
            taskid=task['taskid'],
            url=task['url'],
            result=result
        )
    else:
        # fixed typo in log message: UNKNOW -> UNKNOWN
        logger.warning('result UNKNOWN -> %.30r' % result)
        return
return | def function[on_result, parameter[self, task, result]]:
constant[Called every result]
if <ast.UnaryOp object at 0x7da1b1f61690> begin[:]
return[None]
if <ast.BoolOp object at 0x7da1b1f62e60> begin[:]
call[name[logger].info, parameter[binary_operation[constant[result %s:%s %s -> %.30r] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b1f60970>, <ast.Subscript object at 0x7da1b1f62d70>, <ast.Subscript object at 0x7da1b1f61660>, <ast.Name object at 0x7da1b1f61b10>]]]]]
return[call[name[self].resultdb.save, parameter[]]] | keyword[def] identifier[on_result] ( identifier[self] , identifier[task] , identifier[result] ):
literal[string]
keyword[if] keyword[not] identifier[result] :
keyword[return]
keyword[if] literal[string] keyword[in] identifier[task] keyword[and] literal[string] keyword[in] identifier[task] keyword[and] literal[string] keyword[in] identifier[task] :
identifier[logger] . identifier[info] ( literal[string] %(
identifier[task] [ literal[string] ], identifier[task] [ literal[string] ], identifier[task] [ literal[string] ], identifier[result] ))
keyword[return] identifier[self] . identifier[resultdb] . identifier[save] (
identifier[project] = identifier[task] [ literal[string] ],
identifier[taskid] = identifier[task] [ literal[string] ],
identifier[url] = identifier[task] [ literal[string] ],
identifier[result] = identifier[result]
)
keyword[else] :
identifier[logger] . identifier[warning] ( literal[string] % identifier[result] )
keyword[return] | def on_result(self, task, result):
"""Called every result"""
if not result:
return # depends on [control=['if'], data=[]]
if 'taskid' in task and 'project' in task and ('url' in task):
logger.info('result %s:%s %s -> %.30r' % (task['project'], task['taskid'], task['url'], result))
return self.resultdb.save(project=task['project'], taskid=task['taskid'], url=task['url'], result=result) # depends on [control=['if'], data=[]]
else:
logger.warning('result UNKNOW -> %.30r' % result)
return |
def dual(ABF):
    """Plot two channels of current sweep (top/bottom)."""
    new(ABF)
    # (subplot position, panel title, channel index) for top and bottom panels
    panels = [
        (211, "Input A (channel 0)", 0),
        (212, "Input B (channel 1)", 1),
    ]
    for position, label, channel in panels:
        pylab.subplot(position)
        pylab.title(label)
        ABF.channel = channel
        sweep(ABF)
constant[Plot two channels of current sweep (top/bottom).]
call[name[new], parameter[name[ABF]]]
call[name[pylab].subplot, parameter[constant[211]]]
call[name[pylab].title, parameter[constant[Input A (channel 0)]]]
name[ABF].channel assign[=] constant[0]
call[name[sweep], parameter[name[ABF]]]
call[name[pylab].subplot, parameter[constant[212]]]
call[name[pylab].title, parameter[constant[Input B (channel 1)]]]
name[ABF].channel assign[=] constant[1]
call[name[sweep], parameter[name[ABF]]] | keyword[def] identifier[dual] ( identifier[ABF] ):
literal[string]
identifier[new] ( identifier[ABF] )
identifier[pylab] . identifier[subplot] ( literal[int] )
identifier[pylab] . identifier[title] ( literal[string] )
identifier[ABF] . identifier[channel] = literal[int]
identifier[sweep] ( identifier[ABF] )
identifier[pylab] . identifier[subplot] ( literal[int] )
identifier[pylab] . identifier[title] ( literal[string] )
identifier[ABF] . identifier[channel] = literal[int]
identifier[sweep] ( identifier[ABF] ) | def dual(ABF):
"""Plot two channels of current sweep (top/bottom)."""
new(ABF)
pylab.subplot(211)
pylab.title('Input A (channel 0)')
ABF.channel = 0
sweep(ABF)
pylab.subplot(212)
pylab.title('Input B (channel 1)')
ABF.channel = 1
sweep(ABF) |
def first_return():
    """Generate a random walk and return its length upto the moment
    that the walker first returns to the origin.
    It is mathematically provable that the walker will eventually return,
    meaning that the function call will halt, although it may take
    a *very* long time and your computer may run out of memory!
    Thus, try this interactively only.
    """
    # NOTE(review): `>>` is presumably an overloaded pipe/composition operator
    # from the stream library that defines randwalk/drop/takewhile — confirm.
    # Reading left to right: drop(1) skips the walk's starting position (the
    # origin itself), takewhile keeps positions until the walker is back at
    # Origin, and the final `>> list` materializes that (finite) prefix.
    walk = randwalk() >> drop(1) >> takewhile(lambda v: v != Origin) >> list
    # number of steps taken strictly between leaving and re-reaching the origin
    return len(walk)
variable[walk] assign[=] binary_operation[binary_operation[binary_operation[call[name[randwalk], parameter[]] <ast.RShift object at 0x7da2590d6a40> call[name[drop], parameter[constant[1]]]] <ast.RShift object at 0x7da2590d6a40> call[name[takewhile], parameter[<ast.Lambda object at 0x7da1b232e1d0>]]] <ast.RShift object at 0x7da2590d6a40> name[list]]
return[call[name[len], parameter[name[walk]]]] | keyword[def] identifier[first_return] ():
literal[string]
identifier[walk] = identifier[randwalk] ()>> identifier[drop] ( literal[int] )>> identifier[takewhile] ( keyword[lambda] identifier[v] : identifier[v] != identifier[Origin] )>> identifier[list]
keyword[return] identifier[len] ( identifier[walk] ) | def first_return():
"""Generate a random walk and return its length upto the moment
that the walker first returns to the origin.
It is mathematically provable that the walker will eventually return,
meaning that the function call will halt, although it may take
a *very* long time and your computer may run out of memory!
Thus, try this interactively only.
"""
walk = randwalk() >> drop(1) >> takewhile(lambda v: v != Origin) >> list
return len(walk) |
def supervisor_events(stdin, stdout):
    """Yield (event_headers, event_data) pairs received from Supervisor.

    Implements the supervisord event-listener handshake on the given
    stdin/stdout streams: announce READY, read one event, hand it to the
    caller, then acknowledge with RESULT before waiting for the next one.
    """
    def _send(message):
        # every protocol message must be flushed immediately
        stdout.write(message)
        stdout.flush()
    while True:
        _send('READY\n')
        headers = get_headers(stdin.readline())
        payload = stdin.read(int(headers['len']))
        yield eventdata(payload)
        _send('RESULT 2\nOK')
stdout.flush() | def function[supervisor_events, parameter[stdin, stdout]]:
constant[
An event stream from Supervisor.
]
while constant[True] begin[:]
call[name[stdout].write, parameter[constant[READY
]]]
call[name[stdout].flush, parameter[]]
variable[line] assign[=] call[name[stdin].readline, parameter[]]
variable[headers] assign[=] call[name[get_headers], parameter[name[line]]]
variable[payload] assign[=] call[name[stdin].read, parameter[call[name[int], parameter[call[name[headers]][constant[len]]]]]]
<ast.Tuple object at 0x7da207f99ed0> assign[=] call[name[eventdata], parameter[name[payload]]]
<ast.Yield object at 0x7da207f98f70>
call[name[stdout].write, parameter[constant[RESULT 2
OK]]]
call[name[stdout].flush, parameter[]] | keyword[def] identifier[supervisor_events] ( identifier[stdin] , identifier[stdout] ):
literal[string]
keyword[while] keyword[True] :
identifier[stdout] . identifier[write] ( literal[string] )
identifier[stdout] . identifier[flush] ()
identifier[line] = identifier[stdin] . identifier[readline] ()
identifier[headers] = identifier[get_headers] ( identifier[line] )
identifier[payload] = identifier[stdin] . identifier[read] ( identifier[int] ( identifier[headers] [ literal[string] ]))
identifier[event_headers] , identifier[event_data] = identifier[eventdata] ( identifier[payload] )
keyword[yield] identifier[event_headers] , identifier[event_data]
identifier[stdout] . identifier[write] ( literal[string] )
identifier[stdout] . identifier[flush] () | def supervisor_events(stdin, stdout):
"""
An event stream from Supervisor.
"""
while True:
stdout.write('READY\n')
stdout.flush()
line = stdin.readline()
headers = get_headers(line)
payload = stdin.read(int(headers['len']))
(event_headers, event_data) = eventdata(payload)
yield (event_headers, event_data)
stdout.write('RESULT 2\nOK')
stdout.flush() # depends on [control=['while'], data=[]] |
def get_assessment_offered_lookup_session(self):
    """Gets the ``OsidSession`` associated with the assessment offered lookup service.
    return: (osid.assessment.AssessmentOfferedLookupSession) - an
        ``AssessmentOfferedLookupSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_assessment_offered_lookup()``
        is ``false``
    *compliance: optional -- This method must be implemented if
        ``supports_assessment_offered_lookup()`` is ``true``.*
    """
    if self.supports_assessment_offered_lookup():
        # pylint: disable=no-member
        return sessions.AssessmentOfferedLookupSession(runtime=self._runtime)
    raise errors.Unimplemented()
constant[Gets the ``OsidSession`` associated with the assessment offered lookup service.
return: (osid.assessment.AssessmentOfferedLookupSession) - an
``AssessmentOfferedLookupSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_assessment_offered_lookup()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_offered_lookup()`` is ``true``.*
]
if <ast.UnaryOp object at 0x7da20c6e6860> begin[:]
<ast.Raise object at 0x7da20c6e4820>
return[call[name[sessions].AssessmentOfferedLookupSession, parameter[]]] | keyword[def] identifier[get_assessment_offered_lookup_session] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[supports_assessment_offered_lookup] ():
keyword[raise] identifier[errors] . identifier[Unimplemented] ()
keyword[return] identifier[sessions] . identifier[AssessmentOfferedLookupSession] ( identifier[runtime] = identifier[self] . identifier[_runtime] ) | def get_assessment_offered_lookup_session(self):
"""Gets the ``OsidSession`` associated with the assessment offered lookup service.
return: (osid.assessment.AssessmentOfferedLookupSession) - an
``AssessmentOfferedLookupSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_assessment_offered_lookup()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_assessment_offered_lookup()`` is ``true``.*
"""
if not self.supports_assessment_offered_lookup():
raise errors.Unimplemented() # depends on [control=['if'], data=[]]
# pylint: disable=no-member
return sessions.AssessmentOfferedLookupSession(runtime=self._runtime) |
def _read_syncmap_file(self, path, extension, text=False):
    """ Read labels from a SyncMap file """
    smap = SyncMap(logger=self.logger)
    smap.read(extension, path, parameters=None)
    labels = []
    for fragment in smap.fragments:
        # third tuple element: full joined text when requested, else the fragment id
        if text:
            third = u" ".join(fragment.text_fragment.lines)
        else:
            third = fragment.text_fragment.identifier
        labels.append((fragment.begin, fragment.end, third))
    return labels
constant[ Read labels from a SyncMap file ]
variable[syncmap] assign[=] call[name[SyncMap], parameter[]]
call[name[syncmap].read, parameter[name[extension], name[path]]]
if name[text] begin[:]
return[<ast.ListComp object at 0x7da1b18f9450>]
return[<ast.ListComp object at 0x7da1b18fae00>] | keyword[def] identifier[_read_syncmap_file] ( identifier[self] , identifier[path] , identifier[extension] , identifier[text] = keyword[False] ):
literal[string]
identifier[syncmap] = identifier[SyncMap] ( identifier[logger] = identifier[self] . identifier[logger] )
identifier[syncmap] . identifier[read] ( identifier[extension] , identifier[path] , identifier[parameters] = keyword[None] )
keyword[if] identifier[text] :
keyword[return] [( identifier[f] . identifier[begin] , identifier[f] . identifier[end] , literal[string] . identifier[join] ( identifier[f] . identifier[text_fragment] . identifier[lines] )) keyword[for] identifier[f] keyword[in] identifier[syncmap] . identifier[fragments] ]
keyword[return] [( identifier[f] . identifier[begin] , identifier[f] . identifier[end] , identifier[f] . identifier[text_fragment] . identifier[identifier] ) keyword[for] identifier[f] keyword[in] identifier[syncmap] . identifier[fragments] ] | def _read_syncmap_file(self, path, extension, text=False):
""" Read labels from a SyncMap file """
syncmap = SyncMap(logger=self.logger)
syncmap.read(extension, path, parameters=None)
if text:
return [(f.begin, f.end, u' '.join(f.text_fragment.lines)) for f in syncmap.fragments] # depends on [control=['if'], data=[]]
return [(f.begin, f.end, f.text_fragment.identifier) for f in syncmap.fragments] |
def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("POT estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters]) | def function[_get_param_names, parameter[cls]]:
constant[Get parameter names for the estimator]
variable[init] assign[=] call[name[getattr], parameter[name[cls].__init__, constant[deprecated_original], name[cls].__init__]]
if compare[name[init] is name[object].__init__] begin[:]
return[list[[]]]
variable[init_signature] assign[=] call[name[signature], parameter[name[init]]]
variable[parameters] assign[=] <ast.ListComp object at 0x7da1b163b220>
for taget[name[p]] in starred[name[parameters]] begin[:]
if compare[name[p].kind equal[==] name[p].VAR_POSITIONAL] begin[:]
<ast.Raise object at 0x7da1b1639120>
return[call[name[sorted], parameter[<ast.ListComp object at 0x7da1b16386a0>]]] | keyword[def] identifier[_get_param_names] ( identifier[cls] ):
literal[string]
identifier[init] = identifier[getattr] ( identifier[cls] . identifier[__init__] , literal[string] , identifier[cls] . identifier[__init__] )
keyword[if] identifier[init] keyword[is] identifier[object] . identifier[__init__] :
keyword[return] []
identifier[init_signature] = identifier[signature] ( identifier[init] )
identifier[parameters] =[ identifier[p] keyword[for] identifier[p] keyword[in] identifier[init_signature] . identifier[parameters] . identifier[values] ()
keyword[if] identifier[p] . identifier[name] != literal[string] keyword[and] identifier[p] . identifier[kind] != identifier[p] . identifier[VAR_KEYWORD] ]
keyword[for] identifier[p] keyword[in] identifier[parameters] :
keyword[if] identifier[p] . identifier[kind] == identifier[p] . identifier[VAR_POSITIONAL] :
keyword[raise] identifier[RuntimeError] ( literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
%( identifier[cls] , identifier[init_signature] ))
keyword[return] identifier[sorted] ([ identifier[p] . identifier[name] keyword[for] identifier[p] keyword[in] identifier[parameters] ]) | def _get_param_names(cls):
"""Get parameter names for the estimator"""
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return [] # depends on [control=['if'], data=[]]
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values() if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("POT estimators should always specify their parameters in the signature of their __init__ (no varargs). %s with constructor %s doesn't follow this convention." % (cls, init_signature)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['p']]
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters]) |
def probeLine(img, p1, p2, res=100):
    """
    Takes a ``vtkImageData`` and probes its scalars along a line defined by 2 points `p1` and `p2`.
    .. hint:: |probeLine| |probeLine.py|_
    """
    # build the sampling line between the two endpoints
    src = vtk.vtkLineSource()
    src.SetPoint1(p1)
    src.SetPoint2(p2)
    src.SetResolution(res)
    # sample the image scalars at each point of the line
    prober = vtk.vtkProbeFilter()
    prober.SetSourceData(img)
    prober.SetInputConnection(src.GetOutputPort())
    prober.Update()
    probed_actor = Actor(prober.GetOutput(), c=None)  # ScalarVisibilityOn
    # keep the image's scalar range so colors map consistently with the source
    probed_actor.mapper.SetScalarRange(img.GetScalarRange())
    return probed_actor
constant[
Takes a ``vtkImageData`` and probes its scalars along a line defined by 2 points `p1` and `p2`.
.. hint:: |probeLine| |probeLine.py|_
]
variable[line] assign[=] call[name[vtk].vtkLineSource, parameter[]]
call[name[line].SetResolution, parameter[name[res]]]
call[name[line].SetPoint1, parameter[name[p1]]]
call[name[line].SetPoint2, parameter[name[p2]]]
variable[probeFilter] assign[=] call[name[vtk].vtkProbeFilter, parameter[]]
call[name[probeFilter].SetSourceData, parameter[name[img]]]
call[name[probeFilter].SetInputConnection, parameter[call[name[line].GetOutputPort, parameter[]]]]
call[name[probeFilter].Update, parameter[]]
variable[lact] assign[=] call[name[Actor], parameter[call[name[probeFilter].GetOutput, parameter[]]]]
call[name[lact].mapper.SetScalarRange, parameter[call[name[img].GetScalarRange, parameter[]]]]
return[name[lact]] | keyword[def] identifier[probeLine] ( identifier[img] , identifier[p1] , identifier[p2] , identifier[res] = literal[int] ):
literal[string]
identifier[line] = identifier[vtk] . identifier[vtkLineSource] ()
identifier[line] . identifier[SetResolution] ( identifier[res] )
identifier[line] . identifier[SetPoint1] ( identifier[p1] )
identifier[line] . identifier[SetPoint2] ( identifier[p2] )
identifier[probeFilter] = identifier[vtk] . identifier[vtkProbeFilter] ()
identifier[probeFilter] . identifier[SetSourceData] ( identifier[img] )
identifier[probeFilter] . identifier[SetInputConnection] ( identifier[line] . identifier[GetOutputPort] ())
identifier[probeFilter] . identifier[Update] ()
identifier[lact] = identifier[Actor] ( identifier[probeFilter] . identifier[GetOutput] (), identifier[c] = keyword[None] )
identifier[lact] . identifier[mapper] . identifier[SetScalarRange] ( identifier[img] . identifier[GetScalarRange] ())
keyword[return] identifier[lact] | def probeLine(img, p1, p2, res=100):
"""
Takes a ``vtkImageData`` and probes its scalars along a line defined by 2 points `p1` and `p2`.
.. hint:: |probeLine| |probeLine.py|_
"""
line = vtk.vtkLineSource()
line.SetResolution(res)
line.SetPoint1(p1)
line.SetPoint2(p2)
probeFilter = vtk.vtkProbeFilter()
probeFilter.SetSourceData(img)
probeFilter.SetInputConnection(line.GetOutputPort())
probeFilter.Update()
lact = Actor(probeFilter.GetOutput(), c=None) # ScalarVisibilityOn
lact.mapper.SetScalarRange(img.GetScalarRange())
return lact |
def parse_expression(expression: str) -> Tuple[Set[str], List[CompositeAxis]]:
    """
    Parses an indexing expression (for a single tensor).
    Checks uniqueness of names, checks usage of '...' (allowed only once)
    Returns set of all used identifiers and a list of axis groups
    """
    identifiers = set()
    composite_axes = []
    if '.' in expression:
        # dots may only appear as exactly one literal ellipsis token '...'
        if '...' not in expression:
            raise EinopsError('Expression may contain dots only inside ellipsis (...)')
        if str.count(expression, '...') != 1 or str.count(expression, '.') != 3:
            raise EinopsError('Expression may contain dots only inside ellipsis (...); only one ellipsis for tensor ')
        # collapse '...' to the single-character sentinel _ellipsis so the
        # character-by-character scan below can treat it like any other token
        expression = expression.replace('...', _ellipsis)
    # list being filled while inside '(...)'; None when outside brackets
    bracket_group = None
    def add_axis_name(x):
        # flush a completed axis name (closure over identifiers/composite_axes/bracket_group)
        if x is not None:
            if x in identifiers:
                raise ValueError('Indexing expression contains duplicate dimension "{}"'.format(x))
            identifiers.add(x)
            if bracket_group is None:
                # outside brackets, every axis forms its own singleton group
                composite_axes.append([x])
            else:
                bracket_group.append(x)
    # axis name currently being accumulated character by character
    current_identifier = None
    for char in expression:
        if char in '() ' + _ellipsis:
            # any delimiter terminates the name being accumulated
            add_axis_name(current_identifier)
            current_identifier = None
            if char == _ellipsis:
                if bracket_group is not None:
                    raise EinopsError("Ellipsis can't be used inside the composite axis (inside brackets)")
                # the ellipsis sentinel is stored directly (not wrapped in a list)
                composite_axes.append(_ellipsis)
                identifiers.add(_ellipsis)
            elif char == '(':
                if bracket_group is not None:
                    raise EinopsError("Axis composition is one-level (brackets inside brackets not allowed)")
                bracket_group = []
            elif char == ')':
                if bracket_group is None:
                    raise EinopsError('Brackets are not balanced')
                composite_axes.append(bracket_group)
                bracket_group = None
        elif '0' <= char <= '9':
            # digits may continue a name but never start one
            if current_identifier is None:
                raise EinopsError("Axis name can't start with a digit")
            current_identifier += char
        elif 'a' <= char <= 'z':
            # lower-case letters start or continue a name
            if current_identifier is None:
                current_identifier = char
            else:
                current_identifier += char
        else:
            if 'A' <= char <= 'Z':
                raise EinopsError("Only lower-case latin letters allowed in names, not '{}'".format(char))
            raise EinopsError("Unknown character '{}'".format(char))
    if bracket_group is not None:
        raise EinopsError('Imbalanced parentheses in expression: "{}"'.format(expression))
    # flush the trailing name (expression may not end with a delimiter)
    add_axis_name(current_identifier)
    return identifiers, composite_axes
constant[
Parses an indexing expression (for a single tensor).
Checks uniqueness of names, checks usage of '...' (allowed only once)
Returns set of all used identifiers and a list of axis groups
]
variable[identifiers] assign[=] call[name[set], parameter[]]
variable[composite_axes] assign[=] list[[]]
if compare[constant[.] in name[expression]] begin[:]
if compare[constant[...] <ast.NotIn object at 0x7da2590d7190> name[expression]] begin[:]
<ast.Raise object at 0x7da207f02e90>
if <ast.BoolOp object at 0x7da18f7203d0> begin[:]
<ast.Raise object at 0x7da18f7211b0>
variable[expression] assign[=] call[name[expression].replace, parameter[constant[...], name[_ellipsis]]]
variable[bracket_group] assign[=] constant[None]
def function[add_axis_name, parameter[x]]:
if compare[name[x] is_not constant[None]] begin[:]
if compare[name[x] in name[identifiers]] begin[:]
<ast.Raise object at 0x7da2046212d0>
call[name[identifiers].add, parameter[name[x]]]
if compare[name[bracket_group] is constant[None]] begin[:]
call[name[composite_axes].append, parameter[list[[<ast.Name object at 0x7da2046200d0>]]]]
variable[current_identifier] assign[=] constant[None]
for taget[name[char]] in starred[name[expression]] begin[:]
if compare[name[char] in binary_operation[constant[() ] + name[_ellipsis]]] begin[:]
call[name[add_axis_name], parameter[name[current_identifier]]]
variable[current_identifier] assign[=] constant[None]
if compare[name[char] equal[==] name[_ellipsis]] begin[:]
if compare[name[bracket_group] is_not constant[None]] begin[:]
<ast.Raise object at 0x7da1b1113d00>
call[name[composite_axes].append, parameter[name[_ellipsis]]]
call[name[identifiers].add, parameter[name[_ellipsis]]]
if compare[name[bracket_group] is_not constant[None]] begin[:]
<ast.Raise object at 0x7da2041d8490>
call[name[add_axis_name], parameter[name[current_identifier]]]
return[tuple[[<ast.Name object at 0x7da2041dbac0>, <ast.Name object at 0x7da2041d98d0>]]] | keyword[def] identifier[parse_expression] ( identifier[expression] : identifier[str] )-> identifier[Tuple] [ identifier[Set] [ identifier[str] ], identifier[List] [ identifier[CompositeAxis] ]]:
literal[string]
identifier[identifiers] = identifier[set] ()
identifier[composite_axes] =[]
keyword[if] literal[string] keyword[in] identifier[expression] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[expression] :
keyword[raise] identifier[EinopsError] ( literal[string] )
keyword[if] identifier[str] . identifier[count] ( identifier[expression] , literal[string] )!= literal[int] keyword[or] identifier[str] . identifier[count] ( identifier[expression] , literal[string] )!= literal[int] :
keyword[raise] identifier[EinopsError] ( literal[string] )
identifier[expression] = identifier[expression] . identifier[replace] ( literal[string] , identifier[_ellipsis] )
identifier[bracket_group] = keyword[None]
keyword[def] identifier[add_axis_name] ( identifier[x] ):
keyword[if] identifier[x] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[x] keyword[in] identifier[identifiers] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[x] ))
identifier[identifiers] . identifier[add] ( identifier[x] )
keyword[if] identifier[bracket_group] keyword[is] keyword[None] :
identifier[composite_axes] . identifier[append] ([ identifier[x] ])
keyword[else] :
identifier[bracket_group] . identifier[append] ( identifier[x] )
identifier[current_identifier] = keyword[None]
keyword[for] identifier[char] keyword[in] identifier[expression] :
keyword[if] identifier[char] keyword[in] literal[string] + identifier[_ellipsis] :
identifier[add_axis_name] ( identifier[current_identifier] )
identifier[current_identifier] = keyword[None]
keyword[if] identifier[char] == identifier[_ellipsis] :
keyword[if] identifier[bracket_group] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[EinopsError] ( literal[string] )
identifier[composite_axes] . identifier[append] ( identifier[_ellipsis] )
identifier[identifiers] . identifier[add] ( identifier[_ellipsis] )
keyword[elif] identifier[char] == literal[string] :
keyword[if] identifier[bracket_group] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[EinopsError] ( literal[string] )
identifier[bracket_group] =[]
keyword[elif] identifier[char] == literal[string] :
keyword[if] identifier[bracket_group] keyword[is] keyword[None] :
keyword[raise] identifier[EinopsError] ( literal[string] )
identifier[composite_axes] . identifier[append] ( identifier[bracket_group] )
identifier[bracket_group] = keyword[None]
keyword[elif] literal[string] <= identifier[char] <= literal[string] :
keyword[if] identifier[current_identifier] keyword[is] keyword[None] :
keyword[raise] identifier[EinopsError] ( literal[string] )
identifier[current_identifier] += identifier[char]
keyword[elif] literal[string] <= identifier[char] <= literal[string] :
keyword[if] identifier[current_identifier] keyword[is] keyword[None] :
identifier[current_identifier] = identifier[char]
keyword[else] :
identifier[current_identifier] += identifier[char]
keyword[else] :
keyword[if] literal[string] <= identifier[char] <= literal[string] :
keyword[raise] identifier[EinopsError] ( literal[string] . identifier[format] ( identifier[char] ))
keyword[raise] identifier[EinopsError] ( literal[string] . identifier[format] ( identifier[char] ))
keyword[if] identifier[bracket_group] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[EinopsError] ( literal[string] . identifier[format] ( identifier[expression] ))
identifier[add_axis_name] ( identifier[current_identifier] )
keyword[return] identifier[identifiers] , identifier[composite_axes] | def parse_expression(expression: str) -> Tuple[Set[str], List[CompositeAxis]]:
"""
Parses an indexing expression (for a single tensor).
Checks uniqueness of names, checks usage of '...' (allowed only once)
Returns set of all used identifiers and a list of axis groups
"""
identifiers = set()
composite_axes = []
if '.' in expression:
if '...' not in expression:
raise EinopsError('Expression may contain dots only inside ellipsis (...)') # depends on [control=['if'], data=[]]
if str.count(expression, '...') != 1 or str.count(expression, '.') != 3:
raise EinopsError('Expression may contain dots only inside ellipsis (...); only one ellipsis for tensor ') # depends on [control=['if'], data=[]]
expression = expression.replace('...', _ellipsis) # depends on [control=['if'], data=['expression']]
bracket_group = None
def add_axis_name(x):
if x is not None:
if x in identifiers:
raise ValueError('Indexing expression contains duplicate dimension "{}"'.format(x)) # depends on [control=['if'], data=['x']]
identifiers.add(x)
if bracket_group is None:
composite_axes.append([x]) # depends on [control=['if'], data=[]]
else:
bracket_group.append(x) # depends on [control=['if'], data=['x']]
current_identifier = None
for char in expression:
if char in '() ' + _ellipsis:
add_axis_name(current_identifier)
current_identifier = None
if char == _ellipsis:
if bracket_group is not None:
raise EinopsError("Ellipsis can't be used inside the composite axis (inside brackets)") # depends on [control=['if'], data=[]]
composite_axes.append(_ellipsis)
identifiers.add(_ellipsis) # depends on [control=['if'], data=['_ellipsis']]
elif char == '(':
if bracket_group is not None:
raise EinopsError('Axis composition is one-level (brackets inside brackets not allowed)') # depends on [control=['if'], data=[]]
bracket_group = [] # depends on [control=['if'], data=[]]
elif char == ')':
if bracket_group is None:
raise EinopsError('Brackets are not balanced') # depends on [control=['if'], data=[]]
composite_axes.append(bracket_group)
bracket_group = None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['char']]
elif '0' <= char <= '9':
if current_identifier is None:
raise EinopsError("Axis name can't start with a digit") # depends on [control=['if'], data=[]]
current_identifier += char # depends on [control=['if'], data=['char']]
elif 'a' <= char <= 'z':
if current_identifier is None:
current_identifier = char # depends on [control=['if'], data=['current_identifier']]
else:
current_identifier += char # depends on [control=['if'], data=['char']]
else:
if 'A' <= char <= 'Z':
raise EinopsError("Only lower-case latin letters allowed in names, not '{}'".format(char)) # depends on [control=['if'], data=['char']]
raise EinopsError("Unknown character '{}'".format(char)) # depends on [control=['for'], data=['char']]
if bracket_group is not None:
raise EinopsError('Imbalanced parentheses in expression: "{}"'.format(expression)) # depends on [control=['if'], data=[]]
add_axis_name(current_identifier)
return (identifiers, composite_axes) |
    def householder(self):
        """Return matrices (u, b, v) with ``self = u * b * v`` where b is bidiagonal.

        The decomposition is computed with Householder reflections: for each
        column k a reflection ``uk`` zeroes the entries below the diagonal,
        and for each row k a reflection ``vk`` zeroes the entries right of
        the superdiagonal.  Because Householder matrices are their own
        inverses, accumulating ``u = u1*u2*...`` and ``v = ...*v2*v1``
        reconstructs the original matrix as ``u * b * v``.

        :return tuple (u, b, v): u and v are unitary matrices, b is a
            bidiagonal matrix, and ``self = u * b * v`` up to floating-point
            rounding errors.
        :note: Currently the algorithm only works for squared matrices.
        :todo: Make sure that the bidiagonal matrix is 0.0 except for the
            bidiagonal.  Due to rounding errors, this is currently not ensured.
        """
        # copy instance to transform it to bidiagonal form.
        bidiagMatrix = Matrix.from_two_dim_array(self.get_width(), self.get_height(), self.matrix)
        # build identity matrices, used as the base of each Householder
        # reflection (uk/vk are rank-one updates of the identity).
        identityMatrixRow = Matrix(self.get_height(), self.get_height())
        for i in xrange(self.get_height()):
            identityMatrixRow.set_value(i, i, 1.0)
        identityMatrixCol = Matrix(self.get_width(), self.get_width())
        for i in xrange(self.get_width()):
            identityMatrixCol.set_value(i, i, 1.0)
        # zero out the k'th column and row
        for k in xrange(self.get_width() - 1):
            # x: the k'th column of the working matrix (first k-1 rows are 0);
            # y: the target vector that x is reflected onto.
            x = Vector(self.get_height())
            y = Vector(self.get_height())
            if k > 0:
                # keep the already-finished superdiagonal entry identical in
                # x and y so the reflection does not disturb it.
                x.set_value(0, k - 1, bidiagMatrix.get_value(k, k - 1))
                y.set_value(0, k - 1, bidiagMatrix.get_value(k, k - 1))
            s = 0.0
            for i in xrange(k, self.get_height()):
                val = bidiagMatrix.get_value(k, i)
                x.set_value(0, i, val)
                s += (val ** 2)
            # s = |x| restricted to the rows being zeroed
            s = sqrt(s)
            # y must have same length as x
            y.set_value(0, k, s)
            tmp = x - y
            norm = sqrt(sum(i[0] ** 2 for i in tmp.get_array()))
            # calculate w = (x-y)/(|x-y|)
            w = tmp / norm
            # uk is the k'th householder matrix for the column:
            # uk = I - 2*w*w^T, a reflection across the hyperplane normal to w
            uk = identityMatrixRow - 2 * (w * w.transform())
            bidiagMatrix = uk * bidiagMatrix
            if k == 0:
                # set u in first iteration.
                u = uk
            else:
                # left reflections accumulate in application order: u1*u2*...
                u = u * uk
            # zero out the k'th row (skip the last two columns, which are
            # already in bidiagonal position)
            if k < self.get_width() - 2:
                x = Vector(self.get_width())
                y = Vector(self.get_width())
                x.set_value(0, k, bidiagMatrix.get_value(k, k))
                y.set_value(0, k, bidiagMatrix.get_value(k, k))
                s = 0.0
                for i in xrange(k + 1, bidiagMatrix.get_width()):
                    val = bidiagMatrix.get_value(i, k)
                    x.set_value(0, i, val)
                    s += (val ** 2)
                # length of vector x ignoring the k'th value
                s = sqrt(s)
                # y must have same length as x, since k'th value is equal
                # set k+1 value to s
                y.set_value(0, k + 1, s)
                tmp = x - y
                norm = sqrt(sum(i[0] ** 2 for i in tmp.get_array()))
                w = tmp / norm
                # vk is the k'th householder matrix for the row
                vk = identityMatrixCol - (2 * (w * w.transform()))
                bidiagMatrix = bidiagMatrix * vk
                if k == 0:
                    # set v in first iteration
                    v = vk
                else:
                    # right reflections accumulate in reverse order: ...*v2*v1
                    v = vk * v
        return (u, bidiagMatrix, v)
constant[Return Matrices u,b,v with self = ubv and b is in bidiagonal form
The algorithm uses householder transformations.
:return tuple (u,b,v): A tuple with the Matrix u, b and v.
and self = ubv (except some rounding errors)
u is a unitary matrix
b is a bidiagonal matrix.
v is a unitary matrix.
:note: Currently the algorithm only works for squared matrices
:todo: Make sure, that the bidiagonal matrix is 0.0 except for the bidiagonal.
Due to rounding errors, this is currently not ensured
]
variable[bidiagMatrix] assign[=] call[name[Matrix].from_two_dim_array, parameter[call[name[self].get_width, parameter[]], call[name[self].get_height, parameter[]], name[self].matrix]]
variable[identityMatrixRow] assign[=] call[name[Matrix], parameter[call[name[self].get_height, parameter[]], call[name[self].get_height, parameter[]]]]
for taget[name[i]] in starred[call[name[xrange], parameter[call[name[self].get_height, parameter[]]]]] begin[:]
call[name[identityMatrixRow].set_value, parameter[name[i], name[i], constant[1.0]]]
variable[identityMatrixCol] assign[=] call[name[Matrix], parameter[call[name[self].get_width, parameter[]], call[name[self].get_width, parameter[]]]]
for taget[name[i]] in starred[call[name[xrange], parameter[call[name[self].get_width, parameter[]]]]] begin[:]
call[name[identityMatrixCol].set_value, parameter[name[i], name[i], constant[1.0]]]
for taget[name[k]] in starred[call[name[xrange], parameter[binary_operation[call[name[self].get_width, parameter[]] - constant[1]]]]] begin[:]
variable[x] assign[=] call[name[Vector], parameter[call[name[self].get_height, parameter[]]]]
variable[y] assign[=] call[name[Vector], parameter[call[name[self].get_height, parameter[]]]]
if compare[name[k] greater[>] constant[0]] begin[:]
call[name[x].set_value, parameter[constant[0], binary_operation[name[k] - constant[1]], call[name[bidiagMatrix].get_value, parameter[name[k], binary_operation[name[k] - constant[1]]]]]]
call[name[y].set_value, parameter[constant[0], binary_operation[name[k] - constant[1]], call[name[bidiagMatrix].get_value, parameter[name[k], binary_operation[name[k] - constant[1]]]]]]
variable[s] assign[=] constant[0.0]
for taget[name[i]] in starred[call[name[xrange], parameter[name[k], call[name[self].get_height, parameter[]]]]] begin[:]
variable[val] assign[=] call[name[bidiagMatrix].get_value, parameter[name[k], name[i]]]
call[name[x].set_value, parameter[constant[0], name[i], name[val]]]
<ast.AugAssign object at 0x7da18dc07340>
variable[s] assign[=] call[name[sqrt], parameter[name[s]]]
call[name[y].set_value, parameter[constant[0], name[k], name[s]]]
variable[tmp] assign[=] binary_operation[name[x] - name[y]]
variable[norm] assign[=] call[name[sqrt], parameter[call[name[sum], parameter[<ast.GeneratorExp object at 0x7da18dc045e0>]]]]
variable[w] assign[=] binary_operation[name[tmp] / name[norm]]
variable[uk] assign[=] binary_operation[name[identityMatrixRow] - binary_operation[constant[2] * binary_operation[name[w] * call[name[w].transform, parameter[]]]]]
variable[bidiagMatrix] assign[=] binary_operation[name[uk] * name[bidiagMatrix]]
if compare[name[k] equal[==] constant[0]] begin[:]
variable[u] assign[=] name[uk]
if compare[name[k] less[<] binary_operation[call[name[self].get_width, parameter[]] - constant[2]]] begin[:]
variable[x] assign[=] call[name[Vector], parameter[call[name[self].get_width, parameter[]]]]
variable[y] assign[=] call[name[Vector], parameter[call[name[self].get_width, parameter[]]]]
call[name[x].set_value, parameter[constant[0], name[k], call[name[bidiagMatrix].get_value, parameter[name[k], name[k]]]]]
call[name[y].set_value, parameter[constant[0], name[k], call[name[bidiagMatrix].get_value, parameter[name[k], name[k]]]]]
variable[s] assign[=] constant[0.0]
for taget[name[i]] in starred[call[name[xrange], parameter[binary_operation[name[k] + constant[1]], call[name[bidiagMatrix].get_width, parameter[]]]]] begin[:]
variable[val] assign[=] call[name[bidiagMatrix].get_value, parameter[name[i], name[k]]]
call[name[x].set_value, parameter[constant[0], name[i], name[val]]]
<ast.AugAssign object at 0x7da207f996f0>
variable[s] assign[=] call[name[sqrt], parameter[name[s]]]
call[name[y].set_value, parameter[constant[0], binary_operation[name[k] + constant[1]], name[s]]]
variable[tmp] assign[=] binary_operation[name[x] - name[y]]
variable[norm] assign[=] call[name[sqrt], parameter[call[name[sum], parameter[<ast.GeneratorExp object at 0x7da207f98580>]]]]
variable[w] assign[=] binary_operation[name[tmp] / name[norm]]
variable[vk] assign[=] binary_operation[name[identityMatrixCol] - binary_operation[constant[2] * binary_operation[name[w] * call[name[w].transform, parameter[]]]]]
variable[bidiagMatrix] assign[=] binary_operation[name[bidiagMatrix] * name[vk]]
if compare[name[k] equal[==] constant[0]] begin[:]
variable[v] assign[=] name[vk]
return[tuple[[<ast.Name object at 0x7da207f9a740>, <ast.Name object at 0x7da207f99cf0>, <ast.Name object at 0x7da207f9bf70>]]] | keyword[def] identifier[householder] ( identifier[self] ):
literal[string]
identifier[bidiagMatrix] = identifier[Matrix] . identifier[from_two_dim_array] ( identifier[self] . identifier[get_width] (), identifier[self] . identifier[get_height] (), identifier[self] . identifier[matrix] )
identifier[identityMatrixRow] = identifier[Matrix] ( identifier[self] . identifier[get_height] (), identifier[self] . identifier[get_height] ())
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[self] . identifier[get_height] ()):
identifier[identityMatrixRow] . identifier[set_value] ( identifier[i] , identifier[i] , literal[int] )
identifier[identityMatrixCol] = identifier[Matrix] ( identifier[self] . identifier[get_width] (), identifier[self] . identifier[get_width] ())
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[self] . identifier[get_width] ()):
identifier[identityMatrixCol] . identifier[set_value] ( identifier[i] , identifier[i] , literal[int] )
keyword[for] identifier[k] keyword[in] identifier[xrange] ( identifier[self] . identifier[get_width] ()- literal[int] ):
identifier[x] = identifier[Vector] ( identifier[self] . identifier[get_height] ())
identifier[y] = identifier[Vector] ( identifier[self] . identifier[get_height] ())
keyword[if] identifier[k] > literal[int] :
identifier[x] . identifier[set_value] ( literal[int] , identifier[k] - literal[int] , identifier[bidiagMatrix] . identifier[get_value] ( identifier[k] , identifier[k] - literal[int] ))
identifier[y] . identifier[set_value] ( literal[int] , identifier[k] - literal[int] , identifier[bidiagMatrix] . identifier[get_value] ( identifier[k] , identifier[k] - literal[int] ))
identifier[s] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[k] , identifier[self] . identifier[get_height] ()):
identifier[val] = identifier[bidiagMatrix] . identifier[get_value] ( identifier[k] , identifier[i] )
identifier[x] . identifier[set_value] ( literal[int] , identifier[i] , identifier[val] )
identifier[s] +=( identifier[val] ** literal[int] )
identifier[s] = identifier[sqrt] ( identifier[s] )
identifier[y] . identifier[set_value] ( literal[int] , identifier[k] , identifier[s] )
identifier[tmp] = identifier[x] - identifier[y]
identifier[norm] = identifier[sqrt] ( identifier[sum] ( identifier[i] [ literal[int] ]** literal[int] keyword[for] identifier[i] keyword[in] identifier[tmp] . identifier[get_array] ()))
identifier[w] = identifier[tmp] / identifier[norm]
identifier[uk] = identifier[identityMatrixRow] - literal[int] *( identifier[w] * identifier[w] . identifier[transform] ())
identifier[bidiagMatrix] = identifier[uk] * identifier[bidiagMatrix]
keyword[if] identifier[k] == literal[int] :
identifier[u] = identifier[uk]
keyword[else] :
identifier[u] = identifier[u] * identifier[uk]
keyword[if] identifier[k] < identifier[self] . identifier[get_width] ()- literal[int] :
identifier[x] = identifier[Vector] ( identifier[self] . identifier[get_width] ())
identifier[y] = identifier[Vector] ( identifier[self] . identifier[get_width] ())
identifier[x] . identifier[set_value] ( literal[int] , identifier[k] , identifier[bidiagMatrix] . identifier[get_value] ( identifier[k] , identifier[k] ))
identifier[y] . identifier[set_value] ( literal[int] , identifier[k] , identifier[bidiagMatrix] . identifier[get_value] ( identifier[k] , identifier[k] ))
identifier[s] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[k] + literal[int] , identifier[bidiagMatrix] . identifier[get_width] ()):
identifier[val] = identifier[bidiagMatrix] . identifier[get_value] ( identifier[i] , identifier[k] )
identifier[x] . identifier[set_value] ( literal[int] , identifier[i] , identifier[val] )
identifier[s] +=( identifier[val] ** literal[int] )
identifier[s] = identifier[sqrt] ( identifier[s] )
identifier[y] . identifier[set_value] ( literal[int] , identifier[k] + literal[int] , identifier[s] )
identifier[tmp] = identifier[x] - identifier[y]
identifier[norm] = identifier[sqrt] ( identifier[sum] ( identifier[i] [ literal[int] ]** literal[int] keyword[for] identifier[i] keyword[in] identifier[tmp] . identifier[get_array] ()))
identifier[w] = identifier[tmp] / identifier[norm]
identifier[vk] = identifier[identityMatrixCol] -( literal[int] *( identifier[w] * identifier[w] . identifier[transform] ()))
identifier[bidiagMatrix] = identifier[bidiagMatrix] * identifier[vk]
keyword[if] identifier[k] == literal[int] :
identifier[v] = identifier[vk]
keyword[else] :
identifier[v] = identifier[vk] * identifier[v]
keyword[return] ( identifier[u] , identifier[bidiagMatrix] , identifier[v] ) | def householder(self):
"""Return Matrices u,b,v with self = ubv and b is in bidiagonal form
The algorithm uses householder transformations.
:return tuple (u,b,v): A tuple with the Matrix u, b and v.
and self = ubv (except some rounding errors)
u is a unitary matrix
b is a bidiagonal matrix.
v is a unitary matrix.
:note: Currently the algorithm only works for squared matrices
:todo: Make sure, that the bidiagonal matrix is 0.0 except for the bidiagonal.
Due to rounding errors, this is currently not ensured
"""
# copy instance to transform it to bidiagonal form.
bidiagMatrix = Matrix.from_two_dim_array(self.get_width(), self.get_height(), self.matrix)
# build identity matrix, which is used to calculate householder transformations
identityMatrixRow = Matrix(self.get_height(), self.get_height())
for i in xrange(self.get_height()):
identityMatrixRow.set_value(i, i, 1.0) # depends on [control=['for'], data=['i']]
identityMatrixCol = Matrix(self.get_width(), self.get_width())
for i in xrange(self.get_width()):
identityMatrixCol.set_value(i, i, 1.0) # depends on [control=['for'], data=['i']]
# zero out the k'th column and row
for k in xrange(self.get_width() - 1):
# vector with the values of the k'th column (first k-1 rows are 0)
x = Vector(self.get_height())
y = Vector(self.get_height())
if k > 0:
x.set_value(0, k - 1, bidiagMatrix.get_value(k, k - 1))
y.set_value(0, k - 1, bidiagMatrix.get_value(k, k - 1)) # depends on [control=['if'], data=['k']]
s = 0.0
for i in xrange(k, self.get_height()):
val = bidiagMatrix.get_value(k, i)
x.set_value(0, i, val)
s += val ** 2 # depends on [control=['for'], data=['i']]
s = sqrt(s)
# y must have same length as x
y.set_value(0, k, s)
tmp = x - y
norm = sqrt(sum((i[0] ** 2 for i in tmp.get_array())))
# calculate w = (x-y)/(|x-y|)
w = tmp / norm
# uk is the k'th householder matrix for the column
uk = identityMatrixRow - 2 * (w * w.transform())
bidiagMatrix = uk * bidiagMatrix
if k == 0:
# set u in first iteration.
u = uk # depends on [control=['if'], data=[]]
else:
u = u * uk
# zero out the the row
if k < self.get_width() - 2:
x = Vector(self.get_width())
y = Vector(self.get_width())
x.set_value(0, k, bidiagMatrix.get_value(k, k))
y.set_value(0, k, bidiagMatrix.get_value(k, k))
s = 0.0
for i in xrange(k + 1, bidiagMatrix.get_width()):
val = bidiagMatrix.get_value(i, k)
x.set_value(0, i, val)
s += val ** 2 # depends on [control=['for'], data=['i']]
# length of vector x ignoring the k'th value
s = sqrt(s)
# y must have same length as x, since k'th value is equal
# set k+1 value to s
y.set_value(0, k + 1, s)
tmp = x - y
norm = sqrt(sum((i[0] ** 2 for i in tmp.get_array())))
w = tmp / norm
# vk is the k'th householder matrix for the row
vk = identityMatrixCol - 2 * (w * w.transform())
bidiagMatrix = bidiagMatrix * vk
if k == 0:
# set v in first iteration
v = vk # depends on [control=['if'], data=[]]
else:
v = vk * v # depends on [control=['if'], data=['k']] # depends on [control=['for'], data=['k']]
return (u, bidiagMatrix, v) |
def install(pkg=None,
            pkgs=None,
            user=None,
            install_global=False,
            env=None):
    '''
    Install a cabal package.
    pkg
        A package name in format accepted by cabal-install. See:
        https://wiki.haskell.org/Cabal-Install
    pkgs
        A list of packages names in same format as ``pkg``
    user
        The user to run cabal install with
    install_global
        Install package globally instead of locally
    env
        Environment variables to set when invoking cabal. Uses the
        same ``env`` format as the :py:func:`cmd.run
        <salt.modules.cmdmod.run>` execution function
    CLI Example:
    .. code-block:: bash
        salt '*' cabal.install shellcheck
        salt '*' cabal.install shellcheck-0.3.5
    '''
    # Assemble the command line piece by piece, then join once.
    pieces = ['cabal install']
    if install_global:
        pieces.append('--global')
    # Normalize the single-package and multi-package cases into one list,
    # then emit them as a single double-quoted argument group.
    if pkg:
        names = [pkg]
    elif pkgs:
        names = list(pkgs)
    else:
        names = []
    if names:
        pieces.append('"{0}"'.format('" "'.join(names)))
    result = __salt__['cmd.run_all'](' '.join(pieces), runas=user, env=env)
    if result['retcode'] != 0:
        raise CommandExecutionError(result['stderr'])
    return result
constant[
Install a cabal package.
pkg
A package name in format accepted by cabal-install. See:
https://wiki.haskell.org/Cabal-Install
pkgs
A list of packages names in same format as ``pkg``
user
The user to run cabal install with
install_global
Install package globally instead of locally
env
Environment variables to set when invoking cabal. Uses the
same ``env`` format as the :py:func:`cmd.run
<salt.modules.cmdmod.run>` execution function
CLI Example:
.. code-block:: bash
salt '*' cabal.install shellcheck
salt '*' cabal.install shellcheck-0.3.5
]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da1b21ae7d0>]]
if name[install_global] begin[:]
call[name[cmd].append, parameter[constant[--global]]]
if name[pkg] begin[:]
call[name[cmd].append, parameter[call[constant["{0}"].format, parameter[name[pkg]]]]]
variable[result] assign[=] call[call[name[__salt__]][constant[cmd.run_all]], parameter[call[constant[ ].join, parameter[name[cmd]]]]]
if compare[call[name[result]][constant[retcode]] not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da1b1cb0490>
return[name[result]] | keyword[def] identifier[install] ( identifier[pkg] = keyword[None] ,
identifier[pkgs] = keyword[None] ,
identifier[user] = keyword[None] ,
identifier[install_global] = keyword[False] ,
identifier[env] = keyword[None] ):
literal[string]
identifier[cmd] =[ literal[string] ]
keyword[if] identifier[install_global] :
identifier[cmd] . identifier[append] ( literal[string] )
keyword[if] identifier[pkg] :
identifier[cmd] . identifier[append] ( literal[string] . identifier[format] ( identifier[pkg] ))
keyword[elif] identifier[pkgs] :
identifier[cmd] . identifier[append] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[pkgs] )))
identifier[result] = identifier[__salt__] [ literal[string] ]( literal[string] . identifier[join] ( identifier[cmd] ), identifier[runas] = identifier[user] , identifier[env] = identifier[env] )
keyword[if] identifier[result] [ literal[string] ]!= literal[int] :
keyword[raise] identifier[CommandExecutionError] ( identifier[result] [ literal[string] ])
keyword[return] identifier[result] | def install(pkg=None, pkgs=None, user=None, install_global=False, env=None):
"""
Install a cabal package.
pkg
A package name in format accepted by cabal-install. See:
https://wiki.haskell.org/Cabal-Install
pkgs
A list of packages names in same format as ``pkg``
user
The user to run cabal install with
install_global
Install package globally instead of locally
env
Environment variables to set when invoking cabal. Uses the
same ``env`` format as the :py:func:`cmd.run
<salt.modules.cmdmod.run>` execution function
CLI Example:
.. code-block:: bash
salt '*' cabal.install shellcheck
salt '*' cabal.install shellcheck-0.3.5
"""
cmd = ['cabal install']
if install_global:
cmd.append('--global') # depends on [control=['if'], data=[]]
if pkg:
cmd.append('"{0}"'.format(pkg)) # depends on [control=['if'], data=[]]
elif pkgs:
cmd.append('"{0}"'.format('" "'.join(pkgs))) # depends on [control=['if'], data=[]]
result = __salt__['cmd.run_all'](' '.join(cmd), runas=user, env=env)
if result['retcode'] != 0:
raise CommandExecutionError(result['stderr']) # depends on [control=['if'], data=[]]
return result |
def requiredUnlessTable(col_name, arg, dm, df, con=None):
    """
    Col_name must be present in df unless
    arg (table_name) is present in contribution
    """
    table_name = arg
    # The requirement is satisfied when the column exists, when there is no
    # contribution to check against, or when the named table is present.
    satisfied = (
        col_name in df.columns
        or not con
        or table_name in con.tables
    )
    if satisfied:
        return None
    return "{} column is required unless table {} is present".format(col_name, table_name)
constant[
Col_name must be present in df unless
arg (table_name) is present in contribution
]
variable[table_name] assign[=] name[arg]
if compare[name[col_name] in name[df].columns] begin[:]
return[constant[None]] | keyword[def] identifier[requiredUnlessTable] ( identifier[col_name] , identifier[arg] , identifier[dm] , identifier[df] , identifier[con] = keyword[None] ):
literal[string]
identifier[table_name] = identifier[arg]
keyword[if] identifier[col_name] keyword[in] identifier[df] . identifier[columns] :
keyword[return] keyword[None]
keyword[elif] keyword[not] identifier[con] :
keyword[return] keyword[None]
keyword[elif] identifier[table_name] keyword[in] identifier[con] . identifier[tables] :
keyword[return] keyword[None]
keyword[else] :
keyword[return] literal[string] . identifier[format] ( identifier[col_name] , identifier[table_name] ) | def requiredUnlessTable(col_name, arg, dm, df, con=None):
"""
Col_name must be present in df unless
arg (table_name) is present in contribution
"""
table_name = arg
if col_name in df.columns:
return None # depends on [control=['if'], data=[]]
elif not con:
return None # depends on [control=['if'], data=[]]
elif table_name in con.tables:
return None # depends on [control=['if'], data=[]]
else:
return '{} column is required unless table {} is present'.format(col_name, table_name) |
def showDraw(self, fignum=1):
""" show the element drawing
:param fignum: define figure number to show element drawing
"""
if self._patches == []:
print("Please setDraw() before showDraw(), then try again.")
return
else:
fig = plt.figure(fignum)
fig.clear()
ax = fig.add_subplot(111, aspect='equal')
[ax.add_patch(i) for i in self._patches]
bbox = self._patches[0].get_path().get_extents()
x0 = 2.0 * min(bbox.xmin, bbox.ymin)
x1 = 2.0 * max(bbox.xmax, bbox.ymax)
ax.set_xlim(x0, x1)
ax.set_ylim(x0, x1)
# x1,y1=tuple(self.nextp0)
# x2,y2=tuple(self.nextp1)
# x3,y3=tuple(self.nextpc)
# ax.plot([x1,x2,x3], [y1,y2,y3], 'o')#, ms=5, fc='b', ec='b')
x, y = tuple(self.next_p0)
ax.plot(x, y, 'o', ms=10, c='b')
ax.annotate(s=self._anote['name'],
xy=self._anote['xypos'],
xytext=self._anote['textpos'],
textcoords='data',
arrowprops=dict(arrowstyle='->'),
rotation=-90,
fontsize='small')
fig.canvas.draw()
plt.grid()
plt.show() | def function[showDraw, parameter[self, fignum]]:
constant[ show the element drawing
:param fignum: define figure number to show element drawing
]
if compare[name[self]._patches equal[==] list[[]]] begin[:]
call[name[print], parameter[constant[Please setDraw() before showDraw(), then try again.]]]
return[None] | keyword[def] identifier[showDraw] ( identifier[self] , identifier[fignum] = literal[int] ):
literal[string]
keyword[if] identifier[self] . identifier[_patches] ==[]:
identifier[print] ( literal[string] )
keyword[return]
keyword[else] :
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[fignum] )
identifier[fig] . identifier[clear] ()
identifier[ax] = identifier[fig] . identifier[add_subplot] ( literal[int] , identifier[aspect] = literal[string] )
[ identifier[ax] . identifier[add_patch] ( identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[self] . identifier[_patches] ]
identifier[bbox] = identifier[self] . identifier[_patches] [ literal[int] ]. identifier[get_path] (). identifier[get_extents] ()
identifier[x0] = literal[int] * identifier[min] ( identifier[bbox] . identifier[xmin] , identifier[bbox] . identifier[ymin] )
identifier[x1] = literal[int] * identifier[max] ( identifier[bbox] . identifier[xmax] , identifier[bbox] . identifier[ymax] )
identifier[ax] . identifier[set_xlim] ( identifier[x0] , identifier[x1] )
identifier[ax] . identifier[set_ylim] ( identifier[x0] , identifier[x1] )
identifier[x] , identifier[y] = identifier[tuple] ( identifier[self] . identifier[next_p0] )
identifier[ax] . identifier[plot] ( identifier[x] , identifier[y] , literal[string] , identifier[ms] = literal[int] , identifier[c] = literal[string] )
identifier[ax] . identifier[annotate] ( identifier[s] = identifier[self] . identifier[_anote] [ literal[string] ],
identifier[xy] = identifier[self] . identifier[_anote] [ literal[string] ],
identifier[xytext] = identifier[self] . identifier[_anote] [ literal[string] ],
identifier[textcoords] = literal[string] ,
identifier[arrowprops] = identifier[dict] ( identifier[arrowstyle] = literal[string] ),
identifier[rotation] =- literal[int] ,
identifier[fontsize] = literal[string] )
identifier[fig] . identifier[canvas] . identifier[draw] ()
identifier[plt] . identifier[grid] ()
identifier[plt] . identifier[show] () | def showDraw(self, fignum=1):
""" show the element drawing
:param fignum: define figure number to show element drawing
"""
if self._patches == []:
print('Please setDraw() before showDraw(), then try again.')
return # depends on [control=['if'], data=[]]
else:
fig = plt.figure(fignum)
fig.clear()
ax = fig.add_subplot(111, aspect='equal')
[ax.add_patch(i) for i in self._patches]
bbox = self._patches[0].get_path().get_extents()
x0 = 2.0 * min(bbox.xmin, bbox.ymin)
x1 = 2.0 * max(bbox.xmax, bbox.ymax)
ax.set_xlim(x0, x1)
ax.set_ylim(x0, x1)
# x1,y1=tuple(self.nextp0)
# x2,y2=tuple(self.nextp1)
# x3,y3=tuple(self.nextpc)
# ax.plot([x1,x2,x3], [y1,y2,y3], 'o')#, ms=5, fc='b', ec='b')
(x, y) = tuple(self.next_p0)
ax.plot(x, y, 'o', ms=10, c='b')
ax.annotate(s=self._anote['name'], xy=self._anote['xypos'], xytext=self._anote['textpos'], textcoords='data', arrowprops=dict(arrowstyle='->'), rotation=-90, fontsize='small')
fig.canvas.draw()
plt.grid()
plt.show() |
def _generate_regex(self, line):
'''
Generates a regular expression from the magic bytes of a signature.
The regex is used by Magic._analyze.
@line - The first SignatureLine object of the signature.
Returns a compile regular expression.
'''
restr = ""
# Strings and single byte signatures are taken at face value;
# multi-byte integer values are turned into regex strings based
# on their data type size and endianness.
if line.type == 'regex':
# Regex types are already compiled expressions.
# Note that since re.finditer is used, unless the specified
# regex accounts for it, overlapping signatures will be ignored.
return line.value
if line.type == 'string':
restr = line.value
elif line.size == 1:
restr = chr(line.value)
elif line.size == 2:
if line.endianness == '<':
restr = chr(line.value & 0xFF) + chr(line.value >> 8)
elif line.endianness == '>':
restr = chr(line.value >> 8) + chr(line.value & 0xFF)
elif line.size == 4:
if line.endianness == '<':
restr = (chr(line.value & 0xFF) +
chr((line.value >> 8) & 0xFF) +
chr((line.value >> 16) & 0xFF) +
chr(line.value >> 24))
elif line.endianness == '>':
restr = (chr(line.value >> 24) +
chr((line.value >> 16) & 0xFF) +
chr((line.value >> 8) & 0xFF) +
chr(line.value & 0xFF))
elif line.size == 8:
if line.endianness == '<':
restr = (chr(line.value & 0xFF) +
chr((line.value >> 8) & 0xFF) +
chr((line.value >> 16) & 0xFF) +
chr((line.value >> 24) & 0xFF) +
chr((line.value >> 32) & 0xFF) +
chr((line.value >> 40) & 0xFF) +
chr((line.value >> 48) & 0xFF) +
chr(line.value >> 56))
elif line.endianness == '>':
restr = (chr(line.value >> 56) +
chr((line.value >> 48) & 0xFF) +
chr((line.value >> 40) & 0xFF) +
chr((line.value >> 32) & 0xFF) +
chr((line.value >> 24) & 0xFF) +
chr((line.value >> 16) & 0xFF) +
chr((line.value >> 8) & 0xFF) +
chr(line.value & 0xFF))
# Since re.finditer is used on a per-signature basis, signatures should be crafted carefully
# to ensure that they aren't potentially self-overlapping (e.g., a signature of "ABCDAB" could
# be confused by the byte sequence "ABCDABCDAB"). The longer the signature, the less likely an
# unintentional overlap is, although files could still be maliciously crafted to cause false
# negative results.
#
# Thus, unless a signature has been explicitly marked as knowingly overlapping ('{overlap}'),
# spit out a warning about any self-overlapping signatures.
if not binwalk.core.compat.has_key(line.tags, 'overlap'):
for i in range(1, line.size):
if restr[i:] == restr[0:(line.size - i)]:
binwalk.core.common.warning("Signature '%s' is a self-overlapping signature!" % line.text)
break
return re.compile(re.escape(restr)) | def function[_generate_regex, parameter[self, line]]:
constant[
Generates a regular expression from the magic bytes of a signature.
The regex is used by Magic._analyze.
@line - The first SignatureLine object of the signature.
Returns a compile regular expression.
]
variable[restr] assign[=] constant[]
if compare[name[line].type equal[==] constant[regex]] begin[:]
return[name[line].value]
if compare[name[line].type equal[==] constant[string]] begin[:]
variable[restr] assign[=] name[line].value
if <ast.UnaryOp object at 0x7da1b1c00fd0> begin[:]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], name[line].size]]] begin[:]
if compare[call[name[restr]][<ast.Slice object at 0x7da1b215d8d0>] equal[==] call[name[restr]][<ast.Slice object at 0x7da1b215e4d0>]] begin[:]
call[name[binwalk].core.common.warning, parameter[binary_operation[constant[Signature '%s' is a self-overlapping signature!] <ast.Mod object at 0x7da2590d6920> name[line].text]]]
break
return[call[name[re].compile, parameter[call[name[re].escape, parameter[name[restr]]]]]] | keyword[def] identifier[_generate_regex] ( identifier[self] , identifier[line] ):
literal[string]
identifier[restr] = literal[string]
keyword[if] identifier[line] . identifier[type] == literal[string] :
keyword[return] identifier[line] . identifier[value]
keyword[if] identifier[line] . identifier[type] == literal[string] :
identifier[restr] = identifier[line] . identifier[value]
keyword[elif] identifier[line] . identifier[size] == literal[int] :
identifier[restr] = identifier[chr] ( identifier[line] . identifier[value] )
keyword[elif] identifier[line] . identifier[size] == literal[int] :
keyword[if] identifier[line] . identifier[endianness] == literal[string] :
identifier[restr] = identifier[chr] ( identifier[line] . identifier[value] & literal[int] )+ identifier[chr] ( identifier[line] . identifier[value] >> literal[int] )
keyword[elif] identifier[line] . identifier[endianness] == literal[string] :
identifier[restr] = identifier[chr] ( identifier[line] . identifier[value] >> literal[int] )+ identifier[chr] ( identifier[line] . identifier[value] & literal[int] )
keyword[elif] identifier[line] . identifier[size] == literal[int] :
keyword[if] identifier[line] . identifier[endianness] == literal[string] :
identifier[restr] =( identifier[chr] ( identifier[line] . identifier[value] & literal[int] )+
identifier[chr] (( identifier[line] . identifier[value] >> literal[int] )& literal[int] )+
identifier[chr] (( identifier[line] . identifier[value] >> literal[int] )& literal[int] )+
identifier[chr] ( identifier[line] . identifier[value] >> literal[int] ))
keyword[elif] identifier[line] . identifier[endianness] == literal[string] :
identifier[restr] =( identifier[chr] ( identifier[line] . identifier[value] >> literal[int] )+
identifier[chr] (( identifier[line] . identifier[value] >> literal[int] )& literal[int] )+
identifier[chr] (( identifier[line] . identifier[value] >> literal[int] )& literal[int] )+
identifier[chr] ( identifier[line] . identifier[value] & literal[int] ))
keyword[elif] identifier[line] . identifier[size] == literal[int] :
keyword[if] identifier[line] . identifier[endianness] == literal[string] :
identifier[restr] =( identifier[chr] ( identifier[line] . identifier[value] & literal[int] )+
identifier[chr] (( identifier[line] . identifier[value] >> literal[int] )& literal[int] )+
identifier[chr] (( identifier[line] . identifier[value] >> literal[int] )& literal[int] )+
identifier[chr] (( identifier[line] . identifier[value] >> literal[int] )& literal[int] )+
identifier[chr] (( identifier[line] . identifier[value] >> literal[int] )& literal[int] )+
identifier[chr] (( identifier[line] . identifier[value] >> literal[int] )& literal[int] )+
identifier[chr] (( identifier[line] . identifier[value] >> literal[int] )& literal[int] )+
identifier[chr] ( identifier[line] . identifier[value] >> literal[int] ))
keyword[elif] identifier[line] . identifier[endianness] == literal[string] :
identifier[restr] =( identifier[chr] ( identifier[line] . identifier[value] >> literal[int] )+
identifier[chr] (( identifier[line] . identifier[value] >> literal[int] )& literal[int] )+
identifier[chr] (( identifier[line] . identifier[value] >> literal[int] )& literal[int] )+
identifier[chr] (( identifier[line] . identifier[value] >> literal[int] )& literal[int] )+
identifier[chr] (( identifier[line] . identifier[value] >> literal[int] )& literal[int] )+
identifier[chr] (( identifier[line] . identifier[value] >> literal[int] )& literal[int] )+
identifier[chr] (( identifier[line] . identifier[value] >> literal[int] )& literal[int] )+
identifier[chr] ( identifier[line] . identifier[value] & literal[int] ))
keyword[if] keyword[not] identifier[binwalk] . identifier[core] . identifier[compat] . identifier[has_key] ( identifier[line] . identifier[tags] , literal[string] ):
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[line] . identifier[size] ):
keyword[if] identifier[restr] [ identifier[i] :]== identifier[restr] [ literal[int] :( identifier[line] . identifier[size] - identifier[i] )]:
identifier[binwalk] . identifier[core] . identifier[common] . identifier[warning] ( literal[string] % identifier[line] . identifier[text] )
keyword[break]
keyword[return] identifier[re] . identifier[compile] ( identifier[re] . identifier[escape] ( identifier[restr] )) | def _generate_regex(self, line):
"""
Generates a regular expression from the magic bytes of a signature.
The regex is used by Magic._analyze.
@line - The first SignatureLine object of the signature.
Returns a compile regular expression.
"""
restr = ''
# Strings and single byte signatures are taken at face value;
# multi-byte integer values are turned into regex strings based
# on their data type size and endianness.
if line.type == 'regex':
# Regex types are already compiled expressions.
# Note that since re.finditer is used, unless the specified
# regex accounts for it, overlapping signatures will be ignored.
return line.value # depends on [control=['if'], data=[]]
if line.type == 'string':
restr = line.value # depends on [control=['if'], data=[]]
elif line.size == 1:
restr = chr(line.value) # depends on [control=['if'], data=[]]
elif line.size == 2:
if line.endianness == '<':
restr = chr(line.value & 255) + chr(line.value >> 8) # depends on [control=['if'], data=[]]
elif line.endianness == '>':
restr = chr(line.value >> 8) + chr(line.value & 255) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif line.size == 4:
if line.endianness == '<':
restr = chr(line.value & 255) + chr(line.value >> 8 & 255) + chr(line.value >> 16 & 255) + chr(line.value >> 24) # depends on [control=['if'], data=[]]
elif line.endianness == '>':
restr = chr(line.value >> 24) + chr(line.value >> 16 & 255) + chr(line.value >> 8 & 255) + chr(line.value & 255) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif line.size == 8:
if line.endianness == '<':
restr = chr(line.value & 255) + chr(line.value >> 8 & 255) + chr(line.value >> 16 & 255) + chr(line.value >> 24 & 255) + chr(line.value >> 32 & 255) + chr(line.value >> 40 & 255) + chr(line.value >> 48 & 255) + chr(line.value >> 56) # depends on [control=['if'], data=[]]
elif line.endianness == '>':
restr = chr(line.value >> 56) + chr(line.value >> 48 & 255) + chr(line.value >> 40 & 255) + chr(line.value >> 32 & 255) + chr(line.value >> 24 & 255) + chr(line.value >> 16 & 255) + chr(line.value >> 8 & 255) + chr(line.value & 255) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Since re.finditer is used on a per-signature basis, signatures should be crafted carefully
# to ensure that they aren't potentially self-overlapping (e.g., a signature of "ABCDAB" could
# be confused by the byte sequence "ABCDABCDAB"). The longer the signature, the less likely an
# unintentional overlap is, although files could still be maliciously crafted to cause false
# negative results.
#
# Thus, unless a signature has been explicitly marked as knowingly overlapping ('{overlap}'),
# spit out a warning about any self-overlapping signatures.
if not binwalk.core.compat.has_key(line.tags, 'overlap'):
for i in range(1, line.size):
if restr[i:] == restr[0:line.size - i]:
binwalk.core.common.warning("Signature '%s' is a self-overlapping signature!" % line.text)
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['if'], data=[]]
return re.compile(re.escape(restr)) |
def layers(self):
    """Yield a shallow view on a layer of this DAGCircuit for all d layers of this circuit.
    A layer is a circuit whose gates act on disjoint qubits, i.e.
    a layer has depth 1. The total number of layers equals the
    circuit depth d. The layers are indexed from 0 to d-1 with the
    earliest layer at index 0. The layers are constructed using a
    greedy algorithm. Each returned layer is a dict containing
    {"graph": circuit graph, "partition": list of qubit lists}.
    TODO: Gates that use the same cbits will end up in different
    layers as this is currently implemented. This may not be
    the desired behavior.
    """
    graph_layers = self.multigraph_layers()
    try:
        next(graph_layers)  # Remove input nodes
    except StopIteration:
        # No layers at all: nothing to yield.
        return

    def add_nodes_from(layer, nodes):
        """ Convert DAGNodes into a format that can be added to a
        multigraph and then add to graph"""
        layer._multi_graph.add_nodes_from(nodes)

    for graph_layer in graph_layers:
        # Get the op nodes from the layer, removing any input and output nodes.
        op_nodes = [node for node in graph_layer if node.type == "op"]
        # Stop yielding once there are no more op_nodes in a layer.
        if not op_nodes:
            return
        # Construct a shallow copy of self: same registers and I/O nodes,
        # but containing only this layer's op nodes.
        new_layer = DAGCircuit()
        new_layer.name = self.name
        for creg in self.cregs.values():
            new_layer.add_creg(creg)
        for qreg in self.qregs.values():
            new_layer.add_qreg(qreg)
        add_nodes_from(new_layer, self.input_map.values())
        add_nodes_from(new_layer, self.output_map.values())
        add_nodes_from(new_layer, op_nodes)
        # The quantum registers that have an operation in this layer.
        # Directive-style ops (barrier/snapshot/save/load/noise) are
        # excluded from the partition.
        support_list = [
            op_node.qargs
            for op_node in op_nodes
            if op_node.name not in {"barrier", "snapshot", "save", "load", "noise"}
        ]
        # Now add the edges to the multi_graph
        # By default we just wire inputs to the outputs.
        wires = {self.input_map[wire]: self.output_map[wire]
                 for wire in self.wires}
        # Wire inputs to op nodes, and op nodes to outputs.
        for op_node in op_nodes:
            # Condition (classical control) bits come first, then cargs,
            # then qargs.
            args = self._bits_in_condition(op_node.condition) \
                + op_node.cargs + op_node.qargs
            arg_ids = (self.input_map[(arg[0], arg[1])] for arg in args)
            for arg_id in arg_ids:
                # Splice op_node into each wire: the wire's source now
                # feeds op_node, and op_node feeds whatever the wire
                # previously fed (simultaneous assignment keeps the old
                # target available on the right-hand side).
                wires[arg_id], wires[op_node] = op_node, wires[arg_id]
        # Add wiring to/from the operations and between unused inputs & outputs.
        new_layer._multi_graph.add_edges_from(wires.items())
        yield {"graph": new_layer, "partition": support_list}
constant[Yield a shallow view on a layer of this DAGCircuit for all d layers of this circuit.
A layer is a circuit whose gates act on disjoint qubits, i.e.
a layer has depth 1. The total number of layers equals the
circuit depth d. The layers are indexed from 0 to d-1 with the
earliest layer at index 0. The layers are constructed using a
greedy algorithm. Each returned layer is a dict containing
{"graph": circuit graph, "partition": list of qubit lists}.
TODO: Gates that use the same cbits will end up in different
layers as this is currently implemented. This may not be
the desired behavior.
]
variable[graph_layers] assign[=] call[name[self].multigraph_layers, parameter[]]
<ast.Try object at 0x7da1b03a7d60>
def function[add_nodes_from, parameter[layer, nodes]]:
constant[ Convert DAGNodes into a format that can be added to a
multigraph and then add to graph]
call[name[layer]._multi_graph.add_nodes_from, parameter[name[nodes]]]
for taget[name[graph_layer]] in starred[name[graph_layers]] begin[:]
variable[op_nodes] assign[=] <ast.ListComp object at 0x7da1b03a77f0>
if <ast.UnaryOp object at 0x7da1b03a75e0> begin[:]
return[None]
variable[new_layer] assign[=] call[name[DAGCircuit], parameter[]]
name[new_layer].name assign[=] name[self].name
for taget[name[creg]] in starred[call[name[self].cregs.values, parameter[]]] begin[:]
call[name[new_layer].add_creg, parameter[name[creg]]]
for taget[name[qreg]] in starred[call[name[self].qregs.values, parameter[]]] begin[:]
call[name[new_layer].add_qreg, parameter[name[qreg]]]
call[name[add_nodes_from], parameter[name[new_layer], call[name[self].input_map.values, parameter[]]]]
call[name[add_nodes_from], parameter[name[new_layer], call[name[self].output_map.values, parameter[]]]]
call[name[add_nodes_from], parameter[name[new_layer], name[op_nodes]]]
variable[support_list] assign[=] <ast.ListComp object at 0x7da1b03a6a10>
variable[wires] assign[=] <ast.DictComp object at 0x7da1b03a6650>
for taget[name[op_node]] in starred[name[op_nodes]] begin[:]
variable[args] assign[=] binary_operation[binary_operation[call[name[self]._bits_in_condition, parameter[name[op_node].condition]] + name[op_node].cargs] + name[op_node].qargs]
variable[arg_ids] assign[=] <ast.GeneratorExp object at 0x7da1b03a5540>
for taget[name[arg_id]] in starred[name[arg_ids]] begin[:]
<ast.Tuple object at 0x7da1b059f010> assign[=] tuple[[<ast.Name object at 0x7da1b059caf0>, <ast.Subscript object at 0x7da1b059ef80>]]
call[name[new_layer]._multi_graph.add_edges_from, parameter[call[name[wires].items, parameter[]]]]
<ast.Yield object at 0x7da1b059e650> | keyword[def] identifier[layers] ( identifier[self] ):
literal[string]
identifier[graph_layers] = identifier[self] . identifier[multigraph_layers] ()
keyword[try] :
identifier[next] ( identifier[graph_layers] )
keyword[except] identifier[StopIteration] :
keyword[return]
keyword[def] identifier[add_nodes_from] ( identifier[layer] , identifier[nodes] ):
literal[string]
identifier[layer] . identifier[_multi_graph] . identifier[add_nodes_from] ( identifier[nodes] )
keyword[for] identifier[graph_layer] keyword[in] identifier[graph_layers] :
identifier[op_nodes] =[ identifier[node] keyword[for] identifier[node] keyword[in] identifier[graph_layer] keyword[if] identifier[node] . identifier[type] == literal[string] ]
keyword[if] keyword[not] identifier[op_nodes] :
keyword[return]
identifier[new_layer] = identifier[DAGCircuit] ()
identifier[new_layer] . identifier[name] = identifier[self] . identifier[name]
keyword[for] identifier[creg] keyword[in] identifier[self] . identifier[cregs] . identifier[values] ():
identifier[new_layer] . identifier[add_creg] ( identifier[creg] )
keyword[for] identifier[qreg] keyword[in] identifier[self] . identifier[qregs] . identifier[values] ():
identifier[new_layer] . identifier[add_qreg] ( identifier[qreg] )
identifier[add_nodes_from] ( identifier[new_layer] , identifier[self] . identifier[input_map] . identifier[values] ())
identifier[add_nodes_from] ( identifier[new_layer] , identifier[self] . identifier[output_map] . identifier[values] ())
identifier[add_nodes_from] ( identifier[new_layer] , identifier[op_nodes] )
identifier[support_list] =[
identifier[op_node] . identifier[qargs]
keyword[for] identifier[op_node] keyword[in] identifier[op_nodes]
keyword[if] identifier[op_node] . identifier[name] keyword[not] keyword[in] { literal[string] , literal[string] , literal[string] , literal[string] , literal[string] }
]
identifier[wires] ={ identifier[self] . identifier[input_map] [ identifier[wire] ]: identifier[self] . identifier[output_map] [ identifier[wire] ]
keyword[for] identifier[wire] keyword[in] identifier[self] . identifier[wires] }
keyword[for] identifier[op_node] keyword[in] identifier[op_nodes] :
identifier[args] = identifier[self] . identifier[_bits_in_condition] ( identifier[op_node] . identifier[condition] )+ identifier[op_node] . identifier[cargs] + identifier[op_node] . identifier[qargs]
identifier[arg_ids] =( identifier[self] . identifier[input_map] [( identifier[arg] [ literal[int] ], identifier[arg] [ literal[int] ])] keyword[for] identifier[arg] keyword[in] identifier[args] )
keyword[for] identifier[arg_id] keyword[in] identifier[arg_ids] :
identifier[wires] [ identifier[arg_id] ], identifier[wires] [ identifier[op_node] ]= identifier[op_node] , identifier[wires] [ identifier[arg_id] ]
identifier[new_layer] . identifier[_multi_graph] . identifier[add_edges_from] ( identifier[wires] . identifier[items] ())
keyword[yield] { literal[string] : identifier[new_layer] , literal[string] : identifier[support_list] } | def layers(self):
"""Yield a shallow view on a layer of this DAGCircuit for all d layers of this circuit.
A layer is a circuit whose gates act on disjoint qubits, i.e.
a layer has depth 1. The total number of layers equals the
circuit depth d. The layers are indexed from 0 to d-1 with the
earliest layer at index 0. The layers are constructed using a
greedy algorithm. Each returned layer is a dict containing
{"graph": circuit graph, "partition": list of qubit lists}.
TODO: Gates that use the same cbits will end up in different
layers as this is currently implemented. This may not be
the desired behavior.
"""
graph_layers = self.multigraph_layers()
try:
next(graph_layers) # Remove input nodes # depends on [control=['try'], data=[]]
except StopIteration:
return # depends on [control=['except'], data=[]]
def add_nodes_from(layer, nodes):
""" Convert DAGNodes into a format that can be added to a
multigraph and then add to graph"""
layer._multi_graph.add_nodes_from(nodes)
for graph_layer in graph_layers:
# Get the op nodes from the layer, removing any input and output nodes.
op_nodes = [node for node in graph_layer if node.type == 'op']
# Stop yielding once there are no more op_nodes in a layer.
if not op_nodes:
return # depends on [control=['if'], data=[]]
# Construct a shallow copy of self
new_layer = DAGCircuit()
new_layer.name = self.name
for creg in self.cregs.values():
new_layer.add_creg(creg) # depends on [control=['for'], data=['creg']]
for qreg in self.qregs.values():
new_layer.add_qreg(qreg) # depends on [control=['for'], data=['qreg']]
add_nodes_from(new_layer, self.input_map.values())
add_nodes_from(new_layer, self.output_map.values())
add_nodes_from(new_layer, op_nodes)
# The quantum registers that have an operation in this layer.
support_list = [op_node.qargs for op_node in op_nodes if op_node.name not in {'barrier', 'snapshot', 'save', 'load', 'noise'}]
# Now add the edges to the multi_graph
# By default we just wire inputs to the outputs.
wires = {self.input_map[wire]: self.output_map[wire] for wire in self.wires}
# Wire inputs to op nodes, and op nodes to outputs.
for op_node in op_nodes:
args = self._bits_in_condition(op_node.condition) + op_node.cargs + op_node.qargs
arg_ids = (self.input_map[arg[0], arg[1]] for arg in args)
for arg_id in arg_ids:
(wires[arg_id], wires[op_node]) = (op_node, wires[arg_id]) # depends on [control=['for'], data=['arg_id']] # depends on [control=['for'], data=['op_node']]
# Add wiring to/from the operations and between unused inputs & outputs.
new_layer._multi_graph.add_edges_from(wires.items())
yield {'graph': new_layer, 'partition': support_list} # depends on [control=['for'], data=['graph_layer']] |
def create_context(self, message_queue, task_id):
    """
    Create values to be used by create_small_file function.
    :param message_queue: Queue: queue background process can send messages to us on
    :param task_id: int: id of this command's task so message will be routed correctly
    """
    # Bundle everything about the upload target: the parent container,
    # the local path info, and the remote file id.
    upload_params = (
        ParentData(self.parent.kind, self.parent.remote_id),
        self.local_file.get_path_data(),
        self.local_file.remote_id,
    )
    return UploadContext(self.settings, upload_params, message_queue, task_id)
constant[
Create values to be used by create_small_file function.
:param message_queue: Queue: queue background process can send messages to us on
:param task_id: int: id of this command's task so message will be routed correctly
]
variable[parent_data] assign[=] call[name[ParentData], parameter[name[self].parent.kind, name[self].parent.remote_id]]
variable[path_data] assign[=] call[name[self].local_file.get_path_data, parameter[]]
variable[params] assign[=] tuple[[<ast.Name object at 0x7da20c6e7160>, <ast.Name object at 0x7da20c6e60e0>, <ast.Attribute object at 0x7da20c6e77c0>]]
return[call[name[UploadContext], parameter[name[self].settings, name[params], name[message_queue], name[task_id]]]] | keyword[def] identifier[create_context] ( identifier[self] , identifier[message_queue] , identifier[task_id] ):
literal[string]
identifier[parent_data] = identifier[ParentData] ( identifier[self] . identifier[parent] . identifier[kind] , identifier[self] . identifier[parent] . identifier[remote_id] )
identifier[path_data] = identifier[self] . identifier[local_file] . identifier[get_path_data] ()
identifier[params] = identifier[parent_data] , identifier[path_data] , identifier[self] . identifier[local_file] . identifier[remote_id]
keyword[return] identifier[UploadContext] ( identifier[self] . identifier[settings] , identifier[params] , identifier[message_queue] , identifier[task_id] ) | def create_context(self, message_queue, task_id):
"""
Create values to be used by create_small_file function.
:param message_queue: Queue: queue background process can send messages to us on
:param task_id: int: id of this command's task so message will be routed correctly
"""
parent_data = ParentData(self.parent.kind, self.parent.remote_id)
path_data = self.local_file.get_path_data()
params = (parent_data, path_data, self.local_file.remote_id)
return UploadContext(self.settings, params, message_queue, task_id) |
def _read_iso_abund_marco(self, mass_range, cycle):
    """
    Read the isotopic abundances of one cycle, restricted to a mass range.

    Populates ``self.mass_frac`` with the abundance rows whose mass
    coordinate falls inside ``mass_range``, and the module-level global
    ``used_masses`` with the corresponding mass coordinates.

    Parameters
    ----------
    mass_range : list
        A 1x2 array containing the lower and upper mass range. If
        None, the entire mass range of the cycle is used.
    cycle : string or integer
        A string/integer of the cycle of interest.
    """
    import nuutils as u

    masses = self.se.get(cycle, 'mass')
    if mass_range is None:
        print('Using default mass range')
        mass_range = [min(masses), max(masses)]

    print('Using The following conditions:')
    print('\tmass_range:', mass_range[0], mass_range[1])
    print('\tcycle:', cycle)

    # Translate the HDF5 isotope names into the ppn naming scheme and
    # build nuutils' global species index used by other routines.
    isotope_names = self.se.isotopes
    u.convert_specie_naming_from_h5_to_ppn(isotope_names)
    names_ppn_world = u.spe
    number_names_ppn_world = u.n_array
    u.define_zip_index_for_species(names_ppn_world, number_names_ppn_world)

    # 'iso_massf' is read directly (rather than via se.dcols) because
    # some datasets, e.g. m20 explosive, do not provide dcols.
    abunds = self.se.get(cycle, 'iso_massf')

    # NOTE(review): kept module-global for backward compatibility with
    # existing callers that read ``used_masses`` after this call.
    global used_masses
    used_masses = []
    self.mass_frac = []
    for mass, abund in zip(masses, abunds):
        if mass_range[0] <= mass <= mass_range[1]:
            used_masses.append(mass)
            self.mass_frac.append(abund)
constant[
plot the abundance of all the chemical species
Parameters
----------
mass_range : list
A 1x2 array containing the lower and upper mass range. If
None, it will plot over the entire range.
cycle : string or integer
A string/integer of the cycle of interest.
]
import module[nuutils] as alias[u]
variable[masses] assign[=] list[[]]
variable[masses] assign[=] call[name[self].se.get, parameter[name[cycle], constant[mass]]]
if compare[name[mass_range] equal[==] constant[None]] begin[:]
call[name[print], parameter[constant[Using default mass range]]]
variable[mass_range] assign[=] list[[<ast.Call object at 0x7da1b2344df0>, <ast.Call object at 0x7da1b2347e50>]]
call[name[print], parameter[constant[Using The following conditions:]]]
call[name[print], parameter[constant[ mass_range:], call[name[mass_range]][constant[0]], call[name[mass_range]][constant[1]]]]
call[name[print], parameter[constant[ cycle:], name[cycle]]]
variable[isotope_names] assign[=] name[self].se.isotopes
call[name[u].convert_specie_naming_from_h5_to_ppn, parameter[name[isotope_names]]]
variable[names_ppn_world] assign[=] name[u].spe
variable[number_names_ppn_world] assign[=] name[u].n_array
call[name[u].define_zip_index_for_species, parameter[name[names_ppn_world], name[number_names_ppn_world]]]
variable[name_specie_in_file] assign[=] constant[iso_massf]
variable[abunds] assign[=] call[name[self].se.get, parameter[name[cycle], name[name_specie_in_file]]]
<ast.Global object at 0x7da204347850>
variable[used_masses] assign[=] list[[]]
name[self].mass_frac assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[masses]]]]]] begin[:]
if <ast.BoolOp object at 0x7da204345090> begin[:]
call[name[used_masses].append, parameter[call[name[masses]][name[i]]]]
call[name[self].mass_frac.append, parameter[call[name[abunds]][name[i]]]] | keyword[def] identifier[_read_iso_abund_marco] ( identifier[self] , identifier[mass_range] , identifier[cycle] ):
literal[string]
keyword[import] identifier[nuutils] keyword[as] identifier[u]
identifier[masses] =[]
identifier[masses] = identifier[self] . identifier[se] . identifier[get] ( identifier[cycle] , literal[string] )
keyword[if] identifier[mass_range] == keyword[None] :
identifier[print] ( literal[string] )
identifier[mass_range] =[ identifier[min] ( identifier[masses] ), identifier[max] ( identifier[masses] )]
identifier[print] ( literal[string] )
identifier[print] ( literal[string] , identifier[mass_range] [ literal[int] ], identifier[mass_range] [ literal[int] ])
identifier[print] ( literal[string] , identifier[cycle] )
identifier[isotope_names] = identifier[self] . identifier[se] . identifier[isotopes]
identifier[u] . identifier[convert_specie_naming_from_h5_to_ppn] ( identifier[isotope_names] )
identifier[names_ppn_world] = identifier[u] . identifier[spe]
identifier[number_names_ppn_world] = identifier[u] . identifier[n_array]
identifier[u] . identifier[define_zip_index_for_species] ( identifier[names_ppn_world] , identifier[number_names_ppn_world] )
identifier[name_specie_in_file] = literal[string]
identifier[abunds] = identifier[self] . identifier[se] . identifier[get] ( identifier[cycle] , identifier[name_specie_in_file] )
keyword[global] identifier[used_masses]
identifier[used_masses] =[]
identifier[self] . identifier[mass_frac] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[masses] )):
keyword[if] identifier[mass_range] [ literal[int] ]<= identifier[masses] [ identifier[i] ] keyword[and] identifier[mass_range] [ literal[int] ]>= identifier[masses] [ identifier[i] ]:
identifier[used_masses] . identifier[append] ( identifier[masses] [ identifier[i] ])
identifier[self] . identifier[mass_frac] . identifier[append] ( identifier[abunds] [ identifier[i] ]) | def _read_iso_abund_marco(self, mass_range, cycle):
"""
plot the abundance of all the chemical species
Parameters
----------
mass_range : list
A 1x2 array containing the lower and upper mass range. If
None, it will plot over the entire range.
cycle : string or integer
A string/integer of the cycle of interest.
"""
import nuutils as u
masses = []
# Check the inputs
#if not self.se.cycles.count(str(cycle)):
# print 'You entered an cycle that doesn\'t exist in this dataset:', cycle
# print 'I will try and correct your format.'
# cyc_len = len(self.se.cycles[-1])
# print cyc_len, len(str(cycle))
#
# while len(str(cycle)) < cyc_len:
# cycle = '0'+str(cycle)
# print cycle
# if not self.se.cycles.count(str(cycle)):
# print 'I was unable to correct your cycle. Please check that it exists in your dataset.'
masses = self.se.get(cycle, 'mass')
if mass_range == None:
print('Using default mass range')
mass_range = [min(masses), max(masses)] # depends on [control=['if'], data=['mass_range']]
# what this was for??? Marco
#masses.sort()
#mass_range.sort()
print('Using The following conditions:')
print('\tmass_range:', mass_range[0], mass_range[1])
print('\tcycle:', cycle)
isotope_names = self.se.isotopes
u.convert_specie_naming_from_h5_to_ppn(isotope_names)
names_ppn_world = u.spe
number_names_ppn_world = u.n_array
u.define_zip_index_for_species(names_ppn_world, number_names_ppn_world)
# from here below I read the abundance.
#name_specie_in_file=self.se.dcols[5]
# I am using directly 'iso_massf' only because somehow m20 explosive do not have dcols....
name_specie_in_file = 'iso_massf'
abunds = self.se.get(cycle, name_specie_in_file)
global used_masses
used_masses = []
self.mass_frac = []
for i in range(len(masses)):
if mass_range[0] <= masses[i] and mass_range[1] >= masses[i]:
used_masses.append(masses[i])
self.mass_frac.append(abunds[i]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] |
def _addItemMultiPart(self, itemParameters, filePath):
    """
    Implements the multipart (add-by-part) upload workflow.

    The item is first registered on the portal, then the file content
    is uploaded in parts and committed, and finally the item metadata
    is applied in two passes (item REST update + portal /update call).

    Inputs:
        itemParameters - ItemParameter instance describing the item.
        filePath - full disk path of the file to upload.
    Output:
        UserItem for the new item, or None if registration failed.
    """
    base_name = os.path.basename(filePath)
    register_params = {
        "f": "json",
        'multipart': 'true',
        "filename": base_name,
    }
    register_res = self._post(url=self._location + "/addItem",
                              param_dict=register_params,
                              securityHandler=self._securityHandler)
    if 'id' not in register_res:
        return None
    item_url = "%s/items/%s" % (self.location, register_res['id'])
    user_item = UserItem(url=item_url,
                         securityHandler=self._securityHandler,
                         proxy_url=self._proxy_url,
                         proxy_port=self._proxy_port)
    user_item.addByPart(filePath=filePath)
    user_item.commit(wait=True)
    # First metadata pass: basic fields via the item API.
    basic = ItemParameter()
    basic.title = itemParameters.title
    basic.tags = itemParameters.tags
    basic.filename = base_name
    basic.type = itemParameters.type
    user_item.updateItem(itemParameters=basic)
    # Second metadata pass: push the full parameter set to the portal
    # /update endpoint. Booleans are JSON-encoded before sending.
    payload = {'title': itemParameters.title,
               'tags': itemParameters.tags,
               'filename': base_name,
               'type': itemParameters.type,
               'f': 'json'}
    for key, value in itemParameters.value.items():
        if key in payload.keys():
            continue
        payload[key] = json.dumps(value) if isinstance(value, bool) else value
    self._post(url=user_item.root.replace('/rest/', '/') + "/update",
               param_dict=payload,
               securityHandler=self._securityHandler)
    user_item.refresh()
    return user_item
return None | def function[_addItemMultiPart, parameter[self, itemParameters, filePath]]:
constant[
The secret sauce behind the addByPart workflow
Inputs:
itemParatmers - ItemParamter class
filePath - full disk path location.
Output:
UserItem class
]
variable[url] assign[=] binary_operation[name[self]._location + constant[/addItem]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b124c9a0>, <ast.Constant object at 0x7da1b124c040>, <ast.Constant object at 0x7da1b124e890>], [<ast.Constant object at 0x7da1b124ef20>, <ast.Constant object at 0x7da1b124e770>, <ast.Call object at 0x7da1b124f1c0>]]
variable[res] assign[=] call[name[self]._post, parameter[]]
if compare[constant[id] in name[res]] begin[:]
variable[itemID] assign[=] call[name[res]][constant[id]]
variable[iUrl] assign[=] binary_operation[constant[%s/items/%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b124a170>, <ast.Name object at 0x7da1b124ae00>]]]
variable[ui] assign[=] call[name[UserItem], parameter[]]
variable[res] assign[=] call[name[ui].addByPart, parameter[]]
variable[res] assign[=] call[name[ui].commit, parameter[]]
variable[up] assign[=] call[name[ItemParameter], parameter[]]
name[up].title assign[=] name[itemParameters].title
name[up].tags assign[=] name[itemParameters].tags
name[up].filename assign[=] call[name[os].path.basename, parameter[name[filePath]]]
name[up].type assign[=] name[itemParameters].type
call[name[ui].updateItem, parameter[]]
variable[update_url] assign[=] binary_operation[call[name[ui].root.replace, parameter[constant[/rest/], constant[/]]] + constant[/update]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b124ffd0>, <ast.Constant object at 0x7da1b124efb0>, <ast.Constant object at 0x7da1b124fcd0>, <ast.Constant object at 0x7da1b124f970>, <ast.Constant object at 0x7da1b124f4f0>], [<ast.Attribute object at 0x7da1b124f550>, <ast.Attribute object at 0x7da1b124de70>, <ast.Call object at 0x7da1b124d4b0>, <ast.Attribute object at 0x7da1b124cd00>, <ast.Constant object at 0x7da1b124f7f0>]]
for taget[tuple[[<ast.Name object at 0x7da1b124e7d0>, <ast.Name object at 0x7da1b124fdc0>]]] in starred[call[name[itemParameters].value.items, parameter[]]] begin[:]
if compare[name[k] <ast.NotIn object at 0x7da2590d7190> call[name[data].keys, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[v], name[bool]]] begin[:]
call[name[data]][name[k]] assign[=] call[name[json].dumps, parameter[name[v]]]
variable[res] assign[=] call[name[self]._post, parameter[]]
call[name[ui].refresh, parameter[]]
return[name[ui]]
return[constant[None]] | keyword[def] identifier[_addItemMultiPart] ( identifier[self] ,
identifier[itemParameters] ,
identifier[filePath] ):
literal[string]
identifier[url] = identifier[self] . identifier[_location] + literal[string]
identifier[params] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : identifier[os] . identifier[path] . identifier[basename] ( identifier[filePath] )
}
identifier[res] = identifier[self] . identifier[_post] ( identifier[url] = identifier[url] ,
identifier[param_dict] = identifier[params] ,
identifier[securityHandler] = identifier[self] . identifier[_securityHandler] )
keyword[if] literal[string] keyword[in] identifier[res] :
identifier[itemID] = identifier[res] [ literal[string] ]
identifier[iUrl] = literal[string] %( identifier[self] . identifier[location] , identifier[itemID] )
identifier[ui] = identifier[UserItem] ( identifier[url] = identifier[iUrl] ,
identifier[securityHandler] = identifier[self] . identifier[_securityHandler] ,
identifier[proxy_url] = identifier[self] . identifier[_proxy_url] ,
identifier[proxy_port] = identifier[self] . identifier[_proxy_port] )
identifier[res] = identifier[ui] . identifier[addByPart] ( identifier[filePath] = identifier[filePath] )
identifier[res] = identifier[ui] . identifier[commit] ( identifier[wait] = keyword[True] )
identifier[up] = identifier[ItemParameter] ()
identifier[up] . identifier[title] = identifier[itemParameters] . identifier[title]
identifier[up] . identifier[tags] = identifier[itemParameters] . identifier[tags]
identifier[up] . identifier[filename] = identifier[os] . identifier[path] . identifier[basename] ( identifier[filePath] )
identifier[up] . identifier[type] = identifier[itemParameters] . identifier[type]
identifier[ui] . identifier[updateItem] ( identifier[itemParameters] = identifier[up] )
identifier[update_url] = identifier[ui] . identifier[root] . identifier[replace] ( literal[string] , literal[string] )+ literal[string]
identifier[data] ={ literal[string] : identifier[itemParameters] . identifier[title] ,
literal[string] : identifier[itemParameters] . identifier[tags] ,
literal[string] : identifier[os] . identifier[path] . identifier[basename] ( identifier[filePath] ),
literal[string] : identifier[itemParameters] . identifier[type] ,
literal[string] : literal[string] }
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[itemParameters] . identifier[value] . identifier[items] ():
keyword[if] identifier[k] keyword[not] keyword[in] identifier[data] . identifier[keys] ():
keyword[if] identifier[isinstance] ( identifier[v] , identifier[bool] ):
identifier[data] [ identifier[k] ]= identifier[json] . identifier[dumps] ( identifier[v] )
keyword[else] :
identifier[data] [ identifier[k] ]= identifier[v]
identifier[res] = identifier[self] . identifier[_post] ( identifier[url] = identifier[update_url] ,
identifier[param_dict] = identifier[data] ,
identifier[securityHandler] = identifier[self] . identifier[_securityHandler] )
identifier[ui] . identifier[refresh] ()
keyword[return] identifier[ui]
keyword[return] keyword[None] | def _addItemMultiPart(self, itemParameters, filePath):
"""
The secret sauce behind the addByPart workflow
Inputs:
itemParatmers - ItemParamter class
filePath - full disk path location.
Output:
UserItem class
"""
url = self._location + '/addItem'
params = {'f': 'json', 'multipart': 'true', 'filename': os.path.basename(filePath)}
res = self._post(url=url, param_dict=params, securityHandler=self._securityHandler)
if 'id' in res:
itemID = res['id']
iUrl = '%s/items/%s' % (self.location, itemID)
ui = UserItem(url=iUrl, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
res = ui.addByPart(filePath=filePath)
res = ui.commit(wait=True)
up = ItemParameter()
up.title = itemParameters.title
up.tags = itemParameters.tags
up.filename = os.path.basename(filePath)
up.type = itemParameters.type
ui.updateItem(itemParameters=up)
update_url = ui.root.replace('/rest/', '/') + '/update'
data = {'title': itemParameters.title, 'tags': itemParameters.tags, 'filename': os.path.basename(filePath), 'type': itemParameters.type, 'f': 'json'}
for (k, v) in itemParameters.value.items():
if k not in data.keys():
if isinstance(v, bool):
data[k] = json.dumps(v) # depends on [control=['if'], data=[]]
else:
data[k] = v # depends on [control=['if'], data=['k']] # depends on [control=['for'], data=[]]
res = self._post(url=update_url, param_dict=data, securityHandler=self._securityHandler)
ui.refresh()
return ui # depends on [control=['if'], data=['res']]
return None |
def forward_backward_pd(x, f, g, L, h, tau, sigma, niter,
                        callback=None, **kwargs):
    r"""The forward-backward primal-dual splitting algorithm.
    The algorithm minimizes the sum of several convex functionals composed with
    linear operators::
        min_x f(x) + sum_i g_i(L_i x) + h(x)
    where ``f``, ``g_i`` are convex functionals, ``L_i`` are linear
    operators, and ``h`` is a convex and differentiable functional.
    The method can also be used to solve the more general problem::
        min_x f(x) + sum_i (g_i @ l_i)(L_i x) + h(x)
    where ``l_i`` are strongly convex functionals and @ is the infimal
    convolution::
        (g @ l)(x) = inf_y { g(y) + l(x-y) }
    Note that the strong convexity of ``l_i`` makes the convex conjugate
    ``l_i^*`` differentiable; see the Notes section for more information on
    this.
    Parameters
    ----------
    x : `LinearSpaceElement`
        Initial point, updated in-place.
    f : `Functional`
        The functional ``f``. Needs to have ``f.proximal``.
    g : sequence of `Functional`'s
        The functionals ``g_i``. Needs to have ``g_i.convex_conj.proximal``.
    L : sequence of `Operator`'s'
        Sequence of linear operators ``L_i``, with as many elements as
        ``g``.
    h : `Functional`
        The functional ``h``. Needs to have ``h.gradient``.
    tau : float
        Step size-like parameter for ``f``.
    sigma : sequence of floats
        Sequence of step size-like parameters for the sequence ``g``.
    niter : int
        Number of iterations.
    callback : callable, optional
        Function called with the current iterate after each iteration.
    Other Parameters
    ----------------
    l : sequence of `Functional`'s, optional
        The functionals ``l_i``. Needs to have ``g_i.convex_conj.gradient``.
        If omitted, the simpler problem without ``l_i`` will be considered.
    Notes
    -----
    The mathematical problem to solve is
    .. math::
        \min_x f(x) + \sum_{i=0}^n (g_i \Box l_i)(L_i x) + h(x),
    where :math:`f`, :math:`g_i`, :math:`l_i` and :math:`h` are functionals and
    :math:`L_i` are linear operators. The infimal convolution :math:`g \Box l`
    is defined by
    .. math::
        (g \Box l)(x) = \inf_y g(y) + l(x - y).
    The exact conditions on the involved functionals are as follows: :math:`f`
    and :math:`g_i` are proper, convex and lower semicontinuous, and :math:`h`
    is convex and differentiable with :math:`\eta^{-1}`-Lipschitz continuous
    gradient, :math:`\eta > 0`.
    The optional operators :math:`\nabla l_i^*` need to be
    :math:`\nu_i`-Lipschitz continuous. Note that in the paper, the condition
    is formulated as :math:`l_i` being proper, lower
    semicontinuous, and :math:`\nu_i^{-1}`-strongly convex, which implies that
    :math:`l_i^*` have :math:`\nu_i`-Lipschitz continuous gradients.
    If the optional operators :math:`\nabla l_i^*` are omitted, the simpler
    problem without :math:`l_i` will be considered. Mathematically, this is
    done by taking :math:`l_i` to be the functionals that are zero only in the
    zero element and :math:`\infty` otherwise. This gives that :math:`l_i^*`
    are the zero functionals, and hence the corresponding gradients are the
    zero operators.
    To guarantee convergence, the parameters :math:`\tau`, :math:`\sigma` and
    :math:`L_i` need to satisfy
    .. math::
       2 \min \{ \frac{1}{\tau}, \frac{1}{\sigma_1}, \ldots,
       \frac{1}{\sigma_m} \} \cdot \min\{ \eta, \nu_1, \ldots, \nu_m \}
       \cdot \sqrt{1 - \tau \sum_{i=1}^n \sigma_i ||L_i||^2} > 1,
    where, if the simpler problem is considered, all :math:`\nu_i` can be
    considered to be :math:`\infty`.
    For reference on the forward-backward primal-dual algorithm, see [BC2015].
    For more on proximal operators and algorithms see [PB2014].
    See Also
    --------
    odl.solvers.nonsmooth.primal_dual_hybrid_gradient.pdhg :
        Solver for similar problems without differentiability in any
        of the terms.
    odl.solvers.nonsmooth.douglas_rachford.douglas_rachford_pd :
        Solver for similar problems without differentiability in any
        of the terms.
    References
    ----------
    [BC2015] Bot, R I, and Csetnek, E R. *On the convergence rate of
    a forward-backward type primal-dual splitting algorithm for convex
    optimization problems*. Optimization, 64.1 (2015), pp 5--23.
    [PB2014] Parikh, N, and Boyd, S. *Proximal Algorithms*.
    Foundations and Trends in Optimization, 1 (2014), pp 127-239.
    """
    # Problem size
    m = len(L)

    # Validate input
    if not all(isinstance(op, Operator) for op in L):
        raise ValueError('`L` not a sequence of operators')
    if not all(op.is_linear for op in L):
        raise ValueError('not all operators in `L` are linear')
    if not all(x in op.domain for op in L):
        raise ValueError('`x` not in the domain of all operators in `L`')
    if len(sigma) != m:
        raise ValueError('len(sigma) != len(L)')
    if len(g) != m:
        # Message refers to the actual argument `g`, not the derived
        # `prox_cc_g` list (which is not defined yet at this point).
        raise ValueError('len(g) != len(L)')

    # Extract operators
    prox_cc_g = [gi.convex_conj.proximal for gi in g]
    grad_h = h.gradient
    prox_f = f.proximal

    l = kwargs.pop('l', None)
    if l is not None:
        if len(l) != m:
            raise ValueError('len(l) != len(L)')
        grad_cc_l = [li.convex_conj.gradient for li in l]

    if kwargs:
        raise TypeError('unexpected keyword argument: {}'.format(kwargs))

    # Pre-allocate values
    v = [Li.range.zero() for Li in L]
    y = x.space.zero()

    for k in range(niter):
        # Snapshot the current iterate. ``prox_f`` below writes into
        # ``x`` in place, so a plain alias (``x_old = x``) would make
        # ``x_old`` track the *new* iterate and collapse the
        # over-relaxation ``y = 2 x_new - x_old`` into ``y = x_new``.
        x_old = x.copy()

        # Forward-backward step on the primal variable:
        # x <- prox_{tau f}(x - tau * (grad h(x) + sum_i L_i^* v_i))
        tmp_1 = grad_h(x) + sum(Li.adjoint(vi) for Li, vi in zip(L, v))
        prox_f(tau)(x - tau * tmp_1, out=x)
        y.lincomb(2.0, x, -1, x_old)

        # Dual updates: v_i <- prox_{sigma_i g_i^*}(v_i + sigma_i * (...))
        for i in range(m):
            if l is not None:
                # In this case gradients were given.
                tmp_2 = sigma[i] * (L[i](y) - grad_cc_l[i](v[i]))
            else:
                # In this case gradients were not given. Therefore the gradient
                # step is omitted. For more details, see the documentation.
                tmp_2 = sigma[i] * L[i](y)
            prox_cc_g[i](sigma[i])(v[i] + tmp_2, out=v[i])

        if callback is not None:
            callback(x)
constant[The forward-backward primal-dual splitting algorithm.
The algorithm minimizes the sum of several convex functionals composed with
linear operators::
min_x f(x) + sum_i g_i(L_i x) + h(x)
where ``f``, ``g_i`` are convex functionals, ``L_i`` are linear
operators, and ``h`` is a convex and differentiable functional.
The method can also be used to solve the more general problem::
min_x f(x) + sum_i (g_i @ l_i)(L_i x) + h(x)
where ``l_i`` are strongly convex functionals and @ is the infimal
convolution::
(g @ l)(x) = inf_y { g(y) + l(x-y) }
Note that the strong convexity of ``l_i`` makes the convex conjugate
``l_i^*`` differentiable; see the Notes section for more information on
this.
Parameters
----------
x : `LinearSpaceElement`
Initial point, updated in-place.
f : `Functional`
The functional ``f``. Needs to have ``f.proximal``.
g : sequence of `Functional`'s
The functionals ``g_i``. Needs to have ``g_i.convex_conj.proximal``.
L : sequence of `Operator`'s'
Sequence of linear operators ``L_i``, with as many elements as
``g``.
h : `Functional`
The functional ``h``. Needs to have ``h.gradient``.
tau : float
Step size-like parameter for ``f``.
sigma : sequence of floats
Sequence of step size-like parameters for the sequence ``g``.
niter : int
Number of iterations.
callback : callable, optional
Function called with the current iterate after each iteration.
Other Parameters
----------------
l : sequence of `Functional`'s, optional
The functionals ``l_i``. Needs to have ``g_i.convex_conj.gradient``.
If omitted, the simpler problem without ``l_i`` will be considered.
Notes
-----
The mathematical problem to solve is
.. math::
\min_x f(x) + \sum_{i=0}^n (g_i \Box l_i)(L_i x) + h(x),
where :math:`f`, :math:`g_i`, :math:`l_i` and :math:`h` are functionals and
:math:`L_i` are linear operators. The infimal convolution :math:`g \Box l`
is defined by
.. math::
(g \Box l)(x) = \inf_y g(y) + l(x - y).
The exact conditions on the involved functionals are as follows: :math:`f`
and :math:`g_i` are proper, convex and lower semicontinuous, and :math:`h`
is convex and differentiable with :math:`\eta^{-1}`-Lipschitz continuous
gradient, :math:`\eta > 0`.
The optional operators :math:`\nabla l_i^*` need to be
:math:`\nu_i`-Lipschitz continuous. Note that in the paper, the condition
is formulated as :math:`l_i` being proper, lower
semicontinuous, and :math:`\nu_i^{-1}`-strongly convex, which implies that
:math:`l_i^*` have :math:`\nu_i`-Lipschitz continuous gradients.
If the optional operators :math:`\nabla l_i^*` are omitted, the simpler
problem without :math:`l_i` will be considered. Mathematically, this is
done by taking :math:`l_i` to be the functionals that are zero only in the
zero element and :math:`\infty` otherwise. This gives that :math:`l_i^*`
are the zero functionals, and hence the corresponding gradients are the
zero operators.
To guarantee convergence, the parameters :math:`\tau`, :math:`\sigma` and
:math:`L_i` need to satisfy
.. math::
2 \min \{ \frac{1}{\tau}, \frac{1}{\sigma_1}, \ldots,
\frac{1}{\sigma_m} \} \cdot \min\{ \eta, \nu_1, \ldots, \nu_m \}
\cdot \sqrt{1 - \tau \sum_{i=1}^n \sigma_i ||L_i||^2} > 1,
where, if the simpler problem is considered, all :math:`\nu_i` can be
considered to be :math:`\infty`.
For reference on the forward-backward primal-dual algorithm, see [BC2015].
For more on proximal operators and algorithms see [PB2014].
See Also
--------
odl.solvers.nonsmooth.primal_dual_hybrid_gradient.pdhg :
Solver for similar problems without differentiability in any
of the terms.
odl.solvers.nonsmooth.douglas_rachford.douglas_rachford_pd :
Solver for similar problems without differentiability in any
of the terms.
References
----------
[BC2015] Bot, R I, and Csetnek, E R. *On the convergence rate of
a forward-backward type primal-dual splitting algorithm for convex
optimization problems*. Optimization, 64.1 (2015), pp 5--23.
[PB2014] Parikh, N, and Boyd, S. *Proximal Algorithms*.
Foundations and Trends in Optimization, 1 (2014), pp 127-239.
]
variable[m] assign[=] call[name[len], parameter[name[L]]]
if <ast.UnaryOp object at 0x7da1b1e923b0> begin[:]
<ast.Raise object at 0x7da1b1e91c60>
if <ast.UnaryOp object at 0x7da1b1e907f0> begin[:]
<ast.Raise object at 0x7da1b1e90400>
if <ast.UnaryOp object at 0x7da1b1e92470> begin[:]
<ast.Raise object at 0x7da1b1e920e0>
if compare[call[name[len], parameter[name[sigma]]] not_equal[!=] name[m]] begin[:]
<ast.Raise object at 0x7da1b1e93bb0>
if compare[call[name[len], parameter[name[g]]] not_equal[!=] name[m]] begin[:]
<ast.Raise object at 0x7da1b1e91840>
variable[prox_cc_g] assign[=] <ast.ListComp object at 0x7da1b1e93790>
variable[grad_h] assign[=] name[h].gradient
variable[prox_f] assign[=] name[f].proximal
variable[l] assign[=] call[name[kwargs].pop, parameter[constant[l], constant[None]]]
if compare[name[l] is_not constant[None]] begin[:]
if compare[call[name[len], parameter[name[l]]] not_equal[!=] name[m]] begin[:]
<ast.Raise object at 0x7da1b1e937f0>
variable[grad_cc_l] assign[=] <ast.ListComp object at 0x7da1b1e90df0>
if name[kwargs] begin[:]
<ast.Raise object at 0x7da1b1e911b0>
variable[v] assign[=] <ast.ListComp object at 0x7da1b1e91450>
variable[y] assign[=] call[name[x].space.zero, parameter[]]
for taget[name[k]] in starred[call[name[range], parameter[name[niter]]]] begin[:]
variable[x_old] assign[=] name[x]
variable[tmp_1] assign[=] binary_operation[call[name[grad_h], parameter[name[x]]] + call[name[sum], parameter[<ast.GeneratorExp object at 0x7da1b1eedde0>]]]
call[call[name[prox_f], parameter[name[tau]]], parameter[binary_operation[name[x] - binary_operation[name[tau] * name[tmp_1]]]]]
call[name[y].lincomb, parameter[constant[2.0], name[x], <ast.UnaryOp object at 0x7da1b1eeda50>, name[x_old]]]
for taget[name[i]] in starred[call[name[range], parameter[name[m]]]] begin[:]
if compare[name[l] is_not constant[None]] begin[:]
variable[tmp_2] assign[=] binary_operation[call[name[sigma]][name[i]] * binary_operation[call[call[name[L]][name[i]], parameter[name[y]]] - call[call[name[grad_cc_l]][name[i]], parameter[call[name[v]][name[i]]]]]]
call[call[call[name[prox_cc_g]][name[i]], parameter[call[name[sigma]][name[i]]]], parameter[binary_operation[call[name[v]][name[i]] + name[tmp_2]]]]
if compare[name[callback] is_not constant[None]] begin[:]
call[name[callback], parameter[name[x]]] | keyword[def] identifier[forward_backward_pd] ( identifier[x] , identifier[f] , identifier[g] , identifier[L] , identifier[h] , identifier[tau] , identifier[sigma] , identifier[niter] ,
identifier[callback] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[m] = identifier[len] ( identifier[L] )
keyword[if] keyword[not] identifier[all] ( identifier[isinstance] ( identifier[op] , identifier[Operator] ) keyword[for] identifier[op] keyword[in] identifier[L] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[all] ( identifier[op] . identifier[is_linear] keyword[for] identifier[op] keyword[in] identifier[L] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] keyword[not] identifier[all] ( identifier[x] keyword[in] identifier[op] . identifier[domain] keyword[for] identifier[op] keyword[in] identifier[L] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[len] ( identifier[sigma] )!= identifier[m] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[len] ( identifier[g] )!= identifier[m] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[prox_cc_g] =[ identifier[gi] . identifier[convex_conj] . identifier[proximal] keyword[for] identifier[gi] keyword[in] identifier[g] ]
identifier[grad_h] = identifier[h] . identifier[gradient]
identifier[prox_f] = identifier[f] . identifier[proximal]
identifier[l] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
keyword[if] identifier[l] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[len] ( identifier[l] )!= identifier[m] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[grad_cc_l] =[ identifier[li] . identifier[convex_conj] . identifier[gradient] keyword[for] identifier[li] keyword[in] identifier[l] ]
keyword[if] identifier[kwargs] :
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[kwargs] ))
identifier[v] =[ identifier[Li] . identifier[range] . identifier[zero] () keyword[for] identifier[Li] keyword[in] identifier[L] ]
identifier[y] = identifier[x] . identifier[space] . identifier[zero] ()
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[niter] ):
identifier[x_old] = identifier[x]
identifier[tmp_1] = identifier[grad_h] ( identifier[x] )+ identifier[sum] ( identifier[Li] . identifier[adjoint] ( identifier[vi] ) keyword[for] identifier[Li] , identifier[vi] keyword[in] identifier[zip] ( identifier[L] , identifier[v] ))
identifier[prox_f] ( identifier[tau] )( identifier[x] - identifier[tau] * identifier[tmp_1] , identifier[out] = identifier[x] )
identifier[y] . identifier[lincomb] ( literal[int] , identifier[x] ,- literal[int] , identifier[x_old] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[m] ):
keyword[if] identifier[l] keyword[is] keyword[not] keyword[None] :
identifier[tmp_2] = identifier[sigma] [ identifier[i] ]*( identifier[L] [ identifier[i] ]( identifier[y] )- identifier[grad_cc_l] [ identifier[i] ]( identifier[v] [ identifier[i] ]))
keyword[else] :
identifier[tmp_2] = identifier[sigma] [ identifier[i] ]* identifier[L] [ identifier[i] ]( identifier[y] )
identifier[prox_cc_g] [ identifier[i] ]( identifier[sigma] [ identifier[i] ])( identifier[v] [ identifier[i] ]+ identifier[tmp_2] , identifier[out] = identifier[v] [ identifier[i] ])
keyword[if] identifier[callback] keyword[is] keyword[not] keyword[None] :
identifier[callback] ( identifier[x] ) | def forward_backward_pd(x, f, g, L, h, tau, sigma, niter, callback=None, **kwargs):
"""The forward-backward primal-dual splitting algorithm.
The algorithm minimizes the sum of several convex functionals composed with
linear operators::
min_x f(x) + sum_i g_i(L_i x) + h(x)
where ``f``, ``g_i`` are convex functionals, ``L_i`` are linear
operators, and ``h`` is a convex and differentiable functional.
The method can also be used to solve the more general problem::
min_x f(x) + sum_i (g_i @ l_i)(L_i x) + h(x)
where ``l_i`` are strongly convex functionals and @ is the infimal
convolution::
(g @ l)(x) = inf_y { g(y) + l(x-y) }
Note that the strong convexity of ``l_i`` makes the convex conjugate
``l_i^*`` differentiable; see the Notes section for more information on
this.
Parameters
----------
x : `LinearSpaceElement`
Initial point, updated in-place.
f : `Functional`
The functional ``f``. Needs to have ``f.proximal``.
g : sequence of `Functional`'s
The functionals ``g_i``. Needs to have ``g_i.convex_conj.proximal``.
L : sequence of `Operator`'s'
Sequence of linear operators ``L_i``, with as many elements as
``g``.
h : `Functional`
The functional ``h``. Needs to have ``h.gradient``.
tau : float
Step size-like parameter for ``f``.
sigma : sequence of floats
Sequence of step size-like parameters for the sequence ``g``.
niter : int
Number of iterations.
callback : callable, optional
Function called with the current iterate after each iteration.
Other Parameters
----------------
l : sequence of `Functional`'s, optional
The functionals ``l_i``. Needs to have ``g_i.convex_conj.gradient``.
If omitted, the simpler problem without ``l_i`` will be considered.
Notes
-----
The mathematical problem to solve is
.. math::
\\min_x f(x) + \\sum_{i=0}^n (g_i \\Box l_i)(L_i x) + h(x),
where :math:`f`, :math:`g_i`, :math:`l_i` and :math:`h` are functionals and
:math:`L_i` are linear operators. The infimal convolution :math:`g \\Box l`
is defined by
.. math::
(g \\Box l)(x) = \\inf_y g(y) + l(x - y).
The exact conditions on the involved functionals are as follows: :math:`f`
and :math:`g_i` are proper, convex and lower semicontinuous, and :math:`h`
is convex and differentiable with :math:`\\eta^{-1}`-Lipschitz continuous
gradient, :math:`\\eta > 0`.
The optional operators :math:`\\nabla l_i^*` need to be
:math:`\\nu_i`-Lipschitz continuous. Note that in the paper, the condition
is formulated as :math:`l_i` being proper, lower
semicontinuous, and :math:`\\nu_i^{-1}`-strongly convex, which implies that
:math:`l_i^*` have :math:`\\nu_i`-Lipschitz continuous gradients.
If the optional operators :math:`\\nabla l_i^*` are omitted, the simpler
problem without :math:`l_i` will be considered. Mathematically, this is
done by taking :math:`l_i` to be the functionals that are zero only in the
zero element and :math:`\\infty` otherwise. This gives that :math:`l_i^*`
are the zero functionals, and hence the corresponding gradients are the
zero operators.
To guarantee convergence, the parameters :math:`\\tau`, :math:`\\sigma` and
:math:`L_i` need to satisfy
.. math::
2 \\min \\{ \\frac{1}{\\tau}, \\frac{1}{\\sigma_1}, \\ldots,
\\frac{1}{\\sigma_m} \\} \\cdot \\min\\{ \\eta, \\nu_1, \\ldots, \\nu_m \\}
\\cdot \\sqrt{1 - \\tau \\sum_{i=1}^n \\sigma_i ||L_i||^2} > 1,
where, if the simpler problem is considered, all :math:`\\nu_i` can be
considered to be :math:`\\infty`.
For reference on the forward-backward primal-dual algorithm, see [BC2015].
For more on proximal operators and algorithms see [PB2014].
See Also
--------
odl.solvers.nonsmooth.primal_dual_hybrid_gradient.pdhg :
Solver for similar problems without differentiability in any
of the terms.
odl.solvers.nonsmooth.douglas_rachford.douglas_rachford_pd :
Solver for similar problems without differentiability in any
of the terms.
References
----------
[BC2015] Bot, R I, and Csetnek, E R. *On the convergence rate of
a forward-backward type primal-dual splitting algorithm for convex
optimization problems*. Optimization, 64.1 (2015), pp 5--23.
[PB2014] Parikh, N, and Boyd, S. *Proximal Algorithms*.
Foundations and Trends in Optimization, 1 (2014), pp 127-239.
"""
# Problem size
m = len(L)
# Validate input
if not all((isinstance(op, Operator) for op in L)):
raise ValueError('`L` not a sequence of operators') # depends on [control=['if'], data=[]]
if not all((op.is_linear for op in L)):
raise ValueError('not all operators in `L` are linear') # depends on [control=['if'], data=[]]
if not all((x in op.domain for op in L)):
raise ValueError('`x` not in the domain of all operators in `L`') # depends on [control=['if'], data=[]]
if len(sigma) != m:
raise ValueError('len(sigma) != len(L)') # depends on [control=['if'], data=[]]
if len(g) != m:
raise ValueError('len(prox_cc_g) != len(L)') # depends on [control=['if'], data=[]]
# Extract operators
prox_cc_g = [gi.convex_conj.proximal for gi in g]
grad_h = h.gradient
prox_f = f.proximal
l = kwargs.pop('l', None)
if l is not None:
if len(l) != m:
raise ValueError('`grad_cc_l` not same length as `L`') # depends on [control=['if'], data=[]]
grad_cc_l = [li.convex_conj.gradient for li in l] # depends on [control=['if'], data=['l']]
if kwargs:
raise TypeError('unexpected keyword argument: {}'.format(kwargs)) # depends on [control=['if'], data=[]]
# Pre-allocate values
v = [Li.range.zero() for Li in L]
y = x.space.zero()
for k in range(niter):
x_old = x
tmp_1 = grad_h(x) + sum((Li.adjoint(vi) for (Li, vi) in zip(L, v)))
prox_f(tau)(x - tau * tmp_1, out=x)
y.lincomb(2.0, x, -1, x_old)
for i in range(m):
if l is not None:
# In this case gradients were given.
tmp_2 = sigma[i] * (L[i](y) - grad_cc_l[i](v[i])) # depends on [control=['if'], data=[]]
else:
# In this case gradients were not given. Therefore the gradient
# step is omitted. For more details, see the documentation.
tmp_2 = sigma[i] * L[i](y)
prox_cc_g[i](sigma[i])(v[i] + tmp_2, out=v[i]) # depends on [control=['for'], data=['i']]
if callback is not None:
callback(x) # depends on [control=['if'], data=['callback']] # depends on [control=['for'], data=[]] |
def local_port_range(self):
    """Tuple of (low_port, high_port) reflecting the local port range
    assigned to outbound connections. We use this as part of a heuristic
    to determine whether a connection is inbound or outbound.

    The value is read lazily from procfs on first access and cached in
    ``self._local_port_range`` thereafter.
    """
    if self._local_port_range is None:
        with open('/proc/sys/net/ipv4/ip_local_port_range', 'r') as f:
            # The file holds two whitespace-separated integers (e.g.
            # "32768\t60999\n"). split() with no argument tolerates any
            # separator and the trailing newline, unlike split('\t').
            self._local_port_range = tuple(map(int, f.read().split()))
    return self._local_port_range
constant[Tuple of (low_port, high_port) reflecting the local port range
assigned to outbound connections. We use this as part of a heuristic
to determine whether a connection is inbound or outbound.
]
if compare[name[self]._local_port_range is constant[None]] begin[:]
with call[name[open], parameter[constant[/proc/sys/net/ipv4/ip_local_port_range], constant[r]]] begin[:]
name[self]._local_port_range assign[=] call[name[tuple], parameter[call[name[map], parameter[name[int], call[call[name[f].read, parameter[]].split, parameter[constant[ ]]]]]]]
return[name[self]._local_port_range] | keyword[def] identifier[local_port_range] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_local_port_range] keyword[is] keyword[None] :
keyword[with] identifier[open] ( literal[string] , literal[string] ) keyword[as] identifier[f] :
identifier[self] . identifier[_local_port_range] = identifier[tuple] ( identifier[map] ( identifier[int] , identifier[f] . identifier[read] (). identifier[split] ( literal[string] )))
keyword[return] identifier[self] . identifier[_local_port_range] | def local_port_range(self):
"""Tuple of (low_port, high_port) reflecting the local port range
assigned to outbound connections. We use this as part of a heuristic
to determine whether a connection is inbound or outbound.
"""
if self._local_port_range is None:
with open('/proc/sys/net/ipv4/ip_local_port_range', 'r') as f:
self._local_port_range = tuple(map(int, f.read().split('\t'))) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
return self._local_port_range |
def serving_input_fn(self, hparams):
"""For serving/predict, assume that only video frames are provided."""
video_input_frames = tf.placeholder(
dtype=tf.float32,
shape=[
None, hparams.video_num_input_frames, self.frame_width,
self.frame_height, self.num_channels
])
# TODO(michalski): add support for passing input_action and input_reward.
return tf.estimator.export.ServingInputReceiver(
features={"inputs": video_input_frames},
receiver_tensors=video_input_frames) | def function[serving_input_fn, parameter[self, hparams]]:
constant[For serving/predict, assume that only video frames are provided.]
variable[video_input_frames] assign[=] call[name[tf].placeholder, parameter[]]
return[call[name[tf].estimator.export.ServingInputReceiver, parameter[]]] | keyword[def] identifier[serving_input_fn] ( identifier[self] , identifier[hparams] ):
literal[string]
identifier[video_input_frames] = identifier[tf] . identifier[placeholder] (
identifier[dtype] = identifier[tf] . identifier[float32] ,
identifier[shape] =[
keyword[None] , identifier[hparams] . identifier[video_num_input_frames] , identifier[self] . identifier[frame_width] ,
identifier[self] . identifier[frame_height] , identifier[self] . identifier[num_channels]
])
keyword[return] identifier[tf] . identifier[estimator] . identifier[export] . identifier[ServingInputReceiver] (
identifier[features] ={ literal[string] : identifier[video_input_frames] },
identifier[receiver_tensors] = identifier[video_input_frames] ) | def serving_input_fn(self, hparams):
"""For serving/predict, assume that only video frames are provided."""
video_input_frames = tf.placeholder(dtype=tf.float32, shape=[None, hparams.video_num_input_frames, self.frame_width, self.frame_height, self.num_channels])
# TODO(michalski): add support for passing input_action and input_reward.
return tf.estimator.export.ServingInputReceiver(features={'inputs': video_input_frames}, receiver_tensors=video_input_frames) |
def __get_distribution_tags(self, client, arn):
"""Returns a dict containing the tags for a CloudFront distribution
Args:
client (botocore.client.CloudFront): Boto3 CloudFront client object
arn (str): ARN of the distribution to get tags for
Returns:
`dict`
"""
return {
t['Key']: t['Value'] for t in client.list_tags_for_resource(
Resource=arn
)['Tags']['Items']
} | def function[__get_distribution_tags, parameter[self, client, arn]]:
constant[Returns a dict containing the tags for a CloudFront distribution
Args:
client (botocore.client.CloudFront): Boto3 CloudFront client object
arn (str): ARN of the distribution to get tags for
Returns:
`dict`
]
return[<ast.DictComp object at 0x7da1b2051300>] | keyword[def] identifier[__get_distribution_tags] ( identifier[self] , identifier[client] , identifier[arn] ):
literal[string]
keyword[return] {
identifier[t] [ literal[string] ]: identifier[t] [ literal[string] ] keyword[for] identifier[t] keyword[in] identifier[client] . identifier[list_tags_for_resource] (
identifier[Resource] = identifier[arn]
)[ literal[string] ][ literal[string] ]
} | def __get_distribution_tags(self, client, arn):
"""Returns a dict containing the tags for a CloudFront distribution
Args:
client (botocore.client.CloudFront): Boto3 CloudFront client object
arn (str): ARN of the distribution to get tags for
Returns:
`dict`
"""
return {t['Key']: t['Value'] for t in client.list_tags_for_resource(Resource=arn)['Tags']['Items']} |
def _Authenticate(self):
"""Save the cookie jar after authentication."""
super(HttpRpcServer, self)._Authenticate()
if self.save_cookies:
StatusUpdate("Saving authentication cookies to %s" % self.cookie_file)
self.cookie_jar.save() | def function[_Authenticate, parameter[self]]:
constant[Save the cookie jar after authentication.]
call[call[name[super], parameter[name[HttpRpcServer], name[self]]]._Authenticate, parameter[]]
if name[self].save_cookies begin[:]
call[name[StatusUpdate], parameter[binary_operation[constant[Saving authentication cookies to %s] <ast.Mod object at 0x7da2590d6920> name[self].cookie_file]]]
call[name[self].cookie_jar.save, parameter[]] | keyword[def] identifier[_Authenticate] ( identifier[self] ):
literal[string]
identifier[super] ( identifier[HttpRpcServer] , identifier[self] ). identifier[_Authenticate] ()
keyword[if] identifier[self] . identifier[save_cookies] :
identifier[StatusUpdate] ( literal[string] % identifier[self] . identifier[cookie_file] )
identifier[self] . identifier[cookie_jar] . identifier[save] () | def _Authenticate(self):
"""Save the cookie jar after authentication."""
super(HttpRpcServer, self)._Authenticate()
if self.save_cookies:
StatusUpdate('Saving authentication cookies to %s' % self.cookie_file)
self.cookie_jar.save() # depends on [control=['if'], data=[]] |
def visit_Name(self, node: ast.Name) -> Any:
"""Load the variable by looking it up in the variable look-up and in the built-ins."""
if not isinstance(node.ctx, ast.Load):
raise NotImplementedError("Can only compute a value of Load on a name {}, but got context: {}".format(
node.id, node.ctx))
result = None # type: Optional[Any]
if node.id in self._name_to_value:
result = self._name_to_value[node.id]
if result is None and hasattr(builtins, node.id):
result = getattr(builtins, node.id)
if result is None and node.id != "None":
# The variable refers to a name local of the lambda (e.g., a target in the generator expression).
# Since we evaluate generator expressions with runtime compilation, None is returned here as a placeholder.
return PLACEHOLDER
self.recomputed_values[node] = result
return result | def function[visit_Name, parameter[self, node]]:
constant[Load the variable by looking it up in the variable look-up and in the built-ins.]
if <ast.UnaryOp object at 0x7da1b1034100> begin[:]
<ast.Raise object at 0x7da1b1034730>
variable[result] assign[=] constant[None]
if compare[name[node].id in name[self]._name_to_value] begin[:]
variable[result] assign[=] call[name[self]._name_to_value][name[node].id]
if <ast.BoolOp object at 0x7da1b10347f0> begin[:]
variable[result] assign[=] call[name[getattr], parameter[name[builtins], name[node].id]]
if <ast.BoolOp object at 0x7da1b1034b20> begin[:]
return[name[PLACEHOLDER]]
call[name[self].recomputed_values][name[node]] assign[=] name[result]
return[name[result]] | keyword[def] identifier[visit_Name] ( identifier[self] , identifier[node] : identifier[ast] . identifier[Name] )-> identifier[Any] :
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[node] . identifier[ctx] , identifier[ast] . identifier[Load] ):
keyword[raise] identifier[NotImplementedError] ( literal[string] . identifier[format] (
identifier[node] . identifier[id] , identifier[node] . identifier[ctx] ))
identifier[result] = keyword[None]
keyword[if] identifier[node] . identifier[id] keyword[in] identifier[self] . identifier[_name_to_value] :
identifier[result] = identifier[self] . identifier[_name_to_value] [ identifier[node] . identifier[id] ]
keyword[if] identifier[result] keyword[is] keyword[None] keyword[and] identifier[hasattr] ( identifier[builtins] , identifier[node] . identifier[id] ):
identifier[result] = identifier[getattr] ( identifier[builtins] , identifier[node] . identifier[id] )
keyword[if] identifier[result] keyword[is] keyword[None] keyword[and] identifier[node] . identifier[id] != literal[string] :
keyword[return] identifier[PLACEHOLDER]
identifier[self] . identifier[recomputed_values] [ identifier[node] ]= identifier[result]
keyword[return] identifier[result] | def visit_Name(self, node: ast.Name) -> Any:
"""Load the variable by looking it up in the variable look-up and in the built-ins."""
if not isinstance(node.ctx, ast.Load):
raise NotImplementedError('Can only compute a value of Load on a name {}, but got context: {}'.format(node.id, node.ctx)) # depends on [control=['if'], data=[]]
result = None # type: Optional[Any]
if node.id in self._name_to_value:
result = self._name_to_value[node.id] # depends on [control=['if'], data=[]]
if result is None and hasattr(builtins, node.id):
result = getattr(builtins, node.id) # depends on [control=['if'], data=[]]
if result is None and node.id != 'None':
# The variable refers to a name local of the lambda (e.g., a target in the generator expression).
# Since we evaluate generator expressions with runtime compilation, None is returned here as a placeholder.
return PLACEHOLDER # depends on [control=['if'], data=[]]
self.recomputed_values[node] = result
return result |
def dict(self, **kwargs):
"""
Dictionary representation.
"""
return dict(
time = self.timestamp,
address = self.address,
channel = self.channel,
value = self.value,
**kwargs
) | def function[dict, parameter[self]]:
constant[
Dictionary representation.
]
return[call[name[dict], parameter[]]] | keyword[def] identifier[dict] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[dict] (
identifier[time] = identifier[self] . identifier[timestamp] ,
identifier[address] = identifier[self] . identifier[address] ,
identifier[channel] = identifier[self] . identifier[channel] ,
identifier[value] = identifier[self] . identifier[value] ,
** identifier[kwargs]
) | def dict(self, **kwargs):
"""
Dictionary representation.
"""
return dict(time=self.timestamp, address=self.address, channel=self.channel, value=self.value, **kwargs) |
def _ReadLockAndUpdateCompletedRequests(self, request_keys, response_counts,
cursor):
"""Reads, locks, and updates completed requests."""
condition_template = """
(flow_requests.client_id = %s AND
flow_requests.flow_id = %s AND
flow_requests.request_id = %s AND
responses_expected = %s)"""
args = []
conditions = []
completed_requests = {}
for request_key in request_keys:
client_id, flow_id, request_id = request_key
if request_key in response_counts:
conditions.append(condition_template)
args.append(db_utils.ClientIDToInt(client_id))
args.append(db_utils.FlowIDToInt(flow_id))
args.append(request_id)
args.append(response_counts[request_key])
if not args:
return completed_requests
query = """
SELECT client_id, flow_id, request_id, request
FROM flow_requests
WHERE ({conditions}) AND NOT needs_processing
FOR UPDATE
"""
query = query.format(conditions=" OR ".join(conditions))
cursor.execute(query, args)
for client_id_int, flow_id_int, request_id, request in cursor.fetchall():
request_key = (db_utils.IntToClientID(client_id_int),
db_utils.IntToFlowID(flow_id_int), request_id)
r = rdf_flow_objects.FlowRequest.FromSerializedString(request)
completed_requests[request_key] = r
query = """
UPDATE flow_requests
SET needs_processing = TRUE
WHERE ({conditions}) AND NOT needs_processing
"""
query = query.format(conditions=" OR ".join(conditions))
cursor.execute(query, args)
return completed_requests | def function[_ReadLockAndUpdateCompletedRequests, parameter[self, request_keys, response_counts, cursor]]:
constant[Reads, locks, and updates completed requests.]
variable[condition_template] assign[=] constant[
(flow_requests.client_id = %s AND
flow_requests.flow_id = %s AND
flow_requests.request_id = %s AND
responses_expected = %s)]
variable[args] assign[=] list[[]]
variable[conditions] assign[=] list[[]]
variable[completed_requests] assign[=] dictionary[[], []]
for taget[name[request_key]] in starred[name[request_keys]] begin[:]
<ast.Tuple object at 0x7da1b1b04a60> assign[=] name[request_key]
if compare[name[request_key] in name[response_counts]] begin[:]
call[name[conditions].append, parameter[name[condition_template]]]
call[name[args].append, parameter[call[name[db_utils].ClientIDToInt, parameter[name[client_id]]]]]
call[name[args].append, parameter[call[name[db_utils].FlowIDToInt, parameter[name[flow_id]]]]]
call[name[args].append, parameter[name[request_id]]]
call[name[args].append, parameter[call[name[response_counts]][name[request_key]]]]
if <ast.UnaryOp object at 0x7da1b1b04b20> begin[:]
return[name[completed_requests]]
variable[query] assign[=] constant[
SELECT client_id, flow_id, request_id, request
FROM flow_requests
WHERE ({conditions}) AND NOT needs_processing
FOR UPDATE
]
variable[query] assign[=] call[name[query].format, parameter[]]
call[name[cursor].execute, parameter[name[query], name[args]]]
for taget[tuple[[<ast.Name object at 0x7da1b1b07850>, <ast.Name object at 0x7da1b1b075b0>, <ast.Name object at 0x7da1b1b074c0>, <ast.Name object at 0x7da1b1b07490>]]] in starred[call[name[cursor].fetchall, parameter[]]] begin[:]
variable[request_key] assign[=] tuple[[<ast.Call object at 0x7da1b1b06980>, <ast.Call object at 0x7da1b1b065c0>, <ast.Name object at 0x7da1b1b06290>]]
variable[r] assign[=] call[name[rdf_flow_objects].FlowRequest.FromSerializedString, parameter[name[request]]]
call[name[completed_requests]][name[request_key]] assign[=] name[r]
variable[query] assign[=] constant[
UPDATE flow_requests
SET needs_processing = TRUE
WHERE ({conditions}) AND NOT needs_processing
]
variable[query] assign[=] call[name[query].format, parameter[]]
call[name[cursor].execute, parameter[name[query], name[args]]]
return[name[completed_requests]] | keyword[def] identifier[_ReadLockAndUpdateCompletedRequests] ( identifier[self] , identifier[request_keys] , identifier[response_counts] ,
identifier[cursor] ):
literal[string]
identifier[condition_template] = literal[string]
identifier[args] =[]
identifier[conditions] =[]
identifier[completed_requests] ={}
keyword[for] identifier[request_key] keyword[in] identifier[request_keys] :
identifier[client_id] , identifier[flow_id] , identifier[request_id] = identifier[request_key]
keyword[if] identifier[request_key] keyword[in] identifier[response_counts] :
identifier[conditions] . identifier[append] ( identifier[condition_template] )
identifier[args] . identifier[append] ( identifier[db_utils] . identifier[ClientIDToInt] ( identifier[client_id] ))
identifier[args] . identifier[append] ( identifier[db_utils] . identifier[FlowIDToInt] ( identifier[flow_id] ))
identifier[args] . identifier[append] ( identifier[request_id] )
identifier[args] . identifier[append] ( identifier[response_counts] [ identifier[request_key] ])
keyword[if] keyword[not] identifier[args] :
keyword[return] identifier[completed_requests]
identifier[query] = literal[string]
identifier[query] = identifier[query] . identifier[format] ( identifier[conditions] = literal[string] . identifier[join] ( identifier[conditions] ))
identifier[cursor] . identifier[execute] ( identifier[query] , identifier[args] )
keyword[for] identifier[client_id_int] , identifier[flow_id_int] , identifier[request_id] , identifier[request] keyword[in] identifier[cursor] . identifier[fetchall] ():
identifier[request_key] =( identifier[db_utils] . identifier[IntToClientID] ( identifier[client_id_int] ),
identifier[db_utils] . identifier[IntToFlowID] ( identifier[flow_id_int] ), identifier[request_id] )
identifier[r] = identifier[rdf_flow_objects] . identifier[FlowRequest] . identifier[FromSerializedString] ( identifier[request] )
identifier[completed_requests] [ identifier[request_key] ]= identifier[r]
identifier[query] = literal[string]
identifier[query] = identifier[query] . identifier[format] ( identifier[conditions] = literal[string] . identifier[join] ( identifier[conditions] ))
identifier[cursor] . identifier[execute] ( identifier[query] , identifier[args] )
keyword[return] identifier[completed_requests] | def _ReadLockAndUpdateCompletedRequests(self, request_keys, response_counts, cursor):
"""Reads, locks, and updates completed requests."""
condition_template = '\n (flow_requests.client_id = %s AND\n flow_requests.flow_id = %s AND\n flow_requests.request_id = %s AND\n responses_expected = %s)'
args = []
conditions = []
completed_requests = {}
for request_key in request_keys:
(client_id, flow_id, request_id) = request_key
if request_key in response_counts:
conditions.append(condition_template)
args.append(db_utils.ClientIDToInt(client_id))
args.append(db_utils.FlowIDToInt(flow_id))
args.append(request_id)
args.append(response_counts[request_key]) # depends on [control=['if'], data=['request_key', 'response_counts']] # depends on [control=['for'], data=['request_key']]
if not args:
return completed_requests # depends on [control=['if'], data=[]]
query = '\n SELECT client_id, flow_id, request_id, request\n FROM flow_requests\n WHERE ({conditions}) AND NOT needs_processing\n FOR UPDATE\n '
query = query.format(conditions=' OR '.join(conditions))
cursor.execute(query, args)
for (client_id_int, flow_id_int, request_id, request) in cursor.fetchall():
request_key = (db_utils.IntToClientID(client_id_int), db_utils.IntToFlowID(flow_id_int), request_id)
r = rdf_flow_objects.FlowRequest.FromSerializedString(request)
completed_requests[request_key] = r # depends on [control=['for'], data=[]]
query = '\n UPDATE flow_requests\n SET needs_processing = TRUE\n WHERE ({conditions}) AND NOT needs_processing\n '
query = query.format(conditions=' OR '.join(conditions))
cursor.execute(query, args)
return completed_requests |
def future_exceptions(context, rrevent=None):
"""
Displays a list of all the future exceptions (extra info, cancellations and
postponements) for a recurring event. If the recurring event is not
specified it is assumed to be the current page.
"""
request = context['request']
if rrevent is None:
rrevent = context.get('page')
if rrevent:
exceptions = rrevent._futureExceptions(request)
else:
exceptions = []
return {'request': request,
'exceptions': exceptions} | def function[future_exceptions, parameter[context, rrevent]]:
constant[
Displays a list of all the future exceptions (extra info, cancellations and
postponements) for a recurring event. If the recurring event is not
specified it is assumed to be the current page.
]
variable[request] assign[=] call[name[context]][constant[request]]
if compare[name[rrevent] is constant[None]] begin[:]
variable[rrevent] assign[=] call[name[context].get, parameter[constant[page]]]
if name[rrevent] begin[:]
variable[exceptions] assign[=] call[name[rrevent]._futureExceptions, parameter[name[request]]]
return[dictionary[[<ast.Constant object at 0x7da18dc99cf0>, <ast.Constant object at 0x7da18dc98100>], [<ast.Name object at 0x7da18dc99db0>, <ast.Name object at 0x7da18dc997e0>]]] | keyword[def] identifier[future_exceptions] ( identifier[context] , identifier[rrevent] = keyword[None] ):
literal[string]
identifier[request] = identifier[context] [ literal[string] ]
keyword[if] identifier[rrevent] keyword[is] keyword[None] :
identifier[rrevent] = identifier[context] . identifier[get] ( literal[string] )
keyword[if] identifier[rrevent] :
identifier[exceptions] = identifier[rrevent] . identifier[_futureExceptions] ( identifier[request] )
keyword[else] :
identifier[exceptions] =[]
keyword[return] { literal[string] : identifier[request] ,
literal[string] : identifier[exceptions] } | def future_exceptions(context, rrevent=None):
"""
Displays a list of all the future exceptions (extra info, cancellations and
postponements) for a recurring event. If the recurring event is not
specified it is assumed to be the current page.
"""
request = context['request']
if rrevent is None:
rrevent = context.get('page') # depends on [control=['if'], data=['rrevent']]
if rrevent:
exceptions = rrevent._futureExceptions(request) # depends on [control=['if'], data=[]]
else:
exceptions = []
return {'request': request, 'exceptions': exceptions} |
def photos(self):
"""Copy photos to the destination user."""
# Reverse because pictures appear in inverse chronological order.
for photo_info in self.dest_user.profile.photo_infos:
self.dest_user.photo.delete(photo_info)
return [self.dest_user.photo.upload_and_confirm(info)
for info in reversed(self.source_profile.photo_infos)] | def function[photos, parameter[self]]:
constant[Copy photos to the destination user.]
for taget[name[photo_info]] in starred[name[self].dest_user.profile.photo_infos] begin[:]
call[name[self].dest_user.photo.delete, parameter[name[photo_info]]]
return[<ast.ListComp object at 0x7da1b282b310>] | keyword[def] identifier[photos] ( identifier[self] ):
literal[string]
keyword[for] identifier[photo_info] keyword[in] identifier[self] . identifier[dest_user] . identifier[profile] . identifier[photo_infos] :
identifier[self] . identifier[dest_user] . identifier[photo] . identifier[delete] ( identifier[photo_info] )
keyword[return] [ identifier[self] . identifier[dest_user] . identifier[photo] . identifier[upload_and_confirm] ( identifier[info] )
keyword[for] identifier[info] keyword[in] identifier[reversed] ( identifier[self] . identifier[source_profile] . identifier[photo_infos] )] | def photos(self):
"""Copy photos to the destination user."""
# Reverse because pictures appear in inverse chronological order.
for photo_info in self.dest_user.profile.photo_infos:
self.dest_user.photo.delete(photo_info) # depends on [control=['for'], data=['photo_info']]
return [self.dest_user.photo.upload_and_confirm(info) for info in reversed(self.source_profile.photo_infos)] |
def const_return(func):
"""
>>> from Redy.Magic.Classic import const_return
>>> @const_return
>>> def f(x):
>>> return x
>>> r1 = f(1)
>>> assert r1 is 1 and r1 is f(2)
"""
result = _undef
def ret_call(*args, **kwargs):
nonlocal result
if result is _undef:
result = func(*args, **kwargs)
return result
return ret_call | def function[const_return, parameter[func]]:
constant[
>>> from Redy.Magic.Classic import const_return
>>> @const_return
>>> def f(x):
>>> return x
>>> r1 = f(1)
>>> assert r1 is 1 and r1 is f(2)
]
variable[result] assign[=] name[_undef]
def function[ret_call, parameter[]]:
<ast.Nonlocal object at 0x7da20c76cbe0>
if compare[name[result] is name[_undef]] begin[:]
variable[result] assign[=] call[name[func], parameter[<ast.Starred object at 0x7da20c76c1f0>]]
return[name[result]]
return[name[ret_call]] | keyword[def] identifier[const_return] ( identifier[func] ):
literal[string]
identifier[result] = identifier[_undef]
keyword[def] identifier[ret_call] (* identifier[args] ,** identifier[kwargs] ):
keyword[nonlocal] identifier[result]
keyword[if] identifier[result] keyword[is] identifier[_undef] :
identifier[result] = identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[result]
keyword[return] identifier[ret_call] | def const_return(func):
"""
>>> from Redy.Magic.Classic import const_return
>>> @const_return
>>> def f(x):
>>> return x
>>> r1 = f(1)
>>> assert r1 is 1 and r1 is f(2)
"""
result = _undef
def ret_call(*args, **kwargs):
nonlocal result
if result is _undef:
result = func(*args, **kwargs) # depends on [control=['if'], data=['result']]
return result
return ret_call |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.