code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def prepare(self, node):
"""
Initialise arguments effects as this analysis in inter-procedural.
Initialisation done for Pythonic functions and default values set for
user defined functions.
"""
super(ArgumentReadOnce, self).prepare(node)
# global functions init
for n in self.global_declarations.values():
fe = ArgumentReadOnce.FunctionEffects(n)
self.node_to_functioneffect[n] = fe
self.result.add(fe)
# Pythonic functions init
def save_effect(module):
""" Recursively save read once effect for Pythonic functions. """
for intr in module.values():
if isinstance(intr, dict): # Submodule case
save_effect(intr)
else:
fe = ArgumentReadOnce.FunctionEffects(intr)
self.node_to_functioneffect[intr] = fe
self.result.add(fe)
if isinstance(intr, intrinsic.Class): # Class case
save_effect(intr.fields)
for module in MODULES.values():
save_effect(module) | def function[prepare, parameter[self, node]]:
constant[
Initialise arguments effects as this analysis in inter-procedural.
Initialisation done for Pythonic functions and default values set for
user defined functions.
]
call[call[name[super], parameter[name[ArgumentReadOnce], name[self]]].prepare, parameter[name[node]]]
for taget[name[n]] in starred[call[name[self].global_declarations.values, parameter[]]] begin[:]
variable[fe] assign[=] call[name[ArgumentReadOnce].FunctionEffects, parameter[name[n]]]
call[name[self].node_to_functioneffect][name[n]] assign[=] name[fe]
call[name[self].result.add, parameter[name[fe]]]
def function[save_effect, parameter[module]]:
constant[ Recursively save read once effect for Pythonic functions. ]
for taget[name[intr]] in starred[call[name[module].values, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[intr], name[dict]]] begin[:]
call[name[save_effect], parameter[name[intr]]]
for taget[name[module]] in starred[call[name[MODULES].values, parameter[]]] begin[:]
call[name[save_effect], parameter[name[module]]] | keyword[def] identifier[prepare] ( identifier[self] , identifier[node] ):
literal[string]
identifier[super] ( identifier[ArgumentReadOnce] , identifier[self] ). identifier[prepare] ( identifier[node] )
keyword[for] identifier[n] keyword[in] identifier[self] . identifier[global_declarations] . identifier[values] ():
identifier[fe] = identifier[ArgumentReadOnce] . identifier[FunctionEffects] ( identifier[n] )
identifier[self] . identifier[node_to_functioneffect] [ identifier[n] ]= identifier[fe]
identifier[self] . identifier[result] . identifier[add] ( identifier[fe] )
keyword[def] identifier[save_effect] ( identifier[module] ):
literal[string]
keyword[for] identifier[intr] keyword[in] identifier[module] . identifier[values] ():
keyword[if] identifier[isinstance] ( identifier[intr] , identifier[dict] ):
identifier[save_effect] ( identifier[intr] )
keyword[else] :
identifier[fe] = identifier[ArgumentReadOnce] . identifier[FunctionEffects] ( identifier[intr] )
identifier[self] . identifier[node_to_functioneffect] [ identifier[intr] ]= identifier[fe]
identifier[self] . identifier[result] . identifier[add] ( identifier[fe] )
keyword[if] identifier[isinstance] ( identifier[intr] , identifier[intrinsic] . identifier[Class] ):
identifier[save_effect] ( identifier[intr] . identifier[fields] )
keyword[for] identifier[module] keyword[in] identifier[MODULES] . identifier[values] ():
identifier[save_effect] ( identifier[module] ) | def prepare(self, node):
"""
Initialise arguments effects as this analysis in inter-procedural.
Initialisation done for Pythonic functions and default values set for
user defined functions.
"""
super(ArgumentReadOnce, self).prepare(node)
# global functions init
for n in self.global_declarations.values():
fe = ArgumentReadOnce.FunctionEffects(n)
self.node_to_functioneffect[n] = fe
self.result.add(fe) # depends on [control=['for'], data=['n']]
# Pythonic functions init
def save_effect(module):
""" Recursively save read once effect for Pythonic functions. """
for intr in module.values():
if isinstance(intr, dict): # Submodule case
save_effect(intr) # depends on [control=['if'], data=[]]
else:
fe = ArgumentReadOnce.FunctionEffects(intr)
self.node_to_functioneffect[intr] = fe
self.result.add(fe)
if isinstance(intr, intrinsic.Class): # Class case
save_effect(intr.fields) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['intr']]
for module in MODULES.values():
save_effect(module) # depends on [control=['for'], data=['module']] |
def loaddata(settings_module,
fixtures,
bin_env=None,
database=None,
pythonpath=None,
env=None):
'''
Load fixture data
Fixtures:
comma separated list of fixtures to load
CLI Example:
.. code-block:: bash
salt '*' django.loaddata <settings_module> <comma delimited list of fixtures>
'''
args = []
kwargs = {}
if database:
kwargs['database'] = database
cmd = '{0} {1}'.format('loaddata', ' '.join(fixtures.split(',')))
return command(settings_module,
cmd,
bin_env,
pythonpath,
env,
*args, **kwargs) | def function[loaddata, parameter[settings_module, fixtures, bin_env, database, pythonpath, env]]:
constant[
Load fixture data
Fixtures:
comma separated list of fixtures to load
CLI Example:
.. code-block:: bash
salt '*' django.loaddata <settings_module> <comma delimited list of fixtures>
]
variable[args] assign[=] list[[]]
variable[kwargs] assign[=] dictionary[[], []]
if name[database] begin[:]
call[name[kwargs]][constant[database]] assign[=] name[database]
variable[cmd] assign[=] call[constant[{0} {1}].format, parameter[constant[loaddata], call[constant[ ].join, parameter[call[name[fixtures].split, parameter[constant[,]]]]]]]
return[call[name[command], parameter[name[settings_module], name[cmd], name[bin_env], name[pythonpath], name[env], <ast.Starred object at 0x7da18c4ceb00>]]] | keyword[def] identifier[loaddata] ( identifier[settings_module] ,
identifier[fixtures] ,
identifier[bin_env] = keyword[None] ,
identifier[database] = keyword[None] ,
identifier[pythonpath] = keyword[None] ,
identifier[env] = keyword[None] ):
literal[string]
identifier[args] =[]
identifier[kwargs] ={}
keyword[if] identifier[database] :
identifier[kwargs] [ literal[string] ]= identifier[database]
identifier[cmd] = literal[string] . identifier[format] ( literal[string] , literal[string] . identifier[join] ( identifier[fixtures] . identifier[split] ( literal[string] )))
keyword[return] identifier[command] ( identifier[settings_module] ,
identifier[cmd] ,
identifier[bin_env] ,
identifier[pythonpath] ,
identifier[env] ,
* identifier[args] ,** identifier[kwargs] ) | def loaddata(settings_module, fixtures, bin_env=None, database=None, pythonpath=None, env=None):
"""
Load fixture data
Fixtures:
comma separated list of fixtures to load
CLI Example:
.. code-block:: bash
salt '*' django.loaddata <settings_module> <comma delimited list of fixtures>
"""
args = []
kwargs = {}
if database:
kwargs['database'] = database # depends on [control=['if'], data=[]]
cmd = '{0} {1}'.format('loaddata', ' '.join(fixtures.split(',')))
return command(settings_module, cmd, bin_env, pythonpath, env, *args, **kwargs) |
def get_urn(self):
"""
Assumes that each HucitAuthor has only one CTS URN.
"""
# TODO: check type
try:
type_ctsurn = self.session.get_resource(BASE_URI_TYPES % "CTS_URN"
, self.session.get_class(surf.ns.ECRM['E55_Type']))
urn = [CTS_URN(urnstring.rdfs_label.one)
for urnstring in self.ecrm_P1_is_identified_by
if urnstring.uri == surf.ns.ECRM['E42_Identifier']
and urnstring.ecrm_P2_has_type.first == type_ctsurn][0]
return urn
except Exception as e:
return None | def function[get_urn, parameter[self]]:
constant[
Assumes that each HucitAuthor has only one CTS URN.
]
<ast.Try object at 0x7da18c4ceb60> | keyword[def] identifier[get_urn] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[type_ctsurn] = identifier[self] . identifier[session] . identifier[get_resource] ( identifier[BASE_URI_TYPES] % literal[string]
, identifier[self] . identifier[session] . identifier[get_class] ( identifier[surf] . identifier[ns] . identifier[ECRM] [ literal[string] ]))
identifier[urn] =[ identifier[CTS_URN] ( identifier[urnstring] . identifier[rdfs_label] . identifier[one] )
keyword[for] identifier[urnstring] keyword[in] identifier[self] . identifier[ecrm_P1_is_identified_by]
keyword[if] identifier[urnstring] . identifier[uri] == identifier[surf] . identifier[ns] . identifier[ECRM] [ literal[string] ]
keyword[and] identifier[urnstring] . identifier[ecrm_P2_has_type] . identifier[first] == identifier[type_ctsurn] ][ literal[int] ]
keyword[return] identifier[urn]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[return] keyword[None] | def get_urn(self):
"""
Assumes that each HucitAuthor has only one CTS URN.
"""
# TODO: check type
try:
type_ctsurn = self.session.get_resource(BASE_URI_TYPES % 'CTS_URN', self.session.get_class(surf.ns.ECRM['E55_Type']))
urn = [CTS_URN(urnstring.rdfs_label.one) for urnstring in self.ecrm_P1_is_identified_by if urnstring.uri == surf.ns.ECRM['E42_Identifier'] and urnstring.ecrm_P2_has_type.first == type_ctsurn][0]
return urn # depends on [control=['try'], data=[]]
except Exception as e:
return None # depends on [control=['except'], data=[]] |
def main(self):
"""
Run the necessary methods
"""
logging.info('Preparing metadata')
# If this script is run as part of a pipeline, the metadata objects will already exist
if not self.metadata:
self.filer()
else:
self.objectprep()
# Use the number of metadata objects to calculate the number of cores to use per sample in multi-threaded
# methods with sequence calls to multi-threaded applications
try:
self.threads = int(self.cpus / len(self.metadata)) if self.cpus / len(
self.metadata) > 1 else 1
except (TypeError, ZeroDivisionError):
self.threads = self.cpus
logging.info('Reading and formatting primers')
self.primers()
logging.info('Baiting .fastq files against primers')
self.bait()
logging.info('Baiting .fastq files against previously baited .fastq files')
self.doublebait()
logging.info('Assembling contigs from double-baited .fastq files')
self.assemble_amplicon_spades()
logging.info('Creating BLAST database')
self.make_blastdb()
logging.info('Running BLAST analyses')
self.blastnthreads()
logging.info('Parsing BLAST results')
self.parseblast()
logging.info('Clearing amplicon files from previous iterations')
self.ampliconclear()
logging.info('Creating reports')
self.reporter() | def function[main, parameter[self]]:
constant[
Run the necessary methods
]
call[name[logging].info, parameter[constant[Preparing metadata]]]
if <ast.UnaryOp object at 0x7da20e956830> begin[:]
call[name[self].filer, parameter[]]
<ast.Try object at 0x7da204622a40>
call[name[logging].info, parameter[constant[Reading and formatting primers]]]
call[name[self].primers, parameter[]]
call[name[logging].info, parameter[constant[Baiting .fastq files against primers]]]
call[name[self].bait, parameter[]]
call[name[logging].info, parameter[constant[Baiting .fastq files against previously baited .fastq files]]]
call[name[self].doublebait, parameter[]]
call[name[logging].info, parameter[constant[Assembling contigs from double-baited .fastq files]]]
call[name[self].assemble_amplicon_spades, parameter[]]
call[name[logging].info, parameter[constant[Creating BLAST database]]]
call[name[self].make_blastdb, parameter[]]
call[name[logging].info, parameter[constant[Running BLAST analyses]]]
call[name[self].blastnthreads, parameter[]]
call[name[logging].info, parameter[constant[Parsing BLAST results]]]
call[name[self].parseblast, parameter[]]
call[name[logging].info, parameter[constant[Clearing amplicon files from previous iterations]]]
call[name[self].ampliconclear, parameter[]]
call[name[logging].info, parameter[constant[Creating reports]]]
call[name[self].reporter, parameter[]] | keyword[def] identifier[main] ( identifier[self] ):
literal[string]
identifier[logging] . identifier[info] ( literal[string] )
keyword[if] keyword[not] identifier[self] . identifier[metadata] :
identifier[self] . identifier[filer] ()
keyword[else] :
identifier[self] . identifier[objectprep] ()
keyword[try] :
identifier[self] . identifier[threads] = identifier[int] ( identifier[self] . identifier[cpus] / identifier[len] ( identifier[self] . identifier[metadata] )) keyword[if] identifier[self] . identifier[cpus] / identifier[len] (
identifier[self] . identifier[metadata] )> literal[int] keyword[else] literal[int]
keyword[except] ( identifier[TypeError] , identifier[ZeroDivisionError] ):
identifier[self] . identifier[threads] = identifier[self] . identifier[cpus]
identifier[logging] . identifier[info] ( literal[string] )
identifier[self] . identifier[primers] ()
identifier[logging] . identifier[info] ( literal[string] )
identifier[self] . identifier[bait] ()
identifier[logging] . identifier[info] ( literal[string] )
identifier[self] . identifier[doublebait] ()
identifier[logging] . identifier[info] ( literal[string] )
identifier[self] . identifier[assemble_amplicon_spades] ()
identifier[logging] . identifier[info] ( literal[string] )
identifier[self] . identifier[make_blastdb] ()
identifier[logging] . identifier[info] ( literal[string] )
identifier[self] . identifier[blastnthreads] ()
identifier[logging] . identifier[info] ( literal[string] )
identifier[self] . identifier[parseblast] ()
identifier[logging] . identifier[info] ( literal[string] )
identifier[self] . identifier[ampliconclear] ()
identifier[logging] . identifier[info] ( literal[string] )
identifier[self] . identifier[reporter] () | def main(self):
"""
Run the necessary methods
"""
logging.info('Preparing metadata')
# If this script is run as part of a pipeline, the metadata objects will already exist
if not self.metadata:
self.filer() # depends on [control=['if'], data=[]]
else:
self.objectprep()
# Use the number of metadata objects to calculate the number of cores to use per sample in multi-threaded
# methods with sequence calls to multi-threaded applications
try:
self.threads = int(self.cpus / len(self.metadata)) if self.cpus / len(self.metadata) > 1 else 1 # depends on [control=['try'], data=[]]
except (TypeError, ZeroDivisionError):
self.threads = self.cpus # depends on [control=['except'], data=[]]
logging.info('Reading and formatting primers')
self.primers()
logging.info('Baiting .fastq files against primers')
self.bait()
logging.info('Baiting .fastq files against previously baited .fastq files')
self.doublebait()
logging.info('Assembling contigs from double-baited .fastq files')
self.assemble_amplicon_spades()
logging.info('Creating BLAST database')
self.make_blastdb()
logging.info('Running BLAST analyses')
self.blastnthreads()
logging.info('Parsing BLAST results')
self.parseblast()
logging.info('Clearing amplicon files from previous iterations')
self.ampliconclear()
logging.info('Creating reports')
self.reporter() |
def deep_get(self, content, keys, traversed_path=None):
'''
Allow to retrieve content nested inside a several layers deep dict/list
Examples: -content: {
"key1": {
"key2" : [
{
"name" : "object1",
"value" : 42
},
{
"name" : "object2",
"value" : 72
}
]
}
}
-keys: ["key1", "key2", "1", "value"]
would return:
[(["key1", "key2", "1", "value"], 72)]
-keys: ["key1", "key2", "1", "*"]
would return:
[(["key1", "key2", "1", "value"], 72), (["key1", "key2", "1", "name"], "object2")]
-keys: ["key1", "key2", "*", "value"]
would return:
[(["key1", "key2", "1", "value"], 72), (["key1", "key2", "0", "value"], 42)]
'''
if traversed_path is None:
traversed_path = []
if keys == []:
return [(traversed_path, content)]
key = keys[0]
regex = "".join(["^", key, "$"])
try:
key_rex = re.compile(regex)
except Exception:
self.warning("Cannot compile regex: %s" % regex)
return []
results = []
for new_key, new_content in self.items(content):
if key_rex.match(new_key):
results.extend(self.deep_get(new_content, keys[1:], traversed_path + [str(new_key)]))
return results | def function[deep_get, parameter[self, content, keys, traversed_path]]:
constant[
Allow to retrieve content nested inside a several layers deep dict/list
Examples: -content: {
"key1": {
"key2" : [
{
"name" : "object1",
"value" : 42
},
{
"name" : "object2",
"value" : 72
}
]
}
}
-keys: ["key1", "key2", "1", "value"]
would return:
[(["key1", "key2", "1", "value"], 72)]
-keys: ["key1", "key2", "1", "*"]
would return:
[(["key1", "key2", "1", "value"], 72), (["key1", "key2", "1", "name"], "object2")]
-keys: ["key1", "key2", "*", "value"]
would return:
[(["key1", "key2", "1", "value"], 72), (["key1", "key2", "0", "value"], 42)]
]
if compare[name[traversed_path] is constant[None]] begin[:]
variable[traversed_path] assign[=] list[[]]
if compare[name[keys] equal[==] list[[]]] begin[:]
return[list[[<ast.Tuple object at 0x7da18f813ca0>]]]
variable[key] assign[=] call[name[keys]][constant[0]]
variable[regex] assign[=] call[constant[].join, parameter[list[[<ast.Constant object at 0x7da2047ebca0>, <ast.Name object at 0x7da2047e9b40>, <ast.Constant object at 0x7da2047e9db0>]]]]
<ast.Try object at 0x7da2047e8700>
variable[results] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c6e7ee0>, <ast.Name object at 0x7da20c6e7e50>]]] in starred[call[name[self].items, parameter[name[content]]]] begin[:]
if call[name[key_rex].match, parameter[name[new_key]]] begin[:]
call[name[results].extend, parameter[call[name[self].deep_get, parameter[name[new_content], call[name[keys]][<ast.Slice object at 0x7da2045640d0>], binary_operation[name[traversed_path] + list[[<ast.Call object at 0x7da2045676d0>]]]]]]]
return[name[results]] | keyword[def] identifier[deep_get] ( identifier[self] , identifier[content] , identifier[keys] , identifier[traversed_path] = keyword[None] ):
literal[string]
keyword[if] identifier[traversed_path] keyword[is] keyword[None] :
identifier[traversed_path] =[]
keyword[if] identifier[keys] ==[]:
keyword[return] [( identifier[traversed_path] , identifier[content] )]
identifier[key] = identifier[keys] [ literal[int] ]
identifier[regex] = literal[string] . identifier[join] ([ literal[string] , identifier[key] , literal[string] ])
keyword[try] :
identifier[key_rex] = identifier[re] . identifier[compile] ( identifier[regex] )
keyword[except] identifier[Exception] :
identifier[self] . identifier[warning] ( literal[string] % identifier[regex] )
keyword[return] []
identifier[results] =[]
keyword[for] identifier[new_key] , identifier[new_content] keyword[in] identifier[self] . identifier[items] ( identifier[content] ):
keyword[if] identifier[key_rex] . identifier[match] ( identifier[new_key] ):
identifier[results] . identifier[extend] ( identifier[self] . identifier[deep_get] ( identifier[new_content] , identifier[keys] [ literal[int] :], identifier[traversed_path] +[ identifier[str] ( identifier[new_key] )]))
keyword[return] identifier[results] | def deep_get(self, content, keys, traversed_path=None):
"""
Allow to retrieve content nested inside a several layers deep dict/list
Examples: -content: {
"key1": {
"key2" : [
{
"name" : "object1",
"value" : 42
},
{
"name" : "object2",
"value" : 72
}
]
}
}
-keys: ["key1", "key2", "1", "value"]
would return:
[(["key1", "key2", "1", "value"], 72)]
-keys: ["key1", "key2", "1", "*"]
would return:
[(["key1", "key2", "1", "value"], 72), (["key1", "key2", "1", "name"], "object2")]
-keys: ["key1", "key2", "*", "value"]
would return:
[(["key1", "key2", "1", "value"], 72), (["key1", "key2", "0", "value"], 42)]
"""
if traversed_path is None:
traversed_path = [] # depends on [control=['if'], data=['traversed_path']]
if keys == []:
return [(traversed_path, content)] # depends on [control=['if'], data=[]]
key = keys[0]
regex = ''.join(['^', key, '$'])
try:
key_rex = re.compile(regex) # depends on [control=['try'], data=[]]
except Exception:
self.warning('Cannot compile regex: %s' % regex)
return [] # depends on [control=['except'], data=[]]
results = []
for (new_key, new_content) in self.items(content):
if key_rex.match(new_key):
results.extend(self.deep_get(new_content, keys[1:], traversed_path + [str(new_key)])) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return results |
def set_models_keyspace(self, keyspace):
"""Set keyspace for all connection models"""
for models in self.connection.introspection.cql_models.values():
for model in models:
model.__keyspace__ = keyspace | def function[set_models_keyspace, parameter[self, keyspace]]:
constant[Set keyspace for all connection models]
for taget[name[models]] in starred[call[name[self].connection.introspection.cql_models.values, parameter[]]] begin[:]
for taget[name[model]] in starred[name[models]] begin[:]
name[model].__keyspace__ assign[=] name[keyspace] | keyword[def] identifier[set_models_keyspace] ( identifier[self] , identifier[keyspace] ):
literal[string]
keyword[for] identifier[models] keyword[in] identifier[self] . identifier[connection] . identifier[introspection] . identifier[cql_models] . identifier[values] ():
keyword[for] identifier[model] keyword[in] identifier[models] :
identifier[model] . identifier[__keyspace__] = identifier[keyspace] | def set_models_keyspace(self, keyspace):
"""Set keyspace for all connection models"""
for models in self.connection.introspection.cql_models.values():
for model in models:
model.__keyspace__ = keyspace # depends on [control=['for'], data=['model']] # depends on [control=['for'], data=['models']] |
def add_content(self, content):
"""
Add content to this fragment.
`content` is a Unicode string, HTML to append to the body of the
fragment. It must not contain a ``<body>`` tag, or otherwise assume
that it is the only content on the page.
"""
assert isinstance(content, six.text_type)
self.content += content | def function[add_content, parameter[self, content]]:
constant[
Add content to this fragment.
`content` is a Unicode string, HTML to append to the body of the
fragment. It must not contain a ``<body>`` tag, or otherwise assume
that it is the only content on the page.
]
assert[call[name[isinstance], parameter[name[content], name[six].text_type]]]
<ast.AugAssign object at 0x7da20c6aad10> | keyword[def] identifier[add_content] ( identifier[self] , identifier[content] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[content] , identifier[six] . identifier[text_type] )
identifier[self] . identifier[content] += identifier[content] | def add_content(self, content):
"""
Add content to this fragment.
`content` is a Unicode string, HTML to append to the body of the
fragment. It must not contain a ``<body>`` tag, or otherwise assume
that it is the only content on the page.
"""
assert isinstance(content, six.text_type)
self.content += content |
def _get_string_type_from_token(token_type):
"""Return 'Single' or 'Double' depending on what kind of string this is."""
return_value = None
if token_type in [TokenType.BeginSingleQuotedLiteral,
TokenType.EndSingleQuotedLiteral]:
return_value = "Single"
elif token_type in [TokenType.BeginDoubleQuotedLiteral,
TokenType.EndDoubleQuotedLiteral]:
return_value = "Double"
assert return_value is not None
return return_value | def function[_get_string_type_from_token, parameter[token_type]]:
constant[Return 'Single' or 'Double' depending on what kind of string this is.]
variable[return_value] assign[=] constant[None]
if compare[name[token_type] in list[[<ast.Attribute object at 0x7da18bc70bb0>, <ast.Attribute object at 0x7da18bc73e50>]]] begin[:]
variable[return_value] assign[=] constant[Single]
assert[compare[name[return_value] is_not constant[None]]]
return[name[return_value]] | keyword[def] identifier[_get_string_type_from_token] ( identifier[token_type] ):
literal[string]
identifier[return_value] = keyword[None]
keyword[if] identifier[token_type] keyword[in] [ identifier[TokenType] . identifier[BeginSingleQuotedLiteral] ,
identifier[TokenType] . identifier[EndSingleQuotedLiteral] ]:
identifier[return_value] = literal[string]
keyword[elif] identifier[token_type] keyword[in] [ identifier[TokenType] . identifier[BeginDoubleQuotedLiteral] ,
identifier[TokenType] . identifier[EndDoubleQuotedLiteral] ]:
identifier[return_value] = literal[string]
keyword[assert] identifier[return_value] keyword[is] keyword[not] keyword[None]
keyword[return] identifier[return_value] | def _get_string_type_from_token(token_type):
"""Return 'Single' or 'Double' depending on what kind of string this is."""
return_value = None
if token_type in [TokenType.BeginSingleQuotedLiteral, TokenType.EndSingleQuotedLiteral]:
return_value = 'Single' # depends on [control=['if'], data=[]]
elif token_type in [TokenType.BeginDoubleQuotedLiteral, TokenType.EndDoubleQuotedLiteral]:
return_value = 'Double' # depends on [control=['if'], data=[]]
assert return_value is not None
return return_value |
def detect_metadata_url_scheme(url):
"""detect whether a url is a Service type that HHypermap supports"""
scheme = None
url_lower = url.lower()
if any(x in url_lower for x in ['wms', 'service=wms']):
scheme = 'OGC:WMS'
if any(x in url_lower for x in ['wmts', 'service=wmts']):
scheme = 'OGC:WMTS'
elif all(x in url for x in ['/MapServer', 'f=json']):
scheme = 'ESRI:ArcGIS:MapServer'
elif all(x in url for x in ['/ImageServer', 'f=json']):
scheme = 'ESRI:ArcGIS:ImageServer'
return scheme | def function[detect_metadata_url_scheme, parameter[url]]:
constant[detect whether a url is a Service type that HHypermap supports]
variable[scheme] assign[=] constant[None]
variable[url_lower] assign[=] call[name[url].lower, parameter[]]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da18c4cdc00>]] begin[:]
variable[scheme] assign[=] constant[OGC:WMS]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da18c4cc2e0>]] begin[:]
variable[scheme] assign[=] constant[OGC:WMTS]
return[name[scheme]] | keyword[def] identifier[detect_metadata_url_scheme] ( identifier[url] ):
literal[string]
identifier[scheme] = keyword[None]
identifier[url_lower] = identifier[url] . identifier[lower] ()
keyword[if] identifier[any] ( identifier[x] keyword[in] identifier[url_lower] keyword[for] identifier[x] keyword[in] [ literal[string] , literal[string] ]):
identifier[scheme] = literal[string]
keyword[if] identifier[any] ( identifier[x] keyword[in] identifier[url_lower] keyword[for] identifier[x] keyword[in] [ literal[string] , literal[string] ]):
identifier[scheme] = literal[string]
keyword[elif] identifier[all] ( identifier[x] keyword[in] identifier[url] keyword[for] identifier[x] keyword[in] [ literal[string] , literal[string] ]):
identifier[scheme] = literal[string]
keyword[elif] identifier[all] ( identifier[x] keyword[in] identifier[url] keyword[for] identifier[x] keyword[in] [ literal[string] , literal[string] ]):
identifier[scheme] = literal[string]
keyword[return] identifier[scheme] | def detect_metadata_url_scheme(url):
"""detect whether a url is a Service type that HHypermap supports"""
scheme = None
url_lower = url.lower()
if any((x in url_lower for x in ['wms', 'service=wms'])):
scheme = 'OGC:WMS' # depends on [control=['if'], data=[]]
if any((x in url_lower for x in ['wmts', 'service=wmts'])):
scheme = 'OGC:WMTS' # depends on [control=['if'], data=[]]
elif all((x in url for x in ['/MapServer', 'f=json'])):
scheme = 'ESRI:ArcGIS:MapServer' # depends on [control=['if'], data=[]]
elif all((x in url for x in ['/ImageServer', 'f=json'])):
scheme = 'ESRI:ArcGIS:ImageServer' # depends on [control=['if'], data=[]]
return scheme |
def Unconstrained(self, name, bits, uninitialized=True, inspect=True, events=True, key=None, eternal=False, **kwargs):
"""
Creates an unconstrained symbol or a default concrete value (0), based on the state options.
:param name: The name of the symbol.
:param bits: The size (in bits) of the symbol.
:param uninitialized: Whether this value should be counted as an "uninitialized" value in the course of an
analysis.
:param inspect: Set to False to avoid firing SimInspect breakpoints
:param events: Set to False to avoid generating a SimEvent for the occasion
:param key: Set this to a tuple of increasingly specific identifiers (for example,
``('mem', 0xffbeff00)`` or ``('file', 4, 0x20)`` to cause it to be tracked, i.e.
accessable through ``solver.get_variables``.
:param eternal: Set to True in conjunction with setting a key to cause all states with the same
ancestry to retrieve the same symbol when trying to create the value. If False, a
counter will be appended to the key.
:returns: an unconstrained symbol (or a concrete value of 0).
"""
if o.SYMBOLIC_INITIAL_VALUES in self.state.options:
# Return a symbolic value
if o.ABSTRACT_MEMORY in self.state.options:
l.debug("Creating new top StridedInterval")
r = claripy.TSI(bits=bits, name=name, uninitialized=uninitialized, **kwargs)
else:
l.debug("Creating new unconstrained BV named %s", name)
if o.UNDER_CONSTRAINED_SYMEXEC in self.state.options:
r = self.BVS(name, bits, uninitialized=uninitialized, key=key, eternal=eternal, inspect=inspect, events=events, **kwargs)
else:
r = self.BVS(name, bits, uninitialized=uninitialized, key=key, eternal=eternal, inspect=inspect, events=events, **kwargs)
return r
else:
# Return a default value, aka. 0
return claripy.BVV(0, bits) | def function[Unconstrained, parameter[self, name, bits, uninitialized, inspect, events, key, eternal]]:
constant[
Creates an unconstrained symbol or a default concrete value (0), based on the state options.
:param name: The name of the symbol.
:param bits: The size (in bits) of the symbol.
:param uninitialized: Whether this value should be counted as an "uninitialized" value in the course of an
analysis.
:param inspect: Set to False to avoid firing SimInspect breakpoints
:param events: Set to False to avoid generating a SimEvent for the occasion
:param key: Set this to a tuple of increasingly specific identifiers (for example,
``('mem', 0xffbeff00)`` or ``('file', 4, 0x20)`` to cause it to be tracked, i.e.
accessable through ``solver.get_variables``.
:param eternal: Set to True in conjunction with setting a key to cause all states with the same
ancestry to retrieve the same symbol when trying to create the value. If False, a
counter will be appended to the key.
:returns: an unconstrained symbol (or a concrete value of 0).
]
if compare[name[o].SYMBOLIC_INITIAL_VALUES in name[self].state.options] begin[:]
if compare[name[o].ABSTRACT_MEMORY in name[self].state.options] begin[:]
call[name[l].debug, parameter[constant[Creating new top StridedInterval]]]
variable[r] assign[=] call[name[claripy].TSI, parameter[]]
return[name[r]] | keyword[def] identifier[Unconstrained] ( identifier[self] , identifier[name] , identifier[bits] , identifier[uninitialized] = keyword[True] , identifier[inspect] = keyword[True] , identifier[events] = keyword[True] , identifier[key] = keyword[None] , identifier[eternal] = keyword[False] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[o] . identifier[SYMBOLIC_INITIAL_VALUES] keyword[in] identifier[self] . identifier[state] . identifier[options] :
keyword[if] identifier[o] . identifier[ABSTRACT_MEMORY] keyword[in] identifier[self] . identifier[state] . identifier[options] :
identifier[l] . identifier[debug] ( literal[string] )
identifier[r] = identifier[claripy] . identifier[TSI] ( identifier[bits] = identifier[bits] , identifier[name] = identifier[name] , identifier[uninitialized] = identifier[uninitialized] ,** identifier[kwargs] )
keyword[else] :
identifier[l] . identifier[debug] ( literal[string] , identifier[name] )
keyword[if] identifier[o] . identifier[UNDER_CONSTRAINED_SYMEXEC] keyword[in] identifier[self] . identifier[state] . identifier[options] :
identifier[r] = identifier[self] . identifier[BVS] ( identifier[name] , identifier[bits] , identifier[uninitialized] = identifier[uninitialized] , identifier[key] = identifier[key] , identifier[eternal] = identifier[eternal] , identifier[inspect] = identifier[inspect] , identifier[events] = identifier[events] ,** identifier[kwargs] )
keyword[else] :
identifier[r] = identifier[self] . identifier[BVS] ( identifier[name] , identifier[bits] , identifier[uninitialized] = identifier[uninitialized] , identifier[key] = identifier[key] , identifier[eternal] = identifier[eternal] , identifier[inspect] = identifier[inspect] , identifier[events] = identifier[events] ,** identifier[kwargs] )
keyword[return] identifier[r]
keyword[else] :
keyword[return] identifier[claripy] . identifier[BVV] ( literal[int] , identifier[bits] ) | def Unconstrained(self, name, bits, uninitialized=True, inspect=True, events=True, key=None, eternal=False, **kwargs):
"""
Creates an unconstrained symbol or a default concrete value (0), based on the state options.
:param name: The name of the symbol.
:param bits: The size (in bits) of the symbol.
:param uninitialized: Whether this value should be counted as an "uninitialized" value in the course of an
analysis.
:param inspect: Set to False to avoid firing SimInspect breakpoints
:param events: Set to False to avoid generating a SimEvent for the occasion
:param key: Set this to a tuple of increasingly specific identifiers (for example,
``('mem', 0xffbeff00)`` or ``('file', 4, 0x20)`` to cause it to be tracked, i.e.
accessable through ``solver.get_variables``.
:param eternal: Set to True in conjunction with setting a key to cause all states with the same
ancestry to retrieve the same symbol when trying to create the value. If False, a
counter will be appended to the key.
:returns: an unconstrained symbol (or a concrete value of 0).
"""
if o.SYMBOLIC_INITIAL_VALUES in self.state.options:
# Return a symbolic value
if o.ABSTRACT_MEMORY in self.state.options:
l.debug('Creating new top StridedInterval')
r = claripy.TSI(bits=bits, name=name, uninitialized=uninitialized, **kwargs) # depends on [control=['if'], data=[]]
else:
l.debug('Creating new unconstrained BV named %s', name)
if o.UNDER_CONSTRAINED_SYMEXEC in self.state.options:
r = self.BVS(name, bits, uninitialized=uninitialized, key=key, eternal=eternal, inspect=inspect, events=events, **kwargs) # depends on [control=['if'], data=[]]
else:
r = self.BVS(name, bits, uninitialized=uninitialized, key=key, eternal=eternal, inspect=inspect, events=events, **kwargs)
return r # depends on [control=['if'], data=[]]
else:
# Return a default value, aka. 0
return claripy.BVV(0, bits) |
def get_payload(self):
"""Return Payload."""
# Session id
ret = bytes([self.session_id >> 8 & 255, self.session_id & 255])
ret += bytes([self.originator.value])
ret += bytes([self.priority.value])
ret += bytes([0]) # ParameterActive pointing to main parameter (MP)
# FPI 1+2
ret += bytes([0])
ret += bytes([0])
# Main parameter + functional parameter
ret += bytes(self.parameter)
ret += bytes(32)
# Nodes array: Number of nodes + node array + padding
ret += bytes([len(self.node_ids)]) # index array count
ret += bytes(self.node_ids) + bytes(20-len(self.node_ids))
# Priority Level Lock
ret += bytes([0])
# Priority Level information 1+2
ret += bytes([0, 0])
# Locktime
ret += bytes([0])
return ret | def function[get_payload, parameter[self]]:
constant[Return Payload.]
variable[ret] assign[=] call[name[bytes], parameter[list[[<ast.BinOp object at 0x7da20c6c7ca0>, <ast.BinOp object at 0x7da20c6c7700>]]]]
<ast.AugAssign object at 0x7da20c6c4700>
<ast.AugAssign object at 0x7da20c6c5ba0>
<ast.AugAssign object at 0x7da20c6c6440>
<ast.AugAssign object at 0x7da20c6c72e0>
<ast.AugAssign object at 0x7da20c6c7040>
<ast.AugAssign object at 0x7da20c6c4e20>
<ast.AugAssign object at 0x7da20c6c4be0>
<ast.AugAssign object at 0x7da20c6c5d80>
<ast.AugAssign object at 0x7da20c6c7c70>
<ast.AugAssign object at 0x7da20c6c4b20>
<ast.AugAssign object at 0x7da20c6c53f0>
<ast.AugAssign object at 0x7da20c6c4d30>
return[name[ret]] | keyword[def] identifier[get_payload] ( identifier[self] ):
literal[string]
identifier[ret] = identifier[bytes] ([ identifier[self] . identifier[session_id] >> literal[int] & literal[int] , identifier[self] . identifier[session_id] & literal[int] ])
identifier[ret] += identifier[bytes] ([ identifier[self] . identifier[originator] . identifier[value] ])
identifier[ret] += identifier[bytes] ([ identifier[self] . identifier[priority] . identifier[value] ])
identifier[ret] += identifier[bytes] ([ literal[int] ])
identifier[ret] += identifier[bytes] ([ literal[int] ])
identifier[ret] += identifier[bytes] ([ literal[int] ])
identifier[ret] += identifier[bytes] ( identifier[self] . identifier[parameter] )
identifier[ret] += identifier[bytes] ( literal[int] )
identifier[ret] += identifier[bytes] ([ identifier[len] ( identifier[self] . identifier[node_ids] )])
identifier[ret] += identifier[bytes] ( identifier[self] . identifier[node_ids] )+ identifier[bytes] ( literal[int] - identifier[len] ( identifier[self] . identifier[node_ids] ))
identifier[ret] += identifier[bytes] ([ literal[int] ])
identifier[ret] += identifier[bytes] ([ literal[int] , literal[int] ])
identifier[ret] += identifier[bytes] ([ literal[int] ])
keyword[return] identifier[ret] | def get_payload(self):
"""Return Payload."""
# Session id
ret = bytes([self.session_id >> 8 & 255, self.session_id & 255])
ret += bytes([self.originator.value])
ret += bytes([self.priority.value])
ret += bytes([0]) # ParameterActive pointing to main parameter (MP)
# FPI 1+2
ret += bytes([0])
ret += bytes([0])
# Main parameter + functional parameter
ret += bytes(self.parameter)
ret += bytes(32)
# Nodes array: Number of nodes + node array + padding
ret += bytes([len(self.node_ids)]) # index array count
ret += bytes(self.node_ids) + bytes(20 - len(self.node_ids))
# Priority Level Lock
ret += bytes([0])
# Priority Level information 1+2
ret += bytes([0, 0])
# Locktime
ret += bytes([0])
return ret |
def get_premises_model():
"""
Support for custom company premises model
with developer friendly validation.
"""
try:
app_label, model_name = PREMISES_MODEL.split('.')
except ValueError:
raise ImproperlyConfigured("OPENINGHOURS_PREMISES_MODEL must be of the"
" form 'app_label.model_name'")
premises_model = get_model(app_label=app_label, model_name=model_name)
if premises_model is None:
raise ImproperlyConfigured("OPENINGHOURS_PREMISES_MODEL refers to"
" model '%s' that has not been installed"
% PREMISES_MODEL)
return premises_model | def function[get_premises_model, parameter[]]:
constant[
Support for custom company premises model
with developer friendly validation.
]
<ast.Try object at 0x7da2044c2680>
variable[premises_model] assign[=] call[name[get_model], parameter[]]
if compare[name[premises_model] is constant[None]] begin[:]
<ast.Raise object at 0x7da2044c03a0>
return[name[premises_model]] | keyword[def] identifier[get_premises_model] ():
literal[string]
keyword[try] :
identifier[app_label] , identifier[model_name] = identifier[PREMISES_MODEL] . identifier[split] ( literal[string] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ImproperlyConfigured] ( literal[string]
literal[string] )
identifier[premises_model] = identifier[get_model] ( identifier[app_label] = identifier[app_label] , identifier[model_name] = identifier[model_name] )
keyword[if] identifier[premises_model] keyword[is] keyword[None] :
keyword[raise] identifier[ImproperlyConfigured] ( literal[string]
literal[string]
% identifier[PREMISES_MODEL] )
keyword[return] identifier[premises_model] | def get_premises_model():
"""
Support for custom company premises model
with developer friendly validation.
"""
try:
(app_label, model_name) = PREMISES_MODEL.split('.') # depends on [control=['try'], data=[]]
except ValueError:
raise ImproperlyConfigured("OPENINGHOURS_PREMISES_MODEL must be of the form 'app_label.model_name'") # depends on [control=['except'], data=[]]
premises_model = get_model(app_label=app_label, model_name=model_name)
if premises_model is None:
raise ImproperlyConfigured("OPENINGHOURS_PREMISES_MODEL refers to model '%s' that has not been installed" % PREMISES_MODEL) # depends on [control=['if'], data=[]]
return premises_model |
def get_installed_packages(site_packages, site_packages_64):
"""
Returns a dict of installed packages that Zappa cares about.
"""
import pkg_resources
package_to_keep = []
if os.path.isdir(site_packages):
package_to_keep += os.listdir(site_packages)
if os.path.isdir(site_packages_64):
package_to_keep += os.listdir(site_packages_64)
package_to_keep = [x.lower() for x in package_to_keep]
installed_packages = {package.project_name.lower(): package.version for package in
pkg_resources.WorkingSet()
if package.project_name.lower() in package_to_keep
or package.location.lower() in [site_packages.lower(), site_packages_64.lower()]}
return installed_packages | def function[get_installed_packages, parameter[site_packages, site_packages_64]]:
constant[
Returns a dict of installed packages that Zappa cares about.
]
import module[pkg_resources]
variable[package_to_keep] assign[=] list[[]]
if call[name[os].path.isdir, parameter[name[site_packages]]] begin[:]
<ast.AugAssign object at 0x7da1b21b4400>
if call[name[os].path.isdir, parameter[name[site_packages_64]]] begin[:]
<ast.AugAssign object at 0x7da1b21b4640>
variable[package_to_keep] assign[=] <ast.ListComp object at 0x7da1b21b47c0>
variable[installed_packages] assign[=] <ast.DictComp object at 0x7da1b21b49a0>
return[name[installed_packages]] | keyword[def] identifier[get_installed_packages] ( identifier[site_packages] , identifier[site_packages_64] ):
literal[string]
keyword[import] identifier[pkg_resources]
identifier[package_to_keep] =[]
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[site_packages] ):
identifier[package_to_keep] += identifier[os] . identifier[listdir] ( identifier[site_packages] )
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[site_packages_64] ):
identifier[package_to_keep] += identifier[os] . identifier[listdir] ( identifier[site_packages_64] )
identifier[package_to_keep] =[ identifier[x] . identifier[lower] () keyword[for] identifier[x] keyword[in] identifier[package_to_keep] ]
identifier[installed_packages] ={ identifier[package] . identifier[project_name] . identifier[lower] (): identifier[package] . identifier[version] keyword[for] identifier[package] keyword[in]
identifier[pkg_resources] . identifier[WorkingSet] ()
keyword[if] identifier[package] . identifier[project_name] . identifier[lower] () keyword[in] identifier[package_to_keep]
keyword[or] identifier[package] . identifier[location] . identifier[lower] () keyword[in] [ identifier[site_packages] . identifier[lower] (), identifier[site_packages_64] . identifier[lower] ()]}
keyword[return] identifier[installed_packages] | def get_installed_packages(site_packages, site_packages_64):
"""
Returns a dict of installed packages that Zappa cares about.
"""
import pkg_resources
package_to_keep = []
if os.path.isdir(site_packages):
package_to_keep += os.listdir(site_packages) # depends on [control=['if'], data=[]]
if os.path.isdir(site_packages_64):
package_to_keep += os.listdir(site_packages_64) # depends on [control=['if'], data=[]]
package_to_keep = [x.lower() for x in package_to_keep]
installed_packages = {package.project_name.lower(): package.version for package in pkg_resources.WorkingSet() if package.project_name.lower() in package_to_keep or package.location.lower() in [site_packages.lower(), site_packages_64.lower()]}
return installed_packages |
def list_roles():
"""
List roles.
"""
roles = role_manager.all()
if roles:
print_table(['ID', 'Name'], [(role.id, role.name) for role in roles])
else:
click.echo('No roles found.') | def function[list_roles, parameter[]]:
constant[
List roles.
]
variable[roles] assign[=] call[name[role_manager].all, parameter[]]
if name[roles] begin[:]
call[name[print_table], parameter[list[[<ast.Constant object at 0x7da18bccb8b0>, <ast.Constant object at 0x7da18bccb3a0>]], <ast.ListComp object at 0x7da18bcc9b70>]] | keyword[def] identifier[list_roles] ():
literal[string]
identifier[roles] = identifier[role_manager] . identifier[all] ()
keyword[if] identifier[roles] :
identifier[print_table] ([ literal[string] , literal[string] ],[( identifier[role] . identifier[id] , identifier[role] . identifier[name] ) keyword[for] identifier[role] keyword[in] identifier[roles] ])
keyword[else] :
identifier[click] . identifier[echo] ( literal[string] ) | def list_roles():
"""
List roles.
"""
roles = role_manager.all()
if roles:
print_table(['ID', 'Name'], [(role.id, role.name) for role in roles]) # depends on [control=['if'], data=[]]
else:
click.echo('No roles found.') |
def request_data(self):
"""
(0,'cpu_percent'),
(1,'virtual_memory_total'),
(2,'virtual_memory_available'),
(3,'virtual_memory_percent'),
(4,'virtual_memory_used'),
(5,'virtual_memory_free'),
(6,'virtual_memory_active'),
(7,'virtual_memory_inactive'),
(8,'virtual_memory_buffers'),
(9,'virtual_memory_cached'),
(10,'swap_memory_total'),
(11,'swap_memory_used'),
(12,'swap_memory_free'),
(13,'swap_memory_percent'),
(14,'swap_memory_sin'),
(15,'swap_memory_sout'),
(17,'disk_usage_systemdisk_percent'),
(18,'disk_usage_disk_percent'),
### APCUPSD Status
(100, 'STATUS'), # True/False
(101, 'LINEV'), # Volts
(102, 'BATTV'), # Volts
(103, 'BCHARGE'), # %
(104, 'TIMELEFT'), # Minutes
(105, 'LOADPCT'), #
"""
if not driver_ok:
return None
output = []
apcupsd_status_is_queried = False
for item in self.variables:
timestamp = time()
value = None
if item.systemstatvariable.information == 0:
# cpu_percent
if hasattr(psutil, 'cpu_percent'):
value = psutil.cpu_percent()
timestamp = time()
elif item.systemstatvariable.information == 1:
# virtual_memory_total
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().total
timestamp = time()
elif item.systemstatvariable.information == 2:
# virtual_memory_available
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().available
timestamp = time()
elif item.systemstatvariable.information == 3:
# virtual_memory_percent
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().percent
timestamp = time()
elif item.systemstatvariable.information == 4:
# virtual_memory_used
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().used
timestamp = time()
elif item.systemstatvariable.information == 5:
# virtual_memory_free
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().free
timestamp = time()
elif item.systemstatvariable.information == 6:
# virtual_memory_active
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().active
timestamp = time()
elif item.systemstatvariable.information == 7:
# virtual_memory_inactive
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().inactive
timestamp = time()
elif item.systemstatvariable.information == 8:
# virtual_memory_buffers
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().buffers
timestamp = time()
elif item.systemstatvariable.information == 9:
# virtual_memory_cached
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().cached
timestamp = time()
elif item.systemstatvariable.information == 10:
# swap_memory_total
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().total
timestamp = time()
elif item.systemstatvariable.information == 11:
# swap_memory_used
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().used
timestamp = time()
elif item.systemstatvariable.information == 12:
# swap_memory_free
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().free
timestamp = time()
elif item.systemstatvariable.information == 13:
# swap_memory_percent
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().percent
timestamp = time()
elif item.systemstatvariable.information == 14:
# swap_memory_sin
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().sin
timestamp = time()
elif item.systemstatvariable.information == 15:
# swap_memory_sout
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().sout
timestamp = time()
elif item.systemstatvariable.information == 17:
# disk_usage_systemdisk_percent
if hasattr(psutil, 'disk_usage'):
value = psutil.disk_usage('/').percent
timestamp = time()
elif item.systemstatvariable.information == 18:
# disk_usage_disk_percent
if hasattr(psutil, 'disk_usage'):
value = psutil.disk_usage(item.systemstatvariable.parameter).percent
timestamp = time()
elif 100 <= item.systemstatvariable.information <= 105:
# APCUPSD Status
apcupsd_status = None
if not apcupsd_status_is_queried:
apcupsd_status = query_apsupsd_status()
apcupsd_status_is_queried = True
if apcupsd_status is not None:
if item.systemstatvariable.information == 100:
if 'STATUS' in apcupsd_status:
value = apcupsd_status['STATUS']
timestamp = apcupsd_status['timestamp']
elif item.systemstatvariable.information == 101:
if 'LINEV' in apcupsd_status:
value = apcupsd_status['LINEV']
timestamp = apcupsd_status['timestamp']
elif item.systemstatvariable.information == 102:
if 'BATTV' in apcupsd_status:
value = apcupsd_status['BATTV']
timestamp = apcupsd_status['timestamp']
elif item.systemstatvariable.information == 103:
if 'BCHARGE' in apcupsd_status:
value = apcupsd_status['BCHARGE']
timestamp = apcupsd_status['timestamp']
elif item.systemstatvariable.information == 104:
if 'TIMELEFT' in apcupsd_status:
value = apcupsd_status['TIMELEFT']
timestamp = apcupsd_status['timestamp']
elif item.systemstatvariable.information == 105:
if 'LOADPCT' in apcupsd_status:
value = apcupsd_status['LOADPCT']
timestamp = apcupsd_status['timestamp']
else:
value = None
# update variable
if value is not None and item.update_value(value, timestamp):
output.append(item.create_recorded_data_element())
return output | def function[request_data, parameter[self]]:
constant[
(0,'cpu_percent'),
(1,'virtual_memory_total'),
(2,'virtual_memory_available'),
(3,'virtual_memory_percent'),
(4,'virtual_memory_used'),
(5,'virtual_memory_free'),
(6,'virtual_memory_active'),
(7,'virtual_memory_inactive'),
(8,'virtual_memory_buffers'),
(9,'virtual_memory_cached'),
(10,'swap_memory_total'),
(11,'swap_memory_used'),
(12,'swap_memory_free'),
(13,'swap_memory_percent'),
(14,'swap_memory_sin'),
(15,'swap_memory_sout'),
(17,'disk_usage_systemdisk_percent'),
(18,'disk_usage_disk_percent'),
### APCUPSD Status
(100, 'STATUS'), # True/False
(101, 'LINEV'), # Volts
(102, 'BATTV'), # Volts
(103, 'BCHARGE'), # %
(104, 'TIMELEFT'), # Minutes
(105, 'LOADPCT'), #
]
if <ast.UnaryOp object at 0x7da204623d60> begin[:]
return[constant[None]]
variable[output] assign[=] list[[]]
variable[apcupsd_status_is_queried] assign[=] constant[False]
for taget[name[item]] in starred[name[self].variables] begin[:]
variable[timestamp] assign[=] call[name[time], parameter[]]
variable[value] assign[=] constant[None]
if compare[name[item].systemstatvariable.information equal[==] constant[0]] begin[:]
if call[name[hasattr], parameter[name[psutil], constant[cpu_percent]]] begin[:]
variable[value] assign[=] call[name[psutil].cpu_percent, parameter[]]
variable[timestamp] assign[=] call[name[time], parameter[]]
if <ast.BoolOp object at 0x7da1b23464d0> begin[:]
call[name[output].append, parameter[call[name[item].create_recorded_data_element, parameter[]]]]
return[name[output]] | keyword[def] identifier[request_data] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[driver_ok] :
keyword[return] keyword[None]
identifier[output] =[]
identifier[apcupsd_status_is_queried] = keyword[False]
keyword[for] identifier[item] keyword[in] identifier[self] . identifier[variables] :
identifier[timestamp] = identifier[time] ()
identifier[value] = keyword[None]
keyword[if] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] identifier[hasattr] ( identifier[psutil] , literal[string] ):
identifier[value] = identifier[psutil] . identifier[cpu_percent] ()
identifier[timestamp] = identifier[time] ()
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] identifier[hasattr] ( identifier[psutil] , literal[string] ):
identifier[value] = identifier[psutil] . identifier[virtual_memory] (). identifier[total]
identifier[timestamp] = identifier[time] ()
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] identifier[hasattr] ( identifier[psutil] , literal[string] ):
identifier[value] = identifier[psutil] . identifier[virtual_memory] (). identifier[available]
identifier[timestamp] = identifier[time] ()
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] identifier[hasattr] ( identifier[psutil] , literal[string] ):
identifier[value] = identifier[psutil] . identifier[virtual_memory] (). identifier[percent]
identifier[timestamp] = identifier[time] ()
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] identifier[hasattr] ( identifier[psutil] , literal[string] ):
identifier[value] = identifier[psutil] . identifier[virtual_memory] (). identifier[used]
identifier[timestamp] = identifier[time] ()
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] identifier[hasattr] ( identifier[psutil] , literal[string] ):
identifier[value] = identifier[psutil] . identifier[virtual_memory] (). identifier[free]
identifier[timestamp] = identifier[time] ()
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] identifier[hasattr] ( identifier[psutil] , literal[string] ):
identifier[value] = identifier[psutil] . identifier[virtual_memory] (). identifier[active]
identifier[timestamp] = identifier[time] ()
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] identifier[hasattr] ( identifier[psutil] , literal[string] ):
identifier[value] = identifier[psutil] . identifier[virtual_memory] (). identifier[inactive]
identifier[timestamp] = identifier[time] ()
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] identifier[hasattr] ( identifier[psutil] , literal[string] ):
identifier[value] = identifier[psutil] . identifier[virtual_memory] (). identifier[buffers]
identifier[timestamp] = identifier[time] ()
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] identifier[hasattr] ( identifier[psutil] , literal[string] ):
identifier[value] = identifier[psutil] . identifier[virtual_memory] (). identifier[cached]
identifier[timestamp] = identifier[time] ()
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] identifier[hasattr] ( identifier[psutil] , literal[string] ):
identifier[value] = identifier[psutil] . identifier[swap_memory] (). identifier[total]
identifier[timestamp] = identifier[time] ()
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] identifier[hasattr] ( identifier[psutil] , literal[string] ):
identifier[value] = identifier[psutil] . identifier[swap_memory] (). identifier[used]
identifier[timestamp] = identifier[time] ()
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] identifier[hasattr] ( identifier[psutil] , literal[string] ):
identifier[value] = identifier[psutil] . identifier[swap_memory] (). identifier[free]
identifier[timestamp] = identifier[time] ()
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] identifier[hasattr] ( identifier[psutil] , literal[string] ):
identifier[value] = identifier[psutil] . identifier[swap_memory] (). identifier[percent]
identifier[timestamp] = identifier[time] ()
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] identifier[hasattr] ( identifier[psutil] , literal[string] ):
identifier[value] = identifier[psutil] . identifier[swap_memory] (). identifier[sin]
identifier[timestamp] = identifier[time] ()
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] identifier[hasattr] ( identifier[psutil] , literal[string] ):
identifier[value] = identifier[psutil] . identifier[swap_memory] (). identifier[sout]
identifier[timestamp] = identifier[time] ()
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] identifier[hasattr] ( identifier[psutil] , literal[string] ):
identifier[value] = identifier[psutil] . identifier[disk_usage] ( literal[string] ). identifier[percent]
identifier[timestamp] = identifier[time] ()
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] identifier[hasattr] ( identifier[psutil] , literal[string] ):
identifier[value] = identifier[psutil] . identifier[disk_usage] ( identifier[item] . identifier[systemstatvariable] . identifier[parameter] ). identifier[percent]
identifier[timestamp] = identifier[time] ()
keyword[elif] literal[int] <= identifier[item] . identifier[systemstatvariable] . identifier[information] <= literal[int] :
identifier[apcupsd_status] = keyword[None]
keyword[if] keyword[not] identifier[apcupsd_status_is_queried] :
identifier[apcupsd_status] = identifier[query_apsupsd_status] ()
identifier[apcupsd_status_is_queried] = keyword[True]
keyword[if] identifier[apcupsd_status] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] literal[string] keyword[in] identifier[apcupsd_status] :
identifier[value] = identifier[apcupsd_status] [ literal[string] ]
identifier[timestamp] = identifier[apcupsd_status] [ literal[string] ]
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] literal[string] keyword[in] identifier[apcupsd_status] :
identifier[value] = identifier[apcupsd_status] [ literal[string] ]
identifier[timestamp] = identifier[apcupsd_status] [ literal[string] ]
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] literal[string] keyword[in] identifier[apcupsd_status] :
identifier[value] = identifier[apcupsd_status] [ literal[string] ]
identifier[timestamp] = identifier[apcupsd_status] [ literal[string] ]
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] literal[string] keyword[in] identifier[apcupsd_status] :
identifier[value] = identifier[apcupsd_status] [ literal[string] ]
identifier[timestamp] = identifier[apcupsd_status] [ literal[string] ]
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] literal[string] keyword[in] identifier[apcupsd_status] :
identifier[value] = identifier[apcupsd_status] [ literal[string] ]
identifier[timestamp] = identifier[apcupsd_status] [ literal[string] ]
keyword[elif] identifier[item] . identifier[systemstatvariable] . identifier[information] == literal[int] :
keyword[if] literal[string] keyword[in] identifier[apcupsd_status] :
identifier[value] = identifier[apcupsd_status] [ literal[string] ]
identifier[timestamp] = identifier[apcupsd_status] [ literal[string] ]
keyword[else] :
identifier[value] = keyword[None]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] keyword[and] identifier[item] . identifier[update_value] ( identifier[value] , identifier[timestamp] ):
identifier[output] . identifier[append] ( identifier[item] . identifier[create_recorded_data_element] ())
keyword[return] identifier[output] | def request_data(self):
"""
(0,'cpu_percent'),
(1,'virtual_memory_total'),
(2,'virtual_memory_available'),
(3,'virtual_memory_percent'),
(4,'virtual_memory_used'),
(5,'virtual_memory_free'),
(6,'virtual_memory_active'),
(7,'virtual_memory_inactive'),
(8,'virtual_memory_buffers'),
(9,'virtual_memory_cached'),
(10,'swap_memory_total'),
(11,'swap_memory_used'),
(12,'swap_memory_free'),
(13,'swap_memory_percent'),
(14,'swap_memory_sin'),
(15,'swap_memory_sout'),
(17,'disk_usage_systemdisk_percent'),
(18,'disk_usage_disk_percent'),
### APCUPSD Status
(100, 'STATUS'), # True/False
(101, 'LINEV'), # Volts
(102, 'BATTV'), # Volts
(103, 'BCHARGE'), # %
(104, 'TIMELEFT'), # Minutes
(105, 'LOADPCT'), #
"""
if not driver_ok:
return None # depends on [control=['if'], data=[]]
output = []
apcupsd_status_is_queried = False
for item in self.variables:
timestamp = time()
value = None
if item.systemstatvariable.information == 0:
# cpu_percent
if hasattr(psutil, 'cpu_percent'):
value = psutil.cpu_percent()
timestamp = time() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 1:
# virtual_memory_total
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().total
timestamp = time() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 2:
# virtual_memory_available
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().available
timestamp = time() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 3:
# virtual_memory_percent
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().percent
timestamp = time() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 4:
# virtual_memory_used
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().used
timestamp = time() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 5:
# virtual_memory_free
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().free
timestamp = time() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 6:
# virtual_memory_active
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().active
timestamp = time() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 7:
# virtual_memory_inactive
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().inactive
timestamp = time() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 8:
# virtual_memory_buffers
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().buffers
timestamp = time() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 9:
# virtual_memory_cached
if hasattr(psutil, 'virtual_memory'):
value = psutil.virtual_memory().cached
timestamp = time() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 10:
# swap_memory_total
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().total
timestamp = time() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 11:
# swap_memory_used
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().used
timestamp = time() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 12:
# swap_memory_free
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().free
timestamp = time() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 13:
# swap_memory_percent
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().percent
timestamp = time() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 14:
# swap_memory_sin
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().sin
timestamp = time() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 15:
# swap_memory_sout
if hasattr(psutil, 'swap_memory'):
value = psutil.swap_memory().sout
timestamp = time() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 17:
# disk_usage_systemdisk_percent
if hasattr(psutil, 'disk_usage'):
value = psutil.disk_usage('/').percent
timestamp = time() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 18:
# disk_usage_disk_percent
if hasattr(psutil, 'disk_usage'):
value = psutil.disk_usage(item.systemstatvariable.parameter).percent
timestamp = time() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif 100 <= item.systemstatvariable.information <= 105:
# APCUPSD Status
apcupsd_status = None
if not apcupsd_status_is_queried:
apcupsd_status = query_apsupsd_status()
apcupsd_status_is_queried = True # depends on [control=['if'], data=[]]
if apcupsd_status is not None:
if item.systemstatvariable.information == 100:
if 'STATUS' in apcupsd_status:
value = apcupsd_status['STATUS']
timestamp = apcupsd_status['timestamp'] # depends on [control=['if'], data=['apcupsd_status']] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 101:
if 'LINEV' in apcupsd_status:
value = apcupsd_status['LINEV']
timestamp = apcupsd_status['timestamp'] # depends on [control=['if'], data=['apcupsd_status']] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 102:
if 'BATTV' in apcupsd_status:
value = apcupsd_status['BATTV']
timestamp = apcupsd_status['timestamp'] # depends on [control=['if'], data=['apcupsd_status']] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 103:
if 'BCHARGE' in apcupsd_status:
value = apcupsd_status['BCHARGE']
timestamp = apcupsd_status['timestamp'] # depends on [control=['if'], data=['apcupsd_status']] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 104:
if 'TIMELEFT' in apcupsd_status:
value = apcupsd_status['TIMELEFT']
timestamp = apcupsd_status['timestamp'] # depends on [control=['if'], data=['apcupsd_status']] # depends on [control=['if'], data=[]]
elif item.systemstatvariable.information == 105:
if 'LOADPCT' in apcupsd_status:
value = apcupsd_status['LOADPCT']
timestamp = apcupsd_status['timestamp'] # depends on [control=['if'], data=['apcupsd_status']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['apcupsd_status']] # depends on [control=['if'], data=[]]
else:
value = None
# update variable
if value is not None and item.update_value(value, timestamp):
output.append(item.create_recorded_data_element()) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']]
return output |
def connect(self):
"""
Sets up your Phabricator session, it's not necessary to call
this directly
"""
if self.token:
self.phab_session = {'token': self.token}
return
req = self.req_session.post('%s/api/conduit.connect' % self.host, data={
'params': json.dumps(self.connect_params),
'output': 'json',
'__conduit__': True,
})
# Parse out the response (error handling ommitted)
result = req.json()['result']
self.phab_session = {
'sessionKey': result['sessionKey'],
'connectionID': result['connectionID'],
} | def function[connect, parameter[self]]:
constant[
Sets up your Phabricator session, it's not necessary to call
this directly
]
if name[self].token begin[:]
name[self].phab_session assign[=] dictionary[[<ast.Constant object at 0x7da207f03160>], [<ast.Attribute object at 0x7da207f02f50>]]
return[None]
variable[req] assign[=] call[name[self].req_session.post, parameter[binary_operation[constant[%s/api/conduit.connect] <ast.Mod object at 0x7da2590d6920> name[self].host]]]
variable[result] assign[=] call[call[name[req].json, parameter[]]][constant[result]]
name[self].phab_session assign[=] dictionary[[<ast.Constant object at 0x7da207f038e0>, <ast.Constant object at 0x7da207f02290>], [<ast.Subscript object at 0x7da207f027a0>, <ast.Subscript object at 0x7da18f09eda0>]] | keyword[def] identifier[connect] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[token] :
identifier[self] . identifier[phab_session] ={ literal[string] : identifier[self] . identifier[token] }
keyword[return]
identifier[req] = identifier[self] . identifier[req_session] . identifier[post] ( literal[string] % identifier[self] . identifier[host] , identifier[data] ={
literal[string] : identifier[json] . identifier[dumps] ( identifier[self] . identifier[connect_params] ),
literal[string] : literal[string] ,
literal[string] : keyword[True] ,
})
identifier[result] = identifier[req] . identifier[json] ()[ literal[string] ]
identifier[self] . identifier[phab_session] ={
literal[string] : identifier[result] [ literal[string] ],
literal[string] : identifier[result] [ literal[string] ],
} | def connect(self):
"""
Sets up your Phabricator session, it's not necessary to call
this directly
"""
if self.token:
self.phab_session = {'token': self.token}
return # depends on [control=['if'], data=[]]
req = self.req_session.post('%s/api/conduit.connect' % self.host, data={'params': json.dumps(self.connect_params), 'output': 'json', '__conduit__': True})
# Parse out the response (error handling ommitted)
result = req.json()['result']
self.phab_session = {'sessionKey': result['sessionKey'], 'connectionID': result['connectionID']} |
def matches_entry(self, othermatch, strict=False):
'''
Return True if this match object matches another
match object (e.g., a flow table entry).
NB: from OF 1.0 spec:
A match occurs "when a flow entry exactly matches
or is more specific than one" [in a flow_mod command]
(likely to be self in this case).
'''
if strict:
return self == othermatch
attrs = set(self.__slots__)
attrs.discard('_wildcards')
attrs.discard('_nw_src_wildcard')
attrs.discard('_nw_dst_wildcard')
matchtest = []
for a in attrs:
curr = getattr(self, a)
other = getattr(othermatch, a)
if a == '_nw_src' or a == '_nw_dst':
# FIXME: clean this up
wattr = "{}_wildcard".format(a)
otherbits = 32 - getattr(othermatch, wattr)
othernet = ip_network("{}/{}".format(getattr(othermatch, a), otherbits), strict=False)
iswildcarded = curr in othernet
else:
wc = _wildcard_attr_map[a].name
iswildcarded = wc in othermatch.wildcards
matchtest.append(iswildcarded or curr == other)
return all(matchtest) | def function[matches_entry, parameter[self, othermatch, strict]]:
constant[
Return True if this match object matches another
match object (e.g., a flow table entry).
NB: from OF 1.0 spec:
A match occurs "when a flow entry exactly matches
or is more specific than one" [in a flow_mod command]
(likely to be self in this case).
]
if name[strict] begin[:]
return[compare[name[self] equal[==] name[othermatch]]]
variable[attrs] assign[=] call[name[set], parameter[name[self].__slots__]]
call[name[attrs].discard, parameter[constant[_wildcards]]]
call[name[attrs].discard, parameter[constant[_nw_src_wildcard]]]
call[name[attrs].discard, parameter[constant[_nw_dst_wildcard]]]
variable[matchtest] assign[=] list[[]]
for taget[name[a]] in starred[name[attrs]] begin[:]
variable[curr] assign[=] call[name[getattr], parameter[name[self], name[a]]]
variable[other] assign[=] call[name[getattr], parameter[name[othermatch], name[a]]]
if <ast.BoolOp object at 0x7da2044c3790> begin[:]
variable[wattr] assign[=] call[constant[{}_wildcard].format, parameter[name[a]]]
variable[otherbits] assign[=] binary_operation[constant[32] - call[name[getattr], parameter[name[othermatch], name[wattr]]]]
variable[othernet] assign[=] call[name[ip_network], parameter[call[constant[{}/{}].format, parameter[call[name[getattr], parameter[name[othermatch], name[a]]], name[otherbits]]]]]
variable[iswildcarded] assign[=] compare[name[curr] in name[othernet]]
call[name[matchtest].append, parameter[<ast.BoolOp object at 0x7da1b283a3b0>]]
return[call[name[all], parameter[name[matchtest]]]] | keyword[def] identifier[matches_entry] ( identifier[self] , identifier[othermatch] , identifier[strict] = keyword[False] ):
literal[string]
keyword[if] identifier[strict] :
keyword[return] identifier[self] == identifier[othermatch]
identifier[attrs] = identifier[set] ( identifier[self] . identifier[__slots__] )
identifier[attrs] . identifier[discard] ( literal[string] )
identifier[attrs] . identifier[discard] ( literal[string] )
identifier[attrs] . identifier[discard] ( literal[string] )
identifier[matchtest] =[]
keyword[for] identifier[a] keyword[in] identifier[attrs] :
identifier[curr] = identifier[getattr] ( identifier[self] , identifier[a] )
identifier[other] = identifier[getattr] ( identifier[othermatch] , identifier[a] )
keyword[if] identifier[a] == literal[string] keyword[or] identifier[a] == literal[string] :
identifier[wattr] = literal[string] . identifier[format] ( identifier[a] )
identifier[otherbits] = literal[int] - identifier[getattr] ( identifier[othermatch] , identifier[wattr] )
identifier[othernet] = identifier[ip_network] ( literal[string] . identifier[format] ( identifier[getattr] ( identifier[othermatch] , identifier[a] ), identifier[otherbits] ), identifier[strict] = keyword[False] )
identifier[iswildcarded] = identifier[curr] keyword[in] identifier[othernet]
keyword[else] :
identifier[wc] = identifier[_wildcard_attr_map] [ identifier[a] ]. identifier[name]
identifier[iswildcarded] = identifier[wc] keyword[in] identifier[othermatch] . identifier[wildcards]
identifier[matchtest] . identifier[append] ( identifier[iswildcarded] keyword[or] identifier[curr] == identifier[other] )
keyword[return] identifier[all] ( identifier[matchtest] ) | def matches_entry(self, othermatch, strict=False):
"""
Return True if this match object matches another
match object (e.g., a flow table entry).
NB: from OF 1.0 spec:
A match occurs "when a flow entry exactly matches
or is more specific than one" [in a flow_mod command]
(likely to be self in this case).
"""
if strict:
return self == othermatch # depends on [control=['if'], data=[]]
attrs = set(self.__slots__)
attrs.discard('_wildcards')
attrs.discard('_nw_src_wildcard')
attrs.discard('_nw_dst_wildcard')
matchtest = []
for a in attrs:
curr = getattr(self, a)
other = getattr(othermatch, a)
if a == '_nw_src' or a == '_nw_dst':
# FIXME: clean this up
wattr = '{}_wildcard'.format(a)
otherbits = 32 - getattr(othermatch, wattr)
othernet = ip_network('{}/{}'.format(getattr(othermatch, a), otherbits), strict=False)
iswildcarded = curr in othernet # depends on [control=['if'], data=[]]
else:
wc = _wildcard_attr_map[a].name
iswildcarded = wc in othermatch.wildcards
matchtest.append(iswildcarded or curr == other) # depends on [control=['for'], data=['a']]
return all(matchtest) |
def _remove_from_world(self):
"""
Clear all the internal data the token needed while it was part of
the world.
Note that this method doesn't actually remove the token from the
world. That's what World._remove_token() does. This method is just
responsible for setting the internal state of the token being removed.
"""
self.on_remove_from_world()
self._extensions = {}
self._disable_forum_observation()
self._world = None
self._id = None | def function[_remove_from_world, parameter[self]]:
constant[
Clear all the internal data the token needed while it was part of
the world.
Note that this method doesn't actually remove the token from the
world. That's what World._remove_token() does. This method is just
responsible for setting the internal state of the token being removed.
]
call[name[self].on_remove_from_world, parameter[]]
name[self]._extensions assign[=] dictionary[[], []]
call[name[self]._disable_forum_observation, parameter[]]
name[self]._world assign[=] constant[None]
name[self]._id assign[=] constant[None] | keyword[def] identifier[_remove_from_world] ( identifier[self] ):
literal[string]
identifier[self] . identifier[on_remove_from_world] ()
identifier[self] . identifier[_extensions] ={}
identifier[self] . identifier[_disable_forum_observation] ()
identifier[self] . identifier[_world] = keyword[None]
identifier[self] . identifier[_id] = keyword[None] | def _remove_from_world(self):
"""
Clear all the internal data the token needed while it was part of
the world.
Note that this method doesn't actually remove the token from the
world. That's what World._remove_token() does. This method is just
responsible for setting the internal state of the token being removed.
"""
self.on_remove_from_world()
self._extensions = {}
self._disable_forum_observation()
self._world = None
self._id = None |
def real(self, newreal):
"""Setter for the real part.
This method is invoked by ``x.real = other``.
Parameters
----------
newreal : array-like or scalar
Values to be assigned to the real part of this element.
"""
try:
iter(newreal)
except TypeError:
# `newreal` is not iterable, assume it can be assigned to
# all indexed parts
for part in self.parts:
part.real = newreal
return
if self.space.is_power_space:
try:
# Set same value in all parts
for part in self.parts:
part.real = newreal
except (ValueError, TypeError):
# Iterate over all parts and set them separately
for part, new_re in zip(self.parts, newreal):
part.real = new_re
pass
elif len(newreal) == len(self):
for part, new_re in zip(self.parts, newreal):
part.real = new_re
else:
raise ValueError(
'dimensions of the new real part does not match the space, '
'got element {} to set real part of {}'.format(newreal, self)) | def function[real, parameter[self, newreal]]:
constant[Setter for the real part.
This method is invoked by ``x.real = other``.
Parameters
----------
newreal : array-like or scalar
Values to be assigned to the real part of this element.
]
<ast.Try object at 0x7da1b1e98e80>
if name[self].space.is_power_space begin[:]
<ast.Try object at 0x7da1b1e9b310> | keyword[def] identifier[real] ( identifier[self] , identifier[newreal] ):
literal[string]
keyword[try] :
identifier[iter] ( identifier[newreal] )
keyword[except] identifier[TypeError] :
keyword[for] identifier[part] keyword[in] identifier[self] . identifier[parts] :
identifier[part] . identifier[real] = identifier[newreal]
keyword[return]
keyword[if] identifier[self] . identifier[space] . identifier[is_power_space] :
keyword[try] :
keyword[for] identifier[part] keyword[in] identifier[self] . identifier[parts] :
identifier[part] . identifier[real] = identifier[newreal]
keyword[except] ( identifier[ValueError] , identifier[TypeError] ):
keyword[for] identifier[part] , identifier[new_re] keyword[in] identifier[zip] ( identifier[self] . identifier[parts] , identifier[newreal] ):
identifier[part] . identifier[real] = identifier[new_re]
keyword[pass]
keyword[elif] identifier[len] ( identifier[newreal] )== identifier[len] ( identifier[self] ):
keyword[for] identifier[part] , identifier[new_re] keyword[in] identifier[zip] ( identifier[self] . identifier[parts] , identifier[newreal] ):
identifier[part] . identifier[real] = identifier[new_re]
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] . identifier[format] ( identifier[newreal] , identifier[self] )) | def real(self, newreal):
"""Setter for the real part.
This method is invoked by ``x.real = other``.
Parameters
----------
newreal : array-like or scalar
Values to be assigned to the real part of this element.
"""
try:
iter(newreal) # depends on [control=['try'], data=[]]
except TypeError:
# `newreal` is not iterable, assume it can be assigned to
# all indexed parts
for part in self.parts:
part.real = newreal # depends on [control=['for'], data=['part']]
return # depends on [control=['except'], data=[]]
if self.space.is_power_space:
try:
# Set same value in all parts
for part in self.parts:
part.real = newreal # depends on [control=['for'], data=['part']] # depends on [control=['try'], data=[]]
except (ValueError, TypeError):
# Iterate over all parts and set them separately
for (part, new_re) in zip(self.parts, newreal):
part.real = new_re # depends on [control=['for'], data=[]]
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif len(newreal) == len(self):
for (part, new_re) in zip(self.parts, newreal):
part.real = new_re # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
raise ValueError('dimensions of the new real part does not match the space, got element {} to set real part of {}'.format(newreal, self)) |
def update_user(self, auth, username, update):
"""
Updates the user with username ``username`` according to ``update``.
:param auth.Authentication auth: authentication object, must be admin-level
:param str username: username of user to update
:param GogsUserUpdate update: a ``GogsUserUpdate`` object describing the requested update
:return: the updated user
:rtype: GogsUser
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
"""
path = "/admin/users/{}".format(username)
response = self.patch(path, auth=auth, data=update.as_dict())
return GogsUser.from_json(response.json()) | def function[update_user, parameter[self, auth, username, update]]:
constant[
Updates the user with username ``username`` according to ``update``.
:param auth.Authentication auth: authentication object, must be admin-level
:param str username: username of user to update
:param GogsUserUpdate update: a ``GogsUserUpdate`` object describing the requested update
:return: the updated user
:rtype: GogsUser
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
]
variable[path] assign[=] call[constant[/admin/users/{}].format, parameter[name[username]]]
variable[response] assign[=] call[name[self].patch, parameter[name[path]]]
return[call[name[GogsUser].from_json, parameter[call[name[response].json, parameter[]]]]] | keyword[def] identifier[update_user] ( identifier[self] , identifier[auth] , identifier[username] , identifier[update] ):
literal[string]
identifier[path] = literal[string] . identifier[format] ( identifier[username] )
identifier[response] = identifier[self] . identifier[patch] ( identifier[path] , identifier[auth] = identifier[auth] , identifier[data] = identifier[update] . identifier[as_dict] ())
keyword[return] identifier[GogsUser] . identifier[from_json] ( identifier[response] . identifier[json] ()) | def update_user(self, auth, username, update):
"""
Updates the user with username ``username`` according to ``update``.
:param auth.Authentication auth: authentication object, must be admin-level
:param str username: username of user to update
:param GogsUserUpdate update: a ``GogsUserUpdate`` object describing the requested update
:return: the updated user
:rtype: GogsUser
:raises NetworkFailure: if there is an error communicating with the server
:raises ApiFailure: if the request cannot be serviced
"""
path = '/admin/users/{}'.format(username)
response = self.patch(path, auth=auth, data=update.as_dict())
return GogsUser.from_json(response.json()) |
def _parse_raw_data(self):
"""
Parses the incoming data and determines if it is valid. Valid
data gets placed into self._messages
:return: None
"""
if self._START_OF_FRAME in self._raw and self._END_OF_FRAME in self._raw:
while self._raw[0] != self._START_OF_FRAME and len(self._raw) > 0:
self._raw.pop(0)
if self._raw[0] == self._START_OF_FRAME:
self._raw.pop(0)
eof_index = self._raw.index(self._END_OF_FRAME)
raw_message = self._raw[:eof_index]
self._raw = self._raw[eof_index:]
logger.debug('raw message: {}'.format(raw_message))
message = self._remove_esc_chars(raw_message)
logger.debug('message with checksum: {}'.format(message))
expected_checksum = (message[-1] << 8) | message[-2]
logger.debug('checksum: {}'.format(expected_checksum))
message = message[:-2] # checksum bytes
logger.debug('message: {}'.format(message))
sum1, sum2 = self._fletcher16_checksum(message)
calculated_checksum = (sum2 << 8) | sum1
if expected_checksum == calculated_checksum:
message = message[2:] # remove length
logger.debug('valid message received: {}'.format(message))
self._messages.append(message)
else:
logger.warning('invalid message received: {}, discarding'.format(message))
logger.debug('expected checksum: {}, calculated checksum: {}'.format(expected_checksum, calculated_checksum))
# remove any extra bytes at the beginning
try:
while self._raw[0] != self._START_OF_FRAME and len(self._raw) > 0:
self._raw.pop(0)
except IndexError:
pass | def function[_parse_raw_data, parameter[self]]:
constant[
Parses the incoming data and determines if it is valid. Valid
data gets placed into self._messages
:return: None
]
if <ast.BoolOp object at 0x7da1afef8e20> begin[:]
while <ast.BoolOp object at 0x7da1afef8580> begin[:]
call[name[self]._raw.pop, parameter[constant[0]]]
if compare[call[name[self]._raw][constant[0]] equal[==] name[self]._START_OF_FRAME] begin[:]
call[name[self]._raw.pop, parameter[constant[0]]]
variable[eof_index] assign[=] call[name[self]._raw.index, parameter[name[self]._END_OF_FRAME]]
variable[raw_message] assign[=] call[name[self]._raw][<ast.Slice object at 0x7da1afef8940>]
name[self]._raw assign[=] call[name[self]._raw][<ast.Slice object at 0x7da1afef86a0>]
call[name[logger].debug, parameter[call[constant[raw message: {}].format, parameter[name[raw_message]]]]]
variable[message] assign[=] call[name[self]._remove_esc_chars, parameter[name[raw_message]]]
call[name[logger].debug, parameter[call[constant[message with checksum: {}].format, parameter[name[message]]]]]
variable[expected_checksum] assign[=] binary_operation[binary_operation[call[name[message]][<ast.UnaryOp object at 0x7da1afef8280>] <ast.LShift object at 0x7da2590d69e0> constant[8]] <ast.BitOr object at 0x7da2590d6aa0> call[name[message]][<ast.UnaryOp object at 0x7da1afef80a0>]]
call[name[logger].debug, parameter[call[constant[checksum: {}].format, parameter[name[expected_checksum]]]]]
variable[message] assign[=] call[name[message]][<ast.Slice object at 0x7da1afef9330>]
call[name[logger].debug, parameter[call[constant[message: {}].format, parameter[name[message]]]]]
<ast.Tuple object at 0x7da1afef9ba0> assign[=] call[name[self]._fletcher16_checksum, parameter[name[message]]]
variable[calculated_checksum] assign[=] binary_operation[binary_operation[name[sum2] <ast.LShift object at 0x7da2590d69e0> constant[8]] <ast.BitOr object at 0x7da2590d6aa0> name[sum1]]
if compare[name[expected_checksum] equal[==] name[calculated_checksum]] begin[:]
variable[message] assign[=] call[name[message]][<ast.Slice object at 0x7da1afef9cf0>]
call[name[logger].debug, parameter[call[constant[valid message received: {}].format, parameter[name[message]]]]]
call[name[self]._messages.append, parameter[name[message]]]
<ast.Try object at 0x7da1afefa8c0> | keyword[def] identifier[_parse_raw_data] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_START_OF_FRAME] keyword[in] identifier[self] . identifier[_raw] keyword[and] identifier[self] . identifier[_END_OF_FRAME] keyword[in] identifier[self] . identifier[_raw] :
keyword[while] identifier[self] . identifier[_raw] [ literal[int] ]!= identifier[self] . identifier[_START_OF_FRAME] keyword[and] identifier[len] ( identifier[self] . identifier[_raw] )> literal[int] :
identifier[self] . identifier[_raw] . identifier[pop] ( literal[int] )
keyword[if] identifier[self] . identifier[_raw] [ literal[int] ]== identifier[self] . identifier[_START_OF_FRAME] :
identifier[self] . identifier[_raw] . identifier[pop] ( literal[int] )
identifier[eof_index] = identifier[self] . identifier[_raw] . identifier[index] ( identifier[self] . identifier[_END_OF_FRAME] )
identifier[raw_message] = identifier[self] . identifier[_raw] [: identifier[eof_index] ]
identifier[self] . identifier[_raw] = identifier[self] . identifier[_raw] [ identifier[eof_index] :]
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[raw_message] ))
identifier[message] = identifier[self] . identifier[_remove_esc_chars] ( identifier[raw_message] )
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[message] ))
identifier[expected_checksum] =( identifier[message] [- literal[int] ]<< literal[int] )| identifier[message] [- literal[int] ]
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[expected_checksum] ))
identifier[message] = identifier[message] [:- literal[int] ]
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[message] ))
identifier[sum1] , identifier[sum2] = identifier[self] . identifier[_fletcher16_checksum] ( identifier[message] )
identifier[calculated_checksum] =( identifier[sum2] << literal[int] )| identifier[sum1]
keyword[if] identifier[expected_checksum] == identifier[calculated_checksum] :
identifier[message] = identifier[message] [ literal[int] :]
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[message] ))
identifier[self] . identifier[_messages] . identifier[append] ( identifier[message] )
keyword[else] :
identifier[logger] . identifier[warning] ( literal[string] . identifier[format] ( identifier[message] ))
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[expected_checksum] , identifier[calculated_checksum] ))
keyword[try] :
keyword[while] identifier[self] . identifier[_raw] [ literal[int] ]!= identifier[self] . identifier[_START_OF_FRAME] keyword[and] identifier[len] ( identifier[self] . identifier[_raw] )> literal[int] :
identifier[self] . identifier[_raw] . identifier[pop] ( literal[int] )
keyword[except] identifier[IndexError] :
keyword[pass] | def _parse_raw_data(self):
"""
Parses the incoming data and determines if it is valid. Valid
data gets placed into self._messages
:return: None
"""
if self._START_OF_FRAME in self._raw and self._END_OF_FRAME in self._raw:
while self._raw[0] != self._START_OF_FRAME and len(self._raw) > 0:
self._raw.pop(0) # depends on [control=['while'], data=[]]
if self._raw[0] == self._START_OF_FRAME:
self._raw.pop(0) # depends on [control=['if'], data=[]]
eof_index = self._raw.index(self._END_OF_FRAME)
raw_message = self._raw[:eof_index]
self._raw = self._raw[eof_index:]
logger.debug('raw message: {}'.format(raw_message))
message = self._remove_esc_chars(raw_message)
logger.debug('message with checksum: {}'.format(message))
expected_checksum = message[-1] << 8 | message[-2]
logger.debug('checksum: {}'.format(expected_checksum))
message = message[:-2] # checksum bytes
logger.debug('message: {}'.format(message))
(sum1, sum2) = self._fletcher16_checksum(message)
calculated_checksum = sum2 << 8 | sum1
if expected_checksum == calculated_checksum:
message = message[2:] # remove length
logger.debug('valid message received: {}'.format(message))
self._messages.append(message) # depends on [control=['if'], data=[]]
else:
logger.warning('invalid message received: {}, discarding'.format(message))
logger.debug('expected checksum: {}, calculated checksum: {}'.format(expected_checksum, calculated_checksum)) # depends on [control=['if'], data=[]]
# remove any extra bytes at the beginning
try:
while self._raw[0] != self._START_OF_FRAME and len(self._raw) > 0:
self._raw.pop(0) # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
except IndexError:
pass # depends on [control=['except'], data=[]] |
def _extract_alternative_fields(self, fields, default=None, field_type=float):
'''
Extract a value for a list of ordered fields.
Return the value of the first existed field in the list
'''
for field in fields:
if field in self.tags:
if field_type is float:
value = eval_frac(self.tags[field].values[0])
if field_type is str:
value = str(self.tags[field].values)
if field_type is int:
value = int(self.tags[field].values[0])
return value, field
return default, None | def function[_extract_alternative_fields, parameter[self, fields, default, field_type]]:
constant[
Extract a value for a list of ordered fields.
Return the value of the first existed field in the list
]
for taget[name[field]] in starred[name[fields]] begin[:]
if compare[name[field] in name[self].tags] begin[:]
if compare[name[field_type] is name[float]] begin[:]
variable[value] assign[=] call[name[eval_frac], parameter[call[call[name[self].tags][name[field]].values][constant[0]]]]
if compare[name[field_type] is name[str]] begin[:]
variable[value] assign[=] call[name[str], parameter[call[name[self].tags][name[field]].values]]
if compare[name[field_type] is name[int]] begin[:]
variable[value] assign[=] call[name[int], parameter[call[call[name[self].tags][name[field]].values][constant[0]]]]
return[tuple[[<ast.Name object at 0x7da204345ea0>, <ast.Name object at 0x7da204344100>]]]
return[tuple[[<ast.Name object at 0x7da204346650>, <ast.Constant object at 0x7da204346ec0>]]] | keyword[def] identifier[_extract_alternative_fields] ( identifier[self] , identifier[fields] , identifier[default] = keyword[None] , identifier[field_type] = identifier[float] ):
literal[string]
keyword[for] identifier[field] keyword[in] identifier[fields] :
keyword[if] identifier[field] keyword[in] identifier[self] . identifier[tags] :
keyword[if] identifier[field_type] keyword[is] identifier[float] :
identifier[value] = identifier[eval_frac] ( identifier[self] . identifier[tags] [ identifier[field] ]. identifier[values] [ literal[int] ])
keyword[if] identifier[field_type] keyword[is] identifier[str] :
identifier[value] = identifier[str] ( identifier[self] . identifier[tags] [ identifier[field] ]. identifier[values] )
keyword[if] identifier[field_type] keyword[is] identifier[int] :
identifier[value] = identifier[int] ( identifier[self] . identifier[tags] [ identifier[field] ]. identifier[values] [ literal[int] ])
keyword[return] identifier[value] , identifier[field]
keyword[return] identifier[default] , keyword[None] | def _extract_alternative_fields(self, fields, default=None, field_type=float):
"""
Extract a value for a list of ordered fields.
Return the value of the first existed field in the list
"""
for field in fields:
if field in self.tags:
if field_type is float:
value = eval_frac(self.tags[field].values[0]) # depends on [control=['if'], data=[]]
if field_type is str:
value = str(self.tags[field].values) # depends on [control=['if'], data=['str']]
if field_type is int:
value = int(self.tags[field].values[0]) # depends on [control=['if'], data=['int']]
return (value, field) # depends on [control=['if'], data=['field']] # depends on [control=['for'], data=['field']]
return (default, None) |
def write(filename, headers, dcols, data, headerlines=[],
header_char='H', sldir='.', sep=' ', trajectory=False,
download=False):
'''
Method for writeing Ascii files.
Note the attribute name at position i in dcols will be associated
with the column data at index i in data. Also the number of data
columns(in data) must equal the number of data attributes (in dcols)
Also all the lengths of that columns must all be the same.
Parameters
----------
filename : string
The file where this data will be written.
Headers : list
A list of Header strings or if the file being written is of
type trajectory, this is a List of strings that contain header
attributes and their associated values which are seperated by
a '='.
dcols : list
A list of data attributes.
data : list
A list of lists (or of numpy arrays).
headerlines : list, optional
Additional list of strings of header data, only used in
trajectory data Types. The default is [].
header_char : character, optional
The character that indicates a header lines. The default is 'H'.
sldir : string, optional
Where this fill will be written. The default is '.'.
sep : string, optional
What seperates the data column attributes. The default is ' '.
trajectory : boolean, optional
Boolean of if we are writeing a trajectory type file. The
default is False.
download : boolean, optional
If using iPython notebook, do you want a download link for
the file you write?
The default is False.
'''
if sldir.endswith(os.sep):
filename = str(sldir)+str(filename)
else:
filename = str(sldir)+os.sep+str(filename)
tmp=[] #temp variable
lines=[]#list of the data lines
lengthList=[]# list of the longest element (data or column name)
# in each column
if os.path.exists(filename):
print('Warning this method will overwrite '+ filename)
print('Would you like to continue? (y)es or (n)no?')
s = input('--> ')
if s=='Y' or s=='y' or s=='Yes' or s=='yes':
print('Yes selected')
print('Continuing as normal')
else:
print('No Selected')
print('Returning None')
return None
if len(data)!=len(dcols):
print('The number of data columns does not equal the number of Data attributes')
print('returning none')
return None
if trajectory:
sep=' '
for i in range(len(headers)):
if not trajectory:
tmp.append(header_char+' '+headers[i]+'\n')
else:
tmp.append(headers[i]+'\n')
headers=tmp
tmp=''
for i in range(len(data)): #Line length stuff
length=len(dcols[i])
for j in range(len(data[dcols[i]])): #len(data[i]) throws error as type(data)=dict, not list
if len(str(data[dcols[i]][j]))>length: #data[i][j] throws error as type(data)=dict, not list
length=len(str(data[dcols[i]][j]))
lengthList.append(length)
print(lengthList)
tmp=''
tmp1='9'
if trajectory:
tmp='#'
for i in range(len(dcols)):
tmp1=dcols[i]
if not trajectory:
if len(dcols[i]) < lengthList[i]:
j=lengthList[i]-len(dcols[i])
for k in range(j):
tmp1+=' '
tmp+=sep+tmp1
else:
tmp+=' '+dcols[i]
tmp+='\n'
dcols=tmp
tmp=''
for i in range(len(data[0])):
for j in range(len(data)):
tmp1=str(data[j][i])
if len(str(data[j][i])) < lengthList[j]:
l=lengthList[j]-len(str(data[j][i]))
for k in range(l):
tmp1+=' '
tmp+=sep+tmp1
lines.append(tmp+'\n')
tmp=''
f=open(filename,'w')
if not trajectory:
for i in range(len(headers)):
f.write(headers[i])
f.write(dcols)
else:
f.write(dcols)
for i in range(len(headerlines)):
f.write('# '+headerlines[i]+'\n')
for i in range(len(headers)):
f.write(headers[i])
for i in range(len(lines)):
f.write(lines[i])
f.close()
if download:
from IPython.display import FileLink, FileLinks
return FileLink(filename)
else:
return None | def function[write, parameter[filename, headers, dcols, data, headerlines, header_char, sldir, sep, trajectory, download]]:
constant[
Method for writeing Ascii files.
Note the attribute name at position i in dcols will be associated
with the column data at index i in data. Also the number of data
columns(in data) must equal the number of data attributes (in dcols)
Also all the lengths of that columns must all be the same.
Parameters
----------
filename : string
The file where this data will be written.
Headers : list
A list of Header strings or if the file being written is of
type trajectory, this is a List of strings that contain header
attributes and their associated values which are seperated by
a '='.
dcols : list
A list of data attributes.
data : list
A list of lists (or of numpy arrays).
headerlines : list, optional
Additional list of strings of header data, only used in
trajectory data Types. The default is [].
header_char : character, optional
The character that indicates a header lines. The default is 'H'.
sldir : string, optional
Where this fill will be written. The default is '.'.
sep : string, optional
What seperates the data column attributes. The default is ' '.
trajectory : boolean, optional
Boolean of if we are writeing a trajectory type file. The
default is False.
download : boolean, optional
If using iPython notebook, do you want a download link for
the file you write?
The default is False.
]
if call[name[sldir].endswith, parameter[name[os].sep]] begin[:]
variable[filename] assign[=] binary_operation[call[name[str], parameter[name[sldir]]] + call[name[str], parameter[name[filename]]]]
variable[tmp] assign[=] list[[]]
variable[lines] assign[=] list[[]]
variable[lengthList] assign[=] list[[]]
if call[name[os].path.exists, parameter[name[filename]]] begin[:]
call[name[print], parameter[binary_operation[constant[Warning this method will overwrite ] + name[filename]]]]
call[name[print], parameter[constant[Would you like to continue? (y)es or (n)no?]]]
variable[s] assign[=] call[name[input], parameter[constant[--> ]]]
if <ast.BoolOp object at 0x7da20c6aace0> begin[:]
call[name[print], parameter[constant[Yes selected]]]
call[name[print], parameter[constant[Continuing as normal]]]
if compare[call[name[len], parameter[name[data]]] not_equal[!=] call[name[len], parameter[name[dcols]]]] begin[:]
call[name[print], parameter[constant[The number of data columns does not equal the number of Data attributes]]]
call[name[print], parameter[constant[returning none]]]
return[constant[None]]
if name[trajectory] begin[:]
variable[sep] assign[=] constant[ ]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[headers]]]]]] begin[:]
if <ast.UnaryOp object at 0x7da20c6a8790> begin[:]
call[name[tmp].append, parameter[binary_operation[binary_operation[binary_operation[name[header_char] + constant[ ]] + call[name[headers]][name[i]]] + constant[
]]]]
variable[headers] assign[=] name[tmp]
variable[tmp] assign[=] constant[]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[data]]]]]] begin[:]
variable[length] assign[=] call[name[len], parameter[call[name[dcols]][name[i]]]]
for taget[name[j]] in starred[call[name[range], parameter[call[name[len], parameter[call[name[data]][call[name[dcols]][name[i]]]]]]]] begin[:]
if compare[call[name[len], parameter[call[name[str], parameter[call[call[name[data]][call[name[dcols]][name[i]]]][name[j]]]]]] greater[>] name[length]] begin[:]
variable[length] assign[=] call[name[len], parameter[call[name[str], parameter[call[call[name[data]][call[name[dcols]][name[i]]]][name[j]]]]]]
call[name[lengthList].append, parameter[name[length]]]
call[name[print], parameter[name[lengthList]]]
variable[tmp] assign[=] constant[]
variable[tmp1] assign[=] constant[9]
if name[trajectory] begin[:]
variable[tmp] assign[=] constant[#]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[dcols]]]]]] begin[:]
variable[tmp1] assign[=] call[name[dcols]][name[i]]
if <ast.UnaryOp object at 0x7da204960cd0> begin[:]
if compare[call[name[len], parameter[call[name[dcols]][name[i]]]] less[<] call[name[lengthList]][name[i]]] begin[:]
variable[j] assign[=] binary_operation[call[name[lengthList]][name[i]] - call[name[len], parameter[call[name[dcols]][name[i]]]]]
for taget[name[k]] in starred[call[name[range], parameter[name[j]]]] begin[:]
<ast.AugAssign object at 0x7da204963160>
<ast.AugAssign object at 0x7da204960400>
<ast.AugAssign object at 0x7da204962350>
variable[dcols] assign[=] name[tmp]
variable[tmp] assign[=] constant[]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[call[name[data]][constant[0]]]]]]] begin[:]
for taget[name[j]] in starred[call[name[range], parameter[call[name[len], parameter[name[data]]]]]] begin[:]
variable[tmp1] assign[=] call[name[str], parameter[call[call[name[data]][name[j]]][name[i]]]]
if compare[call[name[len], parameter[call[name[str], parameter[call[call[name[data]][name[j]]][name[i]]]]]] less[<] call[name[lengthList]][name[j]]] begin[:]
variable[l] assign[=] binary_operation[call[name[lengthList]][name[j]] - call[name[len], parameter[call[name[str], parameter[call[call[name[data]][name[j]]][name[i]]]]]]]
for taget[name[k]] in starred[call[name[range], parameter[name[l]]]] begin[:]
<ast.AugAssign object at 0x7da204960ca0>
<ast.AugAssign object at 0x7da204963520>
call[name[lines].append, parameter[binary_operation[name[tmp] + constant[
]]]]
variable[tmp] assign[=] constant[]
variable[f] assign[=] call[name[open], parameter[name[filename], constant[w]]]
if <ast.UnaryOp object at 0x7da204961bd0> begin[:]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[headers]]]]]] begin[:]
call[name[f].write, parameter[call[name[headers]][name[i]]]]
call[name[f].write, parameter[name[dcols]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[lines]]]]]] begin[:]
call[name[f].write, parameter[call[name[lines]][name[i]]]]
call[name[f].close, parameter[]]
if name[download] begin[:]
from relative_module[IPython.display] import module[FileLink], module[FileLinks]
return[call[name[FileLink], parameter[name[filename]]]] | keyword[def] identifier[write] ( identifier[filename] , identifier[headers] , identifier[dcols] , identifier[data] , identifier[headerlines] =[],
identifier[header_char] = literal[string] , identifier[sldir] = literal[string] , identifier[sep] = literal[string] , identifier[trajectory] = keyword[False] ,
identifier[download] = keyword[False] ):
literal[string]
keyword[if] identifier[sldir] . identifier[endswith] ( identifier[os] . identifier[sep] ):
identifier[filename] = identifier[str] ( identifier[sldir] )+ identifier[str] ( identifier[filename] )
keyword[else] :
identifier[filename] = identifier[str] ( identifier[sldir] )+ identifier[os] . identifier[sep] + identifier[str] ( identifier[filename] )
identifier[tmp] =[]
identifier[lines] =[]
identifier[lengthList] =[]
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[filename] ):
identifier[print] ( literal[string] + identifier[filename] )
identifier[print] ( literal[string] )
identifier[s] = identifier[input] ( literal[string] )
keyword[if] identifier[s] == literal[string] keyword[or] identifier[s] == literal[string] keyword[or] identifier[s] == literal[string] keyword[or] identifier[s] == literal[string] :
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[else] :
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[return] keyword[None]
keyword[if] identifier[len] ( identifier[data] )!= identifier[len] ( identifier[dcols] ):
identifier[print] ( literal[string] )
identifier[print] ( literal[string] )
keyword[return] keyword[None]
keyword[if] identifier[trajectory] :
identifier[sep] = literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[headers] )):
keyword[if] keyword[not] identifier[trajectory] :
identifier[tmp] . identifier[append] ( identifier[header_char] + literal[string] + identifier[headers] [ identifier[i] ]+ literal[string] )
keyword[else] :
identifier[tmp] . identifier[append] ( identifier[headers] [ identifier[i] ]+ literal[string] )
identifier[headers] = identifier[tmp]
identifier[tmp] = literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[data] )):
identifier[length] = identifier[len] ( identifier[dcols] [ identifier[i] ])
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[len] ( identifier[data] [ identifier[dcols] [ identifier[i] ]])):
keyword[if] identifier[len] ( identifier[str] ( identifier[data] [ identifier[dcols] [ identifier[i] ]][ identifier[j] ]))> identifier[length] :
identifier[length] = identifier[len] ( identifier[str] ( identifier[data] [ identifier[dcols] [ identifier[i] ]][ identifier[j] ]))
identifier[lengthList] . identifier[append] ( identifier[length] )
identifier[print] ( identifier[lengthList] )
identifier[tmp] = literal[string]
identifier[tmp1] = literal[string]
keyword[if] identifier[trajectory] :
identifier[tmp] = literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[dcols] )):
identifier[tmp1] = identifier[dcols] [ identifier[i] ]
keyword[if] keyword[not] identifier[trajectory] :
keyword[if] identifier[len] ( identifier[dcols] [ identifier[i] ])< identifier[lengthList] [ identifier[i] ]:
identifier[j] = identifier[lengthList] [ identifier[i] ]- identifier[len] ( identifier[dcols] [ identifier[i] ])
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[j] ):
identifier[tmp1] += literal[string]
identifier[tmp] += identifier[sep] + identifier[tmp1]
keyword[else] :
identifier[tmp] += literal[string] + identifier[dcols] [ identifier[i] ]
identifier[tmp] += literal[string]
identifier[dcols] = identifier[tmp]
identifier[tmp] = literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[data] [ literal[int] ])):
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[len] ( identifier[data] )):
identifier[tmp1] = identifier[str] ( identifier[data] [ identifier[j] ][ identifier[i] ])
keyword[if] identifier[len] ( identifier[str] ( identifier[data] [ identifier[j] ][ identifier[i] ]))< identifier[lengthList] [ identifier[j] ]:
identifier[l] = identifier[lengthList] [ identifier[j] ]- identifier[len] ( identifier[str] ( identifier[data] [ identifier[j] ][ identifier[i] ]))
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[l] ):
identifier[tmp1] += literal[string]
identifier[tmp] += identifier[sep] + identifier[tmp1]
identifier[lines] . identifier[append] ( identifier[tmp] + literal[string] )
identifier[tmp] = literal[string]
identifier[f] = identifier[open] ( identifier[filename] , literal[string] )
keyword[if] keyword[not] identifier[trajectory] :
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[headers] )):
identifier[f] . identifier[write] ( identifier[headers] [ identifier[i] ])
identifier[f] . identifier[write] ( identifier[dcols] )
keyword[else] :
identifier[f] . identifier[write] ( identifier[dcols] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[headerlines] )):
identifier[f] . identifier[write] ( literal[string] + identifier[headerlines] [ identifier[i] ]+ literal[string] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[headers] )):
identifier[f] . identifier[write] ( identifier[headers] [ identifier[i] ])
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[lines] )):
identifier[f] . identifier[write] ( identifier[lines] [ identifier[i] ])
identifier[f] . identifier[close] ()
keyword[if] identifier[download] :
keyword[from] identifier[IPython] . identifier[display] keyword[import] identifier[FileLink] , identifier[FileLinks]
keyword[return] identifier[FileLink] ( identifier[filename] )
keyword[else] :
keyword[return] keyword[None] | def write(filename, headers, dcols, data, headerlines=[], header_char='H', sldir='.', sep=' ', trajectory=False, download=False):
"""
Method for writeing Ascii files.
Note the attribute name at position i in dcols will be associated
with the column data at index i in data. Also the number of data
columns(in data) must equal the number of data attributes (in dcols)
Also all the lengths of that columns must all be the same.
Parameters
----------
filename : string
The file where this data will be written.
Headers : list
A list of Header strings or if the file being written is of
type trajectory, this is a List of strings that contain header
attributes and their associated values which are seperated by
a '='.
dcols : list
A list of data attributes.
data : list
A list of lists (or of numpy arrays).
headerlines : list, optional
Additional list of strings of header data, only used in
trajectory data Types. The default is [].
header_char : character, optional
The character that indicates a header lines. The default is 'H'.
sldir : string, optional
Where this fill will be written. The default is '.'.
sep : string, optional
What seperates the data column attributes. The default is ' '.
trajectory : boolean, optional
Boolean of if we are writeing a trajectory type file. The
default is False.
download : boolean, optional
If using iPython notebook, do you want a download link for
the file you write?
The default is False.
"""
if sldir.endswith(os.sep):
filename = str(sldir) + str(filename) # depends on [control=['if'], data=[]]
else:
filename = str(sldir) + os.sep + str(filename)
tmp = [] #temp variable
lines = [] #list of the data lines
lengthList = [] # list of the longest element (data or column name)
# in each column
if os.path.exists(filename):
print('Warning this method will overwrite ' + filename)
print('Would you like to continue? (y)es or (n)no?')
s = input('--> ')
if s == 'Y' or s == 'y' or s == 'Yes' or (s == 'yes'):
print('Yes selected')
print('Continuing as normal') # depends on [control=['if'], data=[]]
else:
print('No Selected')
print('Returning None')
return None # depends on [control=['if'], data=[]]
if len(data) != len(dcols):
print('The number of data columns does not equal the number of Data attributes')
print('returning none')
return None # depends on [control=['if'], data=[]]
if trajectory:
sep = ' ' # depends on [control=['if'], data=[]]
for i in range(len(headers)):
if not trajectory:
tmp.append(header_char + ' ' + headers[i] + '\n') # depends on [control=['if'], data=[]]
else:
tmp.append(headers[i] + '\n') # depends on [control=['for'], data=['i']]
headers = tmp
tmp = ''
for i in range(len(data)): #Line length stuff
length = len(dcols[i])
for j in range(len(data[dcols[i]])): #len(data[i]) throws error as type(data)=dict, not list
if len(str(data[dcols[i]][j])) > length: #data[i][j] throws error as type(data)=dict, not list
length = len(str(data[dcols[i]][j])) # depends on [control=['if'], data=['length']] # depends on [control=['for'], data=['j']]
lengthList.append(length) # depends on [control=['for'], data=['i']]
print(lengthList)
tmp = ''
tmp1 = '9'
if trajectory:
tmp = '#' # depends on [control=['if'], data=[]]
for i in range(len(dcols)):
tmp1 = dcols[i]
if not trajectory:
if len(dcols[i]) < lengthList[i]:
j = lengthList[i] - len(dcols[i])
for k in range(j):
tmp1 += ' ' # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
tmp += sep + tmp1 # depends on [control=['if'], data=[]]
else:
tmp += ' ' + dcols[i] # depends on [control=['for'], data=['i']]
tmp += '\n'
dcols = tmp
tmp = ''
for i in range(len(data[0])):
for j in range(len(data)):
tmp1 = str(data[j][i])
if len(str(data[j][i])) < lengthList[j]:
l = lengthList[j] - len(str(data[j][i]))
for k in range(l):
tmp1 += ' ' # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
tmp += sep + tmp1 # depends on [control=['for'], data=['j']]
lines.append(tmp + '\n')
tmp = '' # depends on [control=['for'], data=['i']]
f = open(filename, 'w')
if not trajectory:
for i in range(len(headers)):
f.write(headers[i]) # depends on [control=['for'], data=['i']]
f.write(dcols) # depends on [control=['if'], data=[]]
else:
f.write(dcols)
for i in range(len(headerlines)):
f.write('# ' + headerlines[i] + '\n') # depends on [control=['for'], data=['i']]
for i in range(len(headers)):
f.write(headers[i]) # depends on [control=['for'], data=['i']]
for i in range(len(lines)):
f.write(lines[i]) # depends on [control=['for'], data=['i']]
f.close()
if download:
from IPython.display import FileLink, FileLinks
return FileLink(filename) # depends on [control=['if'], data=[]]
else:
return None |
def remove(self, value, _sa_initiator=None):
"""Remove an item by value, consulting the keyfunc for the key."""
key = self.keyfunc(value)
# Let self[key] raise if key is not in this collection
# testlib.pragma exempt:__ne__
if not self.__contains__(key) or value not in self[key]:
raise sa_exc.InvalidRequestError(
"Can not remove '%s': collection holds '%s' for key '%s'. "
"Possible cause: is the MappedCollection key function "
"based on mutable properties or properties that only obtain "
"values after flush?" %
(value, self[key], key))
self.__getitem__(key, _sa_initiator).remove(value) | def function[remove, parameter[self, value, _sa_initiator]]:
constant[Remove an item by value, consulting the keyfunc for the key.]
variable[key] assign[=] call[name[self].keyfunc, parameter[name[value]]]
if <ast.BoolOp object at 0x7da1b25ed4b0> begin[:]
<ast.Raise object at 0x7da1b25ef940>
call[call[name[self].__getitem__, parameter[name[key], name[_sa_initiator]]].remove, parameter[name[value]]] | keyword[def] identifier[remove] ( identifier[self] , identifier[value] , identifier[_sa_initiator] = keyword[None] ):
literal[string]
identifier[key] = identifier[self] . identifier[keyfunc] ( identifier[value] )
keyword[if] keyword[not] identifier[self] . identifier[__contains__] ( identifier[key] ) keyword[or] identifier[value] keyword[not] keyword[in] identifier[self] [ identifier[key] ]:
keyword[raise] identifier[sa_exc] . identifier[InvalidRequestError] (
literal[string]
literal[string]
literal[string]
literal[string] %
( identifier[value] , identifier[self] [ identifier[key] ], identifier[key] ))
identifier[self] . identifier[__getitem__] ( identifier[key] , identifier[_sa_initiator] ). identifier[remove] ( identifier[value] ) | def remove(self, value, _sa_initiator=None):
"""Remove an item by value, consulting the keyfunc for the key."""
key = self.keyfunc(value)
# Let self[key] raise if key is not in this collection
# testlib.pragma exempt:__ne__
if not self.__contains__(key) or value not in self[key]:
raise sa_exc.InvalidRequestError("Can not remove '%s': collection holds '%s' for key '%s'. Possible cause: is the MappedCollection key function based on mutable properties or properties that only obtain values after flush?" % (value, self[key], key)) # depends on [control=['if'], data=[]]
self.__getitem__(key, _sa_initiator).remove(value) |
def _set_camera_properties(self, msg):
""" Set the camera intrinsics from an info msg. """
focal_x = msg.K[0]
focal_y = msg.K[4]
center_x = msg.K[2]
center_y = msg.K[5]
im_height = msg.height
im_width = msg.width
self._camera_intr = CameraIntrinsics(self._frame, focal_x, focal_y,
center_x, center_y,
height=im_height,
width=im_width) | def function[_set_camera_properties, parameter[self, msg]]:
constant[ Set the camera intrinsics from an info msg. ]
variable[focal_x] assign[=] call[name[msg].K][constant[0]]
variable[focal_y] assign[=] call[name[msg].K][constant[4]]
variable[center_x] assign[=] call[name[msg].K][constant[2]]
variable[center_y] assign[=] call[name[msg].K][constant[5]]
variable[im_height] assign[=] name[msg].height
variable[im_width] assign[=] name[msg].width
name[self]._camera_intr assign[=] call[name[CameraIntrinsics], parameter[name[self]._frame, name[focal_x], name[focal_y], name[center_x], name[center_y]]] | keyword[def] identifier[_set_camera_properties] ( identifier[self] , identifier[msg] ):
literal[string]
identifier[focal_x] = identifier[msg] . identifier[K] [ literal[int] ]
identifier[focal_y] = identifier[msg] . identifier[K] [ literal[int] ]
identifier[center_x] = identifier[msg] . identifier[K] [ literal[int] ]
identifier[center_y] = identifier[msg] . identifier[K] [ literal[int] ]
identifier[im_height] = identifier[msg] . identifier[height]
identifier[im_width] = identifier[msg] . identifier[width]
identifier[self] . identifier[_camera_intr] = identifier[CameraIntrinsics] ( identifier[self] . identifier[_frame] , identifier[focal_x] , identifier[focal_y] ,
identifier[center_x] , identifier[center_y] ,
identifier[height] = identifier[im_height] ,
identifier[width] = identifier[im_width] ) | def _set_camera_properties(self, msg):
""" Set the camera intrinsics from an info msg. """
focal_x = msg.K[0]
focal_y = msg.K[4]
center_x = msg.K[2]
center_y = msg.K[5]
im_height = msg.height
im_width = msg.width
self._camera_intr = CameraIntrinsics(self._frame, focal_x, focal_y, center_x, center_y, height=im_height, width=im_width) |
def _add_file(self, tar, name, contents, mode=DEFAULT_FILE_MODE):
"""
Adds a single file in tarfile instance.
:param tar: tarfile instance
:param name: string representing filename or path
:param contents: string representing file contents
:param mode: string representing file mode, defaults to 644
:returns: None
"""
byte_contents = BytesIO(contents.encode('utf8'))
info = tarfile.TarInfo(name=name)
info.size = len(contents)
# mtime must be 0 or any checksum operation
# will return a different digest even when content is the same
info.mtime = 0
info.type = tarfile.REGTYPE
info.mode = int(mode, 8) # permissions converted to decimal notation
tar.addfile(tarinfo=info, fileobj=byte_contents) | def function[_add_file, parameter[self, tar, name, contents, mode]]:
constant[
Adds a single file in tarfile instance.
:param tar: tarfile instance
:param name: string representing filename or path
:param contents: string representing file contents
:param mode: string representing file mode, defaults to 644
:returns: None
]
variable[byte_contents] assign[=] call[name[BytesIO], parameter[call[name[contents].encode, parameter[constant[utf8]]]]]
variable[info] assign[=] call[name[tarfile].TarInfo, parameter[]]
name[info].size assign[=] call[name[len], parameter[name[contents]]]
name[info].mtime assign[=] constant[0]
name[info].type assign[=] name[tarfile].REGTYPE
name[info].mode assign[=] call[name[int], parameter[name[mode], constant[8]]]
call[name[tar].addfile, parameter[]] | keyword[def] identifier[_add_file] ( identifier[self] , identifier[tar] , identifier[name] , identifier[contents] , identifier[mode] = identifier[DEFAULT_FILE_MODE] ):
literal[string]
identifier[byte_contents] = identifier[BytesIO] ( identifier[contents] . identifier[encode] ( literal[string] ))
identifier[info] = identifier[tarfile] . identifier[TarInfo] ( identifier[name] = identifier[name] )
identifier[info] . identifier[size] = identifier[len] ( identifier[contents] )
identifier[info] . identifier[mtime] = literal[int]
identifier[info] . identifier[type] = identifier[tarfile] . identifier[REGTYPE]
identifier[info] . identifier[mode] = identifier[int] ( identifier[mode] , literal[int] )
identifier[tar] . identifier[addfile] ( identifier[tarinfo] = identifier[info] , identifier[fileobj] = identifier[byte_contents] ) | def _add_file(self, tar, name, contents, mode=DEFAULT_FILE_MODE):
"""
Adds a single file in tarfile instance.
:param tar: tarfile instance
:param name: string representing filename or path
:param contents: string representing file contents
:param mode: string representing file mode, defaults to 644
:returns: None
"""
byte_contents = BytesIO(contents.encode('utf8'))
info = tarfile.TarInfo(name=name)
info.size = len(contents)
# mtime must be 0 or any checksum operation
# will return a different digest even when content is the same
info.mtime = 0
info.type = tarfile.REGTYPE
info.mode = int(mode, 8) # permissions converted to decimal notation
tar.addfile(tarinfo=info, fileobj=byte_contents) |
def __get_ra_index_indices(self):
"""
Returns a list containing indices of the ra_index array, which correspond to the separate trajectory fragments,
i.e., ra_indices[fragment_indices[itraj]] are the ra indices for itraj (plus some offset by
cumulative length)
"""
fragment_indices = []
for idx, cumlen in enumerate(self._cumulative_lengths):
cumlen_prev = self._cumulative_lengths[idx - 1] if idx > 0 else 0
fragment_indices.append([np.argwhere(
np.logical_and(self.ra_indices >= cumlen_prev, self.ra_indices < cumlen)
)])
return fragment_indices | def function[__get_ra_index_indices, parameter[self]]:
constant[
Returns a list containing indices of the ra_index array, which correspond to the separate trajectory fragments,
i.e., ra_indices[fragment_indices[itraj]] are the ra indices for itraj (plus some offset by
cumulative length)
]
variable[fragment_indices] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18f721e40>, <ast.Name object at 0x7da18f720a00>]]] in starred[call[name[enumerate], parameter[name[self]._cumulative_lengths]]] begin[:]
variable[cumlen_prev] assign[=] <ast.IfExp object at 0x7da1b07e3ac0>
call[name[fragment_indices].append, parameter[list[[<ast.Call object at 0x7da18f00cd90>]]]]
return[name[fragment_indices]] | keyword[def] identifier[__get_ra_index_indices] ( identifier[self] ):
literal[string]
identifier[fragment_indices] =[]
keyword[for] identifier[idx] , identifier[cumlen] keyword[in] identifier[enumerate] ( identifier[self] . identifier[_cumulative_lengths] ):
identifier[cumlen_prev] = identifier[self] . identifier[_cumulative_lengths] [ identifier[idx] - literal[int] ] keyword[if] identifier[idx] > literal[int] keyword[else] literal[int]
identifier[fragment_indices] . identifier[append] ([ identifier[np] . identifier[argwhere] (
identifier[np] . identifier[logical_and] ( identifier[self] . identifier[ra_indices] >= identifier[cumlen_prev] , identifier[self] . identifier[ra_indices] < identifier[cumlen] )
)])
keyword[return] identifier[fragment_indices] | def __get_ra_index_indices(self):
"""
Returns a list containing indices of the ra_index array, which correspond to the separate trajectory fragments,
i.e., ra_indices[fragment_indices[itraj]] are the ra indices for itraj (plus some offset by
cumulative length)
"""
fragment_indices = []
for (idx, cumlen) in enumerate(self._cumulative_lengths):
cumlen_prev = self._cumulative_lengths[idx - 1] if idx > 0 else 0
fragment_indices.append([np.argwhere(np.logical_and(self.ra_indices >= cumlen_prev, self.ra_indices < cumlen))]) # depends on [control=['for'], data=[]]
return fragment_indices |
async def settings(dev: Device):
"""Print out all possible settings."""
settings_tree = await dev.get_settings()
for module in settings_tree:
await traverse_settings(dev, module.usage, module.settings) | <ast.AsyncFunctionDef object at 0x7da18bcca6e0> | keyword[async] keyword[def] identifier[settings] ( identifier[dev] : identifier[Device] ):
literal[string]
identifier[settings_tree] = keyword[await] identifier[dev] . identifier[get_settings] ()
keyword[for] identifier[module] keyword[in] identifier[settings_tree] :
keyword[await] identifier[traverse_settings] ( identifier[dev] , identifier[module] . identifier[usage] , identifier[module] . identifier[settings] ) | async def settings(dev: Device):
"""Print out all possible settings."""
settings_tree = await dev.get_settings()
for module in settings_tree:
await traverse_settings(dev, module.usage, module.settings) # depends on [control=['for'], data=['module']] |
def file_size(self, name, force_refresh=False):
"""Returns the size of the file.
For efficiency this operation does not use locking, so may return
inconsistent data. Use it for informational purposes.
"""
uname, version = split_name(name)
t = time.time()
logger.debug(' querying size of %s', name)
try:
if not self.remote_store or (version is not None
and not force_refresh):
try:
if self.local_store and self.local_store.exists(name):
return self.local_store.file_size(name)
except Exception:
if self.remote_store:
logger.warning("Error getting '%s' from local store",
name, exc_info=True)
else:
raise
if self.remote_store:
return self.remote_store.file_size(name)
raise FiletrackerError("File not available: %s" % name)
finally:
logger.debug(' processed %s in %.2fs', name, time.time() - t) | def function[file_size, parameter[self, name, force_refresh]]:
constant[Returns the size of the file.
For efficiency this operation does not use locking, so may return
inconsistent data. Use it for informational purposes.
]
<ast.Tuple object at 0x7da1b25d9a80> assign[=] call[name[split_name], parameter[name[name]]]
variable[t] assign[=] call[name[time].time, parameter[]]
call[name[logger].debug, parameter[constant[ querying size of %s], name[name]]]
<ast.Try object at 0x7da1b2556710> | keyword[def] identifier[file_size] ( identifier[self] , identifier[name] , identifier[force_refresh] = keyword[False] ):
literal[string]
identifier[uname] , identifier[version] = identifier[split_name] ( identifier[name] )
identifier[t] = identifier[time] . identifier[time] ()
identifier[logger] . identifier[debug] ( literal[string] , identifier[name] )
keyword[try] :
keyword[if] keyword[not] identifier[self] . identifier[remote_store] keyword[or] ( identifier[version] keyword[is] keyword[not] keyword[None]
keyword[and] keyword[not] identifier[force_refresh] ):
keyword[try] :
keyword[if] identifier[self] . identifier[local_store] keyword[and] identifier[self] . identifier[local_store] . identifier[exists] ( identifier[name] ):
keyword[return] identifier[self] . identifier[local_store] . identifier[file_size] ( identifier[name] )
keyword[except] identifier[Exception] :
keyword[if] identifier[self] . identifier[remote_store] :
identifier[logger] . identifier[warning] ( literal[string] ,
identifier[name] , identifier[exc_info] = keyword[True] )
keyword[else] :
keyword[raise]
keyword[if] identifier[self] . identifier[remote_store] :
keyword[return] identifier[self] . identifier[remote_store] . identifier[file_size] ( identifier[name] )
keyword[raise] identifier[FiletrackerError] ( literal[string] % identifier[name] )
keyword[finally] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[name] , identifier[time] . identifier[time] ()- identifier[t] ) | def file_size(self, name, force_refresh=False):
"""Returns the size of the file.
For efficiency this operation does not use locking, so may return
inconsistent data. Use it for informational purposes.
"""
(uname, version) = split_name(name)
t = time.time()
logger.debug(' querying size of %s', name)
try:
if not self.remote_store or (version is not None and (not force_refresh)):
try:
if self.local_store and self.local_store.exists(name):
return self.local_store.file_size(name) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except Exception:
if self.remote_store:
logger.warning("Error getting '%s' from local store", name, exc_info=True) # depends on [control=['if'], data=[]]
else:
raise # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if self.remote_store:
return self.remote_store.file_size(name) # depends on [control=['if'], data=[]]
raise FiletrackerError('File not available: %s' % name) # depends on [control=['try'], data=[]]
finally:
logger.debug(' processed %s in %.2fs', name, time.time() - t) |
def contains(self, em):
'''
contains - Check if #em occurs within any of the elements within this list, as themselves or as a child, any
number of levels down.
To check if JUST an element is contained within this list directly, use the "in" operator.
@param em <AdvancedTag> - Element of interest
@return <bool> - True if contained, otherwise False
'''
for node in self:
if node.contains(em):
return True
return False | def function[contains, parameter[self, em]]:
constant[
contains - Check if #em occurs within any of the elements within this list, as themselves or as a child, any
number of levels down.
To check if JUST an element is contained within this list directly, use the "in" operator.
@param em <AdvancedTag> - Element of interest
@return <bool> - True if contained, otherwise False
]
for taget[name[node]] in starred[name[self]] begin[:]
if call[name[node].contains, parameter[name[em]]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[contains] ( identifier[self] , identifier[em] ):
literal[string]
keyword[for] identifier[node] keyword[in] identifier[self] :
keyword[if] identifier[node] . identifier[contains] ( identifier[em] ):
keyword[return] keyword[True]
keyword[return] keyword[False] | def contains(self, em):
"""
contains - Check if #em occurs within any of the elements within this list, as themselves or as a child, any
number of levels down.
To check if JUST an element is contained within this list directly, use the "in" operator.
@param em <AdvancedTag> - Element of interest
@return <bool> - True if contained, otherwise False
"""
for node in self:
if node.contains(em):
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['node']]
return False |
async def periodic_task(self, actor, **kw):
"""Override the :meth:`.Concurrency.periodic_task` to implement
the :class:`.Arbiter` :ref:`periodic task <actor-periodic-task>`.
"""
while True:
interval = 0
#
if actor.started():
# managed actors job
self.manage_actors(actor)
for m in list(self.monitors.values()):
_remove_monitor(m)
interval = MONITOR_TASK_PERIOD
#
actor.event('periodic_task').fire()
if actor.cfg.reload and autoreload.check_changes():
# reload changes
actor.stop(exit_code=autoreload.EXIT_CODE)
if not actor.stopped():
try:
await asyncio.sleep(interval)
except asyncio.CancelledError:
break | <ast.AsyncFunctionDef object at 0x7da18f58f4c0> | keyword[async] keyword[def] identifier[periodic_task] ( identifier[self] , identifier[actor] ,** identifier[kw] ):
literal[string]
keyword[while] keyword[True] :
identifier[interval] = literal[int]
keyword[if] identifier[actor] . identifier[started] ():
identifier[self] . identifier[manage_actors] ( identifier[actor] )
keyword[for] identifier[m] keyword[in] identifier[list] ( identifier[self] . identifier[monitors] . identifier[values] ()):
identifier[_remove_monitor] ( identifier[m] )
identifier[interval] = identifier[MONITOR_TASK_PERIOD]
identifier[actor] . identifier[event] ( literal[string] ). identifier[fire] ()
keyword[if] identifier[actor] . identifier[cfg] . identifier[reload] keyword[and] identifier[autoreload] . identifier[check_changes] ():
identifier[actor] . identifier[stop] ( identifier[exit_code] = identifier[autoreload] . identifier[EXIT_CODE] )
keyword[if] keyword[not] identifier[actor] . identifier[stopped] ():
keyword[try] :
keyword[await] identifier[asyncio] . identifier[sleep] ( identifier[interval] )
keyword[except] identifier[asyncio] . identifier[CancelledError] :
keyword[break] | async def periodic_task(self, actor, **kw):
"""Override the :meth:`.Concurrency.periodic_task` to implement
the :class:`.Arbiter` :ref:`periodic task <actor-periodic-task>`.
"""
while True:
interval = 0
#
if actor.started():
# managed actors job
self.manage_actors(actor)
for m in list(self.monitors.values()):
_remove_monitor(m) # depends on [control=['for'], data=['m']]
interval = MONITOR_TASK_PERIOD
#
actor.event('periodic_task').fire() # depends on [control=['if'], data=[]]
if actor.cfg.reload and autoreload.check_changes():
# reload changes
actor.stop(exit_code=autoreload.EXIT_CODE) # depends on [control=['if'], data=[]]
if not actor.stopped():
try:
await asyncio.sleep(interval) # depends on [control=['try'], data=[]]
except asyncio.CancelledError:
break # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] |
def export_serving(model_path):
"""Export trained model to use it in TensorFlow Serving or cloudML. """
pred_config = PredictConfig(
session_init=get_model_loader(model_path),
model=InferenceOnlyModel(),
input_names=['input_img_bytes'],
output_names=['prediction_img_bytes'])
ModelExporter(pred_config).export_serving('/tmp/exported') | def function[export_serving, parameter[model_path]]:
constant[Export trained model to use it in TensorFlow Serving or cloudML. ]
variable[pred_config] assign[=] call[name[PredictConfig], parameter[]]
call[call[name[ModelExporter], parameter[name[pred_config]]].export_serving, parameter[constant[/tmp/exported]]] | keyword[def] identifier[export_serving] ( identifier[model_path] ):
literal[string]
identifier[pred_config] = identifier[PredictConfig] (
identifier[session_init] = identifier[get_model_loader] ( identifier[model_path] ),
identifier[model] = identifier[InferenceOnlyModel] (),
identifier[input_names] =[ literal[string] ],
identifier[output_names] =[ literal[string] ])
identifier[ModelExporter] ( identifier[pred_config] ). identifier[export_serving] ( literal[string] ) | def export_serving(model_path):
"""Export trained model to use it in TensorFlow Serving or cloudML. """
pred_config = PredictConfig(session_init=get_model_loader(model_path), model=InferenceOnlyModel(), input_names=['input_img_bytes'], output_names=['prediction_img_bytes'])
ModelExporter(pred_config).export_serving('/tmp/exported') |
def points(self, value):
""" Setter for the points property,
Takes care of changing the point_format of the file
(as long as the point format of the new points it compatible with the file version)
Parameters
----------
value: numpy.array of the new points
"""
if value.dtype != self.points.dtype:
raise errors.IncompatibleDataFormat('Cannot set points with a different point format, convert first')
new_point_record = record.PackedPointRecord(value, self.points_data.point_format)
dims.raise_if_version_not_compatible_with_fmt(
new_point_record.point_format.id, self.header.version
)
self.points_data = new_point_record
self.update_header() | def function[points, parameter[self, value]]:
constant[ Setter for the points property,
Takes care of changing the point_format of the file
(as long as the point format of the new points it compatible with the file version)
Parameters
----------
value: numpy.array of the new points
]
if compare[name[value].dtype not_equal[!=] name[self].points.dtype] begin[:]
<ast.Raise object at 0x7da1b05c87f0>
variable[new_point_record] assign[=] call[name[record].PackedPointRecord, parameter[name[value], name[self].points_data.point_format]]
call[name[dims].raise_if_version_not_compatible_with_fmt, parameter[name[new_point_record].point_format.id, name[self].header.version]]
name[self].points_data assign[=] name[new_point_record]
call[name[self].update_header, parameter[]] | keyword[def] identifier[points] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] . identifier[dtype] != identifier[self] . identifier[points] . identifier[dtype] :
keyword[raise] identifier[errors] . identifier[IncompatibleDataFormat] ( literal[string] )
identifier[new_point_record] = identifier[record] . identifier[PackedPointRecord] ( identifier[value] , identifier[self] . identifier[points_data] . identifier[point_format] )
identifier[dims] . identifier[raise_if_version_not_compatible_with_fmt] (
identifier[new_point_record] . identifier[point_format] . identifier[id] , identifier[self] . identifier[header] . identifier[version]
)
identifier[self] . identifier[points_data] = identifier[new_point_record]
identifier[self] . identifier[update_header] () | def points(self, value):
""" Setter for the points property,
Takes care of changing the point_format of the file
(as long as the point format of the new points it compatible with the file version)
Parameters
----------
value: numpy.array of the new points
"""
if value.dtype != self.points.dtype:
raise errors.IncompatibleDataFormat('Cannot set points with a different point format, convert first') # depends on [control=['if'], data=[]]
new_point_record = record.PackedPointRecord(value, self.points_data.point_format)
dims.raise_if_version_not_compatible_with_fmt(new_point_record.point_format.id, self.header.version)
self.points_data = new_point_record
self.update_header() |
def get(self, status_item):
""" queries the database and returns that status of the item.
args:
status_item: the name of the item to check
"""
lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
lg.setLevel(self.log_level)
sparql = '''
SELECT ?loaded
WHERE {{
kdr:{0} kds:{1} ?loaded .
}}'''
value = self.conn.query(sparql=sparql.format(self.group, status_item))
if len(value) > 0 and \
cbool(value[0].get('loaded',{}).get("value",False)):
return True
else:
return False | def function[get, parameter[self, status_item]]:
constant[ queries the database and returns that status of the item.
args:
status_item: the name of the item to check
]
variable[lg] assign[=] call[name[logging].getLogger, parameter[binary_operation[constant[%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da20c76eef0>, <ast.Subscript object at 0x7da204565d50>]]]]]
call[name[lg].setLevel, parameter[name[self].log_level]]
variable[sparql] assign[=] constant[
SELECT ?loaded
WHERE {{
kdr:{0} kds:{1} ?loaded .
}}]
variable[value] assign[=] call[name[self].conn.query, parameter[]]
if <ast.BoolOp object at 0x7da204565840> begin[:]
return[constant[True]] | keyword[def] identifier[get] ( identifier[self] , identifier[status_item] ):
literal[string]
identifier[lg] = identifier[logging] . identifier[getLogger] ( literal[string] %( identifier[self] . identifier[ln] , identifier[inspect] . identifier[stack] ()[ literal[int] ][ literal[int] ]))
identifier[lg] . identifier[setLevel] ( identifier[self] . identifier[log_level] )
identifier[sparql] = literal[string]
identifier[value] = identifier[self] . identifier[conn] . identifier[query] ( identifier[sparql] = identifier[sparql] . identifier[format] ( identifier[self] . identifier[group] , identifier[status_item] ))
keyword[if] identifier[len] ( identifier[value] )> literal[int] keyword[and] identifier[cbool] ( identifier[value] [ literal[int] ]. identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] , keyword[False] )):
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False] | def get(self, status_item):
""" queries the database and returns that status of the item.
args:
status_item: the name of the item to check
"""
lg = logging.getLogger('%s.%s' % (self.ln, inspect.stack()[0][3]))
lg.setLevel(self.log_level)
sparql = '\n SELECT ?loaded\n WHERE {{\n kdr:{0} kds:{1} ?loaded .\n }}'
value = self.conn.query(sparql=sparql.format(self.group, status_item))
if len(value) > 0 and cbool(value[0].get('loaded', {}).get('value', False)):
return True # depends on [control=['if'], data=[]]
else:
return False |
def split_tokens(self):
"""Split string into tokens (lazy)."""
if not self._split_tokens_calculated:
# split into items (whitespace split)
self._split_tokens = self._line_str.split()
self._split_tokens_calculated = True
return self._split_tokens | def function[split_tokens, parameter[self]]:
constant[Split string into tokens (lazy).]
if <ast.UnaryOp object at 0x7da1b1783910> begin[:]
name[self]._split_tokens assign[=] call[name[self]._line_str.split, parameter[]]
name[self]._split_tokens_calculated assign[=] constant[True]
return[name[self]._split_tokens] | keyword[def] identifier[split_tokens] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_split_tokens_calculated] :
identifier[self] . identifier[_split_tokens] = identifier[self] . identifier[_line_str] . identifier[split] ()
identifier[self] . identifier[_split_tokens_calculated] = keyword[True]
keyword[return] identifier[self] . identifier[_split_tokens] | def split_tokens(self):
"""Split string into tokens (lazy)."""
if not self._split_tokens_calculated:
# split into items (whitespace split)
self._split_tokens = self._line_str.split()
self._split_tokens_calculated = True # depends on [control=['if'], data=[]]
return self._split_tokens |
def get_session():
"""Gets a session. If there's no yet, creates one.
:returns: a session
"""
if hasattr(g, 'session'):
return g.session
sess = create_session(bind=current_app.config['DATABASE_ENGINE'])
try:
g.session = sess
except RuntimeError:
pass
return sess | def function[get_session, parameter[]]:
constant[Gets a session. If there's no yet, creates one.
:returns: a session
]
if call[name[hasattr], parameter[name[g], constant[session]]] begin[:]
return[name[g].session]
variable[sess] assign[=] call[name[create_session], parameter[]]
<ast.Try object at 0x7da18c4cdc90>
return[name[sess]] | keyword[def] identifier[get_session] ():
literal[string]
keyword[if] identifier[hasattr] ( identifier[g] , literal[string] ):
keyword[return] identifier[g] . identifier[session]
identifier[sess] = identifier[create_session] ( identifier[bind] = identifier[current_app] . identifier[config] [ literal[string] ])
keyword[try] :
identifier[g] . identifier[session] = identifier[sess]
keyword[except] identifier[RuntimeError] :
keyword[pass]
keyword[return] identifier[sess] | def get_session():
"""Gets a session. If there's no yet, creates one.
:returns: a session
"""
if hasattr(g, 'session'):
return g.session # depends on [control=['if'], data=[]]
sess = create_session(bind=current_app.config['DATABASE_ENGINE'])
try:
g.session = sess # depends on [control=['try'], data=[]]
except RuntimeError:
pass # depends on [control=['except'], data=[]]
return sess |
def transform(self, Z):
"""Transform ArrayRDD's (or DictRDD's 'X' column's) feature->value dicts
to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
Z : ArrayRDD or DictRDD with column 'X' containing Mapping or
iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
Returns
-------
Z : transformed, containing {array, sparse matrix}
Feature vectors; always 2-d.
"""
mapper = self.broadcast(super(SparkDictVectorizer, self).transform,
Z.context)
dtype = sp.spmatrix if self.sparse else np.ndarray
return Z.transform(mapper, column='X', dtype=dtype) | def function[transform, parameter[self, Z]]:
constant[Transform ArrayRDD's (or DictRDD's 'X' column's) feature->value dicts
to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
Z : ArrayRDD or DictRDD with column 'X' containing Mapping or
iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
Returns
-------
Z : transformed, containing {array, sparse matrix}
Feature vectors; always 2-d.
]
variable[mapper] assign[=] call[name[self].broadcast, parameter[call[name[super], parameter[name[SparkDictVectorizer], name[self]]].transform, name[Z].context]]
variable[dtype] assign[=] <ast.IfExp object at 0x7da20c6c49a0>
return[call[name[Z].transform, parameter[name[mapper]]]] | keyword[def] identifier[transform] ( identifier[self] , identifier[Z] ):
literal[string]
identifier[mapper] = identifier[self] . identifier[broadcast] ( identifier[super] ( identifier[SparkDictVectorizer] , identifier[self] ). identifier[transform] ,
identifier[Z] . identifier[context] )
identifier[dtype] = identifier[sp] . identifier[spmatrix] keyword[if] identifier[self] . identifier[sparse] keyword[else] identifier[np] . identifier[ndarray]
keyword[return] identifier[Z] . identifier[transform] ( identifier[mapper] , identifier[column] = literal[string] , identifier[dtype] = identifier[dtype] ) | def transform(self, Z):
"""Transform ArrayRDD's (or DictRDD's 'X' column's) feature->value dicts
to array or sparse matrix.
Named features not encountered during fit or fit_transform will be
silently ignored.
Parameters
----------
Z : ArrayRDD or DictRDD with column 'X' containing Mapping or
iterable over Mappings, length = n_samples
Dict(s) or Mapping(s) from feature names (arbitrary Python
objects) to feature values (strings or convertible to dtype).
Returns
-------
Z : transformed, containing {array, sparse matrix}
Feature vectors; always 2-d.
"""
mapper = self.broadcast(super(SparkDictVectorizer, self).transform, Z.context)
dtype = sp.spmatrix if self.sparse else np.ndarray
return Z.transform(mapper, column='X', dtype=dtype) |
def batch_update(self, outcomes, expparams, resample_interval=5):
r"""
Updates based on a batch of outcomes and experiments, rather than just
one.
:param numpy.ndarray outcomes: An array of outcomes of the experiments that
were performed.
:param numpy.ndarray expparams: Either a scalar or record single-index
array of experiments that were performed.
:param int resample_interval: Controls how often to check whether
:math:`N_{\text{ess}}` falls below the resample threshold.
"""
# TODO: write a faster implementation here using vectorized calls to
# likelihood.
# Check that the number of outcomes and experiments is the same.
n_exps = outcomes.shape[0]
if expparams.shape[0] != n_exps:
raise ValueError("The number of outcomes and experiments must match.")
if len(expparams.shape) == 1:
expparams = expparams[:, None]
# Loop over experiments and update one at a time.
for idx_exp, (outcome, experiment) in enumerate(zip(iter(outcomes), iter(expparams))):
self.update(outcome, experiment, check_for_resample=False)
if (idx_exp + 1) % resample_interval == 0:
self._maybe_resample() | def function[batch_update, parameter[self, outcomes, expparams, resample_interval]]:
constant[
Updates based on a batch of outcomes and experiments, rather than just
one.
:param numpy.ndarray outcomes: An array of outcomes of the experiments that
were performed.
:param numpy.ndarray expparams: Either a scalar or record single-index
array of experiments that were performed.
:param int resample_interval: Controls how often to check whether
:math:`N_{\text{ess}}` falls below the resample threshold.
]
variable[n_exps] assign[=] call[name[outcomes].shape][constant[0]]
if compare[call[name[expparams].shape][constant[0]] not_equal[!=] name[n_exps]] begin[:]
<ast.Raise object at 0x7da1b0ea2590>
if compare[call[name[len], parameter[name[expparams].shape]] equal[==] constant[1]] begin[:]
variable[expparams] assign[=] call[name[expparams]][tuple[[<ast.Slice object at 0x7da1b0ea1f30>, <ast.Constant object at 0x7da1b0ea32b0>]]]
for taget[tuple[[<ast.Name object at 0x7da1b0ea1d20>, <ast.Tuple object at 0x7da1b0ea2830>]]] in starred[call[name[enumerate], parameter[call[name[zip], parameter[call[name[iter], parameter[name[outcomes]]], call[name[iter], parameter[name[expparams]]]]]]]] begin[:]
call[name[self].update, parameter[name[outcome], name[experiment]]]
if compare[binary_operation[binary_operation[name[idx_exp] + constant[1]] <ast.Mod object at 0x7da2590d6920> name[resample_interval]] equal[==] constant[0]] begin[:]
call[name[self]._maybe_resample, parameter[]] | keyword[def] identifier[batch_update] ( identifier[self] , identifier[outcomes] , identifier[expparams] , identifier[resample_interval] = literal[int] ):
literal[string]
identifier[n_exps] = identifier[outcomes] . identifier[shape] [ literal[int] ]
keyword[if] identifier[expparams] . identifier[shape] [ literal[int] ]!= identifier[n_exps] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[len] ( identifier[expparams] . identifier[shape] )== literal[int] :
identifier[expparams] = identifier[expparams] [:, keyword[None] ]
keyword[for] identifier[idx_exp] ,( identifier[outcome] , identifier[experiment] ) keyword[in] identifier[enumerate] ( identifier[zip] ( identifier[iter] ( identifier[outcomes] ), identifier[iter] ( identifier[expparams] ))):
identifier[self] . identifier[update] ( identifier[outcome] , identifier[experiment] , identifier[check_for_resample] = keyword[False] )
keyword[if] ( identifier[idx_exp] + literal[int] )% identifier[resample_interval] == literal[int] :
identifier[self] . identifier[_maybe_resample] () | def batch_update(self, outcomes, expparams, resample_interval=5):
"""
Updates based on a batch of outcomes and experiments, rather than just
one.
:param numpy.ndarray outcomes: An array of outcomes of the experiments that
were performed.
:param numpy.ndarray expparams: Either a scalar or record single-index
array of experiments that were performed.
:param int resample_interval: Controls how often to check whether
:math:`N_{\\text{ess}}` falls below the resample threshold.
"""
# TODO: write a faster implementation here using vectorized calls to
# likelihood.
# Check that the number of outcomes and experiments is the same.
n_exps = outcomes.shape[0]
if expparams.shape[0] != n_exps:
raise ValueError('The number of outcomes and experiments must match.') # depends on [control=['if'], data=[]]
if len(expparams.shape) == 1:
expparams = expparams[:, None] # depends on [control=['if'], data=[]]
# Loop over experiments and update one at a time.
for (idx_exp, (outcome, experiment)) in enumerate(zip(iter(outcomes), iter(expparams))):
self.update(outcome, experiment, check_for_resample=False)
if (idx_exp + 1) % resample_interval == 0:
self._maybe_resample() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] |
def add_command(self, handler, name=None):
"""Add a subcommand `name` which invokes `handler`.
"""
if name is None:
name = docstring_to_subcommand(handler.__doc__)
# TODO: Prevent overwriting 'help'?
self._commands[name] = handler | def function[add_command, parameter[self, handler, name]]:
constant[Add a subcommand `name` which invokes `handler`.
]
if compare[name[name] is constant[None]] begin[:]
variable[name] assign[=] call[name[docstring_to_subcommand], parameter[name[handler].__doc__]]
call[name[self]._commands][name[name]] assign[=] name[handler] | keyword[def] identifier[add_command] ( identifier[self] , identifier[handler] , identifier[name] = keyword[None] ):
literal[string]
keyword[if] identifier[name] keyword[is] keyword[None] :
identifier[name] = identifier[docstring_to_subcommand] ( identifier[handler] . identifier[__doc__] )
identifier[self] . identifier[_commands] [ identifier[name] ]= identifier[handler] | def add_command(self, handler, name=None):
"""Add a subcommand `name` which invokes `handler`.
"""
if name is None:
name = docstring_to_subcommand(handler.__doc__) # depends on [control=['if'], data=['name']]
# TODO: Prevent overwriting 'help'?
self._commands[name] = handler |
async def sessiondestroy(self):
"""
Destroy current session. The session object is discarded and can no longer be used in other requests.
"""
if hasattr(self, 'session') and self.session:
setcookies = await call_api(self.container, 'session', 'destroy', {'sessionid':self.session.id})
self.session.unlock()
del self.session
for nc in setcookies:
self.sent_cookies = [c for c in self.sent_cookies if c.key != nc.key]
self.sent_cookies.append(nc) | <ast.AsyncFunctionDef object at 0x7da1b26ae470> | keyword[async] keyword[def] identifier[sessiondestroy] ( identifier[self] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ) keyword[and] identifier[self] . identifier[session] :
identifier[setcookies] = keyword[await] identifier[call_api] ( identifier[self] . identifier[container] , literal[string] , literal[string] ,{ literal[string] : identifier[self] . identifier[session] . identifier[id] })
identifier[self] . identifier[session] . identifier[unlock] ()
keyword[del] identifier[self] . identifier[session]
keyword[for] identifier[nc] keyword[in] identifier[setcookies] :
identifier[self] . identifier[sent_cookies] =[ identifier[c] keyword[for] identifier[c] keyword[in] identifier[self] . identifier[sent_cookies] keyword[if] identifier[c] . identifier[key] != identifier[nc] . identifier[key] ]
identifier[self] . identifier[sent_cookies] . identifier[append] ( identifier[nc] ) | async def sessiondestroy(self):
"""
Destroy current session. The session object is discarded and can no longer be used in other requests.
"""
if hasattr(self, 'session') and self.session:
setcookies = await call_api(self.container, 'session', 'destroy', {'sessionid': self.session.id})
self.session.unlock()
del self.session
for nc in setcookies:
self.sent_cookies = [c for c in self.sent_cookies if c.key != nc.key]
self.sent_cookies.append(nc) # depends on [control=['for'], data=['nc']] # depends on [control=['if'], data=[]] |
def gatherkeys_missing(args, distro, rlogger, keypath, keytype, dest_dir):
"""
Get or create the keyring from the mon using the mon keyring by keytype and
copy to dest_dir
"""
args_prefix = [
'/usr/bin/ceph',
'--connect-timeout=25',
'--cluster={cluster}'.format(
cluster=args.cluster),
'--name', 'mon.',
'--keyring={keypath}'.format(
keypath=keypath),
]
identity = keytype_identity(keytype)
if identity is None:
raise RuntimeError('Could not find identity for keytype:%s' % keytype)
capabilites = keytype_capabilities(keytype)
if capabilites is None:
raise RuntimeError('Could not find capabilites for keytype:%s' % keytype)
# First try getting the key if it already exists, to handle the case where
# it exists but doesn't match the caps we would pass into get-or-create.
# This is the same behvaior as in newer ceph-create-keys
out, err, code = remoto.process.check(
distro.conn,
args_prefix + ['auth', 'get', identity]
)
if code == errno.ENOENT:
out, err, code = remoto.process.check(
distro.conn,
args_prefix + ['auth', 'get-or-create', identity] + capabilites
)
if code != 0:
rlogger.error(
'"ceph auth get-or-create for keytype %s returned %s',
keytype, code
)
for line in err:
rlogger.debug(line)
return False
keyring_name_local = keytype_path_to(args, keytype)
keyring_path_local = os.path.join(dest_dir, keyring_name_local)
with open(keyring_path_local, 'wb') as f:
for line in out:
f.write(line + b'\n')
return True | def function[gatherkeys_missing, parameter[args, distro, rlogger, keypath, keytype, dest_dir]]:
constant[
Get or create the keyring from the mon using the mon keyring by keytype and
copy to dest_dir
]
variable[args_prefix] assign[=] list[[<ast.Constant object at 0x7da1b16b5900>, <ast.Constant object at 0x7da1b16b70d0>, <ast.Call object at 0x7da1b16b67a0>, <ast.Constant object at 0x7da1b16b6e90>, <ast.Constant object at 0x7da1b16b5540>, <ast.Call object at 0x7da1b16b6b30>]]
variable[identity] assign[=] call[name[keytype_identity], parameter[name[keytype]]]
if compare[name[identity] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b16b6a70>
variable[capabilites] assign[=] call[name[keytype_capabilities], parameter[name[keytype]]]
if compare[name[capabilites] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b16b4bb0>
<ast.Tuple object at 0x7da1b16b6dd0> assign[=] call[name[remoto].process.check, parameter[name[distro].conn, binary_operation[name[args_prefix] + list[[<ast.Constant object at 0x7da1b16b78b0>, <ast.Constant object at 0x7da1b16b60e0>, <ast.Name object at 0x7da1b16b5ba0>]]]]]
if compare[name[code] equal[==] name[errno].ENOENT] begin[:]
<ast.Tuple object at 0x7da1b16b4250> assign[=] call[name[remoto].process.check, parameter[name[distro].conn, binary_operation[binary_operation[name[args_prefix] + list[[<ast.Constant object at 0x7da1b16b6770>, <ast.Constant object at 0x7da1b16b47f0>, <ast.Name object at 0x7da1b16b5c90>]]] + name[capabilites]]]]
if compare[name[code] not_equal[!=] constant[0]] begin[:]
call[name[rlogger].error, parameter[constant["ceph auth get-or-create for keytype %s returned %s], name[keytype], name[code]]]
for taget[name[line]] in starred[name[err]] begin[:]
call[name[rlogger].debug, parameter[name[line]]]
return[constant[False]]
variable[keyring_name_local] assign[=] call[name[keytype_path_to], parameter[name[args], name[keytype]]]
variable[keyring_path_local] assign[=] call[name[os].path.join, parameter[name[dest_dir], name[keyring_name_local]]]
with call[name[open], parameter[name[keyring_path_local], constant[wb]]] begin[:]
for taget[name[line]] in starred[name[out]] begin[:]
call[name[f].write, parameter[binary_operation[name[line] + constant[b'\n']]]]
return[constant[True]] | keyword[def] identifier[gatherkeys_missing] ( identifier[args] , identifier[distro] , identifier[rlogger] , identifier[keypath] , identifier[keytype] , identifier[dest_dir] ):
literal[string]
identifier[args_prefix] =[
literal[string] ,
literal[string] ,
literal[string] . identifier[format] (
identifier[cluster] = identifier[args] . identifier[cluster] ),
literal[string] , literal[string] ,
literal[string] . identifier[format] (
identifier[keypath] = identifier[keypath] ),
]
identifier[identity] = identifier[keytype_identity] ( identifier[keytype] )
keyword[if] identifier[identity] keyword[is] keyword[None] :
keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[keytype] )
identifier[capabilites] = identifier[keytype_capabilities] ( identifier[keytype] )
keyword[if] identifier[capabilites] keyword[is] keyword[None] :
keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[keytype] )
identifier[out] , identifier[err] , identifier[code] = identifier[remoto] . identifier[process] . identifier[check] (
identifier[distro] . identifier[conn] ,
identifier[args_prefix] +[ literal[string] , literal[string] , identifier[identity] ]
)
keyword[if] identifier[code] == identifier[errno] . identifier[ENOENT] :
identifier[out] , identifier[err] , identifier[code] = identifier[remoto] . identifier[process] . identifier[check] (
identifier[distro] . identifier[conn] ,
identifier[args_prefix] +[ literal[string] , literal[string] , identifier[identity] ]+ identifier[capabilites]
)
keyword[if] identifier[code] != literal[int] :
identifier[rlogger] . identifier[error] (
literal[string] ,
identifier[keytype] , identifier[code]
)
keyword[for] identifier[line] keyword[in] identifier[err] :
identifier[rlogger] . identifier[debug] ( identifier[line] )
keyword[return] keyword[False]
identifier[keyring_name_local] = identifier[keytype_path_to] ( identifier[args] , identifier[keytype] )
identifier[keyring_path_local] = identifier[os] . identifier[path] . identifier[join] ( identifier[dest_dir] , identifier[keyring_name_local] )
keyword[with] identifier[open] ( identifier[keyring_path_local] , literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[line] keyword[in] identifier[out] :
identifier[f] . identifier[write] ( identifier[line] + literal[string] )
keyword[return] keyword[True] | def gatherkeys_missing(args, distro, rlogger, keypath, keytype, dest_dir):
"""
Get or create the keyring from the mon using the mon keyring by keytype and
copy to dest_dir
"""
args_prefix = ['/usr/bin/ceph', '--connect-timeout=25', '--cluster={cluster}'.format(cluster=args.cluster), '--name', 'mon.', '--keyring={keypath}'.format(keypath=keypath)]
identity = keytype_identity(keytype)
if identity is None:
raise RuntimeError('Could not find identity for keytype:%s' % keytype) # depends on [control=['if'], data=[]]
capabilites = keytype_capabilities(keytype)
if capabilites is None:
raise RuntimeError('Could not find capabilites for keytype:%s' % keytype) # depends on [control=['if'], data=[]]
# First try getting the key if it already exists, to handle the case where
# it exists but doesn't match the caps we would pass into get-or-create.
# This is the same behvaior as in newer ceph-create-keys
(out, err, code) = remoto.process.check(distro.conn, args_prefix + ['auth', 'get', identity])
if code == errno.ENOENT:
(out, err, code) = remoto.process.check(distro.conn, args_prefix + ['auth', 'get-or-create', identity] + capabilites) # depends on [control=['if'], data=['code']]
if code != 0:
rlogger.error('"ceph auth get-or-create for keytype %s returned %s', keytype, code)
for line in err:
rlogger.debug(line) # depends on [control=['for'], data=['line']]
return False # depends on [control=['if'], data=['code']]
keyring_name_local = keytype_path_to(args, keytype)
keyring_path_local = os.path.join(dest_dir, keyring_name_local)
with open(keyring_path_local, 'wb') as f:
for line in out:
f.write(line + b'\n') # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['f']]
return True |
def get_bonded_structure(self, structure, decorate=False):
"""
Obtain a MoleculeGraph object using this NearNeighbor
class. Requires the optional dependency networkx
(pip install networkx).
Args:
structure: Molecule object.
decorate (bool): whether to annotate site properties
with order parameters using neighbors determined by
this NearNeighbor class
Returns: a pymatgen.analysis.graphs.MoleculeGraph object
"""
# requires optional dependency which is why it's not a top-level import
from pymatgen.analysis.graphs import MoleculeGraph
if decorate:
# Decorate all sites in the underlying structure
# with site properties that provides information on the
# coordination number and coordination pattern based
# on the (current) structure of this graph.
order_parameters = [self.get_local_order_parameters(structure, n)
for n in range(len(structure))]
structure.add_site_property('order_parameters', order_parameters)
mg = MoleculeGraph.with_local_env_strategy(structure, self)
return mg | def function[get_bonded_structure, parameter[self, structure, decorate]]:
constant[
Obtain a MoleculeGraph object using this NearNeighbor
class. Requires the optional dependency networkx
(pip install networkx).
Args:
structure: Molecule object.
decorate (bool): whether to annotate site properties
with order parameters using neighbors determined by
this NearNeighbor class
Returns: a pymatgen.analysis.graphs.MoleculeGraph object
]
from relative_module[pymatgen.analysis.graphs] import module[MoleculeGraph]
if name[decorate] begin[:]
variable[order_parameters] assign[=] <ast.ListComp object at 0x7da2044c3d90>
call[name[structure].add_site_property, parameter[constant[order_parameters], name[order_parameters]]]
variable[mg] assign[=] call[name[MoleculeGraph].with_local_env_strategy, parameter[name[structure], name[self]]]
return[name[mg]] | keyword[def] identifier[get_bonded_structure] ( identifier[self] , identifier[structure] , identifier[decorate] = keyword[False] ):
literal[string]
keyword[from] identifier[pymatgen] . identifier[analysis] . identifier[graphs] keyword[import] identifier[MoleculeGraph]
keyword[if] identifier[decorate] :
identifier[order_parameters] =[ identifier[self] . identifier[get_local_order_parameters] ( identifier[structure] , identifier[n] )
keyword[for] identifier[n] keyword[in] identifier[range] ( identifier[len] ( identifier[structure] ))]
identifier[structure] . identifier[add_site_property] ( literal[string] , identifier[order_parameters] )
identifier[mg] = identifier[MoleculeGraph] . identifier[with_local_env_strategy] ( identifier[structure] , identifier[self] )
keyword[return] identifier[mg] | def get_bonded_structure(self, structure, decorate=False):
"""
Obtain a MoleculeGraph object using this NearNeighbor
class. Requires the optional dependency networkx
(pip install networkx).
Args:
structure: Molecule object.
decorate (bool): whether to annotate site properties
with order parameters using neighbors determined by
this NearNeighbor class
Returns: a pymatgen.analysis.graphs.MoleculeGraph object
"""
# requires optional dependency which is why it's not a top-level import
from pymatgen.analysis.graphs import MoleculeGraph
if decorate:
# Decorate all sites in the underlying structure
# with site properties that provides information on the
# coordination number and coordination pattern based
# on the (current) structure of this graph.
order_parameters = [self.get_local_order_parameters(structure, n) for n in range(len(structure))]
structure.add_site_property('order_parameters', order_parameters) # depends on [control=['if'], data=[]]
mg = MoleculeGraph.with_local_env_strategy(structure, self)
return mg |
def hist1d(data_list,
channel=0,
xscale='logicle',
bins=256,
histtype='stepfilled',
normed_area=False,
normed_height=False,
xlabel=None,
ylabel=None,
xlim=None,
ylim=None,
title=None,
legend=False,
legend_loc='best',
legend_fontsize='medium',
legend_labels=None,
facecolor=None,
edgecolor=None,
savefig=None,
**kwargs):
"""
Plot one 1D histogram from one or more flow cytometry data sets.
Parameters
----------
data_list : FCSData or numpy array or list of FCSData or numpy array
Flow cytometry data to plot.
channel : int or str, optional
Channel from where to take the events to plot. If ndim == 1,
channel is ignored. String channel specifications are only
supported for data types which support string-based indexing
(e.g. FCSData).
xscale : str, optional
Scale of the x axis, either ``linear``, ``log``, or ``logicle``.
bins : int or array_like, optional
If `bins` is an integer, it specifies the number of bins to use.
If `bins` is an array, it specifies the bin edges to use. If `bins`
is None or an integer, `hist1d` will attempt to use
``data.hist_bins`` to generate the bins automatically.
histtype : {'bar', 'barstacked', 'step', 'stepfilled'}, str, optional
Histogram type. Directly passed to ``plt.hist``.
normed_area : bool, optional
Flag indicating whether to normalize the histogram such that the
area under the curve is equal to one. The resulting plot is
equivalent to a probability density function.
normed_height : bool, optional
Flag indicating whether to normalize the histogram such that the
sum of all bins' heights is equal to one. The resulting plot is
equivalent to a probability mass function. `normed_height` is
ignored if `normed_area` is True.
savefig : str, optional
The name of the file to save the figure to. If None, do not save.
Other parameters
----------------
xlabel : str, optional
Label to use on the x axis. If None, attempts to extract channel
name from last data object.
ylabel : str, optional
Label to use on the y axis. If None and ``normed_area==True``, use
'Probability'. If None, ``normed_area==False``, and
``normed_height==True``, use 'Counts (normalized)'. If None,
``normed_area==False``, and ``normed_height==False``, use 'Counts'.
xlim : tuple, optional
Limits for the x axis. If not specified and `bins` exists, use
the lowest and highest values of `bins`.
ylim : tuple, optional
Limits for the y axis.
title : str, optional
Plot title.
legend : bool, optional
Flag specifying whether to include a legend. If `legend` is True,
the legend labels will be taken from `legend_labels` if present,
else they will be taken from ``str(data_list[i])``.
legend_loc : str, optional
Location of the legend.
legend_fontsize : int or str, optional
Font size for the legend.
legend_labels : list, optional
Labels to use for the legend.
facecolor : matplotlib color or list of matplotlib colors, optional
The histogram's facecolor. It can be a list with the same length as
`data_list`. If `edgecolor` and `facecolor` are not specified, and
``histtype == 'stepfilled'``, the facecolor will be taken from the
module-level variable `cmap_default`.
edgecolor : matplotlib color or list of matplotlib colors, optional
The histogram's edgecolor. It can be a list with the same length as
`data_list`. If `edgecolor` and `facecolor` are not specified, and
``histtype == 'step'``, the edgecolor will be taken from the
module-level variable `cmap_default`.
kwargs : dict, optional
Additional parameters passed directly to matploblib's ``hist``.
Notes
-----
`hist1d` calls matplotlib's ``hist`` function for each object in
`data_list`. `hist_type`, the type of histogram to draw, is directly
passed to ``plt.hist``. Additional keyword arguments provided to
`hist1d` are passed directly to ``plt.hist``.
If `normed_area` is set to True, `hist1d` calls ``plt.hist`` with
``density`` (or ``normed``, if matplotlib's version is older than
2.2.0) set to True. There is a bug in matplotlib 2.1.0 that
produces an incorrect plot in these conditions. We do not recommend
using matplotlib 2.1.0 if `normed_area` is expected to be used.
"""
# Using `normed_area` with matplotlib 2.1.0 causes an incorrect plot to be
# produced. Raise warning in these conditions.
if normed_area and packaging.version.parse(matplotlib.__version__) \
== packaging.version.parse('2.1.0'):
warnings.warn("bug in matplotlib 2.1.0 will result in an incorrect plot"
" when normed_area is set to True")
# Convert to list if necessary
if not isinstance(data_list, list):
data_list = [data_list]
# Default colors
if histtype == 'stepfilled':
if facecolor is None:
facecolor = [cmap_default(i)
for i in np.linspace(0, 1, len(data_list))]
if edgecolor is None:
edgecolor = ['black']*len(data_list)
elif histtype == 'step':
if edgecolor is None:
edgecolor = [cmap_default(i)
for i in np.linspace(0, 1, len(data_list))]
# Convert colors to lists if necessary
if not isinstance(edgecolor, list):
edgecolor = [edgecolor]*len(data_list)
if not isinstance(facecolor, list):
facecolor = [facecolor]*len(data_list)
# Collect scale parameters that depend on all elements in data_list
xscale_kwargs = {}
if xscale=='logicle':
t = _LogicleTransform(data=data_list, channel=channel)
xscale_kwargs['T'] = t.T
xscale_kwargs['M'] = t.M
xscale_kwargs['W'] = t.W
# Iterate through data_list
for i, data in enumerate(data_list):
# Extract channel
if data.ndim > 1:
y = data[:, channel]
else:
y = data
# If ``data_plot.hist_bins()`` exists, obtain bin edges from it if
# necessary. If it does not exist, do not modify ``bins``.
if hasattr(y, 'hist_bins') and hasattr(y.hist_bins, '__call__'):
# If bins is None or an integer, get bin edges from
# ``data_plot.hist_bins()``.
if bins is None or isinstance(bins, int):
bins = y.hist_bins(channels=0,
nbins=bins,
scale=xscale,
**xscale_kwargs)
# Decide whether to normalize
if normed_height and not normed_area:
weights = np.ones_like(y)/float(len(y))
else:
weights = None
# Actually plot
if packaging.version.parse(matplotlib.__version__) \
>= packaging.version.parse('2.2'):
if bins is not None:
n, edges, patches = plt.hist(y,
bins,
weights=weights,
density=normed_area,
histtype=histtype,
edgecolor=edgecolor[i],
facecolor=facecolor[i],
**kwargs)
else:
n, edges, patches = plt.hist(y,
weights=weights,
density=normed_area,
histtype=histtype,
edgecolor=edgecolor[i],
facecolor=facecolor[i],
**kwargs)
else:
if bins is not None:
n, edges, patches = plt.hist(y,
bins,
weights=weights,
normed=normed_area,
histtype=histtype,
edgecolor=edgecolor[i],
facecolor=facecolor[i],
**kwargs)
else:
n, edges, patches = plt.hist(y,
weights=weights,
normed=normed_area,
histtype=histtype,
edgecolor=edgecolor[i],
facecolor=facecolor[i],
**kwargs)
# Set scale of x axis
if xscale=='logicle':
plt.gca().set_xscale(xscale, data=data_list, channel=channel)
else:
plt.gca().set_xscale(xscale)
###
# Final configuration
###
# x and y labels
if xlabel is not None:
# Highest priority is user-provided label
plt.xlabel(xlabel)
elif hasattr(y, 'channels'):
# Attempt to use channel name
plt.xlabel(y.channels[0])
if ylabel is not None:
# Highest priority is user-provided label
plt.ylabel(ylabel)
elif normed_area:
plt.ylabel('Probability')
elif normed_height:
plt.ylabel('Counts (normalized)')
else:
# Default is "Counts"
plt.ylabel('Counts')
# x and y limits
if xlim is not None:
# Highest priority is user-provided limits
plt.xlim(xlim)
elif bins is not None:
# Use bins if specified
plt.xlim((edges[0], edges[-1]))
if ylim is not None:
plt.ylim(ylim)
# Title
if title is not None:
plt.title(title)
# Legend
if legend:
if legend_labels is None:
legend_labels = [str(data) for data in data_list]
plt.legend(legend_labels,
loc=legend_loc,
prop={'size': legend_fontsize})
# Save if necessary
if savefig is not None:
plt.tight_layout()
plt.savefig(savefig, dpi=savefig_dpi)
plt.close() | def function[hist1d, parameter[data_list, channel, xscale, bins, histtype, normed_area, normed_height, xlabel, ylabel, xlim, ylim, title, legend, legend_loc, legend_fontsize, legend_labels, facecolor, edgecolor, savefig]]:
constant[
Plot one 1D histogram from one or more flow cytometry data sets.
Parameters
----------
data_list : FCSData or numpy array or list of FCSData or numpy array
Flow cytometry data to plot.
channel : int or str, optional
Channel from where to take the events to plot. If ndim == 1,
channel is ignored. String channel specifications are only
supported for data types which support string-based indexing
(e.g. FCSData).
xscale : str, optional
Scale of the x axis, either ``linear``, ``log``, or ``logicle``.
bins : int or array_like, optional
If `bins` is an integer, it specifies the number of bins to use.
If `bins` is an array, it specifies the bin edges to use. If `bins`
is None or an integer, `hist1d` will attempt to use
``data.hist_bins`` to generate the bins automatically.
histtype : {'bar', 'barstacked', 'step', 'stepfilled'}, str, optional
Histogram type. Directly passed to ``plt.hist``.
normed_area : bool, optional
Flag indicating whether to normalize the histogram such that the
area under the curve is equal to one. The resulting plot is
equivalent to a probability density function.
normed_height : bool, optional
Flag indicating whether to normalize the histogram such that the
sum of all bins' heights is equal to one. The resulting plot is
equivalent to a probability mass function. `normed_height` is
ignored if `normed_area` is True.
savefig : str, optional
The name of the file to save the figure to. If None, do not save.
Other parameters
----------------
xlabel : str, optional
Label to use on the x axis. If None, attempts to extract channel
name from last data object.
ylabel : str, optional
Label to use on the y axis. If None and ``normed_area==True``, use
'Probability'. If None, ``normed_area==False``, and
``normed_height==True``, use 'Counts (normalized)'. If None,
``normed_area==False``, and ``normed_height==False``, use 'Counts'.
xlim : tuple, optional
Limits for the x axis. If not specified and `bins` exists, use
the lowest and highest values of `bins`.
ylim : tuple, optional
Limits for the y axis.
title : str, optional
Plot title.
legend : bool, optional
Flag specifying whether to include a legend. If `legend` is True,
the legend labels will be taken from `legend_labels` if present,
else they will be taken from ``str(data_list[i])``.
legend_loc : str, optional
Location of the legend.
legend_fontsize : int or str, optional
Font size for the legend.
legend_labels : list, optional
Labels to use for the legend.
facecolor : matplotlib color or list of matplotlib colors, optional
The histogram's facecolor. It can be a list with the same length as
`data_list`. If `edgecolor` and `facecolor` are not specified, and
``histtype == 'stepfilled'``, the facecolor will be taken from the
module-level variable `cmap_default`.
edgecolor : matplotlib color or list of matplotlib colors, optional
The histogram's edgecolor. It can be a list with the same length as
`data_list`. If `edgecolor` and `facecolor` are not specified, and
``histtype == 'step'``, the edgecolor will be taken from the
module-level variable `cmap_default`.
kwargs : dict, optional
Additional parameters passed directly to matploblib's ``hist``.
Notes
-----
`hist1d` calls matplotlib's ``hist`` function for each object in
`data_list`. `hist_type`, the type of histogram to draw, is directly
passed to ``plt.hist``. Additional keyword arguments provided to
`hist1d` are passed directly to ``plt.hist``.
If `normed_area` is set to True, `hist1d` calls ``plt.hist`` with
``density`` (or ``normed``, if matplotlib's version is older than
2.2.0) set to True. There is a bug in matplotlib 2.1.0 that
produces an incorrect plot in these conditions. We do not recommend
using matplotlib 2.1.0 if `normed_area` is expected to be used.
]
if <ast.BoolOp object at 0x7da1b1c92440> begin[:]
call[name[warnings].warn, parameter[constant[bug in matplotlib 2.1.0 will result in an incorrect plot when normed_area is set to True]]]
if <ast.UnaryOp object at 0x7da1b1c90070> begin[:]
variable[data_list] assign[=] list[[<ast.Name object at 0x7da1b1c91930>]]
if compare[name[histtype] equal[==] constant[stepfilled]] begin[:]
if compare[name[facecolor] is constant[None]] begin[:]
variable[facecolor] assign[=] <ast.ListComp object at 0x7da1b1c903d0>
if compare[name[edgecolor] is constant[None]] begin[:]
variable[edgecolor] assign[=] binary_operation[list[[<ast.Constant object at 0x7da1b1c90a00>]] * call[name[len], parameter[name[data_list]]]]
if <ast.UnaryOp object at 0x7da1b1c90e50> begin[:]
variable[edgecolor] assign[=] binary_operation[list[[<ast.Name object at 0x7da1b1c90d00>]] * call[name[len], parameter[name[data_list]]]]
if <ast.UnaryOp object at 0x7da1b1c90880> begin[:]
variable[facecolor] assign[=] binary_operation[list[[<ast.Name object at 0x7da1b1c90520>]] * call[name[len], parameter[name[data_list]]]]
variable[xscale_kwargs] assign[=] dictionary[[], []]
if compare[name[xscale] equal[==] constant[logicle]] begin[:]
variable[t] assign[=] call[name[_LogicleTransform], parameter[]]
call[name[xscale_kwargs]][constant[T]] assign[=] name[t].T
call[name[xscale_kwargs]][constant[M]] assign[=] name[t].M
call[name[xscale_kwargs]][constant[W]] assign[=] name[t].W
for taget[tuple[[<ast.Name object at 0x7da1b1c91270>, <ast.Name object at 0x7da1b1c934f0>]]] in starred[call[name[enumerate], parameter[name[data_list]]]] begin[:]
if compare[name[data].ndim greater[>] constant[1]] begin[:]
variable[y] assign[=] call[name[data]][tuple[[<ast.Slice object at 0x7da1b1c93fd0>, <ast.Name object at 0x7da1b1c93f70>]]]
if <ast.BoolOp object at 0x7da1b1c93c10> begin[:]
if <ast.BoolOp object at 0x7da1b1c93af0> begin[:]
variable[bins] assign[=] call[name[y].hist_bins, parameter[]]
if <ast.BoolOp object at 0x7da1b1c938b0> begin[:]
variable[weights] assign[=] binary_operation[call[name[np].ones_like, parameter[name[y]]] / call[name[float], parameter[call[name[len], parameter[name[y]]]]]]
if compare[call[name[packaging].version.parse, parameter[name[matplotlib].__version__]] greater_or_equal[>=] call[name[packaging].version.parse, parameter[constant[2.2]]]] begin[:]
if compare[name[bins] is_not constant[None]] begin[:]
<ast.Tuple object at 0x7da1b1c96770> assign[=] call[name[plt].hist, parameter[name[y], name[bins]]]
if compare[name[xscale] equal[==] constant[logicle]] begin[:]
call[call[name[plt].gca, parameter[]].set_xscale, parameter[name[xscale]]]
if compare[name[xlabel] is_not constant[None]] begin[:]
call[name[plt].xlabel, parameter[name[xlabel]]]
if compare[name[ylabel] is_not constant[None]] begin[:]
call[name[plt].ylabel, parameter[name[ylabel]]]
if compare[name[xlim] is_not constant[None]] begin[:]
call[name[plt].xlim, parameter[name[xlim]]]
if compare[name[ylim] is_not constant[None]] begin[:]
call[name[plt].ylim, parameter[name[ylim]]]
if compare[name[title] is_not constant[None]] begin[:]
call[name[plt].title, parameter[name[title]]]
if name[legend] begin[:]
if compare[name[legend_labels] is constant[None]] begin[:]
variable[legend_labels] assign[=] <ast.ListComp object at 0x7da1b1cb7a00>
call[name[plt].legend, parameter[name[legend_labels]]]
if compare[name[savefig] is_not constant[None]] begin[:]
call[name[plt].tight_layout, parameter[]]
call[name[plt].savefig, parameter[name[savefig]]]
call[name[plt].close, parameter[]] | keyword[def] identifier[hist1d] ( identifier[data_list] ,
identifier[channel] = literal[int] ,
identifier[xscale] = literal[string] ,
identifier[bins] = literal[int] ,
identifier[histtype] = literal[string] ,
identifier[normed_area] = keyword[False] ,
identifier[normed_height] = keyword[False] ,
identifier[xlabel] = keyword[None] ,
identifier[ylabel] = keyword[None] ,
identifier[xlim] = keyword[None] ,
identifier[ylim] = keyword[None] ,
identifier[title] = keyword[None] ,
identifier[legend] = keyword[False] ,
identifier[legend_loc] = literal[string] ,
identifier[legend_fontsize] = literal[string] ,
identifier[legend_labels] = keyword[None] ,
identifier[facecolor] = keyword[None] ,
identifier[edgecolor] = keyword[None] ,
identifier[savefig] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
keyword[if] identifier[normed_area] keyword[and] identifier[packaging] . identifier[version] . identifier[parse] ( identifier[matplotlib] . identifier[__version__] )== identifier[packaging] . identifier[version] . identifier[parse] ( literal[string] ):
identifier[warnings] . identifier[warn] ( literal[string]
literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[data_list] , identifier[list] ):
identifier[data_list] =[ identifier[data_list] ]
keyword[if] identifier[histtype] == literal[string] :
keyword[if] identifier[facecolor] keyword[is] keyword[None] :
identifier[facecolor] =[ identifier[cmap_default] ( identifier[i] )
keyword[for] identifier[i] keyword[in] identifier[np] . identifier[linspace] ( literal[int] , literal[int] , identifier[len] ( identifier[data_list] ))]
keyword[if] identifier[edgecolor] keyword[is] keyword[None] :
identifier[edgecolor] =[ literal[string] ]* identifier[len] ( identifier[data_list] )
keyword[elif] identifier[histtype] == literal[string] :
keyword[if] identifier[edgecolor] keyword[is] keyword[None] :
identifier[edgecolor] =[ identifier[cmap_default] ( identifier[i] )
keyword[for] identifier[i] keyword[in] identifier[np] . identifier[linspace] ( literal[int] , literal[int] , identifier[len] ( identifier[data_list] ))]
keyword[if] keyword[not] identifier[isinstance] ( identifier[edgecolor] , identifier[list] ):
identifier[edgecolor] =[ identifier[edgecolor] ]* identifier[len] ( identifier[data_list] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[facecolor] , identifier[list] ):
identifier[facecolor] =[ identifier[facecolor] ]* identifier[len] ( identifier[data_list] )
identifier[xscale_kwargs] ={}
keyword[if] identifier[xscale] == literal[string] :
identifier[t] = identifier[_LogicleTransform] ( identifier[data] = identifier[data_list] , identifier[channel] = identifier[channel] )
identifier[xscale_kwargs] [ literal[string] ]= identifier[t] . identifier[T]
identifier[xscale_kwargs] [ literal[string] ]= identifier[t] . identifier[M]
identifier[xscale_kwargs] [ literal[string] ]= identifier[t] . identifier[W]
keyword[for] identifier[i] , identifier[data] keyword[in] identifier[enumerate] ( identifier[data_list] ):
keyword[if] identifier[data] . identifier[ndim] > literal[int] :
identifier[y] = identifier[data] [:, identifier[channel] ]
keyword[else] :
identifier[y] = identifier[data]
keyword[if] identifier[hasattr] ( identifier[y] , literal[string] ) keyword[and] identifier[hasattr] ( identifier[y] . identifier[hist_bins] , literal[string] ):
keyword[if] identifier[bins] keyword[is] keyword[None] keyword[or] identifier[isinstance] ( identifier[bins] , identifier[int] ):
identifier[bins] = identifier[y] . identifier[hist_bins] ( identifier[channels] = literal[int] ,
identifier[nbins] = identifier[bins] ,
identifier[scale] = identifier[xscale] ,
** identifier[xscale_kwargs] )
keyword[if] identifier[normed_height] keyword[and] keyword[not] identifier[normed_area] :
identifier[weights] = identifier[np] . identifier[ones_like] ( identifier[y] )/ identifier[float] ( identifier[len] ( identifier[y] ))
keyword[else] :
identifier[weights] = keyword[None]
keyword[if] identifier[packaging] . identifier[version] . identifier[parse] ( identifier[matplotlib] . identifier[__version__] )>= identifier[packaging] . identifier[version] . identifier[parse] ( literal[string] ):
keyword[if] identifier[bins] keyword[is] keyword[not] keyword[None] :
identifier[n] , identifier[edges] , identifier[patches] = identifier[plt] . identifier[hist] ( identifier[y] ,
identifier[bins] ,
identifier[weights] = identifier[weights] ,
identifier[density] = identifier[normed_area] ,
identifier[histtype] = identifier[histtype] ,
identifier[edgecolor] = identifier[edgecolor] [ identifier[i] ],
identifier[facecolor] = identifier[facecolor] [ identifier[i] ],
** identifier[kwargs] )
keyword[else] :
identifier[n] , identifier[edges] , identifier[patches] = identifier[plt] . identifier[hist] ( identifier[y] ,
identifier[weights] = identifier[weights] ,
identifier[density] = identifier[normed_area] ,
identifier[histtype] = identifier[histtype] ,
identifier[edgecolor] = identifier[edgecolor] [ identifier[i] ],
identifier[facecolor] = identifier[facecolor] [ identifier[i] ],
** identifier[kwargs] )
keyword[else] :
keyword[if] identifier[bins] keyword[is] keyword[not] keyword[None] :
identifier[n] , identifier[edges] , identifier[patches] = identifier[plt] . identifier[hist] ( identifier[y] ,
identifier[bins] ,
identifier[weights] = identifier[weights] ,
identifier[normed] = identifier[normed_area] ,
identifier[histtype] = identifier[histtype] ,
identifier[edgecolor] = identifier[edgecolor] [ identifier[i] ],
identifier[facecolor] = identifier[facecolor] [ identifier[i] ],
** identifier[kwargs] )
keyword[else] :
identifier[n] , identifier[edges] , identifier[patches] = identifier[plt] . identifier[hist] ( identifier[y] ,
identifier[weights] = identifier[weights] ,
identifier[normed] = identifier[normed_area] ,
identifier[histtype] = identifier[histtype] ,
identifier[edgecolor] = identifier[edgecolor] [ identifier[i] ],
identifier[facecolor] = identifier[facecolor] [ identifier[i] ],
** identifier[kwargs] )
keyword[if] identifier[xscale] == literal[string] :
identifier[plt] . identifier[gca] (). identifier[set_xscale] ( identifier[xscale] , identifier[data] = identifier[data_list] , identifier[channel] = identifier[channel] )
keyword[else] :
identifier[plt] . identifier[gca] (). identifier[set_xscale] ( identifier[xscale] )
keyword[if] identifier[xlabel] keyword[is] keyword[not] keyword[None] :
identifier[plt] . identifier[xlabel] ( identifier[xlabel] )
keyword[elif] identifier[hasattr] ( identifier[y] , literal[string] ):
identifier[plt] . identifier[xlabel] ( identifier[y] . identifier[channels] [ literal[int] ])
keyword[if] identifier[ylabel] keyword[is] keyword[not] keyword[None] :
identifier[plt] . identifier[ylabel] ( identifier[ylabel] )
keyword[elif] identifier[normed_area] :
identifier[plt] . identifier[ylabel] ( literal[string] )
keyword[elif] identifier[normed_height] :
identifier[plt] . identifier[ylabel] ( literal[string] )
keyword[else] :
identifier[plt] . identifier[ylabel] ( literal[string] )
keyword[if] identifier[xlim] keyword[is] keyword[not] keyword[None] :
identifier[plt] . identifier[xlim] ( identifier[xlim] )
keyword[elif] identifier[bins] keyword[is] keyword[not] keyword[None] :
identifier[plt] . identifier[xlim] (( identifier[edges] [ literal[int] ], identifier[edges] [- literal[int] ]))
keyword[if] identifier[ylim] keyword[is] keyword[not] keyword[None] :
identifier[plt] . identifier[ylim] ( identifier[ylim] )
keyword[if] identifier[title] keyword[is] keyword[not] keyword[None] :
identifier[plt] . identifier[title] ( identifier[title] )
keyword[if] identifier[legend] :
keyword[if] identifier[legend_labels] keyword[is] keyword[None] :
identifier[legend_labels] =[ identifier[str] ( identifier[data] ) keyword[for] identifier[data] keyword[in] identifier[data_list] ]
identifier[plt] . identifier[legend] ( identifier[legend_labels] ,
identifier[loc] = identifier[legend_loc] ,
identifier[prop] ={ literal[string] : identifier[legend_fontsize] })
keyword[if] identifier[savefig] keyword[is] keyword[not] keyword[None] :
identifier[plt] . identifier[tight_layout] ()
identifier[plt] . identifier[savefig] ( identifier[savefig] , identifier[dpi] = identifier[savefig_dpi] )
identifier[plt] . identifier[close] () | def hist1d(data_list, channel=0, xscale='logicle', bins=256, histtype='stepfilled', normed_area=False, normed_height=False, xlabel=None, ylabel=None, xlim=None, ylim=None, title=None, legend=False, legend_loc='best', legend_fontsize='medium', legend_labels=None, facecolor=None, edgecolor=None, savefig=None, **kwargs):
"""
Plot one 1D histogram from one or more flow cytometry data sets.
Parameters
----------
data_list : FCSData or numpy array or list of FCSData or numpy array
Flow cytometry data to plot.
channel : int or str, optional
Channel from where to take the events to plot. If ndim == 1,
channel is ignored. String channel specifications are only
supported for data types which support string-based indexing
(e.g. FCSData).
xscale : str, optional
Scale of the x axis, either ``linear``, ``log``, or ``logicle``.
bins : int or array_like, optional
If `bins` is an integer, it specifies the number of bins to use.
If `bins` is an array, it specifies the bin edges to use. If `bins`
is None or an integer, `hist1d` will attempt to use
``data.hist_bins`` to generate the bins automatically.
histtype : {'bar', 'barstacked', 'step', 'stepfilled'}, str, optional
Histogram type. Directly passed to ``plt.hist``.
normed_area : bool, optional
Flag indicating whether to normalize the histogram such that the
area under the curve is equal to one. The resulting plot is
equivalent to a probability density function.
normed_height : bool, optional
Flag indicating whether to normalize the histogram such that the
sum of all bins' heights is equal to one. The resulting plot is
equivalent to a probability mass function. `normed_height` is
ignored if `normed_area` is True.
savefig : str, optional
The name of the file to save the figure to. If None, do not save.
Other parameters
----------------
xlabel : str, optional
Label to use on the x axis. If None, attempts to extract channel
name from last data object.
ylabel : str, optional
Label to use on the y axis. If None and ``normed_area==True``, use
'Probability'. If None, ``normed_area==False``, and
``normed_height==True``, use 'Counts (normalized)'. If None,
``normed_area==False``, and ``normed_height==False``, use 'Counts'.
xlim : tuple, optional
Limits for the x axis. If not specified and `bins` exists, use
the lowest and highest values of `bins`.
ylim : tuple, optional
Limits for the y axis.
title : str, optional
Plot title.
legend : bool, optional
Flag specifying whether to include a legend. If `legend` is True,
the legend labels will be taken from `legend_labels` if present,
else they will be taken from ``str(data_list[i])``.
legend_loc : str, optional
Location of the legend.
legend_fontsize : int or str, optional
Font size for the legend.
legend_labels : list, optional
Labels to use for the legend.
facecolor : matplotlib color or list of matplotlib colors, optional
The histogram's facecolor. It can be a list with the same length as
`data_list`. If `edgecolor` and `facecolor` are not specified, and
``histtype == 'stepfilled'``, the facecolor will be taken from the
module-level variable `cmap_default`.
edgecolor : matplotlib color or list of matplotlib colors, optional
The histogram's edgecolor. It can be a list with the same length as
`data_list`. If `edgecolor` and `facecolor` are not specified, and
``histtype == 'step'``, the edgecolor will be taken from the
module-level variable `cmap_default`.
kwargs : dict, optional
Additional parameters passed directly to matploblib's ``hist``.
Notes
-----
`hist1d` calls matplotlib's ``hist`` function for each object in
`data_list`. `hist_type`, the type of histogram to draw, is directly
passed to ``plt.hist``. Additional keyword arguments provided to
`hist1d` are passed directly to ``plt.hist``.
If `normed_area` is set to True, `hist1d` calls ``plt.hist`` with
``density`` (or ``normed``, if matplotlib's version is older than
2.2.0) set to True. There is a bug in matplotlib 2.1.0 that
produces an incorrect plot in these conditions. We do not recommend
using matplotlib 2.1.0 if `normed_area` is expected to be used.
"""
# Using `normed_area` with matplotlib 2.1.0 causes an incorrect plot to be
# produced. Raise warning in these conditions.
if normed_area and packaging.version.parse(matplotlib.__version__) == packaging.version.parse('2.1.0'):
warnings.warn('bug in matplotlib 2.1.0 will result in an incorrect plot when normed_area is set to True') # depends on [control=['if'], data=[]]
# Convert to list if necessary
if not isinstance(data_list, list):
data_list = [data_list] # depends on [control=['if'], data=[]]
# Default colors
if histtype == 'stepfilled':
if facecolor is None:
facecolor = [cmap_default(i) for i in np.linspace(0, 1, len(data_list))] # depends on [control=['if'], data=['facecolor']]
if edgecolor is None:
edgecolor = ['black'] * len(data_list) # depends on [control=['if'], data=['edgecolor']] # depends on [control=['if'], data=[]]
elif histtype == 'step':
if edgecolor is None:
edgecolor = [cmap_default(i) for i in np.linspace(0, 1, len(data_list))] # depends on [control=['if'], data=['edgecolor']] # depends on [control=['if'], data=[]]
# Convert colors to lists if necessary
if not isinstance(edgecolor, list):
edgecolor = [edgecolor] * len(data_list) # depends on [control=['if'], data=[]]
if not isinstance(facecolor, list):
facecolor = [facecolor] * len(data_list) # depends on [control=['if'], data=[]]
# Collect scale parameters that depend on all elements in data_list
xscale_kwargs = {}
if xscale == 'logicle':
t = _LogicleTransform(data=data_list, channel=channel)
xscale_kwargs['T'] = t.T
xscale_kwargs['M'] = t.M
xscale_kwargs['W'] = t.W # depends on [control=['if'], data=[]]
# Iterate through data_list
for (i, data) in enumerate(data_list):
# Extract channel
if data.ndim > 1:
y = data[:, channel] # depends on [control=['if'], data=[]]
else:
y = data
# If ``data_plot.hist_bins()`` exists, obtain bin edges from it if
# necessary. If it does not exist, do not modify ``bins``.
if hasattr(y, 'hist_bins') and hasattr(y.hist_bins, '__call__'):
# If bins is None or an integer, get bin edges from
# ``data_plot.hist_bins()``.
if bins is None or isinstance(bins, int):
bins = y.hist_bins(channels=0, nbins=bins, scale=xscale, **xscale_kwargs) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Decide whether to normalize
if normed_height and (not normed_area):
weights = np.ones_like(y) / float(len(y)) # depends on [control=['if'], data=[]]
else:
weights = None
# Actually plot
if packaging.version.parse(matplotlib.__version__) >= packaging.version.parse('2.2'):
if bins is not None:
(n, edges, patches) = plt.hist(y, bins, weights=weights, density=normed_area, histtype=histtype, edgecolor=edgecolor[i], facecolor=facecolor[i], **kwargs) # depends on [control=['if'], data=['bins']]
else:
(n, edges, patches) = plt.hist(y, weights=weights, density=normed_area, histtype=histtype, edgecolor=edgecolor[i], facecolor=facecolor[i], **kwargs) # depends on [control=['if'], data=[]]
elif bins is not None:
(n, edges, patches) = plt.hist(y, bins, weights=weights, normed=normed_area, histtype=histtype, edgecolor=edgecolor[i], facecolor=facecolor[i], **kwargs) # depends on [control=['if'], data=['bins']]
else:
(n, edges, patches) = plt.hist(y, weights=weights, normed=normed_area, histtype=histtype, edgecolor=edgecolor[i], facecolor=facecolor[i], **kwargs) # depends on [control=['for'], data=[]]
# Set scale of x axis
if xscale == 'logicle':
plt.gca().set_xscale(xscale, data=data_list, channel=channel) # depends on [control=['if'], data=['xscale']]
else:
plt.gca().set_xscale(xscale)
###
# Final configuration
###
# x and y labels
if xlabel is not None:
# Highest priority is user-provided label
plt.xlabel(xlabel) # depends on [control=['if'], data=['xlabel']]
elif hasattr(y, 'channels'):
# Attempt to use channel name
plt.xlabel(y.channels[0]) # depends on [control=['if'], data=[]]
if ylabel is not None:
# Highest priority is user-provided label
plt.ylabel(ylabel) # depends on [control=['if'], data=['ylabel']]
elif normed_area:
plt.ylabel('Probability') # depends on [control=['if'], data=[]]
elif normed_height:
plt.ylabel('Counts (normalized)') # depends on [control=['if'], data=[]]
else:
# Default is "Counts"
plt.ylabel('Counts')
# x and y limits
if xlim is not None:
# Highest priority is user-provided limits
plt.xlim(xlim) # depends on [control=['if'], data=['xlim']]
elif bins is not None:
# Use bins if specified
plt.xlim((edges[0], edges[-1])) # depends on [control=['if'], data=[]]
if ylim is not None:
plt.ylim(ylim) # depends on [control=['if'], data=['ylim']]
# Title
if title is not None:
plt.title(title) # depends on [control=['if'], data=['title']]
# Legend
if legend:
if legend_labels is None:
legend_labels = [str(data) for data in data_list] # depends on [control=['if'], data=['legend_labels']]
plt.legend(legend_labels, loc=legend_loc, prop={'size': legend_fontsize}) # depends on [control=['if'], data=[]]
# Save if necessary
if savefig is not None:
plt.tight_layout()
plt.savefig(savefig, dpi=savefig_dpi)
plt.close() # depends on [control=['if'], data=['savefig']] |
def clean_fsbackend(opts):
'''
Clean out the old fileserver backends
'''
# Clear remote fileserver backend caches so they get recreated
for backend in ('git', 'hg', 'svn'):
if backend in opts['fileserver_backend']:
env_cache = os.path.join(
opts['cachedir'],
'{0}fs'.format(backend),
'envs.p'
)
if os.path.isfile(env_cache):
log.debug('Clearing %sfs env cache', backend)
try:
os.remove(env_cache)
except OSError as exc:
log.critical(
'Unable to clear env cache file %s: %s',
env_cache, exc
)
file_lists_dir = os.path.join(
opts['cachedir'],
'file_lists',
'{0}fs'.format(backend)
)
try:
file_lists_caches = os.listdir(file_lists_dir)
except OSError:
continue
for file_lists_cache in fnmatch.filter(file_lists_caches, '*.p'):
cache_file = os.path.join(file_lists_dir, file_lists_cache)
try:
os.remove(cache_file)
except OSError as exc:
log.critical(
'Unable to file_lists cache file %s: %s',
cache_file, exc
) | def function[clean_fsbackend, parameter[opts]]:
constant[
Clean out the old fileserver backends
]
for taget[name[backend]] in starred[tuple[[<ast.Constant object at 0x7da1b21ab310>, <ast.Constant object at 0x7da1b21aada0>, <ast.Constant object at 0x7da1b21ab3a0>]]] begin[:]
if compare[name[backend] in call[name[opts]][constant[fileserver_backend]]] begin[:]
variable[env_cache] assign[=] call[name[os].path.join, parameter[call[name[opts]][constant[cachedir]], call[constant[{0}fs].format, parameter[name[backend]]], constant[envs.p]]]
if call[name[os].path.isfile, parameter[name[env_cache]]] begin[:]
call[name[log].debug, parameter[constant[Clearing %sfs env cache], name[backend]]]
<ast.Try object at 0x7da1b21ab2b0>
variable[file_lists_dir] assign[=] call[name[os].path.join, parameter[call[name[opts]][constant[cachedir]], constant[file_lists], call[constant[{0}fs].format, parameter[name[backend]]]]]
<ast.Try object at 0x7da207f9bc10>
for taget[name[file_lists_cache]] in starred[call[name[fnmatch].filter, parameter[name[file_lists_caches], constant[*.p]]]] begin[:]
variable[cache_file] assign[=] call[name[os].path.join, parameter[name[file_lists_dir], name[file_lists_cache]]]
<ast.Try object at 0x7da207f9ba00> | keyword[def] identifier[clean_fsbackend] ( identifier[opts] ):
literal[string]
keyword[for] identifier[backend] keyword[in] ( literal[string] , literal[string] , literal[string] ):
keyword[if] identifier[backend] keyword[in] identifier[opts] [ literal[string] ]:
identifier[env_cache] = identifier[os] . identifier[path] . identifier[join] (
identifier[opts] [ literal[string] ],
literal[string] . identifier[format] ( identifier[backend] ),
literal[string]
)
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[env_cache] ):
identifier[log] . identifier[debug] ( literal[string] , identifier[backend] )
keyword[try] :
identifier[os] . identifier[remove] ( identifier[env_cache] )
keyword[except] identifier[OSError] keyword[as] identifier[exc] :
identifier[log] . identifier[critical] (
literal[string] ,
identifier[env_cache] , identifier[exc]
)
identifier[file_lists_dir] = identifier[os] . identifier[path] . identifier[join] (
identifier[opts] [ literal[string] ],
literal[string] ,
literal[string] . identifier[format] ( identifier[backend] )
)
keyword[try] :
identifier[file_lists_caches] = identifier[os] . identifier[listdir] ( identifier[file_lists_dir] )
keyword[except] identifier[OSError] :
keyword[continue]
keyword[for] identifier[file_lists_cache] keyword[in] identifier[fnmatch] . identifier[filter] ( identifier[file_lists_caches] , literal[string] ):
identifier[cache_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[file_lists_dir] , identifier[file_lists_cache] )
keyword[try] :
identifier[os] . identifier[remove] ( identifier[cache_file] )
keyword[except] identifier[OSError] keyword[as] identifier[exc] :
identifier[log] . identifier[critical] (
literal[string] ,
identifier[cache_file] , identifier[exc]
) | def clean_fsbackend(opts):
"""
Clean out the old fileserver backends
"""
# Clear remote fileserver backend caches so they get recreated
for backend in ('git', 'hg', 'svn'):
if backend in opts['fileserver_backend']:
env_cache = os.path.join(opts['cachedir'], '{0}fs'.format(backend), 'envs.p')
if os.path.isfile(env_cache):
log.debug('Clearing %sfs env cache', backend)
try:
os.remove(env_cache) # depends on [control=['try'], data=[]]
except OSError as exc:
log.critical('Unable to clear env cache file %s: %s', env_cache, exc) # depends on [control=['except'], data=['exc']] # depends on [control=['if'], data=[]]
file_lists_dir = os.path.join(opts['cachedir'], 'file_lists', '{0}fs'.format(backend))
try:
file_lists_caches = os.listdir(file_lists_dir) # depends on [control=['try'], data=[]]
except OSError:
continue # depends on [control=['except'], data=[]]
for file_lists_cache in fnmatch.filter(file_lists_caches, '*.p'):
cache_file = os.path.join(file_lists_dir, file_lists_cache)
try:
os.remove(cache_file) # depends on [control=['try'], data=[]]
except OSError as exc:
log.critical('Unable to file_lists cache file %s: %s', cache_file, exc) # depends on [control=['except'], data=['exc']] # depends on [control=['for'], data=['file_lists_cache']] # depends on [control=['if'], data=['backend']] # depends on [control=['for'], data=['backend']] |
def trimiquants(self, col: str, inf: float):
"""
Remove superior and inferior quantiles from the dataframe
:param col: column name
:type col: str
:param inf: inferior quantile
:type inf: float
:example: ``ds.trimiquants("Col 1", 0.05)``
"""
try:
self.df = self._trimquants(col, inf, None)
except Exception as e:
self.err(e, self.trimiquants, "Can not trim inferior quantiles") | def function[trimiquants, parameter[self, col, inf]]:
constant[
Remove superior and inferior quantiles from the dataframe
:param col: column name
:type col: str
:param inf: inferior quantile
:type inf: float
:example: ``ds.trimiquants("Col 1", 0.05)``
]
<ast.Try object at 0x7da207f98a00> | keyword[def] identifier[trimiquants] ( identifier[self] , identifier[col] : identifier[str] , identifier[inf] : identifier[float] ):
literal[string]
keyword[try] :
identifier[self] . identifier[df] = identifier[self] . identifier[_trimquants] ( identifier[col] , identifier[inf] , keyword[None] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[err] ( identifier[e] , identifier[self] . identifier[trimiquants] , literal[string] ) | def trimiquants(self, col: str, inf: float):
"""
Remove superior and inferior quantiles from the dataframe
:param col: column name
:type col: str
:param inf: inferior quantile
:type inf: float
:example: ``ds.trimiquants("Col 1", 0.05)``
"""
try:
self.df = self._trimquants(col, inf, None) # depends on [control=['try'], data=[]]
except Exception as e:
self.err(e, self.trimiquants, 'Can not trim inferior quantiles') # depends on [control=['except'], data=['e']] |
def menuitemenabled(self, window_name, object_name):
"""
Verify a menu item is enabled
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name,
LDTP's name convention, or a Unix glob. Or menu heirarchy
@type object_name: string
@return: 1 on success.
@rtype: integer
"""
try:
menu_handle = self._get_menu_handle(window_name, object_name,
False)
if menu_handle.AXEnabled:
return 1
except LdtpServerException:
pass
return 0 | def function[menuitemenabled, parameter[self, window_name, object_name]]:
constant[
Verify a menu item is enabled
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name,
LDTP's name convention, or a Unix glob. Or menu heirarchy
@type object_name: string
@return: 1 on success.
@rtype: integer
]
<ast.Try object at 0x7da18dc05000>
return[constant[0]] | keyword[def] identifier[menuitemenabled] ( identifier[self] , identifier[window_name] , identifier[object_name] ):
literal[string]
keyword[try] :
identifier[menu_handle] = identifier[self] . identifier[_get_menu_handle] ( identifier[window_name] , identifier[object_name] ,
keyword[False] )
keyword[if] identifier[menu_handle] . identifier[AXEnabled] :
keyword[return] literal[int]
keyword[except] identifier[LdtpServerException] :
keyword[pass]
keyword[return] literal[int] | def menuitemenabled(self, window_name, object_name):
"""
Verify a menu item is enabled
@param window_name: Window name to look for, either full name,
LDTP's name convention, or a Unix glob.
@type window_name: string
@param object_name: Object name to look for, either full name,
LDTP's name convention, or a Unix glob. Or menu heirarchy
@type object_name: string
@return: 1 on success.
@rtype: integer
"""
try:
menu_handle = self._get_menu_handle(window_name, object_name, False)
if menu_handle.AXEnabled:
return 1 # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except LdtpServerException:
pass # depends on [control=['except'], data=[]]
return 0 |
def floor(x):
"""
Floor function (round towards negative infinity)
"""
if isinstance(x, UncertainFunction):
mcpts = np.floor(x._mcpts)
return UncertainFunction(mcpts)
else:
return np.floor(x) | def function[floor, parameter[x]]:
constant[
Floor function (round towards negative infinity)
]
if call[name[isinstance], parameter[name[x], name[UncertainFunction]]] begin[:]
variable[mcpts] assign[=] call[name[np].floor, parameter[name[x]._mcpts]]
return[call[name[UncertainFunction], parameter[name[mcpts]]]] | keyword[def] identifier[floor] ( identifier[x] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[x] , identifier[UncertainFunction] ):
identifier[mcpts] = identifier[np] . identifier[floor] ( identifier[x] . identifier[_mcpts] )
keyword[return] identifier[UncertainFunction] ( identifier[mcpts] )
keyword[else] :
keyword[return] identifier[np] . identifier[floor] ( identifier[x] ) | def floor(x):
"""
Floor function (round towards negative infinity)
"""
if isinstance(x, UncertainFunction):
mcpts = np.floor(x._mcpts)
return UncertainFunction(mcpts) # depends on [control=['if'], data=[]]
else:
return np.floor(x) |
def _request_bulk(self, urls: List[str]) -> List:
"""Batch the requests going out."""
if not urls:
raise Exception("No results were found")
session: FuturesSession = FuturesSession(max_workers=len(urls))
self.log.info("Bulk requesting: %d" % len(urls))
futures = [session.get(u, headers=gen_headers(), timeout=3) for u in urls]
done, incomplete = wait(futures)
results: List = list()
for response in done:
try:
results.append(response.result())
except Exception as err:
self.log.warn("Failed result: %s" % err)
return results | def function[_request_bulk, parameter[self, urls]]:
constant[Batch the requests going out.]
if <ast.UnaryOp object at 0x7da1b0a6df30> begin[:]
<ast.Raise object at 0x7da1b0a6df90>
<ast.AnnAssign object at 0x7da1b0a6d900>
call[name[self].log.info, parameter[binary_operation[constant[Bulk requesting: %d] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[urls]]]]]]
variable[futures] assign[=] <ast.ListComp object at 0x7da1b0a6c850>
<ast.Tuple object at 0x7da1b0a6d360> assign[=] call[name[wait], parameter[name[futures]]]
<ast.AnnAssign object at 0x7da1b0a6d090>
for taget[name[response]] in starred[name[done]] begin[:]
<ast.Try object at 0x7da1b0a6c280>
return[name[results]] | keyword[def] identifier[_request_bulk] ( identifier[self] , identifier[urls] : identifier[List] [ identifier[str] ])-> identifier[List] :
literal[string]
keyword[if] keyword[not] identifier[urls] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[session] : identifier[FuturesSession] = identifier[FuturesSession] ( identifier[max_workers] = identifier[len] ( identifier[urls] ))
identifier[self] . identifier[log] . identifier[info] ( literal[string] % identifier[len] ( identifier[urls] ))
identifier[futures] =[ identifier[session] . identifier[get] ( identifier[u] , identifier[headers] = identifier[gen_headers] (), identifier[timeout] = literal[int] ) keyword[for] identifier[u] keyword[in] identifier[urls] ]
identifier[done] , identifier[incomplete] = identifier[wait] ( identifier[futures] )
identifier[results] : identifier[List] = identifier[list] ()
keyword[for] identifier[response] keyword[in] identifier[done] :
keyword[try] :
identifier[results] . identifier[append] ( identifier[response] . identifier[result] ())
keyword[except] identifier[Exception] keyword[as] identifier[err] :
identifier[self] . identifier[log] . identifier[warn] ( literal[string] % identifier[err] )
keyword[return] identifier[results] | def _request_bulk(self, urls: List[str]) -> List:
"""Batch the requests going out."""
if not urls:
raise Exception('No results were found') # depends on [control=['if'], data=[]]
session: FuturesSession = FuturesSession(max_workers=len(urls))
self.log.info('Bulk requesting: %d' % len(urls))
futures = [session.get(u, headers=gen_headers(), timeout=3) for u in urls]
(done, incomplete) = wait(futures)
results: List = list()
for response in done:
try:
results.append(response.result()) # depends on [control=['try'], data=[]]
except Exception as err:
self.log.warn('Failed result: %s' % err) # depends on [control=['except'], data=['err']] # depends on [control=['for'], data=['response']]
return results |
def for_category(self, category, live_only=False):
"""
Returns queryset of EntryTag instances for specified category.
:param category: the Category instance.
:param live_only: flag to include only "live" entries.
:rtype: django.db.models.query.QuerySet.
"""
filters = {'tag': category.tag}
if live_only:
filters.update({'entry__live': True})
return self.filter(**filters) | def function[for_category, parameter[self, category, live_only]]:
constant[
Returns queryset of EntryTag instances for specified category.
:param category: the Category instance.
:param live_only: flag to include only "live" entries.
:rtype: django.db.models.query.QuerySet.
]
variable[filters] assign[=] dictionary[[<ast.Constant object at 0x7da1b10e6cb0>], [<ast.Attribute object at 0x7da1b10e7f10>]]
if name[live_only] begin[:]
call[name[filters].update, parameter[dictionary[[<ast.Constant object at 0x7da1b10e67d0>], [<ast.Constant object at 0x7da1b10e47c0>]]]]
return[call[name[self].filter, parameter[]]] | keyword[def] identifier[for_category] ( identifier[self] , identifier[category] , identifier[live_only] = keyword[False] ):
literal[string]
identifier[filters] ={ literal[string] : identifier[category] . identifier[tag] }
keyword[if] identifier[live_only] :
identifier[filters] . identifier[update] ({ literal[string] : keyword[True] })
keyword[return] identifier[self] . identifier[filter] (** identifier[filters] ) | def for_category(self, category, live_only=False):
"""
Returns queryset of EntryTag instances for specified category.
:param category: the Category instance.
:param live_only: flag to include only "live" entries.
:rtype: django.db.models.query.QuerySet.
"""
filters = {'tag': category.tag}
if live_only:
filters.update({'entry__live': True}) # depends on [control=['if'], data=[]]
return self.filter(**filters) |
def revoke_oauth_credential(self):
"""Revoke the session's OAuth 2.0 credentials."""
if self.session.token_type == auth.SERVER_TOKEN_TYPE:
return
credential = self.session.oauth2credential
revoke_access_token(credential) | def function[revoke_oauth_credential, parameter[self]]:
constant[Revoke the session's OAuth 2.0 credentials.]
if compare[name[self].session.token_type equal[==] name[auth].SERVER_TOKEN_TYPE] begin[:]
return[None]
variable[credential] assign[=] name[self].session.oauth2credential
call[name[revoke_access_token], parameter[name[credential]]] | keyword[def] identifier[revoke_oauth_credential] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[session] . identifier[token_type] == identifier[auth] . identifier[SERVER_TOKEN_TYPE] :
keyword[return]
identifier[credential] = identifier[self] . identifier[session] . identifier[oauth2credential]
identifier[revoke_access_token] ( identifier[credential] ) | def revoke_oauth_credential(self):
"""Revoke the session's OAuth 2.0 credentials."""
if self.session.token_type == auth.SERVER_TOKEN_TYPE:
return # depends on [control=['if'], data=[]]
credential = self.session.oauth2credential
revoke_access_token(credential) |
def add_media(dest, media):
"""
Optimized version of django.forms.Media.__add__() that doesn't create new objects.
"""
if django.VERSION >= (2, 2):
dest._css_lists += media._css_lists
dest._js_lists += media._js_lists
elif django.VERSION >= (2, 0):
combined = dest + media
dest._css = combined._css
dest._js = combined._js
else:
dest.add_css(media._css)
dest.add_js(media._js) | def function[add_media, parameter[dest, media]]:
constant[
Optimized version of django.forms.Media.__add__() that doesn't create new objects.
]
if compare[name[django].VERSION greater_or_equal[>=] tuple[[<ast.Constant object at 0x7da1b11c3d60>, <ast.Constant object at 0x7da1b11c2b00>]]] begin[:]
<ast.AugAssign object at 0x7da1b11c3e50>
<ast.AugAssign object at 0x7da1b11c0070> | keyword[def] identifier[add_media] ( identifier[dest] , identifier[media] ):
literal[string]
keyword[if] identifier[django] . identifier[VERSION] >=( literal[int] , literal[int] ):
identifier[dest] . identifier[_css_lists] += identifier[media] . identifier[_css_lists]
identifier[dest] . identifier[_js_lists] += identifier[media] . identifier[_js_lists]
keyword[elif] identifier[django] . identifier[VERSION] >=( literal[int] , literal[int] ):
identifier[combined] = identifier[dest] + identifier[media]
identifier[dest] . identifier[_css] = identifier[combined] . identifier[_css]
identifier[dest] . identifier[_js] = identifier[combined] . identifier[_js]
keyword[else] :
identifier[dest] . identifier[add_css] ( identifier[media] . identifier[_css] )
identifier[dest] . identifier[add_js] ( identifier[media] . identifier[_js] ) | def add_media(dest, media):
"""
Optimized version of django.forms.Media.__add__() that doesn't create new objects.
"""
if django.VERSION >= (2, 2):
dest._css_lists += media._css_lists
dest._js_lists += media._js_lists # depends on [control=['if'], data=[]]
elif django.VERSION >= (2, 0):
combined = dest + media
dest._css = combined._css
dest._js = combined._js # depends on [control=['if'], data=[]]
else:
dest.add_css(media._css)
dest.add_js(media._js) |
def require_remote_ref_path(func):
"""A decorator raising a TypeError if we are not a valid remote, based on the path"""
def wrapper(self, *args):
if not self.is_remote():
raise ValueError("ref path does not point to a remote reference: %s" % self.path)
return func(self, *args)
# END wrapper
wrapper.__name__ = func.__name__
return wrapper | def function[require_remote_ref_path, parameter[func]]:
constant[A decorator raising a TypeError if we are not a valid remote, based on the path]
def function[wrapper, parameter[self]]:
if <ast.UnaryOp object at 0x7da2047e97b0> begin[:]
<ast.Raise object at 0x7da2047e8eb0>
return[call[name[func], parameter[name[self], <ast.Starred object at 0x7da2047e9300>]]]
name[wrapper].__name__ assign[=] name[func].__name__
return[name[wrapper]] | keyword[def] identifier[require_remote_ref_path] ( identifier[func] ):
literal[string]
keyword[def] identifier[wrapper] ( identifier[self] ,* identifier[args] ):
keyword[if] keyword[not] identifier[self] . identifier[is_remote] ():
keyword[raise] identifier[ValueError] ( literal[string] % identifier[self] . identifier[path] )
keyword[return] identifier[func] ( identifier[self] ,* identifier[args] )
identifier[wrapper] . identifier[__name__] = identifier[func] . identifier[__name__]
keyword[return] identifier[wrapper] | def require_remote_ref_path(func):
"""A decorator raising a TypeError if we are not a valid remote, based on the path"""
def wrapper(self, *args):
if not self.is_remote():
raise ValueError('ref path does not point to a remote reference: %s' % self.path) # depends on [control=['if'], data=[]]
return func(self, *args)
# END wrapper
wrapper.__name__ = func.__name__
return wrapper |
def from_string(cls, public_key):
"""Construct an Verifier instance from a public key or public
certificate string.
Args:
public_key (Union[str, bytes]): The public key in PEM format or the
x509 public key certificate.
Returns:
Verifier: The constructed verifier.
Raises:
ValueError: If the public key can't be parsed.
"""
public_key_data = _helpers.to_bytes(public_key)
if _CERTIFICATE_MARKER in public_key_data:
cert = cryptography.x509.load_pem_x509_certificate(
public_key_data, _BACKEND)
pubkey = cert.public_key()
else:
pubkey = serialization.load_pem_public_key(
public_key_data, _BACKEND)
return cls(pubkey) | def function[from_string, parameter[cls, public_key]]:
constant[Construct an Verifier instance from a public key or public
certificate string.
Args:
public_key (Union[str, bytes]): The public key in PEM format or the
x509 public key certificate.
Returns:
Verifier: The constructed verifier.
Raises:
ValueError: If the public key can't be parsed.
]
variable[public_key_data] assign[=] call[name[_helpers].to_bytes, parameter[name[public_key]]]
if compare[name[_CERTIFICATE_MARKER] in name[public_key_data]] begin[:]
variable[cert] assign[=] call[name[cryptography].x509.load_pem_x509_certificate, parameter[name[public_key_data], name[_BACKEND]]]
variable[pubkey] assign[=] call[name[cert].public_key, parameter[]]
return[call[name[cls], parameter[name[pubkey]]]] | keyword[def] identifier[from_string] ( identifier[cls] , identifier[public_key] ):
literal[string]
identifier[public_key_data] = identifier[_helpers] . identifier[to_bytes] ( identifier[public_key] )
keyword[if] identifier[_CERTIFICATE_MARKER] keyword[in] identifier[public_key_data] :
identifier[cert] = identifier[cryptography] . identifier[x509] . identifier[load_pem_x509_certificate] (
identifier[public_key_data] , identifier[_BACKEND] )
identifier[pubkey] = identifier[cert] . identifier[public_key] ()
keyword[else] :
identifier[pubkey] = identifier[serialization] . identifier[load_pem_public_key] (
identifier[public_key_data] , identifier[_BACKEND] )
keyword[return] identifier[cls] ( identifier[pubkey] ) | def from_string(cls, public_key):
"""Construct an Verifier instance from a public key or public
certificate string.
Args:
public_key (Union[str, bytes]): The public key in PEM format or the
x509 public key certificate.
Returns:
Verifier: The constructed verifier.
Raises:
ValueError: If the public key can't be parsed.
"""
public_key_data = _helpers.to_bytes(public_key)
if _CERTIFICATE_MARKER in public_key_data:
cert = cryptography.x509.load_pem_x509_certificate(public_key_data, _BACKEND)
pubkey = cert.public_key() # depends on [control=['if'], data=['public_key_data']]
else:
pubkey = serialization.load_pem_public_key(public_key_data, _BACKEND)
return cls(pubkey) |
def kill_pane(self, pane):
"""
Kill the given pane, and remove it from the arrangement.
"""
assert isinstance(pane, Pane)
# Send kill signal.
if not pane.process.is_terminated:
pane.process.kill()
# Remove from layout.
self.arrangement.remove_pane(pane) | def function[kill_pane, parameter[self, pane]]:
constant[
Kill the given pane, and remove it from the arrangement.
]
assert[call[name[isinstance], parameter[name[pane], name[Pane]]]]
if <ast.UnaryOp object at 0x7da207f9bb80> begin[:]
call[name[pane].process.kill, parameter[]]
call[name[self].arrangement.remove_pane, parameter[name[pane]]] | keyword[def] identifier[kill_pane] ( identifier[self] , identifier[pane] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[pane] , identifier[Pane] )
keyword[if] keyword[not] identifier[pane] . identifier[process] . identifier[is_terminated] :
identifier[pane] . identifier[process] . identifier[kill] ()
identifier[self] . identifier[arrangement] . identifier[remove_pane] ( identifier[pane] ) | def kill_pane(self, pane):
"""
Kill the given pane, and remove it from the arrangement.
"""
assert isinstance(pane, Pane)
# Send kill signal.
if not pane.process.is_terminated:
pane.process.kill() # depends on [control=['if'], data=[]]
# Remove from layout.
self.arrangement.remove_pane(pane) |
def _parse_message(self, message):
"""Parse a message received from an AMQP service.
:param message: The received C message.
:type message: uamqp.c_uamqp.cMessage
"""
_logger.debug("Parsing received message %r.", self.delivery_no)
self._message = message
body_type = message.body_type
if body_type == c_uamqp.MessageBodyType.NoneType:
self._body = None
elif body_type == c_uamqp.MessageBodyType.DataType:
self._body = DataBody(self._message)
elif body_type == c_uamqp.MessageBodyType.SequenceType:
raise TypeError("Message body type Sequence not supported.")
else:
self._body = ValueBody(self._message)
_props = self._message.properties
if _props:
_logger.debug("Parsing received message properties %r.", self.delivery_no)
self.properties = MessageProperties(properties=_props, encoding=self._encoding)
_header = self._message.header
if _header:
_logger.debug("Parsing received message header %r.", self.delivery_no)
self.header = MessageHeader(header=_header)
_footer = self._message.footer
if _footer:
_logger.debug("Parsing received message footer %r.", self.delivery_no)
self.footer = _footer.map
_app_props = self._message.application_properties
if _app_props:
_logger.debug("Parsing received message application properties %r.", self.delivery_no)
self.application_properties = _app_props.map
_ann = self._message.message_annotations
if _ann:
_logger.debug("Parsing received message annotations %r.", self.delivery_no)
self.annotations = _ann.map
_delivery_ann = self._message.delivery_annotations
if _delivery_ann:
_logger.debug("Parsing received message delivery annotations %r.", self.delivery_no)
self.delivery_annotations = _delivery_ann.map | def function[_parse_message, parameter[self, message]]:
constant[Parse a message received from an AMQP service.
:param message: The received C message.
:type message: uamqp.c_uamqp.cMessage
]
call[name[_logger].debug, parameter[constant[Parsing received message %r.], name[self].delivery_no]]
name[self]._message assign[=] name[message]
variable[body_type] assign[=] name[message].body_type
if compare[name[body_type] equal[==] name[c_uamqp].MessageBodyType.NoneType] begin[:]
name[self]._body assign[=] constant[None]
variable[_props] assign[=] name[self]._message.properties
if name[_props] begin[:]
call[name[_logger].debug, parameter[constant[Parsing received message properties %r.], name[self].delivery_no]]
name[self].properties assign[=] call[name[MessageProperties], parameter[]]
variable[_header] assign[=] name[self]._message.header
if name[_header] begin[:]
call[name[_logger].debug, parameter[constant[Parsing received message header %r.], name[self].delivery_no]]
name[self].header assign[=] call[name[MessageHeader], parameter[]]
variable[_footer] assign[=] name[self]._message.footer
if name[_footer] begin[:]
call[name[_logger].debug, parameter[constant[Parsing received message footer %r.], name[self].delivery_no]]
name[self].footer assign[=] name[_footer].map
variable[_app_props] assign[=] name[self]._message.application_properties
if name[_app_props] begin[:]
call[name[_logger].debug, parameter[constant[Parsing received message application properties %r.], name[self].delivery_no]]
name[self].application_properties assign[=] name[_app_props].map
variable[_ann] assign[=] name[self]._message.message_annotations
if name[_ann] begin[:]
call[name[_logger].debug, parameter[constant[Parsing received message annotations %r.], name[self].delivery_no]]
name[self].annotations assign[=] name[_ann].map
variable[_delivery_ann] assign[=] name[self]._message.delivery_annotations
if name[_delivery_ann] begin[:]
call[name[_logger].debug, parameter[constant[Parsing received message delivery annotations %r.], name[self].delivery_no]]
name[self].delivery_annotations assign[=] name[_delivery_ann].map | keyword[def] identifier[_parse_message] ( identifier[self] , identifier[message] ):
literal[string]
identifier[_logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[delivery_no] )
identifier[self] . identifier[_message] = identifier[message]
identifier[body_type] = identifier[message] . identifier[body_type]
keyword[if] identifier[body_type] == identifier[c_uamqp] . identifier[MessageBodyType] . identifier[NoneType] :
identifier[self] . identifier[_body] = keyword[None]
keyword[elif] identifier[body_type] == identifier[c_uamqp] . identifier[MessageBodyType] . identifier[DataType] :
identifier[self] . identifier[_body] = identifier[DataBody] ( identifier[self] . identifier[_message] )
keyword[elif] identifier[body_type] == identifier[c_uamqp] . identifier[MessageBodyType] . identifier[SequenceType] :
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[else] :
identifier[self] . identifier[_body] = identifier[ValueBody] ( identifier[self] . identifier[_message] )
identifier[_props] = identifier[self] . identifier[_message] . identifier[properties]
keyword[if] identifier[_props] :
identifier[_logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[delivery_no] )
identifier[self] . identifier[properties] = identifier[MessageProperties] ( identifier[properties] = identifier[_props] , identifier[encoding] = identifier[self] . identifier[_encoding] )
identifier[_header] = identifier[self] . identifier[_message] . identifier[header]
keyword[if] identifier[_header] :
identifier[_logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[delivery_no] )
identifier[self] . identifier[header] = identifier[MessageHeader] ( identifier[header] = identifier[_header] )
identifier[_footer] = identifier[self] . identifier[_message] . identifier[footer]
keyword[if] identifier[_footer] :
identifier[_logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[delivery_no] )
identifier[self] . identifier[footer] = identifier[_footer] . identifier[map]
identifier[_app_props] = identifier[self] . identifier[_message] . identifier[application_properties]
keyword[if] identifier[_app_props] :
identifier[_logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[delivery_no] )
identifier[self] . identifier[application_properties] = identifier[_app_props] . identifier[map]
identifier[_ann] = identifier[self] . identifier[_message] . identifier[message_annotations]
keyword[if] identifier[_ann] :
identifier[_logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[delivery_no] )
identifier[self] . identifier[annotations] = identifier[_ann] . identifier[map]
identifier[_delivery_ann] = identifier[self] . identifier[_message] . identifier[delivery_annotations]
keyword[if] identifier[_delivery_ann] :
identifier[_logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[delivery_no] )
identifier[self] . identifier[delivery_annotations] = identifier[_delivery_ann] . identifier[map] | def _parse_message(self, message):
"""Parse a message received from an AMQP service.
:param message: The received C message.
:type message: uamqp.c_uamqp.cMessage
"""
_logger.debug('Parsing received message %r.', self.delivery_no)
self._message = message
body_type = message.body_type
if body_type == c_uamqp.MessageBodyType.NoneType:
self._body = None # depends on [control=['if'], data=[]]
elif body_type == c_uamqp.MessageBodyType.DataType:
self._body = DataBody(self._message) # depends on [control=['if'], data=[]]
elif body_type == c_uamqp.MessageBodyType.SequenceType:
raise TypeError('Message body type Sequence not supported.') # depends on [control=['if'], data=[]]
else:
self._body = ValueBody(self._message)
_props = self._message.properties
if _props:
_logger.debug('Parsing received message properties %r.', self.delivery_no)
self.properties = MessageProperties(properties=_props, encoding=self._encoding) # depends on [control=['if'], data=[]]
_header = self._message.header
if _header:
_logger.debug('Parsing received message header %r.', self.delivery_no)
self.header = MessageHeader(header=_header) # depends on [control=['if'], data=[]]
_footer = self._message.footer
if _footer:
_logger.debug('Parsing received message footer %r.', self.delivery_no)
self.footer = _footer.map # depends on [control=['if'], data=[]]
_app_props = self._message.application_properties
if _app_props:
_logger.debug('Parsing received message application properties %r.', self.delivery_no)
self.application_properties = _app_props.map # depends on [control=['if'], data=[]]
_ann = self._message.message_annotations
if _ann:
_logger.debug('Parsing received message annotations %r.', self.delivery_no)
self.annotations = _ann.map # depends on [control=['if'], data=[]]
_delivery_ann = self._message.delivery_annotations
if _delivery_ann:
_logger.debug('Parsing received message delivery annotations %r.', self.delivery_no)
self.delivery_annotations = _delivery_ann.map # depends on [control=['if'], data=[]] |
def attention_lm_decoder(decoder_input,
decoder_self_attention_bias,
hparams,
name="decoder"):
"""A stack of attention_lm layers.
Args:
decoder_input: a Tensor
decoder_self_attention_bias: bias Tensor for self-attention
(see common_attention.attention_bias())
hparams: hyperparameters for model
name: a string
Returns:
y: a Tensors
"""
x = decoder_input
with tf.variable_scope(name):
for layer in range(hparams.num_hidden_layers):
with tf.variable_scope("layer_%d" % layer):
with tf.variable_scope("self_attention"):
y = common_attention.multihead_attention(
common_layers.layer_preprocess(
x, hparams), None, decoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size, hparams.num_heads, hparams.attention_dropout)
x = common_layers.layer_postprocess(x, y, hparams)
with tf.variable_scope("ffn"):
y = common_layers.conv_hidden_relu(
common_layers.layer_preprocess(x, hparams),
hparams.filter_size,
hparams.hidden_size,
dropout=hparams.relu_dropout)
x = common_layers.layer_postprocess(x, y, hparams)
return common_layers.layer_preprocess(x, hparams) | def function[attention_lm_decoder, parameter[decoder_input, decoder_self_attention_bias, hparams, name]]:
constant[A stack of attention_lm layers.
Args:
decoder_input: a Tensor
decoder_self_attention_bias: bias Tensor for self-attention
(see common_attention.attention_bias())
hparams: hyperparameters for model
name: a string
Returns:
y: a Tensors
]
variable[x] assign[=] name[decoder_input]
with call[name[tf].variable_scope, parameter[name[name]]] begin[:]
for taget[name[layer]] in starred[call[name[range], parameter[name[hparams].num_hidden_layers]]] begin[:]
with call[name[tf].variable_scope, parameter[binary_operation[constant[layer_%d] <ast.Mod object at 0x7da2590d6920> name[layer]]]] begin[:]
with call[name[tf].variable_scope, parameter[constant[self_attention]]] begin[:]
variable[y] assign[=] call[name[common_attention].multihead_attention, parameter[call[name[common_layers].layer_preprocess, parameter[name[x], name[hparams]]], constant[None], name[decoder_self_attention_bias], <ast.BoolOp object at 0x7da18fe90430>, <ast.BoolOp object at 0x7da18fe93580>, name[hparams].hidden_size, name[hparams].num_heads, name[hparams].attention_dropout]]
variable[x] assign[=] call[name[common_layers].layer_postprocess, parameter[name[x], name[y], name[hparams]]]
with call[name[tf].variable_scope, parameter[constant[ffn]]] begin[:]
variable[y] assign[=] call[name[common_layers].conv_hidden_relu, parameter[call[name[common_layers].layer_preprocess, parameter[name[x], name[hparams]]], name[hparams].filter_size, name[hparams].hidden_size]]
variable[x] assign[=] call[name[common_layers].layer_postprocess, parameter[name[x], name[y], name[hparams]]]
return[call[name[common_layers].layer_preprocess, parameter[name[x], name[hparams]]]] | keyword[def] identifier[attention_lm_decoder] ( identifier[decoder_input] ,
identifier[decoder_self_attention_bias] ,
identifier[hparams] ,
identifier[name] = literal[string] ):
literal[string]
identifier[x] = identifier[decoder_input]
keyword[with] identifier[tf] . identifier[variable_scope] ( identifier[name] ):
keyword[for] identifier[layer] keyword[in] identifier[range] ( identifier[hparams] . identifier[num_hidden_layers] ):
keyword[with] identifier[tf] . identifier[variable_scope] ( literal[string] % identifier[layer] ):
keyword[with] identifier[tf] . identifier[variable_scope] ( literal[string] ):
identifier[y] = identifier[common_attention] . identifier[multihead_attention] (
identifier[common_layers] . identifier[layer_preprocess] (
identifier[x] , identifier[hparams] ), keyword[None] , identifier[decoder_self_attention_bias] ,
identifier[hparams] . identifier[attention_key_channels] keyword[or] identifier[hparams] . identifier[hidden_size] ,
identifier[hparams] . identifier[attention_value_channels] keyword[or] identifier[hparams] . identifier[hidden_size] ,
identifier[hparams] . identifier[hidden_size] , identifier[hparams] . identifier[num_heads] , identifier[hparams] . identifier[attention_dropout] )
identifier[x] = identifier[common_layers] . identifier[layer_postprocess] ( identifier[x] , identifier[y] , identifier[hparams] )
keyword[with] identifier[tf] . identifier[variable_scope] ( literal[string] ):
identifier[y] = identifier[common_layers] . identifier[conv_hidden_relu] (
identifier[common_layers] . identifier[layer_preprocess] ( identifier[x] , identifier[hparams] ),
identifier[hparams] . identifier[filter_size] ,
identifier[hparams] . identifier[hidden_size] ,
identifier[dropout] = identifier[hparams] . identifier[relu_dropout] )
identifier[x] = identifier[common_layers] . identifier[layer_postprocess] ( identifier[x] , identifier[y] , identifier[hparams] )
keyword[return] identifier[common_layers] . identifier[layer_preprocess] ( identifier[x] , identifier[hparams] ) | def attention_lm_decoder(decoder_input, decoder_self_attention_bias, hparams, name='decoder'):
"""A stack of attention_lm layers.
Args:
decoder_input: a Tensor
decoder_self_attention_bias: bias Tensor for self-attention
(see common_attention.attention_bias())
hparams: hyperparameters for model
name: a string
Returns:
y: a Tensors
"""
x = decoder_input
with tf.variable_scope(name):
for layer in range(hparams.num_hidden_layers):
with tf.variable_scope('layer_%d' % layer):
with tf.variable_scope('self_attention'):
y = common_attention.multihead_attention(common_layers.layer_preprocess(x, hparams), None, decoder_self_attention_bias, hparams.attention_key_channels or hparams.hidden_size, hparams.attention_value_channels or hparams.hidden_size, hparams.hidden_size, hparams.num_heads, hparams.attention_dropout)
x = common_layers.layer_postprocess(x, y, hparams) # depends on [control=['with'], data=[]]
with tf.variable_scope('ffn'):
y = common_layers.conv_hidden_relu(common_layers.layer_preprocess(x, hparams), hparams.filter_size, hparams.hidden_size, dropout=hparams.relu_dropout)
x = common_layers.layer_postprocess(x, y, hparams) # depends on [control=['with'], data=[]] # depends on [control=['with'], data=[]] # depends on [control=['for'], data=['layer']]
return common_layers.layer_preprocess(x, hparams) # depends on [control=['with'], data=[]] |
def dumps(obj, *args, **kwargs):
"""Helper function that wraps :func:`json.dumps`.
Recursive function that handles all BSON types including
:class:`~bson.binary.Binary` and :class:`~bson.code.Code`.
:Parameters:
- `json_options`: A :class:`JSONOptions` instance used to modify the
encoding of MongoDB Extended JSON types. Defaults to
:const:`DEFAULT_JSON_OPTIONS`.
.. versionchanged:: 3.4
Accepts optional parameter `json_options`. See :class:`JSONOptions`.
.. versionchanged:: 2.7
Preserves order when rendering SON, Timestamp, Code, Binary, and DBRef
instances.
"""
json_options = kwargs.pop("json_options", DEFAULT_JSON_OPTIONS)
return json.dumps(_json_convert(obj, json_options), *args, **kwargs) | def function[dumps, parameter[obj]]:
constant[Helper function that wraps :func:`json.dumps`.
Recursive function that handles all BSON types including
:class:`~bson.binary.Binary` and :class:`~bson.code.Code`.
:Parameters:
- `json_options`: A :class:`JSONOptions` instance used to modify the
encoding of MongoDB Extended JSON types. Defaults to
:const:`DEFAULT_JSON_OPTIONS`.
.. versionchanged:: 3.4
Accepts optional parameter `json_options`. See :class:`JSONOptions`.
.. versionchanged:: 2.7
Preserves order when rendering SON, Timestamp, Code, Binary, and DBRef
instances.
]
variable[json_options] assign[=] call[name[kwargs].pop, parameter[constant[json_options], name[DEFAULT_JSON_OPTIONS]]]
return[call[name[json].dumps, parameter[call[name[_json_convert], parameter[name[obj], name[json_options]]], <ast.Starred object at 0x7da20c6c4700>]]] | keyword[def] identifier[dumps] ( identifier[obj] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[json_options] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[DEFAULT_JSON_OPTIONS] )
keyword[return] identifier[json] . identifier[dumps] ( identifier[_json_convert] ( identifier[obj] , identifier[json_options] ),* identifier[args] ,** identifier[kwargs] ) | def dumps(obj, *args, **kwargs):
"""Helper function that wraps :func:`json.dumps`.
Recursive function that handles all BSON types including
:class:`~bson.binary.Binary` and :class:`~bson.code.Code`.
:Parameters:
- `json_options`: A :class:`JSONOptions` instance used to modify the
encoding of MongoDB Extended JSON types. Defaults to
:const:`DEFAULT_JSON_OPTIONS`.
.. versionchanged:: 3.4
Accepts optional parameter `json_options`. See :class:`JSONOptions`.
.. versionchanged:: 2.7
Preserves order when rendering SON, Timestamp, Code, Binary, and DBRef
instances.
"""
json_options = kwargs.pop('json_options', DEFAULT_JSON_OPTIONS)
return json.dumps(_json_convert(obj, json_options), *args, **kwargs) |
def start_executing_host_checks(self):
"""Enable host check execution (globally)
Format of the line that triggers function call::
START_EXECUTING_HOST_CHECKS
:return: None
"""
# todo: #783 create a dedicated brok for global parameters
if not self.my_conf.execute_host_checks:
self.my_conf.modified_attributes |= DICT_MODATTR["MODATTR_ACTIVE_CHECKS_ENABLED"].value
self.my_conf.execute_host_checks = True
self.my_conf.explode_global_conf()
self.daemon.update_program_status() | def function[start_executing_host_checks, parameter[self]]:
constant[Enable host check execution (globally)
Format of the line that triggers function call::
START_EXECUTING_HOST_CHECKS
:return: None
]
if <ast.UnaryOp object at 0x7da2054a6590> begin[:]
<ast.AugAssign object at 0x7da2054a4460>
name[self].my_conf.execute_host_checks assign[=] constant[True]
call[name[self].my_conf.explode_global_conf, parameter[]]
call[name[self].daemon.update_program_status, parameter[]] | keyword[def] identifier[start_executing_host_checks] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[my_conf] . identifier[execute_host_checks] :
identifier[self] . identifier[my_conf] . identifier[modified_attributes] |= identifier[DICT_MODATTR] [ literal[string] ]. identifier[value]
identifier[self] . identifier[my_conf] . identifier[execute_host_checks] = keyword[True]
identifier[self] . identifier[my_conf] . identifier[explode_global_conf] ()
identifier[self] . identifier[daemon] . identifier[update_program_status] () | def start_executing_host_checks(self):
"""Enable host check execution (globally)
Format of the line that triggers function call::
START_EXECUTING_HOST_CHECKS
:return: None
"""
# todo: #783 create a dedicated brok for global parameters
if not self.my_conf.execute_host_checks:
self.my_conf.modified_attributes |= DICT_MODATTR['MODATTR_ACTIVE_CHECKS_ENABLED'].value
self.my_conf.execute_host_checks = True
self.my_conf.explode_global_conf()
self.daemon.update_program_status() # depends on [control=['if'], data=[]] |
def list_all_order_line_items(cls, **kwargs):
"""List OrderLineItems
Return a list of OrderLineItems
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_order_line_items(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[OrderLineItem]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_order_line_items_with_http_info(**kwargs)
else:
(data) = cls._list_all_order_line_items_with_http_info(**kwargs)
return data | def function[list_all_order_line_items, parameter[cls]]:
constant[List OrderLineItems
Return a list of OrderLineItems
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_order_line_items(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[OrderLineItem]
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async]]] begin[:]
return[call[name[cls]._list_all_order_line_items_with_http_info, parameter[]]] | keyword[def] identifier[list_all_order_line_items] ( identifier[cls] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[cls] . identifier[_list_all_order_line_items_with_http_info] (** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[cls] . identifier[_list_all_order_line_items_with_http_info] (** identifier[kwargs] )
keyword[return] identifier[data] | def list_all_order_line_items(cls, **kwargs):
"""List OrderLineItems
Return a list of OrderLineItems
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_all_order_line_items(async=True)
>>> result = thread.get()
:param async bool
:param int page: page number
:param int size: page size
:param str sort: page order
:return: page[OrderLineItem]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._list_all_order_line_items_with_http_info(**kwargs) # depends on [control=['if'], data=[]]
else:
data = cls._list_all_order_line_items_with_http_info(**kwargs)
return data |
def feed(self, token, test_newline=True):
"""Consume a token and calculate the new line & column.
As an optional optimization, set test_newline=False is token doesn't contain a newline.
"""
if test_newline:
newlines = token.count(self.newline_char)
if newlines:
self.line += newlines
self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1
self.char_pos += len(token)
self.column = self.char_pos - self.line_start_pos + 1 | def function[feed, parameter[self, token, test_newline]]:
constant[Consume a token and calculate the new line & column.
As an optional optimization, set test_newline=False is token doesn't contain a newline.
]
if name[test_newline] begin[:]
variable[newlines] assign[=] call[name[token].count, parameter[name[self].newline_char]]
if name[newlines] begin[:]
<ast.AugAssign object at 0x7da20c7c95d0>
name[self].line_start_pos assign[=] binary_operation[binary_operation[name[self].char_pos + call[name[token].rindex, parameter[name[self].newline_char]]] + constant[1]]
<ast.AugAssign object at 0x7da2041d89d0>
name[self].column assign[=] binary_operation[binary_operation[name[self].char_pos - name[self].line_start_pos] + constant[1]] | keyword[def] identifier[feed] ( identifier[self] , identifier[token] , identifier[test_newline] = keyword[True] ):
literal[string]
keyword[if] identifier[test_newline] :
identifier[newlines] = identifier[token] . identifier[count] ( identifier[self] . identifier[newline_char] )
keyword[if] identifier[newlines] :
identifier[self] . identifier[line] += identifier[newlines]
identifier[self] . identifier[line_start_pos] = identifier[self] . identifier[char_pos] + identifier[token] . identifier[rindex] ( identifier[self] . identifier[newline_char] )+ literal[int]
identifier[self] . identifier[char_pos] += identifier[len] ( identifier[token] )
identifier[self] . identifier[column] = identifier[self] . identifier[char_pos] - identifier[self] . identifier[line_start_pos] + literal[int] | def feed(self, token, test_newline=True):
"""Consume a token and calculate the new line & column.
As an optional optimization, set test_newline=False is token doesn't contain a newline.
"""
if test_newline:
newlines = token.count(self.newline_char)
if newlines:
self.line += newlines
self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1 # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
self.char_pos += len(token)
self.column = self.char_pos - self.line_start_pos + 1 |
def bool(anon, obj, field, val):
"""
Returns a random boolean value (True/False)
"""
return anon.faker.bool(field=field) | def function[bool, parameter[anon, obj, field, val]]:
constant[
Returns a random boolean value (True/False)
]
return[call[name[anon].faker.bool, parameter[]]] | keyword[def] identifier[bool] ( identifier[anon] , identifier[obj] , identifier[field] , identifier[val] ):
literal[string]
keyword[return] identifier[anon] . identifier[faker] . identifier[bool] ( identifier[field] = identifier[field] ) | def bool(anon, obj, field, val):
"""
Returns a random boolean value (True/False)
"""
return anon.faker.bool(field=field) |
def listFileChildren(self, logical_file_name='', block_name='', block_id=0):
"""
required parameter: logical_file_name or block_name or block_id
returns: logical_file_name, child_logical_file_name, parent_file_id
"""
conn = self.dbi.connection()
try:
if not logical_file_name and not block_name and not block_id:
dbsExceptionHandler('dbsException-invalid-input',\
"Logical_file_name, block_id or block_name is required for listFileChildren api")
sqlresult = self.filechildlist.execute(conn, logical_file_name, block_name, block_id)
d = {}
result = []
for i in range(len(sqlresult)):
k = sqlresult[i]['logical_file_name']
v = sqlresult[i]['child_logical_file_name']
if k in d:
d[k].append(v)
else:
d[k] = [v]
for k, v in d.iteritems():
r = {'logical_file_name':k, 'child_logical_file_name': v}
result.append(r)
return result
finally:
if conn:
conn.close() | def function[listFileChildren, parameter[self, logical_file_name, block_name, block_id]]:
constant[
required parameter: logical_file_name or block_name or block_id
returns: logical_file_name, child_logical_file_name, parent_file_id
]
variable[conn] assign[=] call[name[self].dbi.connection, parameter[]]
<ast.Try object at 0x7da20c7955a0> | keyword[def] identifier[listFileChildren] ( identifier[self] , identifier[logical_file_name] = literal[string] , identifier[block_name] = literal[string] , identifier[block_id] = literal[int] ):
literal[string]
identifier[conn] = identifier[self] . identifier[dbi] . identifier[connection] ()
keyword[try] :
keyword[if] keyword[not] identifier[logical_file_name] keyword[and] keyword[not] identifier[block_name] keyword[and] keyword[not] identifier[block_id] :
identifier[dbsExceptionHandler] ( literal[string] , literal[string] )
identifier[sqlresult] = identifier[self] . identifier[filechildlist] . identifier[execute] ( identifier[conn] , identifier[logical_file_name] , identifier[block_name] , identifier[block_id] )
identifier[d] ={}
identifier[result] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[sqlresult] )):
identifier[k] = identifier[sqlresult] [ identifier[i] ][ literal[string] ]
identifier[v] = identifier[sqlresult] [ identifier[i] ][ literal[string] ]
keyword[if] identifier[k] keyword[in] identifier[d] :
identifier[d] [ identifier[k] ]. identifier[append] ( identifier[v] )
keyword[else] :
identifier[d] [ identifier[k] ]=[ identifier[v] ]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[d] . identifier[iteritems] ():
identifier[r] ={ literal[string] : identifier[k] , literal[string] : identifier[v] }
identifier[result] . identifier[append] ( identifier[r] )
keyword[return] identifier[result]
keyword[finally] :
keyword[if] identifier[conn] :
identifier[conn] . identifier[close] () | def listFileChildren(self, logical_file_name='', block_name='', block_id=0):
"""
required parameter: logical_file_name or block_name or block_id
returns: logical_file_name, child_logical_file_name, parent_file_id
"""
conn = self.dbi.connection()
try:
if not logical_file_name and (not block_name) and (not block_id):
dbsExceptionHandler('dbsException-invalid-input', 'Logical_file_name, block_id or block_name is required for listFileChildren api') # depends on [control=['if'], data=[]]
sqlresult = self.filechildlist.execute(conn, logical_file_name, block_name, block_id)
d = {}
result = []
for i in range(len(sqlresult)):
k = sqlresult[i]['logical_file_name']
v = sqlresult[i]['child_logical_file_name']
if k in d:
d[k].append(v) # depends on [control=['if'], data=['k', 'd']]
else:
d[k] = [v] # depends on [control=['for'], data=['i']]
for (k, v) in d.iteritems():
r = {'logical_file_name': k, 'child_logical_file_name': v}
result.append(r) # depends on [control=['for'], data=[]]
return result # depends on [control=['try'], data=[]]
finally:
if conn:
conn.close() # depends on [control=['if'], data=[]] |
def reset_queues(queues):
"""Resets original queue._put() method."""
for queue in queues:
with queue.mutex:
queue._put = queue._pebble_old_method
delattr(queue, '_pebble_old_method')
delattr(queue, '_pebble_lock') | def function[reset_queues, parameter[queues]]:
constant[Resets original queue._put() method.]
for taget[name[queue]] in starred[name[queues]] begin[:]
with name[queue].mutex begin[:]
name[queue]._put assign[=] name[queue]._pebble_old_method
call[name[delattr], parameter[name[queue], constant[_pebble_old_method]]]
call[name[delattr], parameter[name[queue], constant[_pebble_lock]]] | keyword[def] identifier[reset_queues] ( identifier[queues] ):
literal[string]
keyword[for] identifier[queue] keyword[in] identifier[queues] :
keyword[with] identifier[queue] . identifier[mutex] :
identifier[queue] . identifier[_put] = identifier[queue] . identifier[_pebble_old_method]
identifier[delattr] ( identifier[queue] , literal[string] )
identifier[delattr] ( identifier[queue] , literal[string] ) | def reset_queues(queues):
"""Resets original queue._put() method."""
for queue in queues:
with queue.mutex:
queue._put = queue._pebble_old_method # depends on [control=['with'], data=[]]
delattr(queue, '_pebble_old_method')
delattr(queue, '_pebble_lock') # depends on [control=['for'], data=['queue']] |
def WriteFD(
self,
Channel,
MessageBuffer):
"""
Transmits a CAN message over a FD capable PCAN Channel
Parameters:
Channel : The handle of a FD capable PCAN Channel
MessageBuffer: A TPCANMsgFD buffer with the message to be sent
Returns:
A TPCANStatus error code
"""
try:
res = self.__m_dllBasic.CAN_WriteFD(Channel,byref(MessageBuffer))
return TPCANStatus(res)
except:
logger.error("Exception on PCANBasic.WriteFD")
raise | def function[WriteFD, parameter[self, Channel, MessageBuffer]]:
constant[
Transmits a CAN message over a FD capable PCAN Channel
Parameters:
Channel : The handle of a FD capable PCAN Channel
MessageBuffer: A TPCANMsgFD buffer with the message to be sent
Returns:
A TPCANStatus error code
]
<ast.Try object at 0x7da1b1bf8eb0> | keyword[def] identifier[WriteFD] (
identifier[self] ,
identifier[Channel] ,
identifier[MessageBuffer] ):
literal[string]
keyword[try] :
identifier[res] = identifier[self] . identifier[__m_dllBasic] . identifier[CAN_WriteFD] ( identifier[Channel] , identifier[byref] ( identifier[MessageBuffer] ))
keyword[return] identifier[TPCANStatus] ( identifier[res] )
keyword[except] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[raise] | def WriteFD(self, Channel, MessageBuffer):
"""
Transmits a CAN message over a FD capable PCAN Channel
Parameters:
Channel : The handle of a FD capable PCAN Channel
MessageBuffer: A TPCANMsgFD buffer with the message to be sent
Returns:
A TPCANStatus error code
"""
try:
res = self.__m_dllBasic.CAN_WriteFD(Channel, byref(MessageBuffer))
return TPCANStatus(res) # depends on [control=['try'], data=[]]
except:
logger.error('Exception on PCANBasic.WriteFD')
raise # depends on [control=['except'], data=[]] |
def evaluate_cached(self, **kwargs):
"""Wraps evaluate(), caching results"""
if not hasattr(self, 'result'):
self.result = self.evaluate(cache=True, **kwargs)
return self.result | def function[evaluate_cached, parameter[self]]:
constant[Wraps evaluate(), caching results]
if <ast.UnaryOp object at 0x7da1b0f39c60> begin[:]
name[self].result assign[=] call[name[self].evaluate, parameter[]]
return[name[self].result] | keyword[def] identifier[evaluate_cached] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[result] = identifier[self] . identifier[evaluate] ( identifier[cache] = keyword[True] ,** identifier[kwargs] )
keyword[return] identifier[self] . identifier[result] | def evaluate_cached(self, **kwargs):
"""Wraps evaluate(), caching results"""
if not hasattr(self, 'result'):
self.result = self.evaluate(cache=True, **kwargs) # depends on [control=['if'], data=[]]
return self.result |
def add_other_cflags(self, flags, target_name=None, configuration_name=None):
"""
Adds flag values to the OTHER_CFLAGS flag.
:param flags: A string or array of strings. If none, removes all values from the flag.
:param target_name: Target name or list of target names to add the flag to or None for every target
:param configuration_name: Configuration name to add the flag to or None for every configuration
:return: void
"""
self.add_flags(XCBuildConfigurationFlags.OTHER_CFLAGS, flags, target_name, configuration_name) | def function[add_other_cflags, parameter[self, flags, target_name, configuration_name]]:
constant[
Adds flag values to the OTHER_CFLAGS flag.
:param flags: A string or array of strings. If none, removes all values from the flag.
:param target_name: Target name or list of target names to add the flag to or None for every target
:param configuration_name: Configuration name to add the flag to or None for every configuration
:return: void
]
call[name[self].add_flags, parameter[name[XCBuildConfigurationFlags].OTHER_CFLAGS, name[flags], name[target_name], name[configuration_name]]] | keyword[def] identifier[add_other_cflags] ( identifier[self] , identifier[flags] , identifier[target_name] = keyword[None] , identifier[configuration_name] = keyword[None] ):
literal[string]
identifier[self] . identifier[add_flags] ( identifier[XCBuildConfigurationFlags] . identifier[OTHER_CFLAGS] , identifier[flags] , identifier[target_name] , identifier[configuration_name] ) | def add_other_cflags(self, flags, target_name=None, configuration_name=None):
"""
Adds flag values to the OTHER_CFLAGS flag.
:param flags: A string or array of strings. If none, removes all values from the flag.
:param target_name: Target name or list of target names to add the flag to or None for every target
:param configuration_name: Configuration name to add the flag to or None for every configuration
:return: void
"""
self.add_flags(XCBuildConfigurationFlags.OTHER_CFLAGS, flags, target_name, configuration_name) |
def add_connection_throttle(self, loadbalancer, maxConnectionRate=None,
maxConnections=None, minConnections=None, rateInterval=None):
"""
Creates or updates the connection throttling information for the load
balancer. When first creating the connection throttle, all 4 parameters
must be supplied. When updating an existing connection throttle, at
least one of the parameters must be supplied.
"""
settings = {}
if maxConnectionRate:
settings["maxConnectionRate"] = maxConnectionRate
if maxConnections:
settings["maxConnections"] = maxConnections
if minConnections:
settings["minConnections"] = minConnections
if rateInterval:
settings["rateInterval"] = rateInterval
req_body = {"connectionThrottle": settings}
uri = "/loadbalancers/%s/connectionthrottle" % utils.get_id(loadbalancer)
resp, body = self.api.method_put(uri, body=req_body)
return body | def function[add_connection_throttle, parameter[self, loadbalancer, maxConnectionRate, maxConnections, minConnections, rateInterval]]:
constant[
Creates or updates the connection throttling information for the load
balancer. When first creating the connection throttle, all 4 parameters
must be supplied. When updating an existing connection throttle, at
least one of the parameters must be supplied.
]
variable[settings] assign[=] dictionary[[], []]
if name[maxConnectionRate] begin[:]
call[name[settings]][constant[maxConnectionRate]] assign[=] name[maxConnectionRate]
if name[maxConnections] begin[:]
call[name[settings]][constant[maxConnections]] assign[=] name[maxConnections]
if name[minConnections] begin[:]
call[name[settings]][constant[minConnections]] assign[=] name[minConnections]
if name[rateInterval] begin[:]
call[name[settings]][constant[rateInterval]] assign[=] name[rateInterval]
variable[req_body] assign[=] dictionary[[<ast.Constant object at 0x7da1b0558be0>], [<ast.Name object at 0x7da1b0559f90>]]
variable[uri] assign[=] binary_operation[constant[/loadbalancers/%s/connectionthrottle] <ast.Mod object at 0x7da2590d6920> call[name[utils].get_id, parameter[name[loadbalancer]]]]
<ast.Tuple object at 0x7da1b05594b0> assign[=] call[name[self].api.method_put, parameter[name[uri]]]
return[name[body]] | keyword[def] identifier[add_connection_throttle] ( identifier[self] , identifier[loadbalancer] , identifier[maxConnectionRate] = keyword[None] ,
identifier[maxConnections] = keyword[None] , identifier[minConnections] = keyword[None] , identifier[rateInterval] = keyword[None] ):
literal[string]
identifier[settings] ={}
keyword[if] identifier[maxConnectionRate] :
identifier[settings] [ literal[string] ]= identifier[maxConnectionRate]
keyword[if] identifier[maxConnections] :
identifier[settings] [ literal[string] ]= identifier[maxConnections]
keyword[if] identifier[minConnections] :
identifier[settings] [ literal[string] ]= identifier[minConnections]
keyword[if] identifier[rateInterval] :
identifier[settings] [ literal[string] ]= identifier[rateInterval]
identifier[req_body] ={ literal[string] : identifier[settings] }
identifier[uri] = literal[string] % identifier[utils] . identifier[get_id] ( identifier[loadbalancer] )
identifier[resp] , identifier[body] = identifier[self] . identifier[api] . identifier[method_put] ( identifier[uri] , identifier[body] = identifier[req_body] )
keyword[return] identifier[body] | def add_connection_throttle(self, loadbalancer, maxConnectionRate=None, maxConnections=None, minConnections=None, rateInterval=None):
"""
Creates or updates the connection throttling information for the load
balancer. When first creating the connection throttle, all 4 parameters
must be supplied. When updating an existing connection throttle, at
least one of the parameters must be supplied.
"""
settings = {}
if maxConnectionRate:
settings['maxConnectionRate'] = maxConnectionRate # depends on [control=['if'], data=[]]
if maxConnections:
settings['maxConnections'] = maxConnections # depends on [control=['if'], data=[]]
if minConnections:
settings['minConnections'] = minConnections # depends on [control=['if'], data=[]]
if rateInterval:
settings['rateInterval'] = rateInterval # depends on [control=['if'], data=[]]
req_body = {'connectionThrottle': settings}
uri = '/loadbalancers/%s/connectionthrottle' % utils.get_id(loadbalancer)
(resp, body) = self.api.method_put(uri, body=req_body)
return body |
def _GetFormatter(self, format_str):
"""
The user's formatters are consulted first, then the default formatters.
"""
formatter, args, func_type = self.formatters.LookupWithType(format_str)
if formatter:
return formatter, args, func_type
else:
raise BadFormatter('%r is not a valid formatter' % format_str) | def function[_GetFormatter, parameter[self, format_str]]:
constant[
The user's formatters are consulted first, then the default formatters.
]
<ast.Tuple object at 0x7da18f09e5c0> assign[=] call[name[self].formatters.LookupWithType, parameter[name[format_str]]]
if name[formatter] begin[:]
return[tuple[[<ast.Name object at 0x7da18f09e5f0>, <ast.Name object at 0x7da18f09e1a0>, <ast.Name object at 0x7da18f09ef80>]]] | keyword[def] identifier[_GetFormatter] ( identifier[self] , identifier[format_str] ):
literal[string]
identifier[formatter] , identifier[args] , identifier[func_type] = identifier[self] . identifier[formatters] . identifier[LookupWithType] ( identifier[format_str] )
keyword[if] identifier[formatter] :
keyword[return] identifier[formatter] , identifier[args] , identifier[func_type]
keyword[else] :
keyword[raise] identifier[BadFormatter] ( literal[string] % identifier[format_str] ) | def _GetFormatter(self, format_str):
"""
The user's formatters are consulted first, then the default formatters.
"""
(formatter, args, func_type) = self.formatters.LookupWithType(format_str)
if formatter:
return (formatter, args, func_type) # depends on [control=['if'], data=[]]
else:
raise BadFormatter('%r is not a valid formatter' % format_str) |
def _prepare_request_json(self, kwargs):
"""Prepare request args for sending to device as JSON."""
# Check for python keywords in dict
kwargs = self._check_for_python_keywords(kwargs)
# Check for the key 'check' in kwargs
if 'check' in kwargs:
od = OrderedDict()
od['check'] = kwargs['check']
kwargs.pop('check')
od.update(kwargs)
return od
return kwargs | def function[_prepare_request_json, parameter[self, kwargs]]:
constant[Prepare request args for sending to device as JSON.]
variable[kwargs] assign[=] call[name[self]._check_for_python_keywords, parameter[name[kwargs]]]
if compare[constant[check] in name[kwargs]] begin[:]
variable[od] assign[=] call[name[OrderedDict], parameter[]]
call[name[od]][constant[check]] assign[=] call[name[kwargs]][constant[check]]
call[name[kwargs].pop, parameter[constant[check]]]
call[name[od].update, parameter[name[kwargs]]]
return[name[od]]
return[name[kwargs]] | keyword[def] identifier[_prepare_request_json] ( identifier[self] , identifier[kwargs] ):
literal[string]
identifier[kwargs] = identifier[self] . identifier[_check_for_python_keywords] ( identifier[kwargs] )
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[od] = identifier[OrderedDict] ()
identifier[od] [ literal[string] ]= identifier[kwargs] [ literal[string] ]
identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[od] . identifier[update] ( identifier[kwargs] )
keyword[return] identifier[od]
keyword[return] identifier[kwargs] | def _prepare_request_json(self, kwargs):
"""Prepare request args for sending to device as JSON."""
# Check for python keywords in dict
kwargs = self._check_for_python_keywords(kwargs)
# Check for the key 'check' in kwargs
if 'check' in kwargs:
od = OrderedDict()
od['check'] = kwargs['check']
kwargs.pop('check')
od.update(kwargs)
return od # depends on [control=['if'], data=['kwargs']]
return kwargs |
def browserify_file(entry_point, output_file, babelify=False, export_as=None):
"""
Browserify a single javascript entry point plus non-external
dependencies into a single javascript file. Generates source maps
in debug mode. Minifies the output in release mode.
By default, it is not possible to ``require()`` any exports from the entry
point or included files. If ``export_as`` is specified, any module exports
in the specified entry point are exposed for ``require()`` with the
name specified by ``export_as``.
"""
from .modules import browserify
if not isinstance(entry_point, str):
raise RuntimeError('Browserify File compiler takes a single entry point as input.')
return {
'dependencies_fn': browserify.browserify_deps_file,
'compiler_fn': browserify.browserify_compile_file,
'input': entry_point,
'output': output_file,
'kwargs': {
'babelify': babelify,
'export_as': export_as,
},
} | def function[browserify_file, parameter[entry_point, output_file, babelify, export_as]]:
constant[
Browserify a single javascript entry point plus non-external
dependencies into a single javascript file. Generates source maps
in debug mode. Minifies the output in release mode.
By default, it is not possible to ``require()`` any exports from the entry
point or included files. If ``export_as`` is specified, any module exports
in the specified entry point are exposed for ``require()`` with the
name specified by ``export_as``.
]
from relative_module[modules] import module[browserify]
if <ast.UnaryOp object at 0x7da1b1301150> begin[:]
<ast.Raise object at 0x7da1b1302e60>
return[dictionary[[<ast.Constant object at 0x7da1b13001c0>, <ast.Constant object at 0x7da1b1301300>, <ast.Constant object at 0x7da1b1300550>, <ast.Constant object at 0x7da1b1302770>, <ast.Constant object at 0x7da1b13000d0>], [<ast.Attribute object at 0x7da1b1302bf0>, <ast.Attribute object at 0x7da1b1300460>, <ast.Name object at 0x7da1b13030a0>, <ast.Name object at 0x7da1b1300070>, <ast.Dict object at 0x7da1b1301e70>]]] | keyword[def] identifier[browserify_file] ( identifier[entry_point] , identifier[output_file] , identifier[babelify] = keyword[False] , identifier[export_as] = keyword[None] ):
literal[string]
keyword[from] . identifier[modules] keyword[import] identifier[browserify]
keyword[if] keyword[not] identifier[isinstance] ( identifier[entry_point] , identifier[str] ):
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[return] {
literal[string] : identifier[browserify] . identifier[browserify_deps_file] ,
literal[string] : identifier[browserify] . identifier[browserify_compile_file] ,
literal[string] : identifier[entry_point] ,
literal[string] : identifier[output_file] ,
literal[string] :{
literal[string] : identifier[babelify] ,
literal[string] : identifier[export_as] ,
},
} | def browserify_file(entry_point, output_file, babelify=False, export_as=None):
"""
Browserify a single javascript entry point plus non-external
dependencies into a single javascript file. Generates source maps
in debug mode. Minifies the output in release mode.
By default, it is not possible to ``require()`` any exports from the entry
point or included files. If ``export_as`` is specified, any module exports
in the specified entry point are exposed for ``require()`` with the
name specified by ``export_as``.
"""
from .modules import browserify
if not isinstance(entry_point, str):
raise RuntimeError('Browserify File compiler takes a single entry point as input.') # depends on [control=['if'], data=[]]
return {'dependencies_fn': browserify.browserify_deps_file, 'compiler_fn': browserify.browserify_compile_file, 'input': entry_point, 'output': output_file, 'kwargs': {'babelify': babelify, 'export_as': export_as}} |
def execute(self, input_data):
''' Execute the URL worker '''
string_output = input_data['strings']['string_list']
flatten = ' '.join(string_output)
urls = self.url_match.findall(flatten)
return {'url_list': urls} | def function[execute, parameter[self, input_data]]:
constant[ Execute the URL worker ]
variable[string_output] assign[=] call[call[name[input_data]][constant[strings]]][constant[string_list]]
variable[flatten] assign[=] call[constant[ ].join, parameter[name[string_output]]]
variable[urls] assign[=] call[name[self].url_match.findall, parameter[name[flatten]]]
return[dictionary[[<ast.Constant object at 0x7da20c7c9e10>], [<ast.Name object at 0x7da20c7ca7d0>]]] | keyword[def] identifier[execute] ( identifier[self] , identifier[input_data] ):
literal[string]
identifier[string_output] = identifier[input_data] [ literal[string] ][ literal[string] ]
identifier[flatten] = literal[string] . identifier[join] ( identifier[string_output] )
identifier[urls] = identifier[self] . identifier[url_match] . identifier[findall] ( identifier[flatten] )
keyword[return] { literal[string] : identifier[urls] } | def execute(self, input_data):
""" Execute the URL worker """
string_output = input_data['strings']['string_list']
flatten = ' '.join(string_output)
urls = self.url_match.findall(flatten)
return {'url_list': urls} |
def panes(self):
" List with all panes from this Window. "
result = []
for s in self.splits:
for item in s:
if isinstance(item, Pane):
result.append(item)
return result | def function[panes, parameter[self]]:
constant[ List with all panes from this Window. ]
variable[result] assign[=] list[[]]
for taget[name[s]] in starred[name[self].splits] begin[:]
for taget[name[item]] in starred[name[s]] begin[:]
if call[name[isinstance], parameter[name[item], name[Pane]]] begin[:]
call[name[result].append, parameter[name[item]]]
return[name[result]] | keyword[def] identifier[panes] ( identifier[self] ):
literal[string]
identifier[result] =[]
keyword[for] identifier[s] keyword[in] identifier[self] . identifier[splits] :
keyword[for] identifier[item] keyword[in] identifier[s] :
keyword[if] identifier[isinstance] ( identifier[item] , identifier[Pane] ):
identifier[result] . identifier[append] ( identifier[item] )
keyword[return] identifier[result] | def panes(self):
""" List with all panes from this Window. """
result = []
for s in self.splits:
for item in s:
if isinstance(item, Pane):
result.append(item) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['item']] # depends on [control=['for'], data=['s']]
return result |
def get_alternative_nested_val(key_tuple, dict_obj):
"""Return a value from nested dicts by any path in the given keys tuple.
Parameters
---------
key_tuple : tuple
Describe all possible paths for extraction.
dict_obj : dict
The outer-most dict to extract from.
Returns
-------
value : object
The extracted value, if exists. Otherwise, raises KeyError.
Example:
--------
>>> dict_obj = {'a': {'b': 7}}
>>> get_alternative_nested_val(('a', ('b', 'c')), dict_obj)
7
"""
# print('key_tuple: {}'.format(key_tuple))
# print('dict_obj: {}'.format(dict_obj))
top_keys = key_tuple[0] if isinstance(key_tuple[0], (list, tuple)) else [
key_tuple[0]]
for key in top_keys:
try:
if len(key_tuple) < 2:
return dict_obj[key]
return get_alternative_nested_val(key_tuple[1:], dict_obj[key])
except (KeyError, TypeError, IndexError):
pass
raise KeyError | def function[get_alternative_nested_val, parameter[key_tuple, dict_obj]]:
constant[Return a value from nested dicts by any path in the given keys tuple.
Parameters
---------
key_tuple : tuple
Describe all possible paths for extraction.
dict_obj : dict
The outer-most dict to extract from.
Returns
-------
value : object
The extracted value, if exists. Otherwise, raises KeyError.
Example:
--------
>>> dict_obj = {'a': {'b': 7}}
>>> get_alternative_nested_val(('a', ('b', 'c')), dict_obj)
7
]
variable[top_keys] assign[=] <ast.IfExp object at 0x7da18ede4160>
for taget[name[key]] in starred[name[top_keys]] begin[:]
<ast.Try object at 0x7da18ede78e0>
<ast.Raise object at 0x7da18ede4c70> | keyword[def] identifier[get_alternative_nested_val] ( identifier[key_tuple] , identifier[dict_obj] ):
literal[string]
identifier[top_keys] = identifier[key_tuple] [ literal[int] ] keyword[if] identifier[isinstance] ( identifier[key_tuple] [ literal[int] ],( identifier[list] , identifier[tuple] )) keyword[else] [
identifier[key_tuple] [ literal[int] ]]
keyword[for] identifier[key] keyword[in] identifier[top_keys] :
keyword[try] :
keyword[if] identifier[len] ( identifier[key_tuple] )< literal[int] :
keyword[return] identifier[dict_obj] [ identifier[key] ]
keyword[return] identifier[get_alternative_nested_val] ( identifier[key_tuple] [ literal[int] :], identifier[dict_obj] [ identifier[key] ])
keyword[except] ( identifier[KeyError] , identifier[TypeError] , identifier[IndexError] ):
keyword[pass]
keyword[raise] identifier[KeyError] | def get_alternative_nested_val(key_tuple, dict_obj):
"""Return a value from nested dicts by any path in the given keys tuple.
Parameters
---------
key_tuple : tuple
Describe all possible paths for extraction.
dict_obj : dict
The outer-most dict to extract from.
Returns
-------
value : object
The extracted value, if exists. Otherwise, raises KeyError.
Example:
--------
>>> dict_obj = {'a': {'b': 7}}
>>> get_alternative_nested_val(('a', ('b', 'c')), dict_obj)
7
"""
# print('key_tuple: {}'.format(key_tuple))
# print('dict_obj: {}'.format(dict_obj))
top_keys = key_tuple[0] if isinstance(key_tuple[0], (list, tuple)) else [key_tuple[0]]
for key in top_keys:
try:
if len(key_tuple) < 2:
return dict_obj[key] # depends on [control=['if'], data=[]]
return get_alternative_nested_val(key_tuple[1:], dict_obj[key]) # depends on [control=['try'], data=[]]
except (KeyError, TypeError, IndexError):
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['key']]
raise KeyError |
def make_model(self):
"""Assemble the graph from the assembler's list of INDRA Statements."""
# Assemble in two stages.
# First, create the nodes of the graph
for stmt in self.statements:
# Skip SelfModification (self loops) -- has one node
if isinstance(stmt, SelfModification) or \
isinstance(stmt, Translocation) or \
isinstance(stmt, ActiveForm):
continue
# Special handling for Associations -- more than 1 node and members
# are Events
elif isinstance(stmt, Association):
for m in stmt.members:
self._add_node(m.concept)
# Special handling for Complexes -- more than 1 node
elif isinstance(stmt, Complex):
for m in stmt.members:
self._add_node(m)
# All else should have exactly 2 nodes
elif all([ag is not None for ag in stmt.agent_list()]):
if not len(stmt.agent_list()) == 2:
logger.warning(
'%s has less/more than the expected 2 agents.' % stmt)
continue
for ag in stmt.agent_list():
self._add_node(ag)
# Second, create the edges of the graph
for stmt in self.statements:
# Skip SelfModification (self loops) -- has one node
if isinstance(stmt, SelfModification) or \
isinstance(stmt, Translocation) or \
isinstance(stmt, ActiveForm):
continue
elif isinstance(stmt, Association):
self._add_complex(stmt.members, is_association=True)
elif isinstance(stmt, Complex):
self._add_complex(stmt.members)
elif all([ag is not None for ag in stmt.agent_list()]):
self._add_stmt_edge(stmt) | def function[make_model, parameter[self]]:
constant[Assemble the graph from the assembler's list of INDRA Statements.]
for taget[name[stmt]] in starred[name[self].statements] begin[:]
if <ast.BoolOp object at 0x7da18bcc9fc0> begin[:]
continue
for taget[name[stmt]] in starred[name[self].statements] begin[:]
if <ast.BoolOp object at 0x7da18bccb070> begin[:]
continue | keyword[def] identifier[make_model] ( identifier[self] ):
literal[string]
keyword[for] identifier[stmt] keyword[in] identifier[self] . identifier[statements] :
keyword[if] identifier[isinstance] ( identifier[stmt] , identifier[SelfModification] ) keyword[or] identifier[isinstance] ( identifier[stmt] , identifier[Translocation] ) keyword[or] identifier[isinstance] ( identifier[stmt] , identifier[ActiveForm] ):
keyword[continue]
keyword[elif] identifier[isinstance] ( identifier[stmt] , identifier[Association] ):
keyword[for] identifier[m] keyword[in] identifier[stmt] . identifier[members] :
identifier[self] . identifier[_add_node] ( identifier[m] . identifier[concept] )
keyword[elif] identifier[isinstance] ( identifier[stmt] , identifier[Complex] ):
keyword[for] identifier[m] keyword[in] identifier[stmt] . identifier[members] :
identifier[self] . identifier[_add_node] ( identifier[m] )
keyword[elif] identifier[all] ([ identifier[ag] keyword[is] keyword[not] keyword[None] keyword[for] identifier[ag] keyword[in] identifier[stmt] . identifier[agent_list] ()]):
keyword[if] keyword[not] identifier[len] ( identifier[stmt] . identifier[agent_list] ())== literal[int] :
identifier[logger] . identifier[warning] (
literal[string] % identifier[stmt] )
keyword[continue]
keyword[for] identifier[ag] keyword[in] identifier[stmt] . identifier[agent_list] ():
identifier[self] . identifier[_add_node] ( identifier[ag] )
keyword[for] identifier[stmt] keyword[in] identifier[self] . identifier[statements] :
keyword[if] identifier[isinstance] ( identifier[stmt] , identifier[SelfModification] ) keyword[or] identifier[isinstance] ( identifier[stmt] , identifier[Translocation] ) keyword[or] identifier[isinstance] ( identifier[stmt] , identifier[ActiveForm] ):
keyword[continue]
keyword[elif] identifier[isinstance] ( identifier[stmt] , identifier[Association] ):
identifier[self] . identifier[_add_complex] ( identifier[stmt] . identifier[members] , identifier[is_association] = keyword[True] )
keyword[elif] identifier[isinstance] ( identifier[stmt] , identifier[Complex] ):
identifier[self] . identifier[_add_complex] ( identifier[stmt] . identifier[members] )
keyword[elif] identifier[all] ([ identifier[ag] keyword[is] keyword[not] keyword[None] keyword[for] identifier[ag] keyword[in] identifier[stmt] . identifier[agent_list] ()]):
identifier[self] . identifier[_add_stmt_edge] ( identifier[stmt] ) | def make_model(self):
"""Assemble the graph from the assembler's list of INDRA Statements."""
# Assemble in two stages.
# First, create the nodes of the graph
for stmt in self.statements:
# Skip SelfModification (self loops) -- has one node
if isinstance(stmt, SelfModification) or isinstance(stmt, Translocation) or isinstance(stmt, ActiveForm):
continue # depends on [control=['if'], data=[]]
# Special handling for Associations -- more than 1 node and members
# are Events
elif isinstance(stmt, Association):
for m in stmt.members:
self._add_node(m.concept) # depends on [control=['for'], data=['m']] # depends on [control=['if'], data=[]]
# Special handling for Complexes -- more than 1 node
elif isinstance(stmt, Complex):
for m in stmt.members:
self._add_node(m) # depends on [control=['for'], data=['m']] # depends on [control=['if'], data=[]]
# All else should have exactly 2 nodes
elif all([ag is not None for ag in stmt.agent_list()]):
if not len(stmt.agent_list()) == 2:
logger.warning('%s has less/more than the expected 2 agents.' % stmt)
continue # depends on [control=['if'], data=[]]
for ag in stmt.agent_list():
self._add_node(ag) # depends on [control=['for'], data=['ag']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['stmt']]
# Second, create the edges of the graph
for stmt in self.statements:
# Skip SelfModification (self loops) -- has one node
if isinstance(stmt, SelfModification) or isinstance(stmt, Translocation) or isinstance(stmt, ActiveForm):
continue # depends on [control=['if'], data=[]]
elif isinstance(stmt, Association):
self._add_complex(stmt.members, is_association=True) # depends on [control=['if'], data=[]]
elif isinstance(stmt, Complex):
self._add_complex(stmt.members) # depends on [control=['if'], data=[]]
elif all([ag is not None for ag in stmt.agent_list()]):
self._add_stmt_edge(stmt) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['stmt']] |
def GetLinkedFileEntry(self):
"""Retrieves the linked file entry, e.g. for a symbolic link.
Returns:
APFSFileEntry: linked file entry or None if not available.
"""
link = self._GetLink()
if not link:
return None
# TODO: is there a way to determine the identifier here?
link_identifier = None
parent_path_spec = getattr(self.path_spec, 'parent', None)
path_spec = apfs_path_spec.APFSPathSpec(
location=link, parent=parent_path_spec)
is_root = bool(
link == self._file_system.LOCATION_ROOT or
link_identifier == self._file_system.ROOT_DIRECTORY_IDENTIFIER)
return APFSFileEntry(
self._resolver_context, self._file_system, path_spec, is_root=is_root) | def function[GetLinkedFileEntry, parameter[self]]:
constant[Retrieves the linked file entry, e.g. for a symbolic link.
Returns:
APFSFileEntry: linked file entry or None if not available.
]
variable[link] assign[=] call[name[self]._GetLink, parameter[]]
if <ast.UnaryOp object at 0x7da1b0678e50> begin[:]
return[constant[None]]
variable[link_identifier] assign[=] constant[None]
variable[parent_path_spec] assign[=] call[name[getattr], parameter[name[self].path_spec, constant[parent], constant[None]]]
variable[path_spec] assign[=] call[name[apfs_path_spec].APFSPathSpec, parameter[]]
variable[is_root] assign[=] call[name[bool], parameter[<ast.BoolOp object at 0x7da1b0678fd0>]]
return[call[name[APFSFileEntry], parameter[name[self]._resolver_context, name[self]._file_system, name[path_spec]]]] | keyword[def] identifier[GetLinkedFileEntry] ( identifier[self] ):
literal[string]
identifier[link] = identifier[self] . identifier[_GetLink] ()
keyword[if] keyword[not] identifier[link] :
keyword[return] keyword[None]
identifier[link_identifier] = keyword[None]
identifier[parent_path_spec] = identifier[getattr] ( identifier[self] . identifier[path_spec] , literal[string] , keyword[None] )
identifier[path_spec] = identifier[apfs_path_spec] . identifier[APFSPathSpec] (
identifier[location] = identifier[link] , identifier[parent] = identifier[parent_path_spec] )
identifier[is_root] = identifier[bool] (
identifier[link] == identifier[self] . identifier[_file_system] . identifier[LOCATION_ROOT] keyword[or]
identifier[link_identifier] == identifier[self] . identifier[_file_system] . identifier[ROOT_DIRECTORY_IDENTIFIER] )
keyword[return] identifier[APFSFileEntry] (
identifier[self] . identifier[_resolver_context] , identifier[self] . identifier[_file_system] , identifier[path_spec] , identifier[is_root] = identifier[is_root] ) | def GetLinkedFileEntry(self):
"""Retrieves the linked file entry, e.g. for a symbolic link.
Returns:
APFSFileEntry: linked file entry or None if not available.
"""
link = self._GetLink()
if not link:
return None # depends on [control=['if'], data=[]]
# TODO: is there a way to determine the identifier here?
link_identifier = None
parent_path_spec = getattr(self.path_spec, 'parent', None)
path_spec = apfs_path_spec.APFSPathSpec(location=link, parent=parent_path_spec)
is_root = bool(link == self._file_system.LOCATION_ROOT or link_identifier == self._file_system.ROOT_DIRECTORY_IDENTIFIER)
return APFSFileEntry(self._resolver_context, self._file_system, path_spec, is_root=is_root) |
def get_summary(result):
""" get summary from test result
Args:
result (instance): HtmlTestResult() instance
Returns:
dict: summary extracted from result.
{
"success": True,
"stat": {},
"time": {},
"records": []
}
"""
summary = {
"success": result.wasSuccessful(),
"stat": {
'total': result.testsRun,
'failures': len(result.failures),
'errors': len(result.errors),
'skipped': len(result.skipped),
'expectedFailures': len(result.expectedFailures),
'unexpectedSuccesses': len(result.unexpectedSuccesses)
}
}
summary["stat"]["successes"] = summary["stat"]["total"] \
- summary["stat"]["failures"] \
- summary["stat"]["errors"] \
- summary["stat"]["skipped"] \
- summary["stat"]["expectedFailures"] \
- summary["stat"]["unexpectedSuccesses"]
summary["time"] = {
'start_at': result.start_at,
'duration': result.duration
}
summary["records"] = result.records
return summary | def function[get_summary, parameter[result]]:
constant[ get summary from test result
Args:
result (instance): HtmlTestResult() instance
Returns:
dict: summary extracted from result.
{
"success": True,
"stat": {},
"time": {},
"records": []
}
]
variable[summary] assign[=] dictionary[[<ast.Constant object at 0x7da18ede5840>, <ast.Constant object at 0x7da18ede55d0>], [<ast.Call object at 0x7da18ede4b80>, <ast.Dict object at 0x7da18ede46a0>]]
call[call[name[summary]][constant[stat]]][constant[successes]] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[call[call[name[summary]][constant[stat]]][constant[total]] - call[call[name[summary]][constant[stat]]][constant[failures]]] - call[call[name[summary]][constant[stat]]][constant[errors]]] - call[call[name[summary]][constant[stat]]][constant[skipped]]] - call[call[name[summary]][constant[stat]]][constant[expectedFailures]]] - call[call[name[summary]][constant[stat]]][constant[unexpectedSuccesses]]]
call[name[summary]][constant[time]] assign[=] dictionary[[<ast.Constant object at 0x7da2044c2c80>, <ast.Constant object at 0x7da2044c1e70>], [<ast.Attribute object at 0x7da2044c0dc0>, <ast.Attribute object at 0x7da2044c2da0>]]
call[name[summary]][constant[records]] assign[=] name[result].records
return[name[summary]] | keyword[def] identifier[get_summary] ( identifier[result] ):
literal[string]
identifier[summary] ={
literal[string] : identifier[result] . identifier[wasSuccessful] (),
literal[string] :{
literal[string] : identifier[result] . identifier[testsRun] ,
literal[string] : identifier[len] ( identifier[result] . identifier[failures] ),
literal[string] : identifier[len] ( identifier[result] . identifier[errors] ),
literal[string] : identifier[len] ( identifier[result] . identifier[skipped] ),
literal[string] : identifier[len] ( identifier[result] . identifier[expectedFailures] ),
literal[string] : identifier[len] ( identifier[result] . identifier[unexpectedSuccesses] )
}
}
identifier[summary] [ literal[string] ][ literal[string] ]= identifier[summary] [ literal[string] ][ literal[string] ]- identifier[summary] [ literal[string] ][ literal[string] ]- identifier[summary] [ literal[string] ][ literal[string] ]- identifier[summary] [ literal[string] ][ literal[string] ]- identifier[summary] [ literal[string] ][ literal[string] ]- identifier[summary] [ literal[string] ][ literal[string] ]
identifier[summary] [ literal[string] ]={
literal[string] : identifier[result] . identifier[start_at] ,
literal[string] : identifier[result] . identifier[duration]
}
identifier[summary] [ literal[string] ]= identifier[result] . identifier[records]
keyword[return] identifier[summary] | def get_summary(result):
""" get summary from test result
Args:
result (instance): HtmlTestResult() instance
Returns:
dict: summary extracted from result.
{
"success": True,
"stat": {},
"time": {},
"records": []
}
"""
summary = {'success': result.wasSuccessful(), 'stat': {'total': result.testsRun, 'failures': len(result.failures), 'errors': len(result.errors), 'skipped': len(result.skipped), 'expectedFailures': len(result.expectedFailures), 'unexpectedSuccesses': len(result.unexpectedSuccesses)}}
summary['stat']['successes'] = summary['stat']['total'] - summary['stat']['failures'] - summary['stat']['errors'] - summary['stat']['skipped'] - summary['stat']['expectedFailures'] - summary['stat']['unexpectedSuccesses']
summary['time'] = {'start_at': result.start_at, 'duration': result.duration}
summary['records'] = result.records
return summary |
def describe(self, name=str(), **options):
""" Returns the **metadata** of the `Structure` as an
:class:`ordered dictionary <collections.OrderedDict>`.
.. code-block:: python
metadata = {
'class': self.__class__.__name__,
'name': name if name else self.__class__.__name__,
'size': len(self),
'type': Structure.item_type.name
'member': [
item.describe(member) for member, item in self.items()
]
}
:param str name: optional name for the `Structure`.
Fallback is the class name.
:keyword bool nested: if ``True`` all :class:`Pointer` fields of the
`Structure` lists their referenced :attr:`~Pointer.data` object fields
as well (chained method call). Default is ``True``.
"""
members = list()
metadata = OrderedDict()
metadata['class'] = self.__class__.__name__
metadata['name'] = name if name else self.__class__.__name__
metadata['size'] = len(self)
metadata['type'] = self.item_type.name
metadata['member'] = members
for member_name, item in self.items():
# Container
if is_container(item):
members.append(item.describe(member_name, **options))
# Pointer
elif is_pointer(item) and get_nested(options):
members.append(item.describe(member_name, **options))
# Field
elif is_field(item):
members.append(item.describe(member_name, nested=False))
else:
raise MemberTypeError(self, item, member_name)
return metadata | def function[describe, parameter[self, name]]:
constant[ Returns the **metadata** of the `Structure` as an
:class:`ordered dictionary <collections.OrderedDict>`.
.. code-block:: python
metadata = {
'class': self.__class__.__name__,
'name': name if name else self.__class__.__name__,
'size': len(self),
'type': Structure.item_type.name
'member': [
item.describe(member) for member, item in self.items()
]
}
:param str name: optional name for the `Structure`.
Fallback is the class name.
:keyword bool nested: if ``True`` all :class:`Pointer` fields of the
`Structure` lists their referenced :attr:`~Pointer.data` object fields
as well (chained method call). Default is ``True``.
]
variable[members] assign[=] call[name[list], parameter[]]
variable[metadata] assign[=] call[name[OrderedDict], parameter[]]
call[name[metadata]][constant[class]] assign[=] name[self].__class__.__name__
call[name[metadata]][constant[name]] assign[=] <ast.IfExp object at 0x7da1b229ac80>
call[name[metadata]][constant[size]] assign[=] call[name[len], parameter[name[self]]]
call[name[metadata]][constant[type]] assign[=] name[self].item_type.name
call[name[metadata]][constant[member]] assign[=] name[members]
for taget[tuple[[<ast.Name object at 0x7da1b22980a0>, <ast.Name object at 0x7da1b229a9e0>]]] in starred[call[name[self].items, parameter[]]] begin[:]
if call[name[is_container], parameter[name[item]]] begin[:]
call[name[members].append, parameter[call[name[item].describe, parameter[name[member_name]]]]]
return[name[metadata]] | keyword[def] identifier[describe] ( identifier[self] , identifier[name] = identifier[str] (),** identifier[options] ):
literal[string]
identifier[members] = identifier[list] ()
identifier[metadata] = identifier[OrderedDict] ()
identifier[metadata] [ literal[string] ]= identifier[self] . identifier[__class__] . identifier[__name__]
identifier[metadata] [ literal[string] ]= identifier[name] keyword[if] identifier[name] keyword[else] identifier[self] . identifier[__class__] . identifier[__name__]
identifier[metadata] [ literal[string] ]= identifier[len] ( identifier[self] )
identifier[metadata] [ literal[string] ]= identifier[self] . identifier[item_type] . identifier[name]
identifier[metadata] [ literal[string] ]= identifier[members]
keyword[for] identifier[member_name] , identifier[item] keyword[in] identifier[self] . identifier[items] ():
keyword[if] identifier[is_container] ( identifier[item] ):
identifier[members] . identifier[append] ( identifier[item] . identifier[describe] ( identifier[member_name] ,** identifier[options] ))
keyword[elif] identifier[is_pointer] ( identifier[item] ) keyword[and] identifier[get_nested] ( identifier[options] ):
identifier[members] . identifier[append] ( identifier[item] . identifier[describe] ( identifier[member_name] ,** identifier[options] ))
keyword[elif] identifier[is_field] ( identifier[item] ):
identifier[members] . identifier[append] ( identifier[item] . identifier[describe] ( identifier[member_name] , identifier[nested] = keyword[False] ))
keyword[else] :
keyword[raise] identifier[MemberTypeError] ( identifier[self] , identifier[item] , identifier[member_name] )
keyword[return] identifier[metadata] | def describe(self, name=str(), **options):
""" Returns the **metadata** of the `Structure` as an
:class:`ordered dictionary <collections.OrderedDict>`.
.. code-block:: python
metadata = {
'class': self.__class__.__name__,
'name': name if name else self.__class__.__name__,
'size': len(self),
'type': Structure.item_type.name
'member': [
item.describe(member) for member, item in self.items()
]
}
:param str name: optional name for the `Structure`.
Fallback is the class name.
:keyword bool nested: if ``True`` all :class:`Pointer` fields of the
`Structure` lists their referenced :attr:`~Pointer.data` object fields
as well (chained method call). Default is ``True``.
"""
members = list()
metadata = OrderedDict()
metadata['class'] = self.__class__.__name__
metadata['name'] = name if name else self.__class__.__name__
metadata['size'] = len(self)
metadata['type'] = self.item_type.name
metadata['member'] = members
for (member_name, item) in self.items():
# Container
if is_container(item):
members.append(item.describe(member_name, **options)) # depends on [control=['if'], data=[]]
# Pointer
elif is_pointer(item) and get_nested(options):
members.append(item.describe(member_name, **options)) # depends on [control=['if'], data=[]]
# Field
elif is_field(item):
members.append(item.describe(member_name, nested=False)) # depends on [control=['if'], data=[]]
else:
raise MemberTypeError(self, item, member_name) # depends on [control=['for'], data=[]]
return metadata |
def find_max_rad_npnp(self):
"""Finds the maximum radius and npnp in the force field.
Returns
-------
(max_rad, max_npnp): (float, float)
Maximum radius and npnp distance in the loaded force field.
"""
max_rad = 0
max_npnp = 0
for res, _ in self.items():
if res != 'KEY':
for _, ff_params in self[res].items():
if max_rad < ff_params[1]:
max_rad = ff_params[1]
if max_npnp < ff_params[4]:
max_npnp = ff_params[4]
return max_rad, max_npnp | def function[find_max_rad_npnp, parameter[self]]:
constant[Finds the maximum radius and npnp in the force field.
Returns
-------
(max_rad, max_npnp): (float, float)
Maximum radius and npnp distance in the loaded force field.
]
variable[max_rad] assign[=] constant[0]
variable[max_npnp] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da1b01a6920>, <ast.Name object at 0x7da1b01a5960>]]] in starred[call[name[self].items, parameter[]]] begin[:]
if compare[name[res] not_equal[!=] constant[KEY]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b01a4400>, <ast.Name object at 0x7da1b01a48b0>]]] in starred[call[call[name[self]][name[res]].items, parameter[]]] begin[:]
if compare[name[max_rad] less[<] call[name[ff_params]][constant[1]]] begin[:]
variable[max_rad] assign[=] call[name[ff_params]][constant[1]]
if compare[name[max_npnp] less[<] call[name[ff_params]][constant[4]]] begin[:]
variable[max_npnp] assign[=] call[name[ff_params]][constant[4]]
return[tuple[[<ast.Name object at 0x7da2054a5a20>, <ast.Name object at 0x7da2054a6500>]]] | keyword[def] identifier[find_max_rad_npnp] ( identifier[self] ):
literal[string]
identifier[max_rad] = literal[int]
identifier[max_npnp] = literal[int]
keyword[for] identifier[res] , identifier[_] keyword[in] identifier[self] . identifier[items] ():
keyword[if] identifier[res] != literal[string] :
keyword[for] identifier[_] , identifier[ff_params] keyword[in] identifier[self] [ identifier[res] ]. identifier[items] ():
keyword[if] identifier[max_rad] < identifier[ff_params] [ literal[int] ]:
identifier[max_rad] = identifier[ff_params] [ literal[int] ]
keyword[if] identifier[max_npnp] < identifier[ff_params] [ literal[int] ]:
identifier[max_npnp] = identifier[ff_params] [ literal[int] ]
keyword[return] identifier[max_rad] , identifier[max_npnp] | def find_max_rad_npnp(self):
"""Finds the maximum radius and npnp in the force field.
Returns
-------
(max_rad, max_npnp): (float, float)
Maximum radius and npnp distance in the loaded force field.
"""
max_rad = 0
max_npnp = 0
for (res, _) in self.items():
if res != 'KEY':
for (_, ff_params) in self[res].items():
if max_rad < ff_params[1]:
max_rad = ff_params[1] # depends on [control=['if'], data=['max_rad']]
if max_npnp < ff_params[4]:
max_npnp = ff_params[4] # depends on [control=['if'], data=['max_npnp']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=['res']] # depends on [control=['for'], data=[]]
return (max_rad, max_npnp) |
def get_orm_classes_by_table_name_from_base(base: Type) -> Dict[str, Type]:
"""
Given an SQLAlchemy ORM base class, returns a dictionary whose keys are
table names and whose values are ORM classes.
If you begin with the proper :class`Base` class, then this should give all
tables and ORM classes in use.
"""
# noinspection PyUnresolvedReferences
return {cls.__tablename__: cls for cls in gen_orm_classes_from_base(base)} | def function[get_orm_classes_by_table_name_from_base, parameter[base]]:
constant[
Given an SQLAlchemy ORM base class, returns a dictionary whose keys are
table names and whose values are ORM classes.
If you begin with the proper :class`Base` class, then this should give all
tables and ORM classes in use.
]
return[<ast.DictComp object at 0x7da1b18482b0>] | keyword[def] identifier[get_orm_classes_by_table_name_from_base] ( identifier[base] : identifier[Type] )-> identifier[Dict] [ identifier[str] , identifier[Type] ]:
literal[string]
keyword[return] { identifier[cls] . identifier[__tablename__] : identifier[cls] keyword[for] identifier[cls] keyword[in] identifier[gen_orm_classes_from_base] ( identifier[base] )} | def get_orm_classes_by_table_name_from_base(base: Type) -> Dict[str, Type]:
"""
Given an SQLAlchemy ORM base class, returns a dictionary whose keys are
table names and whose values are ORM classes.
If you begin with the proper :class`Base` class, then this should give all
tables and ORM classes in use.
"""
# noinspection PyUnresolvedReferences
return {cls.__tablename__: cls for cls in gen_orm_classes_from_base(base)} |
def run(self, pcap):
"""
Runs snort against the supplied pcap.
:param pcap: Filepath to pcap file to scan
:returns: tuple of version, list of alerts
"""
proc = Popen(self._snort_cmd(pcap), stdout=PIPE,
stderr=PIPE, universal_newlines=True)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
raise Exception("\n".join(["Execution failed return code: {0}" \
.format(proc.returncode), stderr or ""]))
return (parse_version(stderr),
[ x for x in parse_alert(stdout) ]) | def function[run, parameter[self, pcap]]:
constant[
Runs snort against the supplied pcap.
:param pcap: Filepath to pcap file to scan
:returns: tuple of version, list of alerts
]
variable[proc] assign[=] call[name[Popen], parameter[call[name[self]._snort_cmd, parameter[name[pcap]]]]]
<ast.Tuple object at 0x7da18bcc9750> assign[=] call[name[proc].communicate, parameter[]]
if compare[name[proc].returncode not_equal[!=] constant[0]] begin[:]
<ast.Raise object at 0x7da18bccb160>
return[tuple[[<ast.Call object at 0x7da20e9b2b00>, <ast.ListComp object at 0x7da20e9b0fd0>]]] | keyword[def] identifier[run] ( identifier[self] , identifier[pcap] ):
literal[string]
identifier[proc] = identifier[Popen] ( identifier[self] . identifier[_snort_cmd] ( identifier[pcap] ), identifier[stdout] = identifier[PIPE] ,
identifier[stderr] = identifier[PIPE] , identifier[universal_newlines] = keyword[True] )
identifier[stdout] , identifier[stderr] = identifier[proc] . identifier[communicate] ()
keyword[if] identifier[proc] . identifier[returncode] != literal[int] :
keyword[raise] identifier[Exception] ( literal[string] . identifier[join] ([ literal[string] . identifier[format] ( identifier[proc] . identifier[returncode] ), identifier[stderr] keyword[or] literal[string] ]))
keyword[return] ( identifier[parse_version] ( identifier[stderr] ),
[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[parse_alert] ( identifier[stdout] )]) | def run(self, pcap):
"""
Runs snort against the supplied pcap.
:param pcap: Filepath to pcap file to scan
:returns: tuple of version, list of alerts
"""
proc = Popen(self._snort_cmd(pcap), stdout=PIPE, stderr=PIPE, universal_newlines=True)
(stdout, stderr) = proc.communicate()
if proc.returncode != 0:
raise Exception('\n'.join(['Execution failed return code: {0}'.format(proc.returncode), stderr or ''])) # depends on [control=['if'], data=[]]
return (parse_version(stderr), [x for x in parse_alert(stdout)]) |
def handle_pubcomp(self):
"""Handle incoming PUBCOMP packet."""
self.logger.info("PUBCOMP received")
ret, mid = self.in_packet.read_uint16()
if ret != NC.ERR_SUCCESS:
return ret
evt = event.EventPubcomp(mid)
self.push_event(evt)
return NC.ERR_SUCCESS | def function[handle_pubcomp, parameter[self]]:
constant[Handle incoming PUBCOMP packet.]
call[name[self].logger.info, parameter[constant[PUBCOMP received]]]
<ast.Tuple object at 0x7da20c7cbc10> assign[=] call[name[self].in_packet.read_uint16, parameter[]]
if compare[name[ret] not_equal[!=] name[NC].ERR_SUCCESS] begin[:]
return[name[ret]]
variable[evt] assign[=] call[name[event].EventPubcomp, parameter[name[mid]]]
call[name[self].push_event, parameter[name[evt]]]
return[name[NC].ERR_SUCCESS] | keyword[def] identifier[handle_pubcomp] ( identifier[self] ):
literal[string]
identifier[self] . identifier[logger] . identifier[info] ( literal[string] )
identifier[ret] , identifier[mid] = identifier[self] . identifier[in_packet] . identifier[read_uint16] ()
keyword[if] identifier[ret] != identifier[NC] . identifier[ERR_SUCCESS] :
keyword[return] identifier[ret]
identifier[evt] = identifier[event] . identifier[EventPubcomp] ( identifier[mid] )
identifier[self] . identifier[push_event] ( identifier[evt] )
keyword[return] identifier[NC] . identifier[ERR_SUCCESS] | def handle_pubcomp(self):
"""Handle incoming PUBCOMP packet."""
self.logger.info('PUBCOMP received')
(ret, mid) = self.in_packet.read_uint16()
if ret != NC.ERR_SUCCESS:
return ret # depends on [control=['if'], data=['ret']]
evt = event.EventPubcomp(mid)
self.push_event(evt)
return NC.ERR_SUCCESS |
def predict_dataflow(df, model_func, tqdm_bar=None):
"""
Args:
df: a DataFlow which produces (image, image_id)
model_func: a callable from the TF model.
It takes image and returns (boxes, probs, labels, [masks])
tqdm_bar: a tqdm object to be shared among multiple evaluation instances. If None,
will create a new one.
Returns:
list of dict, in the format used by
`DetectionDataset.eval_or_save_inference_results`
"""
df.reset_state()
all_results = []
with ExitStack() as stack:
# tqdm is not quite thread-safe: https://github.com/tqdm/tqdm/issues/323
if tqdm_bar is None:
tqdm_bar = stack.enter_context(get_tqdm(total=df.size()))
for img, img_id in df:
results = predict_image(img, model_func)
for r in results:
# int()/float() to make it json-serializable
res = {
'image_id': img_id,
'category_id': int(r.class_id),
'bbox': [round(float(x), 4) for x in r.box],
'score': round(float(r.score), 4),
}
# also append segmentation to results
if r.mask is not None:
rle = cocomask.encode(
np.array(r.mask[:, :, None], order='F'))[0]
rle['counts'] = rle['counts'].decode('ascii')
res['segmentation'] = rle
all_results.append(res)
tqdm_bar.update(1)
return all_results | def function[predict_dataflow, parameter[df, model_func, tqdm_bar]]:
constant[
Args:
df: a DataFlow which produces (image, image_id)
model_func: a callable from the TF model.
It takes image and returns (boxes, probs, labels, [masks])
tqdm_bar: a tqdm object to be shared among multiple evaluation instances. If None,
will create a new one.
Returns:
list of dict, in the format used by
`DetectionDataset.eval_or_save_inference_results`
]
call[name[df].reset_state, parameter[]]
variable[all_results] assign[=] list[[]]
with call[name[ExitStack], parameter[]] begin[:]
if compare[name[tqdm_bar] is constant[None]] begin[:]
variable[tqdm_bar] assign[=] call[name[stack].enter_context, parameter[call[name[get_tqdm], parameter[]]]]
for taget[tuple[[<ast.Name object at 0x7da2054a71c0>, <ast.Name object at 0x7da2054a6c20>]]] in starred[name[df]] begin[:]
variable[results] assign[=] call[name[predict_image], parameter[name[img], name[model_func]]]
for taget[name[r]] in starred[name[results]] begin[:]
variable[res] assign[=] dictionary[[<ast.Constant object at 0x7da2054a6050>, <ast.Constant object at 0x7da2054a5090>, <ast.Constant object at 0x7da2054a69b0>, <ast.Constant object at 0x7da2054a4af0>], [<ast.Name object at 0x7da2054a59f0>, <ast.Call object at 0x7da2054a4610>, <ast.ListComp object at 0x7da2054a44c0>, <ast.Call object at 0x7da207f00250>]]
if compare[name[r].mask is_not constant[None]] begin[:]
variable[rle] assign[=] call[call[name[cocomask].encode, parameter[call[name[np].array, parameter[call[name[r].mask][tuple[[<ast.Slice object at 0x7da2041d9fc0>, <ast.Slice object at 0x7da2041d98d0>, <ast.Constant object at 0x7da2041d9780>]]]]]]]][constant[0]]
call[name[rle]][constant[counts]] assign[=] call[call[name[rle]][constant[counts]].decode, parameter[constant[ascii]]]
call[name[res]][constant[segmentation]] assign[=] name[rle]
call[name[all_results].append, parameter[name[res]]]
call[name[tqdm_bar].update, parameter[constant[1]]]
return[name[all_results]] | keyword[def] identifier[predict_dataflow] ( identifier[df] , identifier[model_func] , identifier[tqdm_bar] = keyword[None] ):
literal[string]
identifier[df] . identifier[reset_state] ()
identifier[all_results] =[]
keyword[with] identifier[ExitStack] () keyword[as] identifier[stack] :
keyword[if] identifier[tqdm_bar] keyword[is] keyword[None] :
identifier[tqdm_bar] = identifier[stack] . identifier[enter_context] ( identifier[get_tqdm] ( identifier[total] = identifier[df] . identifier[size] ()))
keyword[for] identifier[img] , identifier[img_id] keyword[in] identifier[df] :
identifier[results] = identifier[predict_image] ( identifier[img] , identifier[model_func] )
keyword[for] identifier[r] keyword[in] identifier[results] :
identifier[res] ={
literal[string] : identifier[img_id] ,
literal[string] : identifier[int] ( identifier[r] . identifier[class_id] ),
literal[string] :[ identifier[round] ( identifier[float] ( identifier[x] ), literal[int] ) keyword[for] identifier[x] keyword[in] identifier[r] . identifier[box] ],
literal[string] : identifier[round] ( identifier[float] ( identifier[r] . identifier[score] ), literal[int] ),
}
keyword[if] identifier[r] . identifier[mask] keyword[is] keyword[not] keyword[None] :
identifier[rle] = identifier[cocomask] . identifier[encode] (
identifier[np] . identifier[array] ( identifier[r] . identifier[mask] [:,:, keyword[None] ], identifier[order] = literal[string] ))[ literal[int] ]
identifier[rle] [ literal[string] ]= identifier[rle] [ literal[string] ]. identifier[decode] ( literal[string] )
identifier[res] [ literal[string] ]= identifier[rle]
identifier[all_results] . identifier[append] ( identifier[res] )
identifier[tqdm_bar] . identifier[update] ( literal[int] )
keyword[return] identifier[all_results] | def predict_dataflow(df, model_func, tqdm_bar=None):
"""
Args:
df: a DataFlow which produces (image, image_id)
model_func: a callable from the TF model.
It takes image and returns (boxes, probs, labels, [masks])
tqdm_bar: a tqdm object to be shared among multiple evaluation instances. If None,
will create a new one.
Returns:
list of dict, in the format used by
`DetectionDataset.eval_or_save_inference_results`
"""
df.reset_state()
all_results = []
with ExitStack() as stack:
# tqdm is not quite thread-safe: https://github.com/tqdm/tqdm/issues/323
if tqdm_bar is None:
tqdm_bar = stack.enter_context(get_tqdm(total=df.size())) # depends on [control=['if'], data=['tqdm_bar']]
for (img, img_id) in df:
results = predict_image(img, model_func)
for r in results:
# int()/float() to make it json-serializable
res = {'image_id': img_id, 'category_id': int(r.class_id), 'bbox': [round(float(x), 4) for x in r.box], 'score': round(float(r.score), 4)}
# also append segmentation to results
if r.mask is not None:
rle = cocomask.encode(np.array(r.mask[:, :, None], order='F'))[0]
rle['counts'] = rle['counts'].decode('ascii')
res['segmentation'] = rle # depends on [control=['if'], data=[]]
all_results.append(res) # depends on [control=['for'], data=['r']]
tqdm_bar.update(1) # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['stack']]
return all_results |
def simplify(graph):
"""
Simplify the CFG by merging/deleting statement nodes when possible:
If statement B follows statement A and if B has no other predecessor
besides A, then we can merge A and B into a new statement node.
We also remove nodes which do nothing except redirecting the control
flow (nodes which only contains a goto).
"""
redo = True
while redo:
redo = False
node_map = {}
to_update = set()
for node in graph.nodes[:]:
if node.type.is_stmt and node in graph:
sucs = graph.all_sucs(node)
if len(sucs) != 1:
continue
suc = sucs[0]
if len(node.get_ins()) == 0:
if any(pred.type.is_switch
for pred in graph.all_preds(node)):
continue
if node is suc:
continue
node_map[node] = suc
for pred in graph.all_preds(node):
pred.update_attribute_with(node_map)
if node not in graph.sucs(pred):
graph.add_catch_edge(pred, suc)
continue
graph.add_edge(pred, suc)
redo = True
if node is graph.entry:
graph.entry = suc
graph.remove_node(node)
elif (suc.type.is_stmt and len(graph.all_preds(suc)) == 1 and
not (suc in graph.catch_edges) and not (
(node is suc) or (suc is graph.entry))):
ins_to_merge = suc.get_ins()
node.add_ins(ins_to_merge)
for var in suc.var_to_declare:
node.add_variable_declaration(var)
new_suc = graph.sucs(suc)[0]
if new_suc:
graph.add_edge(node, new_suc)
for exception_suc in graph.catch_edges.get(suc, []):
graph.add_catch_edge(node, exception_suc)
redo = True
graph.remove_node(suc)
else:
to_update.add(node)
for node in to_update:
node.update_attribute_with(node_map) | def function[simplify, parameter[graph]]:
constant[
Simplify the CFG by merging/deleting statement nodes when possible:
If statement B follows statement A and if B has no other predecessor
besides A, then we can merge A and B into a new statement node.
We also remove nodes which do nothing except redirecting the control
flow (nodes which only contains a goto).
]
variable[redo] assign[=] constant[True]
while name[redo] begin[:]
variable[redo] assign[=] constant[False]
variable[node_map] assign[=] dictionary[[], []]
variable[to_update] assign[=] call[name[set], parameter[]]
for taget[name[node]] in starred[call[name[graph].nodes][<ast.Slice object at 0x7da2047eac80>]] begin[:]
if <ast.BoolOp object at 0x7da2047e87f0> begin[:]
variable[sucs] assign[=] call[name[graph].all_sucs, parameter[name[node]]]
if compare[call[name[len], parameter[name[sucs]]] not_equal[!=] constant[1]] begin[:]
continue
variable[suc] assign[=] call[name[sucs]][constant[0]]
if compare[call[name[len], parameter[call[name[node].get_ins, parameter[]]]] equal[==] constant[0]] begin[:]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da2047e80a0>]] begin[:]
continue
if compare[name[node] is name[suc]] begin[:]
continue
call[name[node_map]][name[node]] assign[=] name[suc]
for taget[name[pred]] in starred[call[name[graph].all_preds, parameter[name[node]]]] begin[:]
call[name[pred].update_attribute_with, parameter[name[node_map]]]
if compare[name[node] <ast.NotIn object at 0x7da2590d7190> call[name[graph].sucs, parameter[name[pred]]]] begin[:]
call[name[graph].add_catch_edge, parameter[name[pred], name[suc]]]
continue
call[name[graph].add_edge, parameter[name[pred], name[suc]]]
variable[redo] assign[=] constant[True]
if compare[name[node] is name[graph].entry] begin[:]
name[graph].entry assign[=] name[suc]
call[name[graph].remove_node, parameter[name[node]]]
for taget[name[node]] in starred[name[to_update]] begin[:]
call[name[node].update_attribute_with, parameter[name[node_map]]] | keyword[def] identifier[simplify] ( identifier[graph] ):
literal[string]
identifier[redo] = keyword[True]
keyword[while] identifier[redo] :
identifier[redo] = keyword[False]
identifier[node_map] ={}
identifier[to_update] = identifier[set] ()
keyword[for] identifier[node] keyword[in] identifier[graph] . identifier[nodes] [:]:
keyword[if] identifier[node] . identifier[type] . identifier[is_stmt] keyword[and] identifier[node] keyword[in] identifier[graph] :
identifier[sucs] = identifier[graph] . identifier[all_sucs] ( identifier[node] )
keyword[if] identifier[len] ( identifier[sucs] )!= literal[int] :
keyword[continue]
identifier[suc] = identifier[sucs] [ literal[int] ]
keyword[if] identifier[len] ( identifier[node] . identifier[get_ins] ())== literal[int] :
keyword[if] identifier[any] ( identifier[pred] . identifier[type] . identifier[is_switch]
keyword[for] identifier[pred] keyword[in] identifier[graph] . identifier[all_preds] ( identifier[node] )):
keyword[continue]
keyword[if] identifier[node] keyword[is] identifier[suc] :
keyword[continue]
identifier[node_map] [ identifier[node] ]= identifier[suc]
keyword[for] identifier[pred] keyword[in] identifier[graph] . identifier[all_preds] ( identifier[node] ):
identifier[pred] . identifier[update_attribute_with] ( identifier[node_map] )
keyword[if] identifier[node] keyword[not] keyword[in] identifier[graph] . identifier[sucs] ( identifier[pred] ):
identifier[graph] . identifier[add_catch_edge] ( identifier[pred] , identifier[suc] )
keyword[continue]
identifier[graph] . identifier[add_edge] ( identifier[pred] , identifier[suc] )
identifier[redo] = keyword[True]
keyword[if] identifier[node] keyword[is] identifier[graph] . identifier[entry] :
identifier[graph] . identifier[entry] = identifier[suc]
identifier[graph] . identifier[remove_node] ( identifier[node] )
keyword[elif] ( identifier[suc] . identifier[type] . identifier[is_stmt] keyword[and] identifier[len] ( identifier[graph] . identifier[all_preds] ( identifier[suc] ))== literal[int] keyword[and]
keyword[not] ( identifier[suc] keyword[in] identifier[graph] . identifier[catch_edges] ) keyword[and] keyword[not] (
( identifier[node] keyword[is] identifier[suc] ) keyword[or] ( identifier[suc] keyword[is] identifier[graph] . identifier[entry] ))):
identifier[ins_to_merge] = identifier[suc] . identifier[get_ins] ()
identifier[node] . identifier[add_ins] ( identifier[ins_to_merge] )
keyword[for] identifier[var] keyword[in] identifier[suc] . identifier[var_to_declare] :
identifier[node] . identifier[add_variable_declaration] ( identifier[var] )
identifier[new_suc] = identifier[graph] . identifier[sucs] ( identifier[suc] )[ literal[int] ]
keyword[if] identifier[new_suc] :
identifier[graph] . identifier[add_edge] ( identifier[node] , identifier[new_suc] )
keyword[for] identifier[exception_suc] keyword[in] identifier[graph] . identifier[catch_edges] . identifier[get] ( identifier[suc] ,[]):
identifier[graph] . identifier[add_catch_edge] ( identifier[node] , identifier[exception_suc] )
identifier[redo] = keyword[True]
identifier[graph] . identifier[remove_node] ( identifier[suc] )
keyword[else] :
identifier[to_update] . identifier[add] ( identifier[node] )
keyword[for] identifier[node] keyword[in] identifier[to_update] :
identifier[node] . identifier[update_attribute_with] ( identifier[node_map] ) | def simplify(graph):
"""
Simplify the CFG by merging/deleting statement nodes when possible:
If statement B follows statement A and if B has no other predecessor
besides A, then we can merge A and B into a new statement node.
We also remove nodes which do nothing except redirecting the control
flow (nodes which only contains a goto).
"""
redo = True
while redo:
redo = False
node_map = {}
to_update = set()
for node in graph.nodes[:]:
if node.type.is_stmt and node in graph:
sucs = graph.all_sucs(node)
if len(sucs) != 1:
continue # depends on [control=['if'], data=[]]
suc = sucs[0]
if len(node.get_ins()) == 0:
if any((pred.type.is_switch for pred in graph.all_preds(node))):
continue # depends on [control=['if'], data=[]]
if node is suc:
continue # depends on [control=['if'], data=[]]
node_map[node] = suc
for pred in graph.all_preds(node):
pred.update_attribute_with(node_map)
if node not in graph.sucs(pred):
graph.add_catch_edge(pred, suc)
continue # depends on [control=['if'], data=[]]
graph.add_edge(pred, suc) # depends on [control=['for'], data=['pred']]
redo = True
if node is graph.entry:
graph.entry = suc # depends on [control=['if'], data=[]]
graph.remove_node(node) # depends on [control=['if'], data=[]]
elif suc.type.is_stmt and len(graph.all_preds(suc)) == 1 and (not suc in graph.catch_edges) and (not (node is suc or suc is graph.entry)):
ins_to_merge = suc.get_ins()
node.add_ins(ins_to_merge)
for var in suc.var_to_declare:
node.add_variable_declaration(var) # depends on [control=['for'], data=['var']]
new_suc = graph.sucs(suc)[0]
if new_suc:
graph.add_edge(node, new_suc) # depends on [control=['if'], data=[]]
for exception_suc in graph.catch_edges.get(suc, []):
graph.add_catch_edge(node, exception_suc) # depends on [control=['for'], data=['exception_suc']]
redo = True
graph.remove_node(suc) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
to_update.add(node) # depends on [control=['for'], data=['node']]
for node in to_update:
node.update_attribute_with(node_map) # depends on [control=['for'], data=['node']] # depends on [control=['while'], data=[]] |
def load_config(self, framework_config: ConfigObject = None, config_location=DEFAULT_RLBOT_CONFIG_LOCATION,
bot_configs=None,
looks_configs=None):
"""
Loads the configuration into internal data structures, which prepares us to later
launch bot processes and start the match.
:param framework_config: A config object that indicates what bots to run. May come from parsing a rlbot.cfg.
:param config_location: The location of the rlbot.cfg file, which will be used to resolve relative paths.
:param bot_configs: Overrides for bot configurations.
:param looks_configs: Overrides for looks configurations.
"""
self.logger.debug('reading the configs')
# Set up RLBot.cfg
if framework_config is None:
framework_config = create_bot_config_layout()
framework_config.parse_file(config_location, max_index=MAX_PLAYERS)
if bot_configs is None:
bot_configs = {}
if looks_configs is None:
looks_configs = {}
match_config = parse_match_config(framework_config, config_location, bot_configs, looks_configs)
self.load_match_config(match_config, bot_configs) | def function[load_config, parameter[self, framework_config, config_location, bot_configs, looks_configs]]:
constant[
Loads the configuration into internal data structures, which prepares us to later
launch bot processes and start the match.
:param framework_config: A config object that indicates what bots to run. May come from parsing a rlbot.cfg.
:param config_location: The location of the rlbot.cfg file, which will be used to resolve relative paths.
:param bot_configs: Overrides for bot configurations.
:param looks_configs: Overrides for looks configurations.
]
call[name[self].logger.debug, parameter[constant[reading the configs]]]
if compare[name[framework_config] is constant[None]] begin[:]
variable[framework_config] assign[=] call[name[create_bot_config_layout], parameter[]]
call[name[framework_config].parse_file, parameter[name[config_location]]]
if compare[name[bot_configs] is constant[None]] begin[:]
variable[bot_configs] assign[=] dictionary[[], []]
if compare[name[looks_configs] is constant[None]] begin[:]
variable[looks_configs] assign[=] dictionary[[], []]
variable[match_config] assign[=] call[name[parse_match_config], parameter[name[framework_config], name[config_location], name[bot_configs], name[looks_configs]]]
call[name[self].load_match_config, parameter[name[match_config], name[bot_configs]]] | keyword[def] identifier[load_config] ( identifier[self] , identifier[framework_config] : identifier[ConfigObject] = keyword[None] , identifier[config_location] = identifier[DEFAULT_RLBOT_CONFIG_LOCATION] ,
identifier[bot_configs] = keyword[None] ,
identifier[looks_configs] = keyword[None] ):
literal[string]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] )
keyword[if] identifier[framework_config] keyword[is] keyword[None] :
identifier[framework_config] = identifier[create_bot_config_layout] ()
identifier[framework_config] . identifier[parse_file] ( identifier[config_location] , identifier[max_index] = identifier[MAX_PLAYERS] )
keyword[if] identifier[bot_configs] keyword[is] keyword[None] :
identifier[bot_configs] ={}
keyword[if] identifier[looks_configs] keyword[is] keyword[None] :
identifier[looks_configs] ={}
identifier[match_config] = identifier[parse_match_config] ( identifier[framework_config] , identifier[config_location] , identifier[bot_configs] , identifier[looks_configs] )
identifier[self] . identifier[load_match_config] ( identifier[match_config] , identifier[bot_configs] ) | def load_config(self, framework_config: ConfigObject=None, config_location=DEFAULT_RLBOT_CONFIG_LOCATION, bot_configs=None, looks_configs=None):
"""
Loads the configuration into internal data structures, which prepares us to later
launch bot processes and start the match.
:param framework_config: A config object that indicates what bots to run. May come from parsing a rlbot.cfg.
:param config_location: The location of the rlbot.cfg file, which will be used to resolve relative paths.
:param bot_configs: Overrides for bot configurations.
:param looks_configs: Overrides for looks configurations.
"""
self.logger.debug('reading the configs')
# Set up RLBot.cfg
if framework_config is None:
framework_config = create_bot_config_layout()
framework_config.parse_file(config_location, max_index=MAX_PLAYERS) # depends on [control=['if'], data=['framework_config']]
if bot_configs is None:
bot_configs = {} # depends on [control=['if'], data=['bot_configs']]
if looks_configs is None:
looks_configs = {} # depends on [control=['if'], data=['looks_configs']]
match_config = parse_match_config(framework_config, config_location, bot_configs, looks_configs)
self.load_match_config(match_config, bot_configs) |
def save(self, t, base=0, heap=False):
'''Save this typedef plus its class typedef.
'''
c, k = _keytuple(t)
if k and k not in _typedefs: # instance key
_typedefs[k] = self
if c and c not in _typedefs: # class key
if t.__module__ in _builtin_modules:
k = _kind_ignored # default
else:
k = self.kind
_typedefs[c] = _Typedef(base=_basicsize(type(t), base=base, heap=heap),
refs=_type_refs,
both=False, kind=k, type=t)
elif isbuiltin(t) and t not in _typedefs: # array, range, xrange in Python 2.x
_typedefs[t] = _Typedef(base=_basicsize(t, base=base),
both=False, kind=_kind_ignored, type=t)
else:
raise KeyError('asizeof typedef %r bad: %r %r' % (self, (c, k), self.both)) | def function[save, parameter[self, t, base, heap]]:
constant[Save this typedef plus its class typedef.
]
<ast.Tuple object at 0x7da204566230> assign[=] call[name[_keytuple], parameter[name[t]]]
if <ast.BoolOp object at 0x7da2045675e0> begin[:]
call[name[_typedefs]][name[k]] assign[=] name[self]
if <ast.BoolOp object at 0x7da18dc9a2f0> begin[:]
if compare[name[t].__module__ in name[_builtin_modules]] begin[:]
variable[k] assign[=] name[_kind_ignored]
call[name[_typedefs]][name[c]] assign[=] call[name[_Typedef], parameter[]] | keyword[def] identifier[save] ( identifier[self] , identifier[t] , identifier[base] = literal[int] , identifier[heap] = keyword[False] ):
literal[string]
identifier[c] , identifier[k] = identifier[_keytuple] ( identifier[t] )
keyword[if] identifier[k] keyword[and] identifier[k] keyword[not] keyword[in] identifier[_typedefs] :
identifier[_typedefs] [ identifier[k] ]= identifier[self]
keyword[if] identifier[c] keyword[and] identifier[c] keyword[not] keyword[in] identifier[_typedefs] :
keyword[if] identifier[t] . identifier[__module__] keyword[in] identifier[_builtin_modules] :
identifier[k] = identifier[_kind_ignored]
keyword[else] :
identifier[k] = identifier[self] . identifier[kind]
identifier[_typedefs] [ identifier[c] ]= identifier[_Typedef] ( identifier[base] = identifier[_basicsize] ( identifier[type] ( identifier[t] ), identifier[base] = identifier[base] , identifier[heap] = identifier[heap] ),
identifier[refs] = identifier[_type_refs] ,
identifier[both] = keyword[False] , identifier[kind] = identifier[k] , identifier[type] = identifier[t] )
keyword[elif] identifier[isbuiltin] ( identifier[t] ) keyword[and] identifier[t] keyword[not] keyword[in] identifier[_typedefs] :
identifier[_typedefs] [ identifier[t] ]= identifier[_Typedef] ( identifier[base] = identifier[_basicsize] ( identifier[t] , identifier[base] = identifier[base] ),
identifier[both] = keyword[False] , identifier[kind] = identifier[_kind_ignored] , identifier[type] = identifier[t] )
keyword[else] :
keyword[raise] identifier[KeyError] ( literal[string] %( identifier[self] ,( identifier[c] , identifier[k] ), identifier[self] . identifier[both] )) | def save(self, t, base=0, heap=False):
"""Save this typedef plus its class typedef.
"""
(c, k) = _keytuple(t)
if k and k not in _typedefs: # instance key
_typedefs[k] = self
if c and c not in _typedefs: # class key
if t.__module__ in _builtin_modules:
k = _kind_ignored # default # depends on [control=['if'], data=[]]
else:
k = self.kind
_typedefs[c] = _Typedef(base=_basicsize(type(t), base=base, heap=heap), refs=_type_refs, both=False, kind=k, type=t) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif isbuiltin(t) and t not in _typedefs: # array, range, xrange in Python 2.x
_typedefs[t] = _Typedef(base=_basicsize(t, base=base), both=False, kind=_kind_ignored, type=t) # depends on [control=['if'], data=[]]
else:
raise KeyError('asizeof typedef %r bad: %r %r' % (self, (c, k), self.both)) |
def is_filelike(obj):
"""Filename or file object?
"""
if isinstance(obj, (bytes, unicode)):
return False
res = True
for a in ('read', 'tell', 'seek'):
res = res and hasattr(obj, a)
if not res:
raise ValueError("Invalid object passed as file")
return True | def function[is_filelike, parameter[obj]]:
constant[Filename or file object?
]
if call[name[isinstance], parameter[name[obj], tuple[[<ast.Name object at 0x7da18dc04100>, <ast.Name object at 0x7da18dc05060>]]]] begin[:]
return[constant[False]]
variable[res] assign[=] constant[True]
for taget[name[a]] in starred[tuple[[<ast.Constant object at 0x7da18dc05a50>, <ast.Constant object at 0x7da18dc06380>, <ast.Constant object at 0x7da18dc04340>]]] begin[:]
variable[res] assign[=] <ast.BoolOp object at 0x7da18dc06530>
if <ast.UnaryOp object at 0x7da18ede7c10> begin[:]
<ast.Raise object at 0x7da18ede69e0>
return[constant[True]] | keyword[def] identifier[is_filelike] ( identifier[obj] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[obj] ,( identifier[bytes] , identifier[unicode] )):
keyword[return] keyword[False]
identifier[res] = keyword[True]
keyword[for] identifier[a] keyword[in] ( literal[string] , literal[string] , literal[string] ):
identifier[res] = identifier[res] keyword[and] identifier[hasattr] ( identifier[obj] , identifier[a] )
keyword[if] keyword[not] identifier[res] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] keyword[True] | def is_filelike(obj):
"""Filename or file object?
"""
if isinstance(obj, (bytes, unicode)):
return False # depends on [control=['if'], data=[]]
res = True
for a in ('read', 'tell', 'seek'):
res = res and hasattr(obj, a) # depends on [control=['for'], data=['a']]
if not res:
raise ValueError('Invalid object passed as file') # depends on [control=['if'], data=[]]
return True |
def checkForChanges(f, sde, isTable):
"""
returns False if there are no changes
"""
# try simple feature count first
fCount = int(arcpy.GetCount_management(f).getOutput(0))
sdeCount = int(arcpy.GetCount_management(sde).getOutput(0))
if fCount != sdeCount:
return True
fields = [fld.name for fld in arcpy.ListFields(f)]
# filter out shape fields
if not isTable:
fields = filter_fields(fields)
d = arcpy.Describe(f)
shapeType = d.shapeType
if shapeType == 'Polygon':
shapeToken = 'SHAPE@AREA'
elif shapeType == 'Polyline':
shapeToken = 'SHAPE@LENGTH'
elif shapeType == 'Point':
shapeToken = 'SHAPE@XY'
else:
shapeToken = 'SHAPE@JSON'
fields.append(shapeToken)
def parseShape(shapeValue):
if shapeValue is None:
return 0
elif shapeType in ['Polygon', 'Polyline']:
return shapeValue
elif shapeType == 'Point':
if shapeValue[0] is not None and shapeValue[1] is not None:
return shapeValue[0] + shapeValue[1]
else:
return 0
else:
return shapeValue
outputSR = arcpy.Describe(f).spatialReference
else:
outputSR = None
changed = False
with arcpy.da.SearchCursor(f, fields, sql_clause=(None, 'ORDER BY OBJECTID')) as fCursor, \
arcpy.da.SearchCursor(sde, fields, sql_clause=(None, 'ORDER BY OBJECTID'),
spatial_reference=outputSR) as sdeCursor:
for fRow, sdeRow in izip(fCursor, sdeCursor):
if fRow != sdeRow:
# check shapes first
if fRow[-1] != sdeRow[-1] and not isTable:
if shapeType not in ['Polygon', 'Polyline', 'Point']:
changed = True
break
fShape = parseShape(fRow[-1])
sdeShape = parseShape(sdeRow[-1])
try:
assert_almost_equal(fShape, sdeShape, -1)
# trim off shapes
fRow = list(fRow[:-1])
sdeRow = list(sdeRow[:-1])
except AssertionError:
changed = True
break
# trim microseconds since they can be off by one between file and sde databases
for i in range(len(fRow)):
if type(fRow[i]) is datetime:
fRow = list(fRow)
sdeRow = list(sdeRow)
fRow[i] = fRow[i].replace(microsecond=0)
try:
sdeRow[i] = sdeRow[i].replace(microsecond=0)
except:
pass
# compare all values except OBJECTID
if fRow[1:] != sdeRow[1:]:
changed = True
break
return changed | def function[checkForChanges, parameter[f, sde, isTable]]:
constant[
returns False if there are no changes
]
variable[fCount] assign[=] call[name[int], parameter[call[call[name[arcpy].GetCount_management, parameter[name[f]]].getOutput, parameter[constant[0]]]]]
variable[sdeCount] assign[=] call[name[int], parameter[call[call[name[arcpy].GetCount_management, parameter[name[sde]]].getOutput, parameter[constant[0]]]]]
if compare[name[fCount] not_equal[!=] name[sdeCount]] begin[:]
return[constant[True]]
variable[fields] assign[=] <ast.ListComp object at 0x7da1b28eb850>
if <ast.UnaryOp object at 0x7da1b28eb640> begin[:]
variable[fields] assign[=] call[name[filter_fields], parameter[name[fields]]]
variable[d] assign[=] call[name[arcpy].Describe, parameter[name[f]]]
variable[shapeType] assign[=] name[d].shapeType
if compare[name[shapeType] equal[==] constant[Polygon]] begin[:]
variable[shapeToken] assign[=] constant[SHAPE@AREA]
call[name[fields].append, parameter[name[shapeToken]]]
def function[parseShape, parameter[shapeValue]]:
if compare[name[shapeValue] is constant[None]] begin[:]
return[constant[0]]
variable[outputSR] assign[=] call[name[arcpy].Describe, parameter[name[f]]].spatialReference
variable[changed] assign[=] constant[False]
with call[name[arcpy].da.SearchCursor, parameter[name[f], name[fields]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b28e9c90>, <ast.Name object at 0x7da1b28e9c60>]]] in starred[call[name[izip], parameter[name[fCursor], name[sdeCursor]]]] begin[:]
if compare[name[fRow] not_equal[!=] name[sdeRow]] begin[:]
if <ast.BoolOp object at 0x7da1b28e9a80> begin[:]
if compare[name[shapeType] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da1b28e9780>, <ast.Constant object at 0x7da1b28e9750>, <ast.Constant object at 0x7da1b28e9720>]]] begin[:]
variable[changed] assign[=] constant[True]
break
variable[fShape] assign[=] call[name[parseShape], parameter[call[name[fRow]][<ast.UnaryOp object at 0x7da1b28e94e0>]]]
variable[sdeShape] assign[=] call[name[parseShape], parameter[call[name[sdeRow]][<ast.UnaryOp object at 0x7da1b28e9360>]]]
<ast.Try object at 0x7da1b28e9300>
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[fRow]]]]]] begin[:]
if compare[call[name[type], parameter[call[name[fRow]][name[i]]]] is name[datetime]] begin[:]
variable[fRow] assign[=] call[name[list], parameter[name[fRow]]]
variable[sdeRow] assign[=] call[name[list], parameter[name[sdeRow]]]
call[name[fRow]][name[i]] assign[=] call[call[name[fRow]][name[i]].replace, parameter[]]
<ast.Try object at 0x7da1b28f2920>
if compare[call[name[fRow]][<ast.Slice object at 0x7da1b28f3700>] not_equal[!=] call[name[sdeRow]][<ast.Slice object at 0x7da1b28f2dd0>]] begin[:]
variable[changed] assign[=] constant[True]
break
return[name[changed]] | keyword[def] identifier[checkForChanges] ( identifier[f] , identifier[sde] , identifier[isTable] ):
literal[string]
identifier[fCount] = identifier[int] ( identifier[arcpy] . identifier[GetCount_management] ( identifier[f] ). identifier[getOutput] ( literal[int] ))
identifier[sdeCount] = identifier[int] ( identifier[arcpy] . identifier[GetCount_management] ( identifier[sde] ). identifier[getOutput] ( literal[int] ))
keyword[if] identifier[fCount] != identifier[sdeCount] :
keyword[return] keyword[True]
identifier[fields] =[ identifier[fld] . identifier[name] keyword[for] identifier[fld] keyword[in] identifier[arcpy] . identifier[ListFields] ( identifier[f] )]
keyword[if] keyword[not] identifier[isTable] :
identifier[fields] = identifier[filter_fields] ( identifier[fields] )
identifier[d] = identifier[arcpy] . identifier[Describe] ( identifier[f] )
identifier[shapeType] = identifier[d] . identifier[shapeType]
keyword[if] identifier[shapeType] == literal[string] :
identifier[shapeToken] = literal[string]
keyword[elif] identifier[shapeType] == literal[string] :
identifier[shapeToken] = literal[string]
keyword[elif] identifier[shapeType] == literal[string] :
identifier[shapeToken] = literal[string]
keyword[else] :
identifier[shapeToken] = literal[string]
identifier[fields] . identifier[append] ( identifier[shapeToken] )
keyword[def] identifier[parseShape] ( identifier[shapeValue] ):
keyword[if] identifier[shapeValue] keyword[is] keyword[None] :
keyword[return] literal[int]
keyword[elif] identifier[shapeType] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] identifier[shapeValue]
keyword[elif] identifier[shapeType] == literal[string] :
keyword[if] identifier[shapeValue] [ literal[int] ] keyword[is] keyword[not] keyword[None] keyword[and] identifier[shapeValue] [ literal[int] ] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[shapeValue] [ literal[int] ]+ identifier[shapeValue] [ literal[int] ]
keyword[else] :
keyword[return] literal[int]
keyword[else] :
keyword[return] identifier[shapeValue]
identifier[outputSR] = identifier[arcpy] . identifier[Describe] ( identifier[f] ). identifier[spatialReference]
keyword[else] :
identifier[outputSR] = keyword[None]
identifier[changed] = keyword[False]
keyword[with] identifier[arcpy] . identifier[da] . identifier[SearchCursor] ( identifier[f] , identifier[fields] , identifier[sql_clause] =( keyword[None] , literal[string] )) keyword[as] identifier[fCursor] , identifier[arcpy] . identifier[da] . identifier[SearchCursor] ( identifier[sde] , identifier[fields] , identifier[sql_clause] =( keyword[None] , literal[string] ),
identifier[spatial_reference] = identifier[outputSR] ) keyword[as] identifier[sdeCursor] :
keyword[for] identifier[fRow] , identifier[sdeRow] keyword[in] identifier[izip] ( identifier[fCursor] , identifier[sdeCursor] ):
keyword[if] identifier[fRow] != identifier[sdeRow] :
keyword[if] identifier[fRow] [- literal[int] ]!= identifier[sdeRow] [- literal[int] ] keyword[and] keyword[not] identifier[isTable] :
keyword[if] identifier[shapeType] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[changed] = keyword[True]
keyword[break]
identifier[fShape] = identifier[parseShape] ( identifier[fRow] [- literal[int] ])
identifier[sdeShape] = identifier[parseShape] ( identifier[sdeRow] [- literal[int] ])
keyword[try] :
identifier[assert_almost_equal] ( identifier[fShape] , identifier[sdeShape] ,- literal[int] )
identifier[fRow] = identifier[list] ( identifier[fRow] [:- literal[int] ])
identifier[sdeRow] = identifier[list] ( identifier[sdeRow] [:- literal[int] ])
keyword[except] identifier[AssertionError] :
identifier[changed] = keyword[True]
keyword[break]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[fRow] )):
keyword[if] identifier[type] ( identifier[fRow] [ identifier[i] ]) keyword[is] identifier[datetime] :
identifier[fRow] = identifier[list] ( identifier[fRow] )
identifier[sdeRow] = identifier[list] ( identifier[sdeRow] )
identifier[fRow] [ identifier[i] ]= identifier[fRow] [ identifier[i] ]. identifier[replace] ( identifier[microsecond] = literal[int] )
keyword[try] :
identifier[sdeRow] [ identifier[i] ]= identifier[sdeRow] [ identifier[i] ]. identifier[replace] ( identifier[microsecond] = literal[int] )
keyword[except] :
keyword[pass]
keyword[if] identifier[fRow] [ literal[int] :]!= identifier[sdeRow] [ literal[int] :]:
identifier[changed] = keyword[True]
keyword[break]
keyword[return] identifier[changed] | def checkForChanges(f, sde, isTable):
"""
returns False if there are no changes
"""
# try simple feature count first
fCount = int(arcpy.GetCount_management(f).getOutput(0))
sdeCount = int(arcpy.GetCount_management(sde).getOutput(0))
if fCount != sdeCount:
return True # depends on [control=['if'], data=[]]
fields = [fld.name for fld in arcpy.ListFields(f)]
# filter out shape fields
if not isTable:
fields = filter_fields(fields)
d = arcpy.Describe(f)
shapeType = d.shapeType
if shapeType == 'Polygon':
shapeToken = 'SHAPE@AREA' # depends on [control=['if'], data=[]]
elif shapeType == 'Polyline':
shapeToken = 'SHAPE@LENGTH' # depends on [control=['if'], data=[]]
elif shapeType == 'Point':
shapeToken = 'SHAPE@XY' # depends on [control=['if'], data=[]]
else:
shapeToken = 'SHAPE@JSON'
fields.append(shapeToken)
def parseShape(shapeValue):
if shapeValue is None:
return 0 # depends on [control=['if'], data=[]]
elif shapeType in ['Polygon', 'Polyline']:
return shapeValue # depends on [control=['if'], data=[]]
elif shapeType == 'Point':
if shapeValue[0] is not None and shapeValue[1] is not None:
return shapeValue[0] + shapeValue[1] # depends on [control=['if'], data=[]]
else:
return 0 # depends on [control=['if'], data=[]]
else:
return shapeValue
outputSR = arcpy.Describe(f).spatialReference # depends on [control=['if'], data=[]]
else:
outputSR = None
changed = False
with arcpy.da.SearchCursor(f, fields, sql_clause=(None, 'ORDER BY OBJECTID')) as fCursor, arcpy.da.SearchCursor(sde, fields, sql_clause=(None, 'ORDER BY OBJECTID'), spatial_reference=outputSR) as sdeCursor:
for (fRow, sdeRow) in izip(fCursor, sdeCursor):
if fRow != sdeRow:
# check shapes first
if fRow[-1] != sdeRow[-1] and (not isTable):
if shapeType not in ['Polygon', 'Polyline', 'Point']:
changed = True
break # depends on [control=['if'], data=[]]
fShape = parseShape(fRow[-1])
sdeShape = parseShape(sdeRow[-1])
try:
assert_almost_equal(fShape, sdeShape, -1)
# trim off shapes
fRow = list(fRow[:-1])
sdeRow = list(sdeRow[:-1]) # depends on [control=['try'], data=[]]
except AssertionError:
changed = True
break # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
# trim microseconds since they can be off by one between file and sde databases
for i in range(len(fRow)):
if type(fRow[i]) is datetime:
fRow = list(fRow)
sdeRow = list(sdeRow)
fRow[i] = fRow[i].replace(microsecond=0)
try:
sdeRow[i] = sdeRow[i].replace(microsecond=0) # depends on [control=['try'], data=[]]
except:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
# compare all values except OBJECTID
if fRow[1:] != sdeRow[1:]:
changed = True
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['fRow', 'sdeRow']] # depends on [control=['for'], data=[]] # depends on [control=['with'], data=['fCursor']]
return changed |
def daily404summary(date, return_format=None):
"""Returns daily summary information of submitted 404 Error Page
Information.
:param date: string or datetime.date() (required)
"""
uri = 'daily404summary'
if date:
try:
uri = '/'.join([uri, date.strftime("%Y-%m-%d")])
except AttributeError:
uri = '/'.join([uri, date])
return _get(uri, return_format) | def function[daily404summary, parameter[date, return_format]]:
constant[Returns daily summary information of submitted 404 Error Page
Information.
:param date: string or datetime.date() (required)
]
variable[uri] assign[=] constant[daily404summary]
if name[date] begin[:]
<ast.Try object at 0x7da1b1aa5de0>
return[call[name[_get], parameter[name[uri], name[return_format]]]] | keyword[def] identifier[daily404summary] ( identifier[date] , identifier[return_format] = keyword[None] ):
literal[string]
identifier[uri] = literal[string]
keyword[if] identifier[date] :
keyword[try] :
identifier[uri] = literal[string] . identifier[join] ([ identifier[uri] , identifier[date] . identifier[strftime] ( literal[string] )])
keyword[except] identifier[AttributeError] :
identifier[uri] = literal[string] . identifier[join] ([ identifier[uri] , identifier[date] ])
keyword[return] identifier[_get] ( identifier[uri] , identifier[return_format] ) | def daily404summary(date, return_format=None):
"""Returns daily summary information of submitted 404 Error Page
Information.
:param date: string or datetime.date() (required)
"""
uri = 'daily404summary'
if date:
try:
uri = '/'.join([uri, date.strftime('%Y-%m-%d')]) # depends on [control=['try'], data=[]]
except AttributeError:
uri = '/'.join([uri, date]) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
return _get(uri, return_format) |
def _get(self, url, headers={}):
""" Get a JSON API endpoint and return the parsed data.
:param url: str, *relative* URL (relative to pp-admin/ api endpoint)
:param headers: dict (optional)
:returns: deferred that when fired returns the parsed data from JSON
or errbacks with ProductPagesException
"""
# print('getting %s' % url)
headers = headers.copy()
headers['Accept'] = 'application/json'
url = posixpath.join(self.url, url)
try:
response = yield treq.get(url, headers=headers, timeout=5)
if response.code != 200:
err = '%s returned %s' % (url, response.code)
raise ProductPagesException(err)
else:
content = yield treq.json_content(response)
defer.returnValue(content)
except Exception as e:
# For example, if treq.get() timed out, or if treq.json_content()
# could not parse the JSON, etc.
# TODO: better handling here for the specific errors?
# I suspect it's not good to catch Exception with inlineCallbacks
raise ProductPagesException('treq error: %s' % e.message) | def function[_get, parameter[self, url, headers]]:
constant[ Get a JSON API endpoint and return the parsed data.
:param url: str, *relative* URL (relative to pp-admin/ api endpoint)
:param headers: dict (optional)
:returns: deferred that when fired returns the parsed data from JSON
or errbacks with ProductPagesException
]
variable[headers] assign[=] call[name[headers].copy, parameter[]]
call[name[headers]][constant[Accept]] assign[=] constant[application/json]
variable[url] assign[=] call[name[posixpath].join, parameter[name[self].url, name[url]]]
<ast.Try object at 0x7da1b15b4100> | keyword[def] identifier[_get] ( identifier[self] , identifier[url] , identifier[headers] ={}):
literal[string]
identifier[headers] = identifier[headers] . identifier[copy] ()
identifier[headers] [ literal[string] ]= literal[string]
identifier[url] = identifier[posixpath] . identifier[join] ( identifier[self] . identifier[url] , identifier[url] )
keyword[try] :
identifier[response] = keyword[yield] identifier[treq] . identifier[get] ( identifier[url] , identifier[headers] = identifier[headers] , identifier[timeout] = literal[int] )
keyword[if] identifier[response] . identifier[code] != literal[int] :
identifier[err] = literal[string] %( identifier[url] , identifier[response] . identifier[code] )
keyword[raise] identifier[ProductPagesException] ( identifier[err] )
keyword[else] :
identifier[content] = keyword[yield] identifier[treq] . identifier[json_content] ( identifier[response] )
identifier[defer] . identifier[returnValue] ( identifier[content] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[ProductPagesException] ( literal[string] % identifier[e] . identifier[message] ) | def _get(self, url, headers={}):
""" Get a JSON API endpoint and return the parsed data.
:param url: str, *relative* URL (relative to pp-admin/ api endpoint)
:param headers: dict (optional)
:returns: deferred that when fired returns the parsed data from JSON
or errbacks with ProductPagesException
"""
# print('getting %s' % url)
headers = headers.copy()
headers['Accept'] = 'application/json'
url = posixpath.join(self.url, url)
try:
response = (yield treq.get(url, headers=headers, timeout=5))
if response.code != 200:
err = '%s returned %s' % (url, response.code)
raise ProductPagesException(err) # depends on [control=['if'], data=[]]
else:
content = (yield treq.json_content(response))
defer.returnValue(content) # depends on [control=['try'], data=[]]
except Exception as e:
# For example, if treq.get() timed out, or if treq.json_content()
# could not parse the JSON, etc.
# TODO: better handling here for the specific errors?
# I suspect it's not good to catch Exception with inlineCallbacks
raise ProductPagesException('treq error: %s' % e.message) # depends on [control=['except'], data=['e']] |
def scale(table):
"""
scale table based on the column with the largest sum
"""
t = []
columns = [[] for i in table[0]]
for row in table:
for i, v in enumerate(row):
columns[i].append(v)
sums = [float(sum(i)) for i in columns]
scale_to = float(max(sums))
scale_factor = [scale_to/i for i in sums if i != 0]
for row in table:
t.append([a * b for a,b in zip(row, scale_factor)])
return t | def function[scale, parameter[table]]:
constant[
scale table based on the column with the largest sum
]
variable[t] assign[=] list[[]]
variable[columns] assign[=] <ast.ListComp object at 0x7da2054a4dc0>
for taget[name[row]] in starred[name[table]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da2054a5870>, <ast.Name object at 0x7da2054a4ca0>]]] in starred[call[name[enumerate], parameter[name[row]]]] begin[:]
call[call[name[columns]][name[i]].append, parameter[name[v]]]
variable[sums] assign[=] <ast.ListComp object at 0x7da2054a5390>
variable[scale_to] assign[=] call[name[float], parameter[call[name[max], parameter[name[sums]]]]]
variable[scale_factor] assign[=] <ast.ListComp object at 0x7da2054a4a00>
for taget[name[row]] in starred[name[table]] begin[:]
call[name[t].append, parameter[<ast.ListComp object at 0x7da2045656f0>]]
return[name[t]] | keyword[def] identifier[scale] ( identifier[table] ):
literal[string]
identifier[t] =[]
identifier[columns] =[[] keyword[for] identifier[i] keyword[in] identifier[table] [ literal[int] ]]
keyword[for] identifier[row] keyword[in] identifier[table] :
keyword[for] identifier[i] , identifier[v] keyword[in] identifier[enumerate] ( identifier[row] ):
identifier[columns] [ identifier[i] ]. identifier[append] ( identifier[v] )
identifier[sums] =[ identifier[float] ( identifier[sum] ( identifier[i] )) keyword[for] identifier[i] keyword[in] identifier[columns] ]
identifier[scale_to] = identifier[float] ( identifier[max] ( identifier[sums] ))
identifier[scale_factor] =[ identifier[scale_to] / identifier[i] keyword[for] identifier[i] keyword[in] identifier[sums] keyword[if] identifier[i] != literal[int] ]
keyword[for] identifier[row] keyword[in] identifier[table] :
identifier[t] . identifier[append] ([ identifier[a] * identifier[b] keyword[for] identifier[a] , identifier[b] keyword[in] identifier[zip] ( identifier[row] , identifier[scale_factor] )])
keyword[return] identifier[t] | def scale(table):
"""
scale table based on the column with the largest sum
"""
t = []
columns = [[] for i in table[0]]
for row in table:
for (i, v) in enumerate(row):
columns[i].append(v) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['row']]
sums = [float(sum(i)) for i in columns]
scale_to = float(max(sums))
scale_factor = [scale_to / i for i in sums if i != 0]
for row in table:
t.append([a * b for (a, b) in zip(row, scale_factor)]) # depends on [control=['for'], data=['row']]
return t |
def _swap_on_miss(partition_result):
"""
Given a partition_dict result, if the partition missed, swap
the before and after.
"""
before, item, after = partition_result
return (before, item, after) if item else (after, item, before) | def function[_swap_on_miss, parameter[partition_result]]:
constant[
Given a partition_dict result, if the partition missed, swap
the before and after.
]
<ast.Tuple object at 0x7da20c795de0> assign[=] name[partition_result]
return[<ast.IfExp object at 0x7da20c7945b0>] | keyword[def] identifier[_swap_on_miss] ( identifier[partition_result] ):
literal[string]
identifier[before] , identifier[item] , identifier[after] = identifier[partition_result]
keyword[return] ( identifier[before] , identifier[item] , identifier[after] ) keyword[if] identifier[item] keyword[else] ( identifier[after] , identifier[item] , identifier[before] ) | def _swap_on_miss(partition_result):
"""
Given a partition_dict result, if the partition missed, swap
the before and after.
"""
(before, item, after) = partition_result
return (before, item, after) if item else (after, item, before) |
def source_line(self, lineno):
"""
Returns line ``lineno`` from source, taking ``first_line`` into account,
or raises :exc:`IndexError` if ``lineno`` is out of range.
"""
line_begins = self._extract_line_begins()
lineno = lineno - self.first_line
if lineno >= 0 and lineno + 1 < len(line_begins):
first, last = line_begins[lineno:lineno + 2]
return self.source[first:last]
elif lineno >= 0 and lineno < len(line_begins):
return self.source[line_begins[-1]:]
else:
raise IndexError | def function[source_line, parameter[self, lineno]]:
constant[
Returns line ``lineno`` from source, taking ``first_line`` into account,
or raises :exc:`IndexError` if ``lineno`` is out of range.
]
variable[line_begins] assign[=] call[name[self]._extract_line_begins, parameter[]]
variable[lineno] assign[=] binary_operation[name[lineno] - name[self].first_line]
if <ast.BoolOp object at 0x7da18dc061a0> begin[:]
<ast.Tuple object at 0x7da18dc057e0> assign[=] call[name[line_begins]][<ast.Slice object at 0x7da18dc078b0>]
return[call[name[self].source][<ast.Slice object at 0x7da18dc070a0>]] | keyword[def] identifier[source_line] ( identifier[self] , identifier[lineno] ):
literal[string]
identifier[line_begins] = identifier[self] . identifier[_extract_line_begins] ()
identifier[lineno] = identifier[lineno] - identifier[self] . identifier[first_line]
keyword[if] identifier[lineno] >= literal[int] keyword[and] identifier[lineno] + literal[int] < identifier[len] ( identifier[line_begins] ):
identifier[first] , identifier[last] = identifier[line_begins] [ identifier[lineno] : identifier[lineno] + literal[int] ]
keyword[return] identifier[self] . identifier[source] [ identifier[first] : identifier[last] ]
keyword[elif] identifier[lineno] >= literal[int] keyword[and] identifier[lineno] < identifier[len] ( identifier[line_begins] ):
keyword[return] identifier[self] . identifier[source] [ identifier[line_begins] [- literal[int] ]:]
keyword[else] :
keyword[raise] identifier[IndexError] | def source_line(self, lineno):
"""
Returns line ``lineno`` from source, taking ``first_line`` into account,
or raises :exc:`IndexError` if ``lineno`` is out of range.
"""
line_begins = self._extract_line_begins()
lineno = lineno - self.first_line
if lineno >= 0 and lineno + 1 < len(line_begins):
(first, last) = line_begins[lineno:lineno + 2]
return self.source[first:last] # depends on [control=['if'], data=[]]
elif lineno >= 0 and lineno < len(line_begins):
return self.source[line_begins[-1]:] # depends on [control=['if'], data=[]]
else:
raise IndexError |
def correlation(left, right, where=None, how='sample'):
"""
Compute correlation of two numeric array
Parameters
----------
how : {'sample', 'pop'}, default 'sample'
Returns
-------
corr : double scalar
"""
expr = ops.Correlation(left, right, how, where).to_expr()
return expr | def function[correlation, parameter[left, right, where, how]]:
constant[
Compute correlation of two numeric array
Parameters
----------
how : {'sample', 'pop'}, default 'sample'
Returns
-------
corr : double scalar
]
variable[expr] assign[=] call[call[name[ops].Correlation, parameter[name[left], name[right], name[how], name[where]]].to_expr, parameter[]]
return[name[expr]] | keyword[def] identifier[correlation] ( identifier[left] , identifier[right] , identifier[where] = keyword[None] , identifier[how] = literal[string] ):
literal[string]
identifier[expr] = identifier[ops] . identifier[Correlation] ( identifier[left] , identifier[right] , identifier[how] , identifier[where] ). identifier[to_expr] ()
keyword[return] identifier[expr] | def correlation(left, right, where=None, how='sample'):
"""
Compute correlation of two numeric array
Parameters
----------
how : {'sample', 'pop'}, default 'sample'
Returns
-------
corr : double scalar
"""
expr = ops.Correlation(left, right, how, where).to_expr()
return expr |
def get_subreddit_recommendations(self, subreddits, omit=None):
"""Return a list of recommended subreddits as Subreddit objects.
Subreddits with activity less than a certain threshold, will not have
any recommendations due to lack of data.
:param subreddits: A list of subreddits (either names or Subreddit
objects) to base the recommendations on.
:param omit: A list of subreddits (either names or Subreddit
objects) that will be filtered out of the result.
"""
params = {'omit': _to_reddit_list(omit or [])}
url = self.config['sub_recommendations'].format(
subreddits=_to_reddit_list(subreddits))
result = self.request_json(url, params=params)
return [objects.Subreddit(self, sub['sr_name']) for sub in result] | def function[get_subreddit_recommendations, parameter[self, subreddits, omit]]:
constant[Return a list of recommended subreddits as Subreddit objects.
Subreddits with activity less than a certain threshold, will not have
any recommendations due to lack of data.
:param subreddits: A list of subreddits (either names or Subreddit
objects) to base the recommendations on.
:param omit: A list of subreddits (either names or Subreddit
objects) that will be filtered out of the result.
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18dc046a0>], [<ast.Call object at 0x7da18dc071c0>]]
variable[url] assign[=] call[call[name[self].config][constant[sub_recommendations]].format, parameter[]]
variable[result] assign[=] call[name[self].request_json, parameter[name[url]]]
return[<ast.ListComp object at 0x7da18fe91960>] | keyword[def] identifier[get_subreddit_recommendations] ( identifier[self] , identifier[subreddits] , identifier[omit] = keyword[None] ):
literal[string]
identifier[params] ={ literal[string] : identifier[_to_reddit_list] ( identifier[omit] keyword[or] [])}
identifier[url] = identifier[self] . identifier[config] [ literal[string] ]. identifier[format] (
identifier[subreddits] = identifier[_to_reddit_list] ( identifier[subreddits] ))
identifier[result] = identifier[self] . identifier[request_json] ( identifier[url] , identifier[params] = identifier[params] )
keyword[return] [ identifier[objects] . identifier[Subreddit] ( identifier[self] , identifier[sub] [ literal[string] ]) keyword[for] identifier[sub] keyword[in] identifier[result] ] | def get_subreddit_recommendations(self, subreddits, omit=None):
"""Return a list of recommended subreddits as Subreddit objects.
Subreddits with activity less than a certain threshold, will not have
any recommendations due to lack of data.
:param subreddits: A list of subreddits (either names or Subreddit
objects) to base the recommendations on.
:param omit: A list of subreddits (either names or Subreddit
objects) that will be filtered out of the result.
"""
params = {'omit': _to_reddit_list(omit or [])}
url = self.config['sub_recommendations'].format(subreddits=_to_reddit_list(subreddits))
result = self.request_json(url, params=params)
return [objects.Subreddit(self, sub['sr_name']) for sub in result] |
async def _connect_websocket(self, url, headers, engineio_path):
"""Establish or upgrade to a WebSocket connection with the server."""
if websockets is None: # pragma: no cover
self.logger.error('websockets package not installed')
return False
websocket_url = self._get_engineio_url(url, engineio_path,
'websocket')
if self.sid:
self.logger.info(
'Attempting WebSocket upgrade to ' + websocket_url)
upgrade = True
websocket_url += '&sid=' + self.sid
else:
upgrade = False
self.base_url = websocket_url
self.logger.info(
'Attempting WebSocket connection to ' + websocket_url)
# get the cookies from the long-polling connection so that they can
# also be sent the the WebSocket route
cookies = None
if self.http:
cookies = '; '.join(["{}={}".format(cookie.key, cookie.value)
for cookie in self.http._cookie_jar])
headers = headers.copy()
headers['Cookie'] = cookies
try:
ws = await websockets.connect(
websocket_url + self._get_url_timestamp(),
extra_headers=headers)
except (websockets.exceptions.InvalidURI,
websockets.exceptions.InvalidHandshake):
if upgrade:
self.logger.warning(
'WebSocket upgrade failed: connection error')
return False
else:
raise exceptions.ConnectionError('Connection error')
if upgrade:
p = packet.Packet(packet.PING, data='probe').encode(
always_bytes=False)
try:
await ws.send(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
try:
p = await ws.recv()
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected recv exception: %s',
str(e))
return False
pkt = packet.Packet(encoded_packet=p)
if pkt.packet_type != packet.PONG or pkt.data != 'probe':
self.logger.warning(
'WebSocket upgrade failed: no PONG packet')
return False
p = packet.Packet(packet.UPGRADE).encode(always_bytes=False)
try:
await ws.send(p)
except Exception as e: # pragma: no cover
self.logger.warning(
'WebSocket upgrade failed: unexpected send exception: %s',
str(e))
return False
self.current_transport = 'websocket'
if self.http: # pragma: no cover
await self.http.close()
self.logger.info('WebSocket upgrade was successful')
else:
try:
p = await ws.recv()
except Exception as e: # pragma: no cover
raise exceptions.ConnectionError(
'Unexpected recv exception: ' + str(e))
open_packet = packet.Packet(encoded_packet=p)
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError('no OPEN packet')
self.logger.info(
'WebSocket connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = open_packet.data['pingInterval'] / 1000.0
self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
self.current_transport = 'websocket'
self.state = 'connected'
client.connected_clients.append(self)
await self._trigger_event('connect', run_async=False)
self.ws = ws
self.ping_loop_task = self.start_background_task(self._ping_loop)
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(
self._read_loop_websocket)
return True | <ast.AsyncFunctionDef object at 0x7da1b09807f0> | keyword[async] keyword[def] identifier[_connect_websocket] ( identifier[self] , identifier[url] , identifier[headers] , identifier[engineio_path] ):
literal[string]
keyword[if] identifier[websockets] keyword[is] keyword[None] :
identifier[self] . identifier[logger] . identifier[error] ( literal[string] )
keyword[return] keyword[False]
identifier[websocket_url] = identifier[self] . identifier[_get_engineio_url] ( identifier[url] , identifier[engineio_path] ,
literal[string] )
keyword[if] identifier[self] . identifier[sid] :
identifier[self] . identifier[logger] . identifier[info] (
literal[string] + identifier[websocket_url] )
identifier[upgrade] = keyword[True]
identifier[websocket_url] += literal[string] + identifier[self] . identifier[sid]
keyword[else] :
identifier[upgrade] = keyword[False]
identifier[self] . identifier[base_url] = identifier[websocket_url]
identifier[self] . identifier[logger] . identifier[info] (
literal[string] + identifier[websocket_url] )
identifier[cookies] = keyword[None]
keyword[if] identifier[self] . identifier[http] :
identifier[cookies] = literal[string] . identifier[join] ([ literal[string] . identifier[format] ( identifier[cookie] . identifier[key] , identifier[cookie] . identifier[value] )
keyword[for] identifier[cookie] keyword[in] identifier[self] . identifier[http] . identifier[_cookie_jar] ])
identifier[headers] = identifier[headers] . identifier[copy] ()
identifier[headers] [ literal[string] ]= identifier[cookies]
keyword[try] :
identifier[ws] = keyword[await] identifier[websockets] . identifier[connect] (
identifier[websocket_url] + identifier[self] . identifier[_get_url_timestamp] (),
identifier[extra_headers] = identifier[headers] )
keyword[except] ( identifier[websockets] . identifier[exceptions] . identifier[InvalidURI] ,
identifier[websockets] . identifier[exceptions] . identifier[InvalidHandshake] ):
keyword[if] identifier[upgrade] :
identifier[self] . identifier[logger] . identifier[warning] (
literal[string] )
keyword[return] keyword[False]
keyword[else] :
keyword[raise] identifier[exceptions] . identifier[ConnectionError] ( literal[string] )
keyword[if] identifier[upgrade] :
identifier[p] = identifier[packet] . identifier[Packet] ( identifier[packet] . identifier[PING] , identifier[data] = literal[string] ). identifier[encode] (
identifier[always_bytes] = keyword[False] )
keyword[try] :
keyword[await] identifier[ws] . identifier[send] ( identifier[p] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[logger] . identifier[warning] (
literal[string] ,
identifier[str] ( identifier[e] ))
keyword[return] keyword[False]
keyword[try] :
identifier[p] = keyword[await] identifier[ws] . identifier[recv] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[logger] . identifier[warning] (
literal[string] ,
identifier[str] ( identifier[e] ))
keyword[return] keyword[False]
identifier[pkt] = identifier[packet] . identifier[Packet] ( identifier[encoded_packet] = identifier[p] )
keyword[if] identifier[pkt] . identifier[packet_type] != identifier[packet] . identifier[PONG] keyword[or] identifier[pkt] . identifier[data] != literal[string] :
identifier[self] . identifier[logger] . identifier[warning] (
literal[string] )
keyword[return] keyword[False]
identifier[p] = identifier[packet] . identifier[Packet] ( identifier[packet] . identifier[UPGRADE] ). identifier[encode] ( identifier[always_bytes] = keyword[False] )
keyword[try] :
keyword[await] identifier[ws] . identifier[send] ( identifier[p] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[logger] . identifier[warning] (
literal[string] ,
identifier[str] ( identifier[e] ))
keyword[return] keyword[False]
identifier[self] . identifier[current_transport] = literal[string]
keyword[if] identifier[self] . identifier[http] :
keyword[await] identifier[self] . identifier[http] . identifier[close] ()
identifier[self] . identifier[logger] . identifier[info] ( literal[string] )
keyword[else] :
keyword[try] :
identifier[p] = keyword[await] identifier[ws] . identifier[recv] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[exceptions] . identifier[ConnectionError] (
literal[string] + identifier[str] ( identifier[e] ))
identifier[open_packet] = identifier[packet] . identifier[Packet] ( identifier[encoded_packet] = identifier[p] )
keyword[if] identifier[open_packet] . identifier[packet_type] != identifier[packet] . identifier[OPEN] :
keyword[raise] identifier[exceptions] . identifier[ConnectionError] ( literal[string] )
identifier[self] . identifier[logger] . identifier[info] (
literal[string] + identifier[str] ( identifier[open_packet] . identifier[data] ))
identifier[self] . identifier[sid] = identifier[open_packet] . identifier[data] [ literal[string] ]
identifier[self] . identifier[upgrades] = identifier[open_packet] . identifier[data] [ literal[string] ]
identifier[self] . identifier[ping_interval] = identifier[open_packet] . identifier[data] [ literal[string] ]/ literal[int]
identifier[self] . identifier[ping_timeout] = identifier[open_packet] . identifier[data] [ literal[string] ]/ literal[int]
identifier[self] . identifier[current_transport] = literal[string]
identifier[self] . identifier[state] = literal[string]
identifier[client] . identifier[connected_clients] . identifier[append] ( identifier[self] )
keyword[await] identifier[self] . identifier[_trigger_event] ( literal[string] , identifier[run_async] = keyword[False] )
identifier[self] . identifier[ws] = identifier[ws]
identifier[self] . identifier[ping_loop_task] = identifier[self] . identifier[start_background_task] ( identifier[self] . identifier[_ping_loop] )
identifier[self] . identifier[write_loop_task] = identifier[self] . identifier[start_background_task] ( identifier[self] . identifier[_write_loop] )
identifier[self] . identifier[read_loop_task] = identifier[self] . identifier[start_background_task] (
identifier[self] . identifier[_read_loop_websocket] )
keyword[return] keyword[True] | async def _connect_websocket(self, url, headers, engineio_path):
"""Establish or upgrade to a WebSocket connection with the server."""
if websockets is None: # pragma: no cover
self.logger.error('websockets package not installed')
return False # depends on [control=['if'], data=[]]
websocket_url = self._get_engineio_url(url, engineio_path, 'websocket')
if self.sid:
self.logger.info('Attempting WebSocket upgrade to ' + websocket_url)
upgrade = True
websocket_url += '&sid=' + self.sid # depends on [control=['if'], data=[]]
else:
upgrade = False
self.base_url = websocket_url
self.logger.info('Attempting WebSocket connection to ' + websocket_url)
# get the cookies from the long-polling connection so that they can
# also be sent the the WebSocket route
cookies = None
if self.http:
cookies = '; '.join(['{}={}'.format(cookie.key, cookie.value) for cookie in self.http._cookie_jar])
headers = headers.copy()
headers['Cookie'] = cookies # depends on [control=['if'], data=[]]
try:
ws = await websockets.connect(websocket_url + self._get_url_timestamp(), extra_headers=headers) # depends on [control=['try'], data=[]]
except (websockets.exceptions.InvalidURI, websockets.exceptions.InvalidHandshake):
if upgrade:
self.logger.warning('WebSocket upgrade failed: connection error')
return False # depends on [control=['if'], data=[]]
else:
raise exceptions.ConnectionError('Connection error') # depends on [control=['except'], data=[]]
if upgrade:
p = packet.Packet(packet.PING, data='probe').encode(always_bytes=False)
try:
await ws.send(p) # depends on [control=['try'], data=[]]
except Exception as e: # pragma: no cover
self.logger.warning('WebSocket upgrade failed: unexpected send exception: %s', str(e))
return False # depends on [control=['except'], data=['e']]
try:
p = await ws.recv() # depends on [control=['try'], data=[]]
except Exception as e: # pragma: no cover
self.logger.warning('WebSocket upgrade failed: unexpected recv exception: %s', str(e))
return False # depends on [control=['except'], data=['e']]
pkt = packet.Packet(encoded_packet=p)
if pkt.packet_type != packet.PONG or pkt.data != 'probe':
self.logger.warning('WebSocket upgrade failed: no PONG packet')
return False # depends on [control=['if'], data=[]]
p = packet.Packet(packet.UPGRADE).encode(always_bytes=False)
try:
await ws.send(p) # depends on [control=['try'], data=[]]
except Exception as e: # pragma: no cover
self.logger.warning('WebSocket upgrade failed: unexpected send exception: %s', str(e))
return False # depends on [control=['except'], data=['e']]
self.current_transport = 'websocket'
if self.http: # pragma: no cover
await self.http.close() # depends on [control=['if'], data=[]]
self.logger.info('WebSocket upgrade was successful') # depends on [control=['if'], data=[]]
else:
try:
p = await ws.recv() # depends on [control=['try'], data=[]]
except Exception as e: # pragma: no cover
raise exceptions.ConnectionError('Unexpected recv exception: ' + str(e)) # depends on [control=['except'], data=['e']]
open_packet = packet.Packet(encoded_packet=p)
if open_packet.packet_type != packet.OPEN:
raise exceptions.ConnectionError('no OPEN packet') # depends on [control=['if'], data=[]]
self.logger.info('WebSocket connection accepted with ' + str(open_packet.data))
self.sid = open_packet.data['sid']
self.upgrades = open_packet.data['upgrades']
self.ping_interval = open_packet.data['pingInterval'] / 1000.0
self.ping_timeout = open_packet.data['pingTimeout'] / 1000.0
self.current_transport = 'websocket'
self.state = 'connected'
client.connected_clients.append(self)
await self._trigger_event('connect', run_async=False)
self.ws = ws
self.ping_loop_task = self.start_background_task(self._ping_loop)
self.write_loop_task = self.start_background_task(self._write_loop)
self.read_loop_task = self.start_background_task(self._read_loop_websocket)
return True |
def replace(self, key):
"""Selects a different image to be shown.
Parameters:
| key - a key in the original dictionary to specify which image to show
"""
if not (key in self.imagesDict):
print('The key', key, 'was not found in the collection of images dictionary')
raise KeyError
self.originalImage = self.imagesDict[key]
self.image = self.originalImage.copy()
# Set the rect of the image to appropriate values - using the current image
# then scale and rotate
self.rect = self.image.get_rect()
self.rect.x = self.loc[0]
self.rect.y = self.loc[1]
self.scale(self.percent, self.scaleFromCenter)
self.rotate(self.angle) | def function[replace, parameter[self, key]]:
constant[Selects a different image to be shown.
Parameters:
| key - a key in the original dictionary to specify which image to show
]
if <ast.UnaryOp object at 0x7da207f98790> begin[:]
call[name[print], parameter[constant[The key], name[key], constant[was not found in the collection of images dictionary]]]
<ast.Raise object at 0x7da2044c3250>
name[self].originalImage assign[=] call[name[self].imagesDict][name[key]]
name[self].image assign[=] call[name[self].originalImage.copy, parameter[]]
name[self].rect assign[=] call[name[self].image.get_rect, parameter[]]
name[self].rect.x assign[=] call[name[self].loc][constant[0]]
name[self].rect.y assign[=] call[name[self].loc][constant[1]]
call[name[self].scale, parameter[name[self].percent, name[self].scaleFromCenter]]
call[name[self].rotate, parameter[name[self].angle]] | keyword[def] identifier[replace] ( identifier[self] , identifier[key] ):
literal[string]
keyword[if] keyword[not] ( identifier[key] keyword[in] identifier[self] . identifier[imagesDict] ):
identifier[print] ( literal[string] , identifier[key] , literal[string] )
keyword[raise] identifier[KeyError]
identifier[self] . identifier[originalImage] = identifier[self] . identifier[imagesDict] [ identifier[key] ]
identifier[self] . identifier[image] = identifier[self] . identifier[originalImage] . identifier[copy] ()
identifier[self] . identifier[rect] = identifier[self] . identifier[image] . identifier[get_rect] ()
identifier[self] . identifier[rect] . identifier[x] = identifier[self] . identifier[loc] [ literal[int] ]
identifier[self] . identifier[rect] . identifier[y] = identifier[self] . identifier[loc] [ literal[int] ]
identifier[self] . identifier[scale] ( identifier[self] . identifier[percent] , identifier[self] . identifier[scaleFromCenter] )
identifier[self] . identifier[rotate] ( identifier[self] . identifier[angle] ) | def replace(self, key):
"""Selects a different image to be shown.
Parameters:
| key - a key in the original dictionary to specify which image to show
"""
if not key in self.imagesDict:
print('The key', key, 'was not found in the collection of images dictionary')
raise KeyError # depends on [control=['if'], data=[]]
self.originalImage = self.imagesDict[key]
self.image = self.originalImage.copy() # Set the rect of the image to appropriate values - using the current image
# then scale and rotate
self.rect = self.image.get_rect()
self.rect.x = self.loc[0]
self.rect.y = self.loc[1]
self.scale(self.percent, self.scaleFromCenter)
self.rotate(self.angle) |
def generate_file_name(self):
"""generate a suitable file name for the experiment"""
if not self.project:
raise UnderDefined("project name not given")
out_data_dir = prms.Paths.outdatadir
project_dir = os.path.join(out_data_dir, self.project)
file_name = "cellpy_batch_%s.json" % self.name
self.file_name = os.path.join(project_dir, file_name) | def function[generate_file_name, parameter[self]]:
constant[generate a suitable file name for the experiment]
if <ast.UnaryOp object at 0x7da1b192c3a0> begin[:]
<ast.Raise object at 0x7da1b192fb50>
variable[out_data_dir] assign[=] name[prms].Paths.outdatadir
variable[project_dir] assign[=] call[name[os].path.join, parameter[name[out_data_dir], name[self].project]]
variable[file_name] assign[=] binary_operation[constant[cellpy_batch_%s.json] <ast.Mod object at 0x7da2590d6920> name[self].name]
name[self].file_name assign[=] call[name[os].path.join, parameter[name[project_dir], name[file_name]]] | keyword[def] identifier[generate_file_name] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[project] :
keyword[raise] identifier[UnderDefined] ( literal[string] )
identifier[out_data_dir] = identifier[prms] . identifier[Paths] . identifier[outdatadir]
identifier[project_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[out_data_dir] , identifier[self] . identifier[project] )
identifier[file_name] = literal[string] % identifier[self] . identifier[name]
identifier[self] . identifier[file_name] = identifier[os] . identifier[path] . identifier[join] ( identifier[project_dir] , identifier[file_name] ) | def generate_file_name(self):
"""generate a suitable file name for the experiment"""
if not self.project:
raise UnderDefined('project name not given') # depends on [control=['if'], data=[]]
out_data_dir = prms.Paths.outdatadir
project_dir = os.path.join(out_data_dir, self.project)
file_name = 'cellpy_batch_%s.json' % self.name
self.file_name = os.path.join(project_dir, file_name) |
def decrypt_cbc_cts(self, data, init_vector):
"""
Return an iterator that decrypts `data` using the Cipher-Block Chaining
with Ciphertext Stealing (CBC-CTS) mode of operation.
CBC-CTS mode can only operate on `data` that is greater than 8 bytes in
length.
Each iteration, except the last, always returns a block-sized :obj:`bytes`
object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object
with a length less than the block-size, if `data` is not a multiple of the
block-size in length.
`data` should be a :obj:`bytes`-like object that is greater than 8 bytes in
length.
If it is not, a :exc:`ValueError` exception is raised.
`init_vector` is the initialization vector and should be a
:obj:`bytes`-like object with exactly 8 bytes.
If it is not, a :exc:`ValueError` exception is raised.
"""
data_len = len(data)
if data_len <= 8:
raise ValueError("data is not greater than 8 bytes in length")
S1, S2, S3, S4 = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
u4_2_pack = self._u4_2_pack
u4_2_unpack = self._u4_2_unpack
decrypt = self._decrypt
try:
prev_cipher_L, prev_cipher_R = u4_2_unpack(init_vector)
except struct_error:
raise ValueError("initialization vector is not 8 bytes in length")
extra_bytes = data_len % 8
last_block_stop_i = data_len - extra_bytes
last_block_start_i = last_block_stop_i - 8
for cipher_L, cipher_R in self._u4_2_iter_unpack(
data[0:last_block_start_i]
):
L, R = decrypt(
cipher_L, cipher_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
yield u4_2_pack(L ^ prev_cipher_L, R ^ prev_cipher_R)
prev_cipher_L = cipher_L
prev_cipher_R = cipher_R
cipher_L, cipher_R = u4_2_unpack(data[last_block_start_i:last_block_stop_i])
L, R = decrypt(
cipher_L, cipher_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
C_L, C_R = u4_2_unpack(data[last_block_stop_i:] + bytes(8 - extra_bytes))
Xn = u4_2_pack(L ^ C_L, R ^ C_R)
E_L, E_R = u4_2_unpack(data[last_block_stop_i:] + Xn[extra_bytes:])
L, R = decrypt(
E_L, E_R,
P, S1, S2, S3, S4,
u4_1_pack, u1_4_unpack
)
yield u4_2_pack(L ^ prev_cipher_L, R ^ prev_cipher_R)
yield Xn[:extra_bytes] | def function[decrypt_cbc_cts, parameter[self, data, init_vector]]:
constant[
Return an iterator that decrypts `data` using the Cipher-Block Chaining
with Ciphertext Stealing (CBC-CTS) mode of operation.
CBC-CTS mode can only operate on `data` that is greater than 8 bytes in
length.
Each iteration, except the last, always returns a block-sized :obj:`bytes`
object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object
with a length less than the block-size, if `data` is not a multiple of the
block-size in length.
`data` should be a :obj:`bytes`-like object that is greater than 8 bytes in
length.
If it is not, a :exc:`ValueError` exception is raised.
`init_vector` is the initialization vector and should be a
:obj:`bytes`-like object with exactly 8 bytes.
If it is not, a :exc:`ValueError` exception is raised.
]
variable[data_len] assign[=] call[name[len], parameter[name[data]]]
if compare[name[data_len] less_or_equal[<=] constant[8]] begin[:]
<ast.Raise object at 0x7da18bccb5b0>
<ast.Tuple object at 0x7da18bcc9fc0> assign[=] name[self].S
variable[P] assign[=] name[self].P
variable[u4_1_pack] assign[=] name[self]._u4_1_pack
variable[u1_4_unpack] assign[=] name[self]._u1_4_unpack
variable[u4_2_pack] assign[=] name[self]._u4_2_pack
variable[u4_2_unpack] assign[=] name[self]._u4_2_unpack
variable[decrypt] assign[=] name[self]._decrypt
<ast.Try object at 0x7da18bcc9780>
variable[extra_bytes] assign[=] binary_operation[name[data_len] <ast.Mod object at 0x7da2590d6920> constant[8]]
variable[last_block_stop_i] assign[=] binary_operation[name[data_len] - name[extra_bytes]]
variable[last_block_start_i] assign[=] binary_operation[name[last_block_stop_i] - constant[8]]
for taget[tuple[[<ast.Name object at 0x7da18bcc8ac0>, <ast.Name object at 0x7da18bcc8b20>]]] in starred[call[name[self]._u4_2_iter_unpack, parameter[call[name[data]][<ast.Slice object at 0x7da18bcca710>]]]] begin[:]
<ast.Tuple object at 0x7da18bccac80> assign[=] call[name[decrypt], parameter[name[cipher_L], name[cipher_R], name[P], name[S1], name[S2], name[S3], name[S4], name[u4_1_pack], name[u1_4_unpack]]]
<ast.Yield object at 0x7da18bccb6d0>
variable[prev_cipher_L] assign[=] name[cipher_L]
variable[prev_cipher_R] assign[=] name[cipher_R]
<ast.Tuple object at 0x7da18f811030> assign[=] call[name[u4_2_unpack], parameter[call[name[data]][<ast.Slice object at 0x7da18f813250>]]]
<ast.Tuple object at 0x7da18f811a80> assign[=] call[name[decrypt], parameter[name[cipher_L], name[cipher_R], name[P], name[S1], name[S2], name[S3], name[S4], name[u4_1_pack], name[u1_4_unpack]]]
<ast.Tuple object at 0x7da18f811c90> assign[=] call[name[u4_2_unpack], parameter[binary_operation[call[name[data]][<ast.Slice object at 0x7da18f810bb0>] + call[name[bytes], parameter[binary_operation[constant[8] - name[extra_bytes]]]]]]]
variable[Xn] assign[=] call[name[u4_2_pack], parameter[binary_operation[name[L] <ast.BitXor object at 0x7da2590d6b00> name[C_L]], binary_operation[name[R] <ast.BitXor object at 0x7da2590d6b00> name[C_R]]]]
<ast.Tuple object at 0x7da18f811ed0> assign[=] call[name[u4_2_unpack], parameter[binary_operation[call[name[data]][<ast.Slice object at 0x7da18ede4dc0>] + call[name[Xn]][<ast.Slice object at 0x7da18ede4730>]]]]
<ast.Tuple object at 0x7da18ede5210> assign[=] call[name[decrypt], parameter[name[E_L], name[E_R], name[P], name[S1], name[S2], name[S3], name[S4], name[u4_1_pack], name[u1_4_unpack]]]
<ast.Yield object at 0x7da18ede54e0>
<ast.Yield object at 0x7da18ede4a00> | keyword[def] identifier[decrypt_cbc_cts] ( identifier[self] , identifier[data] , identifier[init_vector] ):
literal[string]
identifier[data_len] = identifier[len] ( identifier[data] )
keyword[if] identifier[data_len] <= literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[S1] , identifier[S2] , identifier[S3] , identifier[S4] = identifier[self] . identifier[S]
identifier[P] = identifier[self] . identifier[P]
identifier[u4_1_pack] = identifier[self] . identifier[_u4_1_pack]
identifier[u1_4_unpack] = identifier[self] . identifier[_u1_4_unpack]
identifier[u4_2_pack] = identifier[self] . identifier[_u4_2_pack]
identifier[u4_2_unpack] = identifier[self] . identifier[_u4_2_unpack]
identifier[decrypt] = identifier[self] . identifier[_decrypt]
keyword[try] :
identifier[prev_cipher_L] , identifier[prev_cipher_R] = identifier[u4_2_unpack] ( identifier[init_vector] )
keyword[except] identifier[struct_error] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[extra_bytes] = identifier[data_len] % literal[int]
identifier[last_block_stop_i] = identifier[data_len] - identifier[extra_bytes]
identifier[last_block_start_i] = identifier[last_block_stop_i] - literal[int]
keyword[for] identifier[cipher_L] , identifier[cipher_R] keyword[in] identifier[self] . identifier[_u4_2_iter_unpack] (
identifier[data] [ literal[int] : identifier[last_block_start_i] ]
):
identifier[L] , identifier[R] = identifier[decrypt] (
identifier[cipher_L] , identifier[cipher_R] ,
identifier[P] , identifier[S1] , identifier[S2] , identifier[S3] , identifier[S4] ,
identifier[u4_1_pack] , identifier[u1_4_unpack]
)
keyword[yield] identifier[u4_2_pack] ( identifier[L] ^ identifier[prev_cipher_L] , identifier[R] ^ identifier[prev_cipher_R] )
identifier[prev_cipher_L] = identifier[cipher_L]
identifier[prev_cipher_R] = identifier[cipher_R]
identifier[cipher_L] , identifier[cipher_R] = identifier[u4_2_unpack] ( identifier[data] [ identifier[last_block_start_i] : identifier[last_block_stop_i] ])
identifier[L] , identifier[R] = identifier[decrypt] (
identifier[cipher_L] , identifier[cipher_R] ,
identifier[P] , identifier[S1] , identifier[S2] , identifier[S3] , identifier[S4] ,
identifier[u4_1_pack] , identifier[u1_4_unpack]
)
identifier[C_L] , identifier[C_R] = identifier[u4_2_unpack] ( identifier[data] [ identifier[last_block_stop_i] :]+ identifier[bytes] ( literal[int] - identifier[extra_bytes] ))
identifier[Xn] = identifier[u4_2_pack] ( identifier[L] ^ identifier[C_L] , identifier[R] ^ identifier[C_R] )
identifier[E_L] , identifier[E_R] = identifier[u4_2_unpack] ( identifier[data] [ identifier[last_block_stop_i] :]+ identifier[Xn] [ identifier[extra_bytes] :])
identifier[L] , identifier[R] = identifier[decrypt] (
identifier[E_L] , identifier[E_R] ,
identifier[P] , identifier[S1] , identifier[S2] , identifier[S3] , identifier[S4] ,
identifier[u4_1_pack] , identifier[u1_4_unpack]
)
keyword[yield] identifier[u4_2_pack] ( identifier[L] ^ identifier[prev_cipher_L] , identifier[R] ^ identifier[prev_cipher_R] )
keyword[yield] identifier[Xn] [: identifier[extra_bytes] ] | def decrypt_cbc_cts(self, data, init_vector):
"""
Return an iterator that decrypts `data` using the Cipher-Block Chaining
with Ciphertext Stealing (CBC-CTS) mode of operation.
CBC-CTS mode can only operate on `data` that is greater than 8 bytes in
length.
Each iteration, except the last, always returns a block-sized :obj:`bytes`
object (i.e. 8 bytes). The last iteration may return a :obj:`bytes` object
with a length less than the block-size, if `data` is not a multiple of the
block-size in length.
`data` should be a :obj:`bytes`-like object that is greater than 8 bytes in
length.
If it is not, a :exc:`ValueError` exception is raised.
`init_vector` is the initialization vector and should be a
:obj:`bytes`-like object with exactly 8 bytes.
If it is not, a :exc:`ValueError` exception is raised.
"""
data_len = len(data)
if data_len <= 8:
raise ValueError('data is not greater than 8 bytes in length') # depends on [control=['if'], data=[]]
(S1, S2, S3, S4) = self.S
P = self.P
u4_1_pack = self._u4_1_pack
u1_4_unpack = self._u1_4_unpack
u4_2_pack = self._u4_2_pack
u4_2_unpack = self._u4_2_unpack
decrypt = self._decrypt
try:
(prev_cipher_L, prev_cipher_R) = u4_2_unpack(init_vector) # depends on [control=['try'], data=[]]
except struct_error:
raise ValueError('initialization vector is not 8 bytes in length') # depends on [control=['except'], data=[]]
extra_bytes = data_len % 8
last_block_stop_i = data_len - extra_bytes
last_block_start_i = last_block_stop_i - 8
for (cipher_L, cipher_R) in self._u4_2_iter_unpack(data[0:last_block_start_i]):
(L, R) = decrypt(cipher_L, cipher_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)
yield u4_2_pack(L ^ prev_cipher_L, R ^ prev_cipher_R)
prev_cipher_L = cipher_L
prev_cipher_R = cipher_R # depends on [control=['for'], data=[]]
(cipher_L, cipher_R) = u4_2_unpack(data[last_block_start_i:last_block_stop_i])
(L, R) = decrypt(cipher_L, cipher_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)
(C_L, C_R) = u4_2_unpack(data[last_block_stop_i:] + bytes(8 - extra_bytes))
Xn = u4_2_pack(L ^ C_L, R ^ C_R)
(E_L, E_R) = u4_2_unpack(data[last_block_stop_i:] + Xn[extra_bytes:])
(L, R) = decrypt(E_L, E_R, P, S1, S2, S3, S4, u4_1_pack, u1_4_unpack)
yield u4_2_pack(L ^ prev_cipher_L, R ^ prev_cipher_R)
yield Xn[:extra_bytes] |
def hide_routemap_holder_route_map_content_set_dampening_half_life(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
route_map = ET.SubElement(hide_routemap_holder, "route-map")
name_key = ET.SubElement(route_map, "name")
name_key.text = kwargs.pop('name')
action_rm_key = ET.SubElement(route_map, "action-rm")
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, "instance")
instance_key.text = kwargs.pop('instance')
content = ET.SubElement(route_map, "content")
set = ET.SubElement(content, "set")
dampening = ET.SubElement(set, "dampening")
half_life = ET.SubElement(dampening, "half-life")
half_life.text = kwargs.pop('half_life')
callback = kwargs.pop('callback', self._callback)
return callback(config) | def function[hide_routemap_holder_route_map_content_set_dampening_half_life, parameter[self]]:
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[hide_routemap_holder] assign[=] call[name[ET].SubElement, parameter[name[config], constant[hide-routemap-holder]]]
variable[route_map] assign[=] call[name[ET].SubElement, parameter[name[hide_routemap_holder], constant[route-map]]]
variable[name_key] assign[=] call[name[ET].SubElement, parameter[name[route_map], constant[name]]]
name[name_key].text assign[=] call[name[kwargs].pop, parameter[constant[name]]]
variable[action_rm_key] assign[=] call[name[ET].SubElement, parameter[name[route_map], constant[action-rm]]]
name[action_rm_key].text assign[=] call[name[kwargs].pop, parameter[constant[action_rm]]]
variable[instance_key] assign[=] call[name[ET].SubElement, parameter[name[route_map], constant[instance]]]
name[instance_key].text assign[=] call[name[kwargs].pop, parameter[constant[instance]]]
variable[content] assign[=] call[name[ET].SubElement, parameter[name[route_map], constant[content]]]
variable[set] assign[=] call[name[ET].SubElement, parameter[name[content], constant[set]]]
variable[dampening] assign[=] call[name[ET].SubElement, parameter[name[set], constant[dampening]]]
variable[half_life] assign[=] call[name[ET].SubElement, parameter[name[dampening], constant[half-life]]]
name[half_life].text assign[=] call[name[kwargs].pop, parameter[constant[half_life]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[hide_routemap_holder_route_map_content_set_dampening_half_life] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[hide_routemap_holder] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[route_map] = identifier[ET] . identifier[SubElement] ( identifier[hide_routemap_holder] , literal[string] )
identifier[name_key] = identifier[ET] . identifier[SubElement] ( identifier[route_map] , literal[string] )
identifier[name_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[action_rm_key] = identifier[ET] . identifier[SubElement] ( identifier[route_map] , literal[string] )
identifier[action_rm_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[instance_key] = identifier[ET] . identifier[SubElement] ( identifier[route_map] , literal[string] )
identifier[instance_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[content] = identifier[ET] . identifier[SubElement] ( identifier[route_map] , literal[string] )
identifier[set] = identifier[ET] . identifier[SubElement] ( identifier[content] , literal[string] )
identifier[dampening] = identifier[ET] . identifier[SubElement] ( identifier[set] , literal[string] )
identifier[half_life] = identifier[ET] . identifier[SubElement] ( identifier[dampening] , literal[string] )
identifier[half_life] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def hide_routemap_holder_route_map_content_set_dampening_half_life(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
hide_routemap_holder = ET.SubElement(config, 'hide-routemap-holder', xmlns='urn:brocade.com:mgmt:brocade-ip-policy')
route_map = ET.SubElement(hide_routemap_holder, 'route-map')
name_key = ET.SubElement(route_map, 'name')
name_key.text = kwargs.pop('name')
action_rm_key = ET.SubElement(route_map, 'action-rm')
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, 'instance')
instance_key.text = kwargs.pop('instance')
content = ET.SubElement(route_map, 'content')
set = ET.SubElement(content, 'set')
dampening = ET.SubElement(set, 'dampening')
half_life = ET.SubElement(dampening, 'half-life')
half_life.text = kwargs.pop('half_life')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def get_query(query_id, session, retry_count=5):
"""attemps to get the query and retry if it cannot"""
query = None
attempt = 0
while not query and attempt < retry_count:
try:
query = session.query(Query).filter_by(id=query_id).one()
except Exception:
attempt += 1
logging.error(
'Query with id `{}` could not be retrieved'.format(query_id))
stats_logger.incr('error_attempting_orm_query_' + str(attempt))
logging.error('Sleeping for a sec before retrying...')
sleep(1)
if not query:
stats_logger.incr('error_failed_at_getting_orm_query')
raise SqlLabException('Failed at getting query')
return query | def function[get_query, parameter[query_id, session, retry_count]]:
constant[attemps to get the query and retry if it cannot]
variable[query] assign[=] constant[None]
variable[attempt] assign[=] constant[0]
while <ast.BoolOp object at 0x7da1b20e4b20> begin[:]
<ast.Try object at 0x7da1b20e4a00>
if <ast.UnaryOp object at 0x7da1b20e61a0> begin[:]
call[name[stats_logger].incr, parameter[constant[error_failed_at_getting_orm_query]]]
<ast.Raise object at 0x7da1b20e5ff0>
return[name[query]] | keyword[def] identifier[get_query] ( identifier[query_id] , identifier[session] , identifier[retry_count] = literal[int] ):
literal[string]
identifier[query] = keyword[None]
identifier[attempt] = literal[int]
keyword[while] keyword[not] identifier[query] keyword[and] identifier[attempt] < identifier[retry_count] :
keyword[try] :
identifier[query] = identifier[session] . identifier[query] ( identifier[Query] ). identifier[filter_by] ( identifier[id] = identifier[query_id] ). identifier[one] ()
keyword[except] identifier[Exception] :
identifier[attempt] += literal[int]
identifier[logging] . identifier[error] (
literal[string] . identifier[format] ( identifier[query_id] ))
identifier[stats_logger] . identifier[incr] ( literal[string] + identifier[str] ( identifier[attempt] ))
identifier[logging] . identifier[error] ( literal[string] )
identifier[sleep] ( literal[int] )
keyword[if] keyword[not] identifier[query] :
identifier[stats_logger] . identifier[incr] ( literal[string] )
keyword[raise] identifier[SqlLabException] ( literal[string] )
keyword[return] identifier[query] | def get_query(query_id, session, retry_count=5):
"""attemps to get the query and retry if it cannot"""
query = None
attempt = 0
while not query and attempt < retry_count:
try:
query = session.query(Query).filter_by(id=query_id).one() # depends on [control=['try'], data=[]]
except Exception:
attempt += 1
logging.error('Query with id `{}` could not be retrieved'.format(query_id))
stats_logger.incr('error_attempting_orm_query_' + str(attempt))
logging.error('Sleeping for a sec before retrying...')
sleep(1) # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]]
if not query:
stats_logger.incr('error_failed_at_getting_orm_query')
raise SqlLabException('Failed at getting query') # depends on [control=['if'], data=[]]
return query |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.