code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def validate(template_dict, schema=None):
    """
    Is this a valid SAM template dictionary
    :param dict template_dict: Data to be validated
    :param dict schema: Optional, dictionary containing JSON Schema representing SAM template
    :return: Empty string if there are no validation errors in template
    """
    # Fall back to the default SAM schema when the caller supplies none
    # (an empty dict is treated the same way by `not schema`).
    if not schema:
        schema = SamTemplateValidator._read_schema()
    validation_errors = ""
    try:
        jsonschema.validate(template_dict, schema)
    except ValidationError as ex:
        # Stringifying the exception will give us useful error message
        validation_errors = str(ex)
        # Swallowing expected exception here as our caller is expecting validation errors and
        # not the validation exception itself
        pass
    return validation_errors | def function[validate, parameter[template_dict, schema]]:
constant[
Is this a valid SAM template dictionary
:param dict template_dict: Data to be validated
:param dict schema: Optional, dictionary containing JSON Schema representing SAM template
:return: Empty string if there are no validation errors in template
]
if <ast.UnaryOp object at 0x7da2041d9990> begin[:]
variable[schema] assign[=] call[name[SamTemplateValidator]._read_schema, parameter[]]
variable[validation_errors] assign[=] constant[]
<ast.Try object at 0x7da2041d9090>
return[name[validation_errors]] | keyword[def] identifier[validate] ( identifier[template_dict] , identifier[schema] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[schema] :
identifier[schema] = identifier[SamTemplateValidator] . identifier[_read_schema] ()
identifier[validation_errors] = literal[string]
keyword[try] :
identifier[jsonschema] . identifier[validate] ( identifier[template_dict] , identifier[schema] )
keyword[except] identifier[ValidationError] keyword[as] identifier[ex] :
identifier[validation_errors] = identifier[str] ( identifier[ex] )
keyword[pass]
keyword[return] identifier[validation_errors] | def validate(template_dict, schema=None):
"""
Is this a valid SAM template dictionary
:param dict template_dict: Data to be validated
:param dict schema: Optional, dictionary containing JSON Schema representing SAM template
:return: Empty string if there are no validation errors in template
"""
if not schema:
schema = SamTemplateValidator._read_schema() # depends on [control=['if'], data=[]]
validation_errors = ''
try:
jsonschema.validate(template_dict, schema) # depends on [control=['try'], data=[]]
except ValidationError as ex:
# Stringifying the exception will give us useful error message
validation_errors = str(ex)
# Swallowing expected exception here as our caller is expecting validation errors and
# not the valiation exception itself
pass # depends on [control=['except'], data=['ex']]
return validation_errors |
def _exception(etype, eval_, etrace):
    """ Wrap exception in debugger if not in tty """
    # `sys.ps1` only exists in interactive interpreters; if it is set, or
    # stderr is not a tty, an interactive debugger cannot usefully run.
    if hasattr(sys, 'ps1') or not sys.stderr.isatty():
        # we are in interactive mode or we don't have a tty-like
        # device, so we call the default hook
        sys.__excepthook__(etype, eval_, etrace)
    else:
        # we are NOT in interactive mode, print the exception...
        traceback.print_exception(etype, eval_, etrace, limit=2,
                                  file=sys.stdout)
        six.print_()
        # ...then start the debugger in post-mortem mode.
        pdb.pm() | def function[_exception, parameter[etype, eval_, etrace]]:
constant[ Wrap exception in debugger if not in tty ]
if <ast.BoolOp object at 0x7da18f58cbe0> begin[:]
call[name[sys].__excepthook__, parameter[name[etype], name[eval_], name[etrace]]] | keyword[def] identifier[_exception] ( identifier[etype] , identifier[eval_] , identifier[etrace] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[sys] , literal[string] ) keyword[or] keyword[not] identifier[sys] . identifier[stderr] . identifier[isatty] ():
identifier[sys] . identifier[__excepthook__] ( identifier[etype] , identifier[eval_] , identifier[etrace] )
keyword[else] :
identifier[traceback] . identifier[print_exception] ( identifier[etype] , identifier[eval_] , identifier[etrace] , identifier[limit] = literal[int] ,
identifier[file] = identifier[sys] . identifier[stdout] )
identifier[six] . identifier[print_] ()
identifier[pdb] . identifier[pm] () | def _exception(etype, eval_, etrace):
""" Wrap exception in debugger if not in tty """
if hasattr(sys, 'ps1') or not sys.stderr.isatty():
# we are in interactive mode or we don't have a tty-like
# device, so we call the default hook
sys.__excepthook__(etype, eval_, etrace) # depends on [control=['if'], data=[]]
else:
# we are NOT in interactive mode, print the exception...
traceback.print_exception(etype, eval_, etrace, limit=2, file=sys.stdout)
six.print_()
# ...then start the debugger in post-mortem mode.
pdb.pm() |
def caches(self, options={}):
    """Query the server for a list of caches, parse the JSON response, and
    return the result.
    Keyword arguments:
    options -- a dict of arguments to send with the request. See
    http://dev.iron.io/cache/reference/api/#list_caches for more
    information on defaults and possible values.
    """
    # NOTE(review): mutable default argument; harmless here because
    # `options` is only read (by urlencode), never mutated.
    query = urllib.urlencode(options)
    url = "caches"
    # Only append a query string when there are options to send.
    if query != "":
        url = "%s?%s" % (url, query)
    result = self.client.get(url)
    # The response "body" is a list of cache descriptors; expose just the names.
    return [cache["name"] for cache in result["body"]] | def function[caches, parameter[self, options]]:
constant[Query the server for a list of caches, parse the JSON response, and
return the result.
Keyword arguments:
options -- a dict of arguments to send with the request. See
http://dev.iron.io/cache/reference/api/#list_caches for more
information on defaults and possible values.
]
variable[query] assign[=] call[name[urllib].urlencode, parameter[name[options]]]
variable[url] assign[=] constant[caches]
if compare[name[query] not_equal[!=] constant[]] begin[:]
variable[url] assign[=] binary_operation[constant[%s?%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f810850>, <ast.Name object at 0x7da18f813280>]]]
variable[result] assign[=] call[name[self].client.get, parameter[name[url]]]
return[<ast.ListComp object at 0x7da18f810340>] | keyword[def] identifier[caches] ( identifier[self] , identifier[options] ={}):
literal[string]
identifier[query] = identifier[urllib] . identifier[urlencode] ( identifier[options] )
identifier[url] = literal[string]
keyword[if] identifier[query] != literal[string] :
identifier[url] = literal[string] %( identifier[url] , identifier[query] )
identifier[result] = identifier[self] . identifier[client] . identifier[get] ( identifier[url] )
keyword[return] [ identifier[cache] [ literal[string] ] keyword[for] identifier[cache] keyword[in] identifier[result] [ literal[string] ]] | def caches(self, options={}):
"""Query the server for a list of caches, parse the JSON response, and
return the result.
Keyword arguments:
options -- a dict of arguments to send with the request. See
http://dev.iron.io/cache/reference/api/#list_caches for more
information on defaults and possible values.
"""
query = urllib.urlencode(options)
url = 'caches'
if query != '':
url = '%s?%s' % (url, query) # depends on [control=['if'], data=['query']]
result = self.client.get(url)
return [cache['name'] for cache in result['body']] |
def extend_list(self, data, parsed_args):
    """Add subnet information to a network list.

    Mutates each network dict in `data` in place, replacing its list of
    subnet ids with the corresponding subnet dicts.
    """
    neutron_client = self.get_client()
    # Only fetch the fields we actually need for each subnet.
    search_opts = {'fields': ['id', 'cidr']}
    if self.pagination_support:
        page_size = parsed_args.page_size
        if page_size:
            search_opts.update({'limit': page_size})
    # Collect every subnet id referenced by any network in the list.
    subnet_ids = []
    for n in data:
        if 'subnets' in n:
            subnet_ids.extend(n['subnets'])
    def _get_subnet_list(sub_ids):
        # Helper: fetch only the subnets whose ids are in `sub_ids`.
        search_opts['id'] = sub_ids
        return neutron_client.list_subnets(
            **search_opts).get('subnets', [])
    try:
        subnets = _get_subnet_list(subnet_ids)
    except exceptions.RequestURITooLong as uri_len_exc:
        # The URI is too long because of too many subnet_id filters
        # Use the excess attribute of the exception to know how many
        # subnet_id filters can be inserted into a single request
        subnet_count = len(subnet_ids)
        max_size = ((self.subnet_id_filter_len * subnet_count) -
                    uri_len_exc.excess)
        chunk_size = max_size // self.subnet_id_filter_len
        subnets = []
        # Re-issue the query in chunks small enough to fit in one URI.
        for i in range(0, subnet_count, chunk_size):
            subnets.extend(
                _get_subnet_list(subnet_ids[i: i + chunk_size]))
    # Index the fetched subnets by id, then replace each network's list of
    # subnet ids with full subnet dicts, falling back to a bare {"id": ...}
    # stub for any id the server did not return.
    subnet_dict = dict([(s['id'], s) for s in subnets])
    for n in data:
        if 'subnets' in n:
            n['subnets'] = [(subnet_dict.get(s) or {"id": s})
                            for s in n['subnets']] | def function[extend_list, parameter[self, data, parsed_args]]:
constant[Add subnet information to a network list.]
variable[neutron_client] assign[=] call[name[self].get_client, parameter[]]
variable[search_opts] assign[=] dictionary[[<ast.Constant object at 0x7da18f7215d0>], [<ast.List object at 0x7da18f723d00>]]
if name[self].pagination_support begin[:]
variable[page_size] assign[=] name[parsed_args].page_size
if name[page_size] begin[:]
call[name[search_opts].update, parameter[dictionary[[<ast.Constant object at 0x7da18f721e70>], [<ast.Name object at 0x7da18f720400>]]]]
variable[subnet_ids] assign[=] list[[]]
for taget[name[n]] in starred[name[data]] begin[:]
if compare[constant[subnets] in name[n]] begin[:]
call[name[subnet_ids].extend, parameter[call[name[n]][constant[subnets]]]]
def function[_get_subnet_list, parameter[sub_ids]]:
call[name[search_opts]][constant[id]] assign[=] name[sub_ids]
return[call[call[name[neutron_client].list_subnets, parameter[]].get, parameter[constant[subnets], list[[]]]]]
<ast.Try object at 0x7da18f723280>
variable[subnet_dict] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da18f722f20>]]
for taget[name[n]] in starred[name[data]] begin[:]
if compare[constant[subnets] in name[n]] begin[:]
call[name[n]][constant[subnets]] assign[=] <ast.ListComp object at 0x7da18f723b50> | keyword[def] identifier[extend_list] ( identifier[self] , identifier[data] , identifier[parsed_args] ):
literal[string]
identifier[neutron_client] = identifier[self] . identifier[get_client] ()
identifier[search_opts] ={ literal[string] :[ literal[string] , literal[string] ]}
keyword[if] identifier[self] . identifier[pagination_support] :
identifier[page_size] = identifier[parsed_args] . identifier[page_size]
keyword[if] identifier[page_size] :
identifier[search_opts] . identifier[update] ({ literal[string] : identifier[page_size] })
identifier[subnet_ids] =[]
keyword[for] identifier[n] keyword[in] identifier[data] :
keyword[if] literal[string] keyword[in] identifier[n] :
identifier[subnet_ids] . identifier[extend] ( identifier[n] [ literal[string] ])
keyword[def] identifier[_get_subnet_list] ( identifier[sub_ids] ):
identifier[search_opts] [ literal[string] ]= identifier[sub_ids]
keyword[return] identifier[neutron_client] . identifier[list_subnets] (
** identifier[search_opts] ). identifier[get] ( literal[string] ,[])
keyword[try] :
identifier[subnets] = identifier[_get_subnet_list] ( identifier[subnet_ids] )
keyword[except] identifier[exceptions] . identifier[RequestURITooLong] keyword[as] identifier[uri_len_exc] :
identifier[subnet_count] = identifier[len] ( identifier[subnet_ids] )
identifier[max_size] =(( identifier[self] . identifier[subnet_id_filter_len] * identifier[subnet_count] )-
identifier[uri_len_exc] . identifier[excess] )
identifier[chunk_size] = identifier[max_size] // identifier[self] . identifier[subnet_id_filter_len]
identifier[subnets] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[subnet_count] , identifier[chunk_size] ):
identifier[subnets] . identifier[extend] (
identifier[_get_subnet_list] ( identifier[subnet_ids] [ identifier[i] : identifier[i] + identifier[chunk_size] ]))
identifier[subnet_dict] = identifier[dict] ([( identifier[s] [ literal[string] ], identifier[s] ) keyword[for] identifier[s] keyword[in] identifier[subnets] ])
keyword[for] identifier[n] keyword[in] identifier[data] :
keyword[if] literal[string] keyword[in] identifier[n] :
identifier[n] [ literal[string] ]=[( identifier[subnet_dict] . identifier[get] ( identifier[s] ) keyword[or] { literal[string] : identifier[s] })
keyword[for] identifier[s] keyword[in] identifier[n] [ literal[string] ]] | def extend_list(self, data, parsed_args):
"""Add subnet information to a network list."""
neutron_client = self.get_client()
search_opts = {'fields': ['id', 'cidr']}
if self.pagination_support:
page_size = parsed_args.page_size
if page_size:
search_opts.update({'limit': page_size}) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
subnet_ids = []
for n in data:
if 'subnets' in n:
subnet_ids.extend(n['subnets']) # depends on [control=['if'], data=['n']] # depends on [control=['for'], data=['n']]
def _get_subnet_list(sub_ids):
search_opts['id'] = sub_ids
return neutron_client.list_subnets(**search_opts).get('subnets', [])
try:
subnets = _get_subnet_list(subnet_ids) # depends on [control=['try'], data=[]]
except exceptions.RequestURITooLong as uri_len_exc:
# The URI is too long because of too many subnet_id filters
# Use the excess attribute of the exception to know how many
# subnet_id filters can be inserted into a single request
subnet_count = len(subnet_ids)
max_size = self.subnet_id_filter_len * subnet_count - uri_len_exc.excess
chunk_size = max_size // self.subnet_id_filter_len
subnets = []
for i in range(0, subnet_count, chunk_size):
subnets.extend(_get_subnet_list(subnet_ids[i:i + chunk_size])) # depends on [control=['for'], data=['i']] # depends on [control=['except'], data=['uri_len_exc']]
subnet_dict = dict([(s['id'], s) for s in subnets])
for n in data:
if 'subnets' in n:
n['subnets'] = [subnet_dict.get(s) or {'id': s} for s in n['subnets']] # depends on [control=['if'], data=['n']] # depends on [control=['for'], data=['n']] |
def parse_snpeff_log(self, f):
    """ Go through log file looking for snpeff output

    Parses the CSV stats file referenced by the MultiQC file dict `f`
    (open handle under f['f'], sample name under f['s_name']) and stores
    results in self.snpeff_data / self.snpeff_qualities /
    self.snpeff_section_totals.
    """
    # Map of section headers to the row names we keep from that section;
    # the value 'all' keeps every row of the section.
    keys = {
        '# Summary table': [
            'Genome', 'Number_of_variants_before_filter',
            'Number_of_known_variants', 'Number_of_effects',
            'Genome_total_length', 'Change_rate'
        ],
        '# Effects by impact': [ 'HIGH', 'LOW', 'MODERATE', 'MODIFIER' ],
        '# Effects by functional class': [ 'MISSENSE', 'NONSENSE', 'SILENT', 'Missense_Silent_ratio' ],
        '# Hom/Het table': ['Het', 'Hom', 'Missing'],
        '# Ts/Tv summary': [ 'Transitions', 'Transversions', 'Ts_Tv_ratio' ],
        '# Count by effects': 'all',
        '# Count by genomic region': 'all'
    }
    parsed_data = {}
    section = None
    for l in f['f']:
        l = l.strip()
        # Lines starting with '#' open a new section of the stats file.
        if l[:1] == '#':
            section = l
            self.snpeff_section_totals[section] = dict()
            continue
        s = l.split(',')
        # Quality values / counts
        if section == '# Quality':
            quals = OrderedDict()
            if l.startswith('Values'):
                values = [int(c) for c in l.split(',')[1:] ]
                # The matching counts are on the very next line of the file.
                counts = f['f'].readline()
                counts = [int(c) for c in counts.split(',')[1:] ]
                c = 0
                total = sum(counts)
                # Keep values until 99.5% of observations are covered,
                # trimming the long sparse tail from the quality data.
                for i, v in enumerate(values):
                    if c < (total * 0.995):
                        quals[v] = counts[i]
                        c += counts[i]
            if len(quals) > 0:
                self.snpeff_qualities[f['s_name']] = quals
        # Everything else
        elif section in keys:
            if keys[section] == 'all' or any([k in s[0].strip() for k in keys[section]]):
                try:
                    parsed_data[ s[0].strip() ] = float(s[1].strip())
                except ValueError:
                    # Not numeric -- keep the raw string value.
                    parsed_data[ s[0].strip() ] = s[1].strip()
                except IndexError:
                    # Row has no value column; skip it.
                    pass
                else:
                    # Parsing the number worked - add to totals
                    try:
                        self.snpeff_section_totals[section][s[0].strip()] += parsed_data[ s[0].strip() ]
                    except KeyError:
                        self.snpeff_section_totals[section][s[0].strip()] = parsed_data[ s[0].strip() ]
                # A third column ending in '%' carries a percentage variant
                # of the same statistic.
                if len(s) > 2 and s[2][-1:] == '%':
                    parsed_data[ '{}_percent'.format(s[0].strip()) ] = float(s[2][:-1])
    if len(parsed_data) > 0:
        if f['s_name'] in self.snpeff_data:
            log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
        self.add_data_source(f)
        self.snpeff_data[f['s_name']] = parsed_data | def function[parse_snpeff_log, parameter[self, f]]:
constant[ Go through log file looking for snpeff output ]
variable[keys] assign[=] dictionary[[<ast.Constant object at 0x7da1b1e5c310>, <ast.Constant object at 0x7da1b1e5de70>, <ast.Constant object at 0x7da1b1e5eb30>, <ast.Constant object at 0x7da1b1e5e2f0>, <ast.Constant object at 0x7da1b1e5f610>, <ast.Constant object at 0x7da1b1e5caf0>, <ast.Constant object at 0x7da1b1e5cfd0>], [<ast.List object at 0x7da1b1e5c5b0>, <ast.List object at 0x7da1b1e5e500>, <ast.List object at 0x7da1b1e5ead0>, <ast.List object at 0x7da1b1e5e6e0>, <ast.List object at 0x7da1b1e5ee30>, <ast.Constant object at 0x7da1b1e5ece0>, <ast.Constant object at 0x7da207f9b880>]]
variable[parsed_data] assign[=] dictionary[[], []]
variable[section] assign[=] constant[None]
for taget[name[l]] in starred[call[name[f]][constant[f]]] begin[:]
variable[l] assign[=] call[name[l].strip, parameter[]]
if compare[call[name[l]][<ast.Slice object at 0x7da207f99120>] equal[==] constant[#]] begin[:]
variable[section] assign[=] name[l]
call[name[self].snpeff_section_totals][name[section]] assign[=] call[name[dict], parameter[]]
continue
variable[s] assign[=] call[name[l].split, parameter[constant[,]]]
if compare[name[section] equal[==] constant[# Quality]] begin[:]
variable[quals] assign[=] call[name[OrderedDict], parameter[]]
if call[name[l].startswith, parameter[constant[Values]]] begin[:]
variable[values] assign[=] <ast.ListComp object at 0x7da207f9b400>
variable[counts] assign[=] call[call[name[f]][constant[f]].readline, parameter[]]
variable[counts] assign[=] <ast.ListComp object at 0x7da207f9baf0>
variable[c] assign[=] constant[0]
variable[total] assign[=] call[name[sum], parameter[name[counts]]]
for taget[tuple[[<ast.Name object at 0x7da207f9b940>, <ast.Name object at 0x7da207f99ed0>]]] in starred[call[name[enumerate], parameter[name[values]]]] begin[:]
if compare[name[c] less[<] binary_operation[name[total] * constant[0.995]]] begin[:]
call[name[quals]][name[v]] assign[=] call[name[counts]][name[i]]
<ast.AugAssign object at 0x7da207f9a3e0>
if compare[call[name[len], parameter[name[quals]]] greater[>] constant[0]] begin[:]
call[name[self].snpeff_qualities][call[name[f]][constant[s_name]]] assign[=] name[quals]
if compare[call[name[len], parameter[name[parsed_data]]] greater[>] constant[0]] begin[:]
if compare[call[name[f]][constant[s_name]] in name[self].snpeff_data] begin[:]
call[name[log].debug, parameter[call[constant[Duplicate sample name found! Overwriting: {}].format, parameter[call[name[f]][constant[s_name]]]]]]
call[name[self].add_data_source, parameter[name[f]]]
call[name[self].snpeff_data][call[name[f]][constant[s_name]]] assign[=] name[parsed_data] | keyword[def] identifier[parse_snpeff_log] ( identifier[self] , identifier[f] ):
literal[string]
identifier[keys] ={
literal[string] :[
literal[string] , literal[string] ,
literal[string] , literal[string] ,
literal[string] , literal[string]
],
literal[string] :[ literal[string] , literal[string] , literal[string] , literal[string] ],
literal[string] :[ literal[string] , literal[string] , literal[string] , literal[string] ],
literal[string] :[ literal[string] , literal[string] , literal[string] ],
literal[string] :[ literal[string] , literal[string] , literal[string] ],
literal[string] : literal[string] ,
literal[string] : literal[string]
}
identifier[parsed_data] ={}
identifier[section] = keyword[None]
keyword[for] identifier[l] keyword[in] identifier[f] [ literal[string] ]:
identifier[l] = identifier[l] . identifier[strip] ()
keyword[if] identifier[l] [: literal[int] ]== literal[string] :
identifier[section] = identifier[l]
identifier[self] . identifier[snpeff_section_totals] [ identifier[section] ]= identifier[dict] ()
keyword[continue]
identifier[s] = identifier[l] . identifier[split] ( literal[string] )
keyword[if] identifier[section] == literal[string] :
identifier[quals] = identifier[OrderedDict] ()
keyword[if] identifier[l] . identifier[startswith] ( literal[string] ):
identifier[values] =[ identifier[int] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[l] . identifier[split] ( literal[string] )[ literal[int] :]]
identifier[counts] = identifier[f] [ literal[string] ]. identifier[readline] ()
identifier[counts] =[ identifier[int] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[counts] . identifier[split] ( literal[string] )[ literal[int] :]]
identifier[c] = literal[int]
identifier[total] = identifier[sum] ( identifier[counts] )
keyword[for] identifier[i] , identifier[v] keyword[in] identifier[enumerate] ( identifier[values] ):
keyword[if] identifier[c] <( identifier[total] * literal[int] ):
identifier[quals] [ identifier[v] ]= identifier[counts] [ identifier[i] ]
identifier[c] += identifier[counts] [ identifier[i] ]
keyword[if] identifier[len] ( identifier[quals] )> literal[int] :
identifier[self] . identifier[snpeff_qualities] [ identifier[f] [ literal[string] ]]= identifier[quals]
keyword[elif] identifier[section] keyword[in] identifier[keys] :
keyword[if] identifier[keys] [ identifier[section] ]== literal[string] keyword[or] identifier[any] ([ identifier[k] keyword[in] identifier[s] [ literal[int] ]. identifier[strip] () keyword[for] identifier[k] keyword[in] identifier[keys] [ identifier[section] ]]):
keyword[try] :
identifier[parsed_data] [ identifier[s] [ literal[int] ]. identifier[strip] ()]= identifier[float] ( identifier[s] [ literal[int] ]. identifier[strip] ())
keyword[except] identifier[ValueError] :
identifier[parsed_data] [ identifier[s] [ literal[int] ]. identifier[strip] ()]= identifier[s] [ literal[int] ]. identifier[strip] ()
keyword[except] identifier[IndexError] :
keyword[pass]
keyword[else] :
keyword[try] :
identifier[self] . identifier[snpeff_section_totals] [ identifier[section] ][ identifier[s] [ literal[int] ]. identifier[strip] ()]+= identifier[parsed_data] [ identifier[s] [ literal[int] ]. identifier[strip] ()]
keyword[except] identifier[KeyError] :
identifier[self] . identifier[snpeff_section_totals] [ identifier[section] ][ identifier[s] [ literal[int] ]. identifier[strip] ()]= identifier[parsed_data] [ identifier[s] [ literal[int] ]. identifier[strip] ()]
keyword[if] identifier[len] ( identifier[s] )> literal[int] keyword[and] identifier[s] [ literal[int] ][- literal[int] :]== literal[string] :
identifier[parsed_data] [ literal[string] . identifier[format] ( identifier[s] [ literal[int] ]. identifier[strip] ())]= identifier[float] ( identifier[s] [ literal[int] ][:- literal[int] ])
keyword[if] identifier[len] ( identifier[parsed_data] )> literal[int] :
keyword[if] identifier[f] [ literal[string] ] keyword[in] identifier[self] . identifier[snpeff_data] :
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[f] [ literal[string] ]))
identifier[self] . identifier[add_data_source] ( identifier[f] )
identifier[self] . identifier[snpeff_data] [ identifier[f] [ literal[string] ]]= identifier[parsed_data] | def parse_snpeff_log(self, f):
""" Go through log file looking for snpeff output """
keys = {'# Summary table': ['Genome', 'Number_of_variants_before_filter', 'Number_of_known_variants', 'Number_of_effects', 'Genome_total_length', 'Change_rate'], '# Effects by impact': ['HIGH', 'LOW', 'MODERATE', 'MODIFIER'], '# Effects by functional class': ['MISSENSE', 'NONSENSE', 'SILENT', 'Missense_Silent_ratio'], '# Hom/Het table': ['Het', 'Hom', 'Missing'], '# Ts/Tv summary': ['Transitions', 'Transversions', 'Ts_Tv_ratio'], '# Count by effects': 'all', '# Count by genomic region': 'all'}
parsed_data = {}
section = None
for l in f['f']:
l = l.strip()
if l[:1] == '#':
section = l
self.snpeff_section_totals[section] = dict()
continue # depends on [control=['if'], data=[]]
s = l.split(',')
# Quality values / counts
if section == '# Quality':
quals = OrderedDict()
if l.startswith('Values'):
values = [int(c) for c in l.split(',')[1:]]
counts = f['f'].readline()
counts = [int(c) for c in counts.split(',')[1:]]
c = 0
total = sum(counts)
for (i, v) in enumerate(values):
if c < total * 0.995:
quals[v] = counts[i]
c += counts[i] # depends on [control=['if'], data=['c']] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
if len(quals) > 0:
self.snpeff_qualities[f['s_name']] = quals # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Everything else
elif section in keys:
if keys[section] == 'all' or any([k in s[0].strip() for k in keys[section]]):
try:
parsed_data[s[0].strip()] = float(s[1].strip()) # depends on [control=['try'], data=[]]
except ValueError:
parsed_data[s[0].strip()] = s[1].strip() # depends on [control=['except'], data=[]]
except IndexError:
pass # depends on [control=['except'], data=[]]
else:
# Parsing the number worked - add to totals
try:
self.snpeff_section_totals[section][s[0].strip()] += parsed_data[s[0].strip()] # depends on [control=['try'], data=[]]
except KeyError:
self.snpeff_section_totals[section][s[0].strip()] = parsed_data[s[0].strip()] # depends on [control=['except'], data=[]]
if len(s) > 2 and s[2][-1:] == '%':
parsed_data['{}_percent'.format(s[0].strip())] = float(s[2][:-1]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['section', 'keys']] # depends on [control=['for'], data=['l']]
if len(parsed_data) > 0:
if f['s_name'] in self.snpeff_data:
log.debug('Duplicate sample name found! Overwriting: {}'.format(f['s_name'])) # depends on [control=['if'], data=[]]
self.add_data_source(f)
self.snpeff_data[f['s_name']] = parsed_data # depends on [control=['if'], data=[]] |
def uninstall(self):
    """Uninstall this importer if possible and un-import any modules imported by it."""
    if not self._uninstallable:
        _tracer().log('Not uninstalling {}'.format(self), V=9)
        return
    # Remove ourselves from the import machinery.
    if self in sys.meta_path:
        sys.meta_path.remove(self)
    # Strip from sys.path any entries this importer exposed for its
    # importables (paths rooted at self._root).
    maybe_exposed = frozenset(os.path.join(self._root, importable.path)
                              for importable in self._importables)
    sys.path[:] = [path_item for path_item in sys.path if path_item not in maybe_exposed]
    # Let each loader un-import whatever modules it loaded.
    for loader in self._loaders:
        loader.unload()
    _tracer().log('Uninstalled {}'.format(self), V=3) | def function[uninstall, parameter[self]]:
constant[Uninstall this importer if possible and un-import any modules imported by it.]
if <ast.UnaryOp object at 0x7da204623d90> begin[:]
call[call[name[_tracer], parameter[]].log, parameter[call[constant[Not uninstalling {}].format, parameter[name[self]]]]]
return[None]
if compare[name[self] in name[sys].meta_path] begin[:]
call[name[sys].meta_path.remove, parameter[name[self]]]
variable[maybe_exposed] assign[=] call[name[frozenset], parameter[<ast.GeneratorExp object at 0x7da204621030>]]
call[name[sys].path][<ast.Slice object at 0x7da204620cd0>] assign[=] <ast.ListComp object at 0x7da204621c60>
for taget[name[loader]] in starred[name[self]._loaders] begin[:]
call[name[loader].unload, parameter[]]
call[call[name[_tracer], parameter[]].log, parameter[call[constant[Uninstalled {}].format, parameter[name[self]]]]] | keyword[def] identifier[uninstall] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_uninstallable] :
identifier[_tracer] (). identifier[log] ( literal[string] . identifier[format] ( identifier[self] ), identifier[V] = literal[int] )
keyword[return]
keyword[if] identifier[self] keyword[in] identifier[sys] . identifier[meta_path] :
identifier[sys] . identifier[meta_path] . identifier[remove] ( identifier[self] )
identifier[maybe_exposed] = identifier[frozenset] ( identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[_root] , identifier[importable] . identifier[path] )
keyword[for] identifier[importable] keyword[in] identifier[self] . identifier[_importables] )
identifier[sys] . identifier[path] [:]=[ identifier[path_item] keyword[for] identifier[path_item] keyword[in] identifier[sys] . identifier[path] keyword[if] identifier[path_item] keyword[not] keyword[in] identifier[maybe_exposed] ]
keyword[for] identifier[loader] keyword[in] identifier[self] . identifier[_loaders] :
identifier[loader] . identifier[unload] ()
identifier[_tracer] (). identifier[log] ( literal[string] . identifier[format] ( identifier[self] ), identifier[V] = literal[int] ) | def uninstall(self):
"""Uninstall this importer if possible and un-import any modules imported by it."""
if not self._uninstallable:
_tracer().log('Not uninstalling {}'.format(self), V=9)
return # depends on [control=['if'], data=[]]
if self in sys.meta_path:
sys.meta_path.remove(self)
maybe_exposed = frozenset((os.path.join(self._root, importable.path) for importable in self._importables))
sys.path[:] = [path_item for path_item in sys.path if path_item not in maybe_exposed]
for loader in self._loaders:
loader.unload() # depends on [control=['for'], data=['loader']]
_tracer().log('Uninstalled {}'.format(self), V=3) # depends on [control=['if'], data=['self']] |
def agent_intents(self):
    """Returns a list of intent json objects"""
    endpoint = self._intent_uri()
    intents = self._get(endpoint) # should be list of dicts
    # On error the API returns a dict of the form {status: {error}} instead
    # of a list; surface that as an exception.
    if isinstance(intents, dict): # if error: intents = {status: {error}}
        raise Exception(intents["status"])
    return [Intent(intent_json=i) for i in intents] | def function[agent_intents, parameter[self]]:
constant[Returns a list of intent json objects]
variable[endpoint] assign[=] call[name[self]._intent_uri, parameter[]]
variable[intents] assign[=] call[name[self]._get, parameter[name[endpoint]]]
if call[name[isinstance], parameter[name[intents], name[dict]]] begin[:]
<ast.Raise object at 0x7da20c6a9090>
return[<ast.ListComp object at 0x7da20c6aac50>] | keyword[def] identifier[agent_intents] ( identifier[self] ):
literal[string]
identifier[endpoint] = identifier[self] . identifier[_intent_uri] ()
identifier[intents] = identifier[self] . identifier[_get] ( identifier[endpoint] )
keyword[if] identifier[isinstance] ( identifier[intents] , identifier[dict] ):
keyword[raise] identifier[Exception] ( identifier[intents] [ literal[string] ])
keyword[return] [ identifier[Intent] ( identifier[intent_json] = identifier[i] ) keyword[for] identifier[i] keyword[in] identifier[intents] ] | def agent_intents(self):
"""Returns a list of intent json objects"""
endpoint = self._intent_uri()
intents = self._get(endpoint) # should be list of dicts
if isinstance(intents, dict): # if error: intents = {status: {error}}
raise Exception(intents['status']) # depends on [control=['if'], data=[]]
return [Intent(intent_json=i) for i in intents] |
def _partition_index_names(provisioned_index_names, index_names):
    '''Returns 3 disjoint sets of indexes: existing, to be created, and to be deleted.'''
    # Split the requested names into those already provisioned and new ones.
    existing_index_names = set()
    new_index_names = set()
    for name in index_names:
        if name in provisioned_index_names:
            existing_index_names.add(name)
        else:
            new_index_names.add(name)
    # Anything provisioned but no longer requested should be deleted.
    # (Set difference below assumes provisioned_index_names is a set.)
    index_names_to_be_deleted = provisioned_index_names - existing_index_names
    return existing_index_names, new_index_names, index_names_to_be_deleted | def function[_partition_index_names, parameter[provisioned_index_names, index_names]]:
constant[Returns 3 disjoint sets of indexes: existing, to be created, and to be deleted.]
variable[existing_index_names] assign[=] call[name[set], parameter[]]
variable[new_index_names] assign[=] call[name[set], parameter[]]
for taget[name[name]] in starred[name[index_names]] begin[:]
if compare[name[name] in name[provisioned_index_names]] begin[:]
call[name[existing_index_names].add, parameter[name[name]]]
variable[index_names_to_be_deleted] assign[=] binary_operation[name[provisioned_index_names] - name[existing_index_names]]
return[tuple[[<ast.Name object at 0x7da1b21e0fa0>, <ast.Name object at 0x7da1b21e1660>, <ast.Name object at 0x7da1b21e1ae0>]]] | keyword[def] identifier[_partition_index_names] ( identifier[provisioned_index_names] , identifier[index_names] ):
literal[string]
identifier[existing_index_names] = identifier[set] ()
identifier[new_index_names] = identifier[set] ()
keyword[for] identifier[name] keyword[in] identifier[index_names] :
keyword[if] identifier[name] keyword[in] identifier[provisioned_index_names] :
identifier[existing_index_names] . identifier[add] ( identifier[name] )
keyword[else] :
identifier[new_index_names] . identifier[add] ( identifier[name] )
identifier[index_names_to_be_deleted] = identifier[provisioned_index_names] - identifier[existing_index_names]
keyword[return] identifier[existing_index_names] , identifier[new_index_names] , identifier[index_names_to_be_deleted] | def _partition_index_names(provisioned_index_names, index_names):
"""Returns 3 disjoint sets of indexes: existing, to be created, and to be deleted."""
existing_index_names = set()
new_index_names = set()
for name in index_names:
if name in provisioned_index_names:
existing_index_names.add(name) # depends on [control=['if'], data=['name']]
else:
new_index_names.add(name) # depends on [control=['for'], data=['name']]
index_names_to_be_deleted = provisioned_index_names - existing_index_names
return (existing_index_names, new_index_names, index_names_to_be_deleted) |
def exact(cls, value, precision=None):
    """Convert an integer, float or BigFloat with no loss of precision.

    Strings are also accepted, but then ``precision`` must be supplied.
    This constructor makes no use of the current context.
    """
    # Determine the working precision from the type of ``value``.
    if isinstance(value, six.string_types):
        # Strings carry no intrinsic precision of their own.
        if precision is None:
            raise TypeError("precision must be supplied when "
                            "converting from a string")
    elif precision is not None:
        raise TypeError("precision argument should not be "
                        "specified except when converting "
                        "from a string")
    elif isinstance(value, float):
        precision = _builtin_max(DBL_PRECISION, PRECISION_MIN)
    elif isinstance(value, six.integer_types):
        # Enough bits to hold the integer exactly.
        precision = _builtin_max(_bit_length(value), PRECISION_MIN)
    elif isinstance(value, BigFloat):
        precision = value.precision
    else:
        raise TypeError("Can't convert argument %s of type %s "
                        "to BigFloat" % (value, type(value)))

    # Convert under unlimited exponents at the computed precision, without
    # disturbing the caller's flag state.
    with _saved_flags():
        set_flagstate(set())  # start from a clean flag state
        exact_context = (
            WideExponentContext +
            Context(precision=precision) +
            RoundTiesToEven
        )
        with exact_context:
            converted = BigFloat(value)
        if test_flag(Overflow):
            raise ValueError("value too large to represent as a BigFloat")
        if test_flag(Underflow):
            raise ValueError("value too small to represent as a BigFloat")
        # Only a string source may legitimately round; any other inexact
        # conversion indicates an internal error.
        string_source = isinstance(value, six.string_types)
        assert not test_flag(Inexact) or string_source, (
            "Inexact conversion in BigFloat.exact. "
            "This shouldn't ever happen. Please report.")
    return converted
constant[Convert an integer, float or BigFloat with no loss of precision.
Also convert a string with given precision.
This constructor makes no use of the current context.
]
if call[name[isinstance], parameter[name[value], name[six].string_types]] begin[:]
if compare[name[precision] is constant[None]] begin[:]
<ast.Raise object at 0x7da18c4cfc10>
with call[name[_saved_flags], parameter[]] begin[:]
call[name[set_flagstate], parameter[call[name[set], parameter[]]]]
variable[context] assign[=] binary_operation[binary_operation[name[WideExponentContext] + call[name[Context], parameter[]]] + name[RoundTiesToEven]]
with name[context] begin[:]
variable[result] assign[=] call[name[BigFloat], parameter[name[value]]]
if call[name[test_flag], parameter[name[Overflow]]] begin[:]
<ast.Raise object at 0x7da1b26f0040>
if call[name[test_flag], parameter[name[Underflow]]] begin[:]
<ast.Raise object at 0x7da207f9a710>
if <ast.BoolOp object at 0x7da207f9b9a0> begin[:]
assert[constant[False]]
return[name[result]] | keyword[def] identifier[exact] ( identifier[cls] , identifier[value] , identifier[precision] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[six] . identifier[string_types] ):
keyword[if] identifier[precision] keyword[is] keyword[None] :
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] )
keyword[else] :
keyword[if] identifier[precision] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[TypeError] ( literal[string]
literal[string]
literal[string] )
keyword[if] identifier[isinstance] ( identifier[value] , identifier[float] ):
identifier[precision] = identifier[_builtin_max] ( identifier[DBL_PRECISION] , identifier[PRECISION_MIN] )
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[six] . identifier[integer_types] ):
identifier[precision] = identifier[_builtin_max] ( identifier[_bit_length] ( identifier[value] ), identifier[PRECISION_MIN] )
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[BigFloat] ):
identifier[precision] = identifier[value] . identifier[precision]
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] %( identifier[value] , identifier[type] ( identifier[value] )))
keyword[with] identifier[_saved_flags] ():
identifier[set_flagstate] ( identifier[set] ())
identifier[context] =(
identifier[WideExponentContext] +
identifier[Context] ( identifier[precision] = identifier[precision] )+
identifier[RoundTiesToEven]
)
keyword[with] identifier[context] :
identifier[result] = identifier[BigFloat] ( identifier[value] )
keyword[if] identifier[test_flag] ( identifier[Overflow] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[test_flag] ( identifier[Underflow] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[test_flag] ( identifier[Inexact] ) keyword[and] keyword[not] identifier[isinstance] ( identifier[value] , identifier[six] . identifier[string_types] ):
keyword[assert] keyword[False] ,( literal[string]
literal[string] )
keyword[return] identifier[result] | def exact(cls, value, precision=None):
"""Convert an integer, float or BigFloat with no loss of precision.
Also convert a string with given precision.
This constructor makes no use of the current context.
"""
# figure out precision to use
if isinstance(value, six.string_types):
if precision is None:
raise TypeError('precision must be supplied when converting from a string') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
if precision is not None:
raise TypeError('precision argument should not be specified except when converting from a string') # depends on [control=['if'], data=[]]
if isinstance(value, float):
precision = _builtin_max(DBL_PRECISION, PRECISION_MIN) # depends on [control=['if'], data=[]]
elif isinstance(value, six.integer_types):
precision = _builtin_max(_bit_length(value), PRECISION_MIN) # depends on [control=['if'], data=[]]
elif isinstance(value, BigFloat):
precision = value.precision # depends on [control=['if'], data=[]]
else:
raise TypeError("Can't convert argument %s of type %s to BigFloat" % (value, type(value)))
# Use unlimited exponents, with given precision.
with _saved_flags():
set_flagstate(set()) # clear all flags
context = WideExponentContext + Context(precision=precision) + RoundTiesToEven
with context:
result = BigFloat(value) # depends on [control=['with'], data=[]]
if test_flag(Overflow):
raise ValueError('value too large to represent as a BigFloat') # depends on [control=['if'], data=[]]
if test_flag(Underflow):
raise ValueError('value too small to represent as a BigFloat') # depends on [control=['if'], data=[]]
if test_flag(Inexact) and (not isinstance(value, six.string_types)):
# since this is supposed to be an exact conversion, the
# inexact flag should never be set except when converting
# from a string.
assert False, "Inexact conversion in BigFloat.exact. This shouldn't ever happen. Please report." # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
return result |
def description(self, value):
    """
    Setter for **self.__description** attribute.

    :param value: Attribute value.
    :type value: unicode
    """
    # ``None`` clears the attribute; any other value must be unicode text.
    # Short-circuit keeps the ``unicode`` lookup lazy, as in the guarded form.
    assert value is None or type(value) is unicode, \
        "'{0}' attribute: '{1}' type is not 'unicode'!".format(
            "description", value)
    self.__description = value
constant[
Setter for **self.__description** attribute.
:param value: Attribute value.
:type value: unicode
]
if compare[name[value] is_not constant[None]] begin[:]
assert[compare[call[name[type], parameter[name[value]]] is name[unicode]]]
name[self].__description assign[=] name[value] | keyword[def] identifier[description] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[type] ( identifier[value] ) keyword[is] identifier[unicode] , literal[string] . identifier[format] (
literal[string] , identifier[value] )
identifier[self] . identifier[__description] = identifier[value] | def description(self, value):
"""
Setter for **self.__description** attribute.
:param value: Attribute value.
:type value: unicode
"""
if value is not None:
assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format('description', value) # depends on [control=['if'], data=['value']]
self.__description = value |
def _from_dict(cls, _dict):
"""Initialize a DocumentStatus object from a json dictionary."""
args = {}
if 'document_id' in _dict:
args['document_id'] = _dict.get('document_id')
else:
raise ValueError(
'Required property \'document_id\' not present in DocumentStatus JSON'
)
if 'configuration_id' in _dict:
args['configuration_id'] = _dict.get('configuration_id')
if 'status' in _dict:
args['status'] = _dict.get('status')
else:
raise ValueError(
'Required property \'status\' not present in DocumentStatus JSON'
)
if 'status_description' in _dict:
args['status_description'] = _dict.get('status_description')
else:
raise ValueError(
'Required property \'status_description\' not present in DocumentStatus JSON'
)
if 'filename' in _dict:
args['filename'] = _dict.get('filename')
if 'file_type' in _dict:
args['file_type'] = _dict.get('file_type')
if 'sha1' in _dict:
args['sha1'] = _dict.get('sha1')
if 'notices' in _dict:
args['notices'] = [
Notice._from_dict(x) for x in (_dict.get('notices'))
]
else:
raise ValueError(
'Required property \'notices\' not present in DocumentStatus JSON'
)
return cls(**args) | def function[_from_dict, parameter[cls, _dict]]:
constant[Initialize a DocumentStatus object from a json dictionary.]
variable[args] assign[=] dictionary[[], []]
if compare[constant[document_id] in name[_dict]] begin[:]
call[name[args]][constant[document_id]] assign[=] call[name[_dict].get, parameter[constant[document_id]]]
if compare[constant[configuration_id] in name[_dict]] begin[:]
call[name[args]][constant[configuration_id]] assign[=] call[name[_dict].get, parameter[constant[configuration_id]]]
if compare[constant[status] in name[_dict]] begin[:]
call[name[args]][constant[status]] assign[=] call[name[_dict].get, parameter[constant[status]]]
if compare[constant[status_description] in name[_dict]] begin[:]
call[name[args]][constant[status_description]] assign[=] call[name[_dict].get, parameter[constant[status_description]]]
if compare[constant[filename] in name[_dict]] begin[:]
call[name[args]][constant[filename]] assign[=] call[name[_dict].get, parameter[constant[filename]]]
if compare[constant[file_type] in name[_dict]] begin[:]
call[name[args]][constant[file_type]] assign[=] call[name[_dict].get, parameter[constant[file_type]]]
if compare[constant[sha1] in name[_dict]] begin[:]
call[name[args]][constant[sha1]] assign[=] call[name[_dict].get, parameter[constant[sha1]]]
if compare[constant[notices] in name[_dict]] begin[:]
call[name[args]][constant[notices]] assign[=] <ast.ListComp object at 0x7da20c76c040>
return[call[name[cls], parameter[]]] | keyword[def] identifier[_from_dict] ( identifier[cls] , identifier[_dict] ):
literal[string]
identifier[args] ={}
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] )
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string]
)
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] )
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string]
)
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] )
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string]
)
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]= identifier[_dict] . identifier[get] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]=[
identifier[Notice] . identifier[_from_dict] ( identifier[x] ) keyword[for] identifier[x] keyword[in] ( identifier[_dict] . identifier[get] ( literal[string] ))
]
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string]
)
keyword[return] identifier[cls] (** identifier[args] ) | def _from_dict(cls, _dict):
"""Initialize a DocumentStatus object from a json dictionary."""
args = {}
if 'document_id' in _dict:
args['document_id'] = _dict.get('document_id') # depends on [control=['if'], data=['_dict']]
else:
raise ValueError("Required property 'document_id' not present in DocumentStatus JSON")
if 'configuration_id' in _dict:
args['configuration_id'] = _dict.get('configuration_id') # depends on [control=['if'], data=['_dict']]
if 'status' in _dict:
args['status'] = _dict.get('status') # depends on [control=['if'], data=['_dict']]
else:
raise ValueError("Required property 'status' not present in DocumentStatus JSON")
if 'status_description' in _dict:
args['status_description'] = _dict.get('status_description') # depends on [control=['if'], data=['_dict']]
else:
raise ValueError("Required property 'status_description' not present in DocumentStatus JSON")
if 'filename' in _dict:
args['filename'] = _dict.get('filename') # depends on [control=['if'], data=['_dict']]
if 'file_type' in _dict:
args['file_type'] = _dict.get('file_type') # depends on [control=['if'], data=['_dict']]
if 'sha1' in _dict:
args['sha1'] = _dict.get('sha1') # depends on [control=['if'], data=['_dict']]
if 'notices' in _dict:
args['notices'] = [Notice._from_dict(x) for x in _dict.get('notices')] # depends on [control=['if'], data=['_dict']]
else:
raise ValueError("Required property 'notices' not present in DocumentStatus JSON")
return cls(**args) |
def cmd(send, msg, args):
    """Gets a stock quote.
    Syntax: {command} [symbol]
    Powered by markit on demand (http://dev.markitondemand.com)
    """
    # Fall back to a randomly chosen ticker when no symbol is given.
    arg_parser = arguments.ArgParser(args['config'])
    arg_parser.add_argument('stock', nargs='?', default=random_stock())
    try:
        parsed = arg_parser.parse_args(msg)
    except arguments.ArgumentException as failure:
        # Report the parse problem back to the channel instead of crashing.
        send(str(failure))
    else:
        send(gen_stock(parsed.stock))
constant[Gets a stock quote.
Syntax: {command} [symbol]
Powered by markit on demand (http://dev.markitondemand.com)
]
variable[parser] assign[=] call[name[arguments].ArgParser, parameter[call[name[args]][constant[config]]]]
call[name[parser].add_argument, parameter[constant[stock]]]
<ast.Try object at 0x7da1b20f9510>
call[name[send], parameter[call[name[gen_stock], parameter[name[cmdargs].stock]]]] | keyword[def] identifier[cmd] ( identifier[send] , identifier[msg] , identifier[args] ):
literal[string]
identifier[parser] = identifier[arguments] . identifier[ArgParser] ( identifier[args] [ literal[string] ])
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[nargs] = literal[string] , identifier[default] = identifier[random_stock] ())
keyword[try] :
identifier[cmdargs] = identifier[parser] . identifier[parse_args] ( identifier[msg] )
keyword[except] identifier[arguments] . identifier[ArgumentException] keyword[as] identifier[e] :
identifier[send] ( identifier[str] ( identifier[e] ))
keyword[return]
identifier[send] ( identifier[gen_stock] ( identifier[cmdargs] . identifier[stock] )) | def cmd(send, msg, args):
"""Gets a stock quote.
Syntax: {command} [symbol]
Powered by markit on demand (http://dev.markitondemand.com)
"""
parser = arguments.ArgParser(args['config'])
parser.add_argument('stock', nargs='?', default=random_stock())
try:
cmdargs = parser.parse_args(msg) # depends on [control=['try'], data=[]]
except arguments.ArgumentException as e:
send(str(e))
return # depends on [control=['except'], data=['e']]
send(gen_stock(cmdargs.stock)) |
def findUniques(mapF):
    """Finds the unique markers in a MAP.

    :param mapF: representation of a ``map`` file.

    :type mapF: list

    :returns: a :py:class:`dict` containing unique markers (according to their
              genomic localisation), mapping ``(chromosome, position)`` to the
              marker's row index in ``mapF``.
    """
    uSNPs = {}
    dSNPs = defaultdict(list)
    for i, row in enumerate(mapF):
        # Markers are identified solely by genomic localisation
        # (chromosome in column 0, physical position in column 3).
        chromosome = row[0]
        position = row[3]
        snpID = (chromosome, position)
        if snpID not in uSNPs:
            # This is the first time we see this sample
            uSNPs[snpID] = i
        else:
            # We have seen this sample at least once...
            if snpID not in dSNPs:
                # Second occurrence: also record the first occurrence's index
                dSNPs[snpID].append(uSNPs[snpID])
            dSNPs[snpID].append(i)

    # Removing the duplicates from the unique samples.
    # Plain dict iteration replaces the Python-2-only ``iterkeys()``;
    # ``pop`` with a default replaces the membership-guarded ``del``.
    for snpID in dSNPs:
        uSNPs.pop(snpID, None)

    return uSNPs
constant[Finds the unique markers in a MAP.
:param mapF: representation of a ``map`` file.
:type mapF: list
:returns: a :py:class:`dict` containing unique markers (according to their
genomic localisation).
]
variable[uSNPs] assign[=] dictionary[[], []]
variable[dSNPs] assign[=] call[name[defaultdict], parameter[name[list]]]
for taget[tuple[[<ast.Name object at 0x7da1b0a78190>, <ast.Name object at 0x7da1b0a785e0>]]] in starred[call[name[enumerate], parameter[name[mapF]]]] begin[:]
variable[chromosome] assign[=] call[name[row]][constant[0]]
variable[position] assign[=] call[name[row]][constant[3]]
variable[snpID] assign[=] tuple[[<ast.Name object at 0x7da1b0a7ab30>, <ast.Name object at 0x7da1b0a78340>]]
if compare[name[snpID] <ast.NotIn object at 0x7da2590d7190> name[uSNPs]] begin[:]
call[name[uSNPs]][name[snpID]] assign[=] name[i]
for taget[name[snpID]] in starred[call[name[dSNPs].iterkeys, parameter[]]] begin[:]
if compare[name[snpID] in name[uSNPs]] begin[:]
<ast.Delete object at 0x7da1b0a78cd0>
return[name[uSNPs]] | keyword[def] identifier[findUniques] ( identifier[mapF] ):
literal[string]
identifier[uSNPs] ={}
identifier[dSNPs] = identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[i] , identifier[row] keyword[in] identifier[enumerate] ( identifier[mapF] ):
identifier[chromosome] = identifier[row] [ literal[int] ]
identifier[position] = identifier[row] [ literal[int] ]
identifier[snpID] =( identifier[chromosome] , identifier[position] )
keyword[if] identifier[snpID] keyword[not] keyword[in] identifier[uSNPs] :
identifier[uSNPs] [ identifier[snpID] ]= identifier[i]
keyword[else] :
keyword[if] identifier[snpID] keyword[not] keyword[in] identifier[dSNPs] :
identifier[dSNPs] [ identifier[snpID] ]. identifier[extend] ([ identifier[uSNPs] [ identifier[snpID] ], identifier[i] ])
keyword[else] :
identifier[dSNPs] [ identifier[snpID] ]. identifier[append] ( identifier[i] )
keyword[for] identifier[snpID] keyword[in] identifier[dSNPs] . identifier[iterkeys] ():
keyword[if] identifier[snpID] keyword[in] identifier[uSNPs] :
keyword[del] identifier[uSNPs] [ identifier[snpID] ]
keyword[return] identifier[uSNPs] | def findUniques(mapF):
"""Finds the unique markers in a MAP.
:param mapF: representation of a ``map`` file.
:type mapF: list
:returns: a :py:class:`dict` containing unique markers (according to their
genomic localisation).
"""
uSNPs = {}
dSNPs = defaultdict(list)
for (i, row) in enumerate(mapF):
chromosome = row[0]
position = row[3]
snpID = (chromosome, position)
if snpID not in uSNPs:
# This is the first time we see this sample
uSNPs[snpID] = i # depends on [control=['if'], data=['snpID', 'uSNPs']]
# We have seen this sample at least once...
elif snpID not in dSNPs:
# This is the second time we see this sample...
dSNPs[snpID].extend([uSNPs[snpID], i]) # depends on [control=['if'], data=['snpID', 'dSNPs']]
else:
# We have seen this sample multiple times
dSNPs[snpID].append(i) # depends on [control=['for'], data=[]]
# Removing the duplicates from the unique samples
for snpID in dSNPs.iterkeys():
if snpID in uSNPs:
del uSNPs[snpID] # depends on [control=['if'], data=['snpID', 'uSNPs']] # depends on [control=['for'], data=['snpID']]
return uSNPs |
def showProfileMenu(self, point):
    """
    Prompts the user for profile menu options. Editing needs to be enabled
    for this to work.

    :param point: widget-local position where the context menu was requested
    """
    if not self.isEditingEnabled():
        return

    # Did the user click on an existing profile action?
    trigger = self.actionAt(point)
    if (isinstance(trigger, XViewProfileAction)):
        prof = trigger.profile()
    else:
        prof = None

    # define the menu
    menu = QMenu(self)
    acts = {}
    text = self.profileText()

    # user right clicked on a profile
    if prof:
        acts['edit'] = menu.addAction('Edit {0}...'.format(text))
        acts['save'] = menu.addAction('Save Layout')

        menu.addSeparator()

        acts['copy'] = menu.addAction('Copy {0}'.format(text))
        acts['export'] = menu.addAction('Export {0}...'.format(text))

        menu.addSeparator()

        acts['remove'] = menu.addAction('Delete {0}'.format(text))

    # show toolbar options
    else:
        acts['new'] = menu.addAction('New Layout')

        menu.addSeparator()

        acts['save_as'] = menu.addAction('Save Layout as...')
        if QApplication.clipboard().text():
            acts['paste'] = menu.addAction('Paste {0}'.format(text))
        acts['import'] = menu.addAction('Import {0}...'.format(text))

    # Icons are looked up by the action key, e.g. img/edit.png.
    for key, act in acts.items():
        act.setIcon(QIcon(resources.find('img/{0}.png'.format(key))))

    # run the menu
    act = menu.exec_(QCursor.pos())

    # create a new profile
    if act is None:
        return

    elif act == acts.get('new'):
        self.clearActive()

    # create a new clear profile
    elif act == acts.get('save_as'):
        self.saveProfileAs()

    # edit an existing profile
    elif act == acts.get('edit'):
        self.editProfile(prof)

    # save or create a new profile
    elif act == acts.get('save'):
        self.saveProfileLayout(prof)

    # copy profile
    elif act == acts.get('copy'):
        QApplication.clipboard().setText(prof.toString())

    # export
    elif act == acts.get('export'):
        self.exportProfile(prof)

    # import
    elif act == acts.get('import'):
        self.importProfile()

    # paste profile
    elif act == acts.get('paste'):
        # BUGFIX: keep the clipboard contents in their own variable so the
        # error dialog below still formats with the profile label in `text`
        # (previously `text` was clobbered by the clipboard contents).
        clip_text = QApplication.clipboard().text()
        try:
            prof = XViewProfile.fromString(clip_text)
        except:
            prof = None
            QMessageBox.information(self.window(),
                                    'Invalid {0}'.format(text),
                                    'The clipboard text does not contain '
                                    'a properly formated {0}'.format(text))

        if prof and not prof.isEmpty():
            self.createProfile(profile=prof)

    # paste as profile
    # NOTE(review): no 'paste_as' action is ever added to `acts`, so this
    # branch is currently unreachable; kept for forward compatibility.
    elif act == acts.get('paste_as'):
        clip_text = QApplication.clipboard().text()
        prof = XViewProfile.fromString(clip_text)
        if not prof.isEmpty():
            if XViewProfileDialog.edit(self, prof):
                self.createProfile(profile=prof)

    # remove the profile
    elif act == acts.get('remove'):
        self.removeProfile(prof)
constant[
Prompts the user for profile menu options. Editing needs to be enabled
for this to work.
]
if <ast.UnaryOp object at 0x7da1b24e1510> begin[:]
return[None]
variable[trigger] assign[=] call[name[self].actionAt, parameter[name[point]]]
if call[name[isinstance], parameter[name[trigger], name[XViewProfileAction]]] begin[:]
variable[prof] assign[=] call[name[trigger].profile, parameter[]]
variable[menu] assign[=] call[name[QMenu], parameter[name[self]]]
variable[acts] assign[=] dictionary[[], []]
variable[text] assign[=] call[name[self].profileText, parameter[]]
if name[prof] begin[:]
call[name[acts]][constant[edit]] assign[=] call[name[menu].addAction, parameter[call[constant[Edit {0}...].format, parameter[name[text]]]]]
call[name[acts]][constant[save]] assign[=] call[name[menu].addAction, parameter[constant[Save Layout]]]
call[name[menu].addSeparator, parameter[]]
call[name[acts]][constant[copy]] assign[=] call[name[menu].addAction, parameter[call[constant[Copy {0}].format, parameter[name[text]]]]]
call[name[acts]][constant[export]] assign[=] call[name[menu].addAction, parameter[call[constant[Export {0}...].format, parameter[name[text]]]]]
call[name[menu].addSeparator, parameter[]]
call[name[acts]][constant[remove]] assign[=] call[name[menu].addAction, parameter[call[constant[Delete {0}].format, parameter[name[text]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b24e1b40>, <ast.Name object at 0x7da1b24e1b10>]]] in starred[call[name[acts].items, parameter[]]] begin[:]
call[name[act].setIcon, parameter[call[name[QIcon], parameter[call[name[resources].find, parameter[call[constant[img/{0}.png].format, parameter[name[key]]]]]]]]]
variable[act] assign[=] call[name[menu].exec_, parameter[call[name[QCursor].pos, parameter[]]]]
if compare[name[act] is constant[None]] begin[:]
return[None] | keyword[def] identifier[showProfileMenu] ( identifier[self] , identifier[point] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[isEditingEnabled] ():
keyword[return]
identifier[trigger] = identifier[self] . identifier[actionAt] ( identifier[point] )
keyword[if] ( identifier[isinstance] ( identifier[trigger] , identifier[XViewProfileAction] )):
identifier[prof] = identifier[trigger] . identifier[profile] ()
keyword[else] :
identifier[prof] = keyword[None]
identifier[menu] = identifier[QMenu] ( identifier[self] )
identifier[acts] ={}
identifier[text] = identifier[self] . identifier[profileText] ()
keyword[if] identifier[prof] :
identifier[acts] [ literal[string] ]= identifier[menu] . identifier[addAction] ( literal[string] . identifier[format] ( identifier[text] ))
identifier[acts] [ literal[string] ]= identifier[menu] . identifier[addAction] ( literal[string] )
identifier[menu] . identifier[addSeparator] ()
identifier[acts] [ literal[string] ]= identifier[menu] . identifier[addAction] ( literal[string] . identifier[format] ( identifier[text] ))
identifier[acts] [ literal[string] ]= identifier[menu] . identifier[addAction] ( literal[string] . identifier[format] ( identifier[text] ))
identifier[menu] . identifier[addSeparator] ()
identifier[acts] [ literal[string] ]= identifier[menu] . identifier[addAction] ( literal[string] . identifier[format] ( identifier[text] ))
keyword[else] :
identifier[acts] [ literal[string] ]= identifier[menu] . identifier[addAction] ( literal[string] . identifier[format] ( identifier[text] ))
identifier[menu] . identifier[addSeparator] ()
identifier[acts] [ literal[string] ]= identifier[menu] . identifier[addAction] ( literal[string] )
keyword[if] identifier[QApplication] . identifier[clipboard] (). identifier[text] ():
identifier[acts] [ literal[string] ]= identifier[menu] . identifier[addAction] ( literal[string] . identifier[format] ( identifier[text] ))
identifier[acts] [ literal[string] ]= identifier[menu] . identifier[addAction] ( literal[string] . identifier[format] ( identifier[text] ))
keyword[for] identifier[key] , identifier[act] keyword[in] identifier[acts] . identifier[items] ():
identifier[act] . identifier[setIcon] ( identifier[QIcon] ( identifier[resources] . identifier[find] ( literal[string] . identifier[format] ( identifier[key] ))))
identifier[act] = identifier[menu] . identifier[exec_] ( identifier[QCursor] . identifier[pos] ())
keyword[if] identifier[act] keyword[is] keyword[None] :
keyword[return]
keyword[elif] identifier[act] == identifier[acts] . identifier[get] ( literal[string] ):
identifier[self] . identifier[clearActive] ()
keyword[elif] identifier[act] == identifier[acts] . identifier[get] ( literal[string] ):
identifier[self] . identifier[saveProfileAs] ()
keyword[elif] identifier[act] == identifier[acts] . identifier[get] ( literal[string] ):
identifier[self] . identifier[editProfile] ( identifier[prof] )
keyword[elif] identifier[act] == identifier[acts] . identifier[get] ( literal[string] ):
identifier[self] . identifier[saveProfileLayout] ( identifier[prof] )
keyword[elif] identifier[act] == identifier[acts] . identifier[get] ( literal[string] ):
identifier[QApplication] . identifier[clipboard] (). identifier[setText] ( identifier[prof] . identifier[toString] ())
keyword[elif] identifier[act] == identifier[acts] . identifier[get] ( literal[string] ):
identifier[self] . identifier[exportProfile] ( identifier[prof] )
keyword[elif] identifier[act] == identifier[acts] . identifier[get] ( literal[string] ):
identifier[self] . identifier[importProfile] ()
keyword[elif] identifier[act] == identifier[acts] . identifier[get] ( literal[string] ):
identifier[text] = identifier[QApplication] . identifier[clipboard] (). identifier[text] ()
keyword[try] :
identifier[prof] = identifier[XViewProfile] . identifier[fromString] ( identifier[text] )
keyword[except] :
identifier[prof] = keyword[None]
identifier[QMessageBox] . identifier[information] ( identifier[self] . identifier[window] (),
literal[string] . identifier[format] ( identifier[text] ),
literal[string] literal[string] . identifier[format] ( identifier[text] ))
keyword[if] identifier[prof] keyword[and] keyword[not] identifier[prof] . identifier[isEmpty] ():
identifier[self] . identifier[createProfile] ( identifier[profile] = identifier[prof] )
keyword[elif] identifier[act] == identifier[acts] . identifier[get] ( literal[string] ):
identifier[text] = identifier[QApplication] . identifier[clipboard] (). identifier[text] ()
identifier[prof] = identifier[XViewProfile] . identifier[fromString] ( identifier[text] )
keyword[if] keyword[not] identifier[prof] . identifier[isEmpty] ():
keyword[if] identifier[XViewProfileDialog] . identifier[edit] ( identifier[self] , identifier[prof] ):
identifier[self] . identifier[createProfile] ( identifier[profile] = identifier[prof] )
keyword[elif] identifier[act] == identifier[acts] . identifier[get] ( literal[string] ):
identifier[self] . identifier[removeProfile] ( identifier[prof] ) | def showProfileMenu(self, point):
"""
Prompts the user for profile menu options. Editing needs to be enabled
for this to work.
"""
if not self.isEditingEnabled():
return # depends on [control=['if'], data=[]]
trigger = self.actionAt(point)
if isinstance(trigger, XViewProfileAction):
prof = trigger.profile() # depends on [control=['if'], data=[]]
else:
prof = None # define the menu
menu = QMenu(self)
acts = {}
text = self.profileText() # user right clicked on a profile
if prof:
acts['edit'] = menu.addAction('Edit {0}...'.format(text))
acts['save'] = menu.addAction('Save Layout')
menu.addSeparator()
acts['copy'] = menu.addAction('Copy {0}'.format(text))
acts['export'] = menu.addAction('Export {0}...'.format(text))
menu.addSeparator()
acts['remove'] = menu.addAction('Delete {0}'.format(text)) # depends on [control=['if'], data=[]]
else: # show toolbar options
acts['new'] = menu.addAction('New Layout'.format(text))
menu.addSeparator()
acts['save_as'] = menu.addAction('Save Layout as...')
if QApplication.clipboard().text():
acts['paste'] = menu.addAction('Paste {0}'.format(text)) # depends on [control=['if'], data=[]]
acts['import'] = menu.addAction('Import {0}...'.format(text))
for (key, act) in acts.items():
act.setIcon(QIcon(resources.find('img/{0}.png'.format(key)))) # depends on [control=['for'], data=[]] # run the menu
act = menu.exec_(QCursor.pos()) # create a new profile
if act is None:
return # depends on [control=['if'], data=[]]
elif act == acts.get('new'):
self.clearActive() # depends on [control=['if'], data=[]] # create a new clear profile
elif act == acts.get('save_as'):
self.saveProfileAs() # depends on [control=['if'], data=[]] # edit an existing profile
elif act == acts.get('edit'):
self.editProfile(prof) # depends on [control=['if'], data=[]] # save or create a new profile
elif act == acts.get('save'):
self.saveProfileLayout(prof) # depends on [control=['if'], data=[]] # copy profile
elif act == acts.get('copy'):
QApplication.clipboard().setText(prof.toString()) # depends on [control=['if'], data=[]] # export
elif act == acts.get('export'):
self.exportProfile(prof) # depends on [control=['if'], data=[]] # export
elif act == acts.get('import'):
self.importProfile() # depends on [control=['if'], data=[]] # paste profile
elif act == acts.get('paste'):
text = QApplication.clipboard().text()
try:
prof = XViewProfile.fromString(text) # depends on [control=['try'], data=[]]
except:
prof = None
QMessageBox.information(self.window(), 'Invalid {0}'.format(text), 'The clipboard text does not contain a properly formated {0}'.format(text)) # depends on [control=['except'], data=[]]
if prof and (not prof.isEmpty()):
self.createProfile(profile=prof) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # paste as profile
elif act == acts.get('paste_as'):
text = QApplication.clipboard().text()
prof = XViewProfile.fromString(text)
if not prof.isEmpty():
if XViewProfileDialog.edit(self, prof):
self.createProfile(profile=prof) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # remove the profile
elif act == acts.get('remove'):
self.removeProfile(prof) # depends on [control=['if'], data=[]] |
def taubin(script, iterations=10, t_lambda=0.5, t_mu=-0.53, selected=False):
    """Apply Taubin lambda/mu smoothing to the current mesh.

    Every iteration runs two smoothing passes, one forward (lambda) and
    one backward (mu), which smooths the surface while limiting the
    shrinkage of plain Laplacian smoothing.

    Based on:
        Gabriel Taubin,
        "A signal processing approach to fair surface design",
        Siggraph 1995

    Args:
        script: the FilterScript object or script filename to write
            the filter to.
        iterations (int): number of lambda/mu iterations; usually needs
            more iterations than the classical Laplacian smoother.
        t_lambda (float): the lambda parameter of the Taubin algorithm.
        t_mu (float): the mu parameter of the Taubin algorithm.
        selected (bool): if True the filter only affects selected faces.

    Layer stack:
        No impacts

    MeshLab versions:
        2016.12
        1.3.4BETA
    """
    # Assemble the exact XML fragment MeshLab expects for this filter.
    fragments = [
        ' <filter name="Taubin Smooth">\n',
        ' <Param name="lambda" ',
        'value="{}" '.format(t_lambda),
        'description="Lambda" ',
        'type="RichFloat" ',
        '/>\n',
        ' <Param name="mu" ',
        'value="{}" '.format(t_mu),
        'description="mu" ',
        'type="RichFloat" ',
        '/>\n',
        ' <Param name="stepSmoothNum" ',
        'value="{:d}" '.format(iterations),
        'description="Smoothing steps" ',
        'type="RichInt" ',
        '/>\n',
        ' <Param name="Selected" ',
        'value="{}" '.format(str(selected).lower()),
        'description="Affect only selected faces" ',
        'type="RichBool" ',
        '/>\n',
        ' </filter>\n',
    ]
    util.write_filter(script, ''.join(fragments))
    return None
constant[ The lambda & mu Taubin smoothing, it make two steps of smoothing, forth
and back, for each iteration.
Based on:
Gabriel Taubin
"A signal processing approach to fair surface design"
Siggraph 1995
Args:
script: the FilterScript object or script filename to write
the filter to.
iterations (int): The number of times that the taubin smoothing is
iterated. Usually it requires a larger number of iteration than the
classical laplacian.
t_lambda (float): The lambda parameter of the Taubin Smoothing algorithm
t_mu (float): The mu parameter of the Taubin Smoothing algorithm
selected (bool): If selected the filter is performed only on the
selected faces
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
]
variable[filter_xml] assign[=] call[constant[].join, parameter[list[[<ast.Constant object at 0x7da1b024e890>, <ast.Constant object at 0x7da1b024e410>, <ast.Call object at 0x7da1b024d3f0>, <ast.Constant object at 0x7da1b024ec80>, <ast.Constant object at 0x7da1b024e290>, <ast.Constant object at 0x7da1b024ea70>, <ast.Constant object at 0x7da1b024dae0>, <ast.Call object at 0x7da1b024f670>, <ast.Constant object at 0x7da1b024dd50>, <ast.Constant object at 0x7da1b024f640>, <ast.Constant object at 0x7da1b024fcd0>, <ast.Constant object at 0x7da1b024f1c0>, <ast.Call object at 0x7da1b024da80>, <ast.Constant object at 0x7da1b024c2e0>, <ast.Constant object at 0x7da1b024c730>, <ast.Constant object at 0x7da1b024efb0>, <ast.Constant object at 0x7da1b024c160>, <ast.Call object at 0x7da1b024eec0>, <ast.Constant object at 0x7da1b024f5e0>, <ast.Constant object at 0x7da1b024fe20>, <ast.Constant object at 0x7da1b02956c0>, <ast.Constant object at 0x7da1b0294eb0>]]]]
call[name[util].write_filter, parameter[name[script], name[filter_xml]]]
return[constant[None]] | keyword[def] identifier[taubin] ( identifier[script] , identifier[iterations] = literal[int] , identifier[t_lambda] = literal[int] , identifier[t_mu] =- literal[int] , identifier[selected] = keyword[False] ):
literal[string]
identifier[filter_xml] = literal[string] . identifier[join] ([
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[t_lambda] ),
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[t_mu] ),
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[iterations] ),
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] . identifier[format] ( identifier[str] ( identifier[selected] ). identifier[lower] ()),
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ])
identifier[util] . identifier[write_filter] ( identifier[script] , identifier[filter_xml] )
keyword[return] keyword[None] | def taubin(script, iterations=10, t_lambda=0.5, t_mu=-0.53, selected=False):
""" The lambda & mu Taubin smoothing, it make two steps of smoothing, forth
and back, for each iteration.
Based on:
Gabriel Taubin
"A signal processing approach to fair surface design"
Siggraph 1995
Args:
script: the FilterScript object or script filename to write
the filter to.
iterations (int): The number of times that the taubin smoothing is
iterated. Usually it requires a larger number of iteration than the
classical laplacian.
t_lambda (float): The lambda parameter of the Taubin Smoothing algorithm
t_mu (float): The mu parameter of the Taubin Smoothing algorithm
selected (bool): If selected the filter is performed only on the
selected faces
Layer stack:
No impacts
MeshLab versions:
2016.12
1.3.4BETA
"""
filter_xml = ''.join([' <filter name="Taubin Smooth">\n', ' <Param name="lambda" ', 'value="{}" '.format(t_lambda), 'description="Lambda" ', 'type="RichFloat" ', '/>\n', ' <Param name="mu" ', 'value="{}" '.format(t_mu), 'description="mu" ', 'type="RichFloat" ', '/>\n', ' <Param name="stepSmoothNum" ', 'value="{:d}" '.format(iterations), 'description="Smoothing steps" ', 'type="RichInt" ', '/>\n', ' <Param name="Selected" ', 'value="{}" '.format(str(selected).lower()), 'description="Affect only selected faces" ', 'type="RichBool" ', '/>\n', ' </filter>\n'])
util.write_filter(script, filter_xml)
return None |
def volreg(dset,suffix='_volreg',base=3,tshift=3,dfile_suffix='_volreg.1D'):
    '''simple interface to 3dvolreg (AFNI volume registration)
    :dset: dataset to motion-correct
    :suffix: suffix to add to ``dset`` for volreg'ed file
    :base: either a number or ``dset[#]`` of the base image to register to
    :tshift: if a number, then tshift ignoring that many images, if ``None``
             then don't tshift
    :dfile_suffix: suffix to add to ``dset`` to save the motion parameters to
    '''
    cmd = ['3dvolreg','-prefix',nl.suffix(dset,suffix),'-base',base,'-dfile',nl.prefix(dset)+dfile_suffix]
    # Test explicitly against None: the docstring promises that any number
    # enables tshift, but a plain truthiness test would silently skip
    # tshift for the legitimate value 0 (ignore zero images).
    if tshift is not None:
        cmd += ['-tshift',tshift]
    cmd += [dset]
    nl.run(cmd,products=nl.suffix(dset,suffix))
constant[simple interface to 3dvolreg
:suffix: suffix to add to ``dset`` for volreg'ed file
:base: either a number or ``dset[#]`` of the base image to register to
:tshift: if a number, then tshift ignoring that many images, if ``None``
then don't tshift
:dfile_suffix: suffix to add to ``dset`` to save the motion parameters to
]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da18bcc92d0>, <ast.Constant object at 0x7da18bccb340>, <ast.Call object at 0x7da18bcc8100>, <ast.Constant object at 0x7da18bcc8df0>, <ast.Name object at 0x7da18bcca890>, <ast.Constant object at 0x7da18bcca410>, <ast.BinOp object at 0x7da18bcca1a0>]]
if name[tshift] begin[:]
<ast.AugAssign object at 0x7da18bcc8310>
<ast.AugAssign object at 0x7da18bcca290>
call[name[nl].run, parameter[name[cmd]]] | keyword[def] identifier[volreg] ( identifier[dset] , identifier[suffix] = literal[string] , identifier[base] = literal[int] , identifier[tshift] = literal[int] , identifier[dfile_suffix] = literal[string] ):
literal[string]
identifier[cmd] =[ literal[string] , literal[string] , identifier[nl] . identifier[suffix] ( identifier[dset] , identifier[suffix] ), literal[string] , identifier[base] , literal[string] , identifier[nl] . identifier[prefix] ( identifier[dset] )+ identifier[dfile_suffix] ]
keyword[if] identifier[tshift] :
identifier[cmd] +=[ literal[string] , identifier[tshift] ]
identifier[cmd] +=[ identifier[dset] ]
identifier[nl] . identifier[run] ( identifier[cmd] , identifier[products] = identifier[nl] . identifier[suffix] ( identifier[dset] , identifier[suffix] )) | def volreg(dset, suffix='_volreg', base=3, tshift=3, dfile_suffix='_volreg.1D'):
"""simple interface to 3dvolreg
:suffix: suffix to add to ``dset`` for volreg'ed file
:base: either a number or ``dset[#]`` of the base image to register to
:tshift: if a number, then tshift ignoring that many images, if ``None``
then don't tshift
:dfile_suffix: suffix to add to ``dset`` to save the motion parameters to
"""
cmd = ['3dvolreg', '-prefix', nl.suffix(dset, suffix), '-base', base, '-dfile', nl.prefix(dset) + dfile_suffix]
if tshift:
cmd += ['-tshift', tshift] # depends on [control=['if'], data=[]]
cmd += [dset]
nl.run(cmd, products=nl.suffix(dset, suffix)) |
def get_hypergeometric_stats(N, indices):
    """Calculates hypergeom. p-values and fold enrichments for all cutoffs.
    Parameters
    ----------
    N: int
        The length of the list
    indices: `numpy.ndarray` with ``dtype=np.uint16``
        The (sorted) indices of the "1's" in the list.
    Returns
    -------
    pvals: `numpy.ndarray` of ``np.float64``, shape (N+1,)
        pvals[n] is the hypergeometric p-value for the cutoff after the
        first n elements (pvals[0] is 1.0 by definition).
    folds: `numpy.ndarray` of ``np.float64``, shape (N+1,)
        folds[n] is the fold enrichment at the same cutoff.
    """
    assert isinstance(N, (int, np.integer))
    assert isinstance(indices, np.ndarray) and \
            np.issubdtype(indices.dtype, np.uint16)
    # K = total number of "1's" in the full list
    K = indices.size
    pvals = np.empty(N+1, dtype=np.float64)
    folds = np.empty(N+1, dtype=np.float64)
    pvals[0] = 1.0
    folds[0] = 1.0
    # n = current cutoff position, k = number of "1's" seen so far,
    # p = hypergeometric PMF value f(k; N, K, n); p is updated with an
    # incremental recurrence instead of recomputing the PMF per cutoff.
    n = 0
    k = 0
    p = 1.0
    while n < N:
        if k < K and indices[k] == n:
            # "add one"
            # calculate f(k+1; N,K,n+1) from f(k; N,K,n)
            p *= (float((n+1) * (K-k)) / \
                    float((N-n) * (k+1)))
            k += 1
        else:
            # "add zero"
            # calculate f(k; N,K,n+1) from f(k; N,K,n)
            p *= (float((n+1) * (N-K-n+k)) /
                  float((N-n) * (n-k+1)))
        n += 1
        # calculate hypergeometric p-value
        pvals[n] = get_hgp(p, k, N, K, n)
        # calculate fold enrichment (observed "1's" over expectation K*n/N)
        folds[n] = k / (K*(n/float(N)))
    return pvals, folds
constant[Calculates hypergeom. p-values and fold enrichments for all cutoffs.
Parameters
----------
N: int
The length of the list
indices: `numpy.ndarray` with ``dtype=np.uint16``
The (sorted) indices of the "1's" in the list.
]
assert[call[name[isinstance], parameter[name[N], tuple[[<ast.Name object at 0x7da2043465c0>, <ast.Attribute object at 0x7da204344850>]]]]]
assert[<ast.BoolOp object at 0x7da204345f60>]
variable[K] assign[=] name[indices].size
variable[pvals] assign[=] call[name[np].empty, parameter[binary_operation[name[N] + constant[1]]]]
variable[folds] assign[=] call[name[np].empty, parameter[binary_operation[name[N] + constant[1]]]]
call[name[pvals]][constant[0]] assign[=] constant[1.0]
call[name[folds]][constant[0]] assign[=] constant[1.0]
variable[n] assign[=] constant[0]
variable[k] assign[=] constant[0]
variable[p] assign[=] constant[1.0]
while compare[name[n] less[<] name[N]] begin[:]
if <ast.BoolOp object at 0x7da20e954220> begin[:]
<ast.AugAssign object at 0x7da20e9575b0>
<ast.AugAssign object at 0x7da20e9556f0>
<ast.AugAssign object at 0x7da18f09f5b0>
call[name[pvals]][name[n]] assign[=] call[name[get_hgp], parameter[name[p], name[k], name[N], name[K], name[n]]]
call[name[folds]][name[n]] assign[=] binary_operation[name[k] / binary_operation[name[K] * binary_operation[name[n] / call[name[float], parameter[name[N]]]]]]
return[tuple[[<ast.Name object at 0x7da18f09d510>, <ast.Name object at 0x7da18f09e0e0>]]] | keyword[def] identifier[get_hypergeometric_stats] ( identifier[N] , identifier[indices] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[N] ,( identifier[int] , identifier[np] . identifier[integer] ))
keyword[assert] identifier[isinstance] ( identifier[indices] , identifier[np] . identifier[ndarray] ) keyword[and] identifier[np] . identifier[issubdtype] ( identifier[indices] . identifier[dtype] , identifier[np] . identifier[uint16] )
identifier[K] = identifier[indices] . identifier[size]
identifier[pvals] = identifier[np] . identifier[empty] ( identifier[N] + literal[int] , identifier[dtype] = identifier[np] . identifier[float64] )
identifier[folds] = identifier[np] . identifier[empty] ( identifier[N] + literal[int] , identifier[dtype] = identifier[np] . identifier[float64] )
identifier[pvals] [ literal[int] ]= literal[int]
identifier[folds] [ literal[int] ]= literal[int]
identifier[n] = literal[int]
identifier[k] = literal[int]
identifier[p] = literal[int]
keyword[while] identifier[n] < identifier[N] :
keyword[if] identifier[k] < identifier[K] keyword[and] identifier[indices] [ identifier[k] ]== identifier[n] :
identifier[p] *=( identifier[float] (( identifier[n] + literal[int] )*( identifier[K] - identifier[k] ))/ identifier[float] (( identifier[N] - identifier[n] )*( identifier[k] + literal[int] )))
identifier[k] += literal[int]
keyword[else] :
identifier[p] *=( identifier[float] (( identifier[n] + literal[int] )*( identifier[N] - identifier[K] - identifier[n] + identifier[k] ))/
identifier[float] (( identifier[N] - identifier[n] )*( identifier[n] - identifier[k] + literal[int] )))
identifier[n] += literal[int]
identifier[pvals] [ identifier[n] ]= identifier[get_hgp] ( identifier[p] , identifier[k] , identifier[N] , identifier[K] , identifier[n] )
identifier[folds] [ identifier[n] ]= identifier[k] /( identifier[K] *( identifier[n] / identifier[float] ( identifier[N] )))
keyword[return] identifier[pvals] , identifier[folds] | def get_hypergeometric_stats(N, indices):
"""Calculates hypergeom. p-values and fold enrichments for all cutoffs.
Parameters
----------
N: int
The length of the list
indices: `numpy.ndarray` with ``dtype=np.uint16``
The (sorted) indices of the "1's" in the list.
"""
assert isinstance(N, (int, np.integer))
assert isinstance(indices, np.ndarray) and np.issubdtype(indices.dtype, np.uint16)
K = indices.size
pvals = np.empty(N + 1, dtype=np.float64)
folds = np.empty(N + 1, dtype=np.float64)
pvals[0] = 1.0
folds[0] = 1.0
n = 0
k = 0
p = 1.0
while n < N:
if k < K and indices[k] == n:
# "add one"
# calculate f(k+1; N,K,n+1) from f(k; N,K,n)
p *= float((n + 1) * (K - k)) / float((N - n) * (k + 1))
k += 1 # depends on [control=['if'], data=[]]
else:
# "add zero"
# calculate f(k; N,K,n+1) from f(k; N,K,n)
p *= float((n + 1) * (N - K - n + k)) / float((N - n) * (n - k + 1))
n += 1
# calculate hypergeometric p-value
pvals[n] = get_hgp(p, k, N, K, n)
# calculate fold enrichment
folds[n] = k / (K * (n / float(N))) # depends on [control=['while'], data=['n', 'N']]
return (pvals, folds) |
def fetch(self, path, sender, msg):
    """
    Start a transfer for a registered path.
    :param str path:
        File path.
    :param mitogen.core.Sender sender:
        Sender to receive file data.
    :param mitogen.core.Message msg:
        The request message; replies (metadata or CallError) are sent
        through it, and its ``src_id`` authenticates the requester.
    :returns:
        Dict containing the file metadata:
        * ``size``: File size in bytes.
        * ``mode``: Integer file mode.
        * ``owner``: Owner account name on host machine.
        * ``group``: Owner group name on host machine.
        * ``mtime``: Floating point modification time.
        * ``ctime``: Floating point change time.
    :raises Error:
        Unregistered path, or Sender did not match requestee context.
    """
    # Refuse paths that were neither registered explicitly nor covered
    # by an authorized prefix.
    if path not in self._paths and not self._prefix_is_authorized(path):
        msg.reply(mitogen.core.CallError(
            Error(self.unregistered_msg % (path,))
        ))
        return
    # Only the context that sent this request may nominate the receiver.
    if msg.src_id != sender.context.context_id:
        msg.reply(mitogen.core.CallError(
            Error(self.context_mismatch_msg)
        ))
        return
    LOG.debug('Serving %r', path)
    # Response must arrive first so requestee can begin receive loop,
    # otherwise first ack won't arrive until all pending chunks were
    # delivered. In that case max BDP would always be 128KiB, aka. max
    # ~10Mbit/sec over a 100ms link.
    try:
        fp = open(path, 'rb', self.IO_SIZE)
        msg.reply(self._generate_stat(path))
    except IOError:
        # Propagate the open/stat failure to the requester as a CallError.
        msg.reply(mitogen.core.CallError(
            sys.exc_info()[1]
        ))
        return
    # Queue the (sender, fp) job on the per-stream state; the scheduler
    # decides how many transfers may run concurrently on that stream.
    stream = self.router.stream_by_id(sender.context.context_id)
    state = self._state_by_stream.setdefault(stream, FileStreamState())
    state.lock.acquire()
    try:
        state.jobs.append((sender, fp))
        self._schedule_pending_unlocked(state)
    finally:
        state.lock.release()
constant[
Start a transfer for a registered path.
:param str path:
File path.
:param mitogen.core.Sender sender:
Sender to receive file data.
:returns:
Dict containing the file metadata:
* ``size``: File size in bytes.
* ``mode``: Integer file mode.
* ``owner``: Owner account name on host machine.
* ``group``: Owner group name on host machine.
* ``mtime``: Floating point modification time.
* ``ctime``: Floating point change time.
:raises Error:
Unregistered path, or Sender did not match requestee context.
]
if <ast.BoolOp object at 0x7da1b1d0e3b0> begin[:]
call[name[msg].reply, parameter[call[name[mitogen].core.CallError, parameter[call[name[Error], parameter[binary_operation[name[self].unregistered_msg <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1d0d0c0>]]]]]]]]]
return[None]
if compare[name[msg].src_id not_equal[!=] name[sender].context.context_id] begin[:]
call[name[msg].reply, parameter[call[name[mitogen].core.CallError, parameter[call[name[Error], parameter[name[self].context_mismatch_msg]]]]]]
return[None]
call[name[LOG].debug, parameter[constant[Serving %r], name[path]]]
<ast.Try object at 0x7da1b1d0c7f0>
variable[stream] assign[=] call[name[self].router.stream_by_id, parameter[name[sender].context.context_id]]
variable[state] assign[=] call[name[self]._state_by_stream.setdefault, parameter[name[stream], call[name[FileStreamState], parameter[]]]]
call[name[state].lock.acquire, parameter[]]
<ast.Try object at 0x7da1b1d0dc60> | keyword[def] identifier[fetch] ( identifier[self] , identifier[path] , identifier[sender] , identifier[msg] ):
literal[string]
keyword[if] identifier[path] keyword[not] keyword[in] identifier[self] . identifier[_paths] keyword[and] keyword[not] identifier[self] . identifier[_prefix_is_authorized] ( identifier[path] ):
identifier[msg] . identifier[reply] ( identifier[mitogen] . identifier[core] . identifier[CallError] (
identifier[Error] ( identifier[self] . identifier[unregistered_msg] %( identifier[path] ,))
))
keyword[return]
keyword[if] identifier[msg] . identifier[src_id] != identifier[sender] . identifier[context] . identifier[context_id] :
identifier[msg] . identifier[reply] ( identifier[mitogen] . identifier[core] . identifier[CallError] (
identifier[Error] ( identifier[self] . identifier[context_mismatch_msg] )
))
keyword[return]
identifier[LOG] . identifier[debug] ( literal[string] , identifier[path] )
keyword[try] :
identifier[fp] = identifier[open] ( identifier[path] , literal[string] , identifier[self] . identifier[IO_SIZE] )
identifier[msg] . identifier[reply] ( identifier[self] . identifier[_generate_stat] ( identifier[path] ))
keyword[except] identifier[IOError] :
identifier[msg] . identifier[reply] ( identifier[mitogen] . identifier[core] . identifier[CallError] (
identifier[sys] . identifier[exc_info] ()[ literal[int] ]
))
keyword[return]
identifier[stream] = identifier[self] . identifier[router] . identifier[stream_by_id] ( identifier[sender] . identifier[context] . identifier[context_id] )
identifier[state] = identifier[self] . identifier[_state_by_stream] . identifier[setdefault] ( identifier[stream] , identifier[FileStreamState] ())
identifier[state] . identifier[lock] . identifier[acquire] ()
keyword[try] :
identifier[state] . identifier[jobs] . identifier[append] (( identifier[sender] , identifier[fp] ))
identifier[self] . identifier[_schedule_pending_unlocked] ( identifier[state] )
keyword[finally] :
identifier[state] . identifier[lock] . identifier[release] () | def fetch(self, path, sender, msg):
"""
Start a transfer for a registered path.
:param str path:
File path.
:param mitogen.core.Sender sender:
Sender to receive file data.
:returns:
Dict containing the file metadata:
* ``size``: File size in bytes.
* ``mode``: Integer file mode.
* ``owner``: Owner account name on host machine.
* ``group``: Owner group name on host machine.
* ``mtime``: Floating point modification time.
* ``ctime``: Floating point change time.
:raises Error:
Unregistered path, or Sender did not match requestee context.
"""
if path not in self._paths and (not self._prefix_is_authorized(path)):
msg.reply(mitogen.core.CallError(Error(self.unregistered_msg % (path,))))
return # depends on [control=['if'], data=[]]
if msg.src_id != sender.context.context_id:
msg.reply(mitogen.core.CallError(Error(self.context_mismatch_msg)))
return # depends on [control=['if'], data=[]]
LOG.debug('Serving %r', path)
# Response must arrive first so requestee can begin receive loop,
# otherwise first ack won't arrive until all pending chunks were
# delivered. In that case max BDP would always be 128KiB, aka. max
# ~10Mbit/sec over a 100ms link.
try:
fp = open(path, 'rb', self.IO_SIZE)
msg.reply(self._generate_stat(path)) # depends on [control=['try'], data=[]]
except IOError:
msg.reply(mitogen.core.CallError(sys.exc_info()[1]))
return # depends on [control=['except'], data=[]]
stream = self.router.stream_by_id(sender.context.context_id)
state = self._state_by_stream.setdefault(stream, FileStreamState())
state.lock.acquire()
try:
state.jobs.append((sender, fp))
self._schedule_pending_unlocked(state) # depends on [control=['try'], data=[]]
finally:
state.lock.release() |
def diffusion_mds(means, weights, d, diffusion_rounds=10):
    """Dimensionality reduction with MDS after diffusing the weights.

    The weight matrix is sharpened for ``diffusion_rounds`` rounds
    (element-wise squaring followed by column renormalization) before
    the reduced coordinates are projected onto the cells.

    Args:
        means (array): genes x clusters
        weights (array): clusters x cells
        d (int): desired dimensionality
        diffusion_rounds (int): number of squaring/renormalization rounds
    Returns:
        W_reduced (array): array of shape (d, cells)
    """
    for _ in range(diffusion_rounds):
        weights = weights * weights
        weights = weights / weights.sum(0)
    X = dim_reduce(means, weights, d)
    # dim_reduce may return the embedding as (2, clusters) or
    # (clusters, d); orient it so the product yields (d, cells).
    return X.dot(weights) if X.shape[0] == 2 else X.T.dot(weights)
constant[
Dimensionality reduction using MDS, while running diffusion on W.
Args:
means (array): genes x clusters
weights (array): clusters x cells
d (int): desired dimensionality
Returns:
W_reduced (array): array of shape (d, cells)
]
for taget[name[i]] in starred[call[name[range], parameter[name[diffusion_rounds]]]] begin[:]
variable[weights] assign[=] binary_operation[name[weights] * name[weights]]
variable[weights] assign[=] binary_operation[name[weights] / call[name[weights].sum, parameter[constant[0]]]]
variable[X] assign[=] call[name[dim_reduce], parameter[name[means], name[weights], name[d]]]
if compare[call[name[X].shape][constant[0]] equal[==] constant[2]] begin[:]
return[call[name[X].dot, parameter[name[weights]]]] | keyword[def] identifier[diffusion_mds] ( identifier[means] , identifier[weights] , identifier[d] , identifier[diffusion_rounds] = literal[int] ):
literal[string]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[diffusion_rounds] ):
identifier[weights] = identifier[weights] * identifier[weights]
identifier[weights] = identifier[weights] / identifier[weights] . identifier[sum] ( literal[int] )
identifier[X] = identifier[dim_reduce] ( identifier[means] , identifier[weights] , identifier[d] )
keyword[if] identifier[X] . identifier[shape] [ literal[int] ]== literal[int] :
keyword[return] identifier[X] . identifier[dot] ( identifier[weights] )
keyword[else] :
keyword[return] identifier[X] . identifier[T] . identifier[dot] ( identifier[weights] ) | def diffusion_mds(means, weights, d, diffusion_rounds=10):
"""
Dimensionality reduction using MDS, while running diffusion on W.
Args:
means (array): genes x clusters
weights (array): clusters x cells
d (int): desired dimensionality
Returns:
W_reduced (array): array of shape (d, cells)
"""
for i in range(diffusion_rounds):
weights = weights * weights
weights = weights / weights.sum(0) # depends on [control=['for'], data=[]]
X = dim_reduce(means, weights, d)
if X.shape[0] == 2:
return X.dot(weights) # depends on [control=['if'], data=[]]
else:
return X.T.dot(weights) |
def _metadata_endpoint(self, context):
    """
    Endpoint that serves this backend's SAML metadata as XML
    :type context: satosa.context.Context
    :rtype: satosa.response.Response
    :param context: The current context
    :return: response with metadata
    """
    satosa_logging(logger, logging.DEBUG, "Sending metadata response", context.state)
    # create_metadata_string returns bytes; decode before wrapping in a Response.
    metadata_bytes = create_metadata_string(None, self.sp.config, 4, None, None, None, None, None)
    metadata_string = metadata_bytes.decode("utf-8")
    return Response(metadata_string, content="text/xml")
constant[
Endpoint for retrieving the backend metadata
:type context: satosa.context.Context
:rtype: satosa.response.Response
:param context: The current context
:return: response with metadata
]
call[name[satosa_logging], parameter[name[logger], name[logging].DEBUG, constant[Sending metadata response], name[context].state]]
variable[metadata_string] assign[=] call[call[name[create_metadata_string], parameter[constant[None], name[self].sp.config, constant[4], constant[None], constant[None], constant[None], constant[None], constant[None]]].decode, parameter[constant[utf-8]]]
return[call[name[Response], parameter[name[metadata_string]]]] | keyword[def] identifier[_metadata_endpoint] ( identifier[self] , identifier[context] ):
literal[string]
identifier[satosa_logging] ( identifier[logger] , identifier[logging] . identifier[DEBUG] , literal[string] , identifier[context] . identifier[state] )
identifier[metadata_string] = identifier[create_metadata_string] ( keyword[None] , identifier[self] . identifier[sp] . identifier[config] , literal[int] , keyword[None] , keyword[None] , keyword[None] , keyword[None] ,
keyword[None] ). identifier[decode] ( literal[string] )
keyword[return] identifier[Response] ( identifier[metadata_string] , identifier[content] = literal[string] ) | def _metadata_endpoint(self, context):
"""
Endpoint for retrieving the backend metadata
:type context: satosa.context.Context
:rtype: satosa.response.Response
:param context: The current context
:return: response with metadata
"""
satosa_logging(logger, logging.DEBUG, 'Sending metadata response', context.state)
metadata_string = create_metadata_string(None, self.sp.config, 4, None, None, None, None, None).decode('utf-8')
return Response(metadata_string, content='text/xml') |
def source_sum_err(self):
    """
    The uncertainty of `~photutils.SourceProperties.source_sum`,
    propagated from the input ``error`` array.

    Computed as the quadrature sum of the per-pixel total errors over
    the non-masked pixels of the source segment:

    .. math:: \\Delta F = \\sqrt{\\sum_{i \\in S}
              \\sigma_{\\mathrm{tot}, i}^2}

    where :math:`\\Delta F` is ``source_sum_err``,
    :math:`\\sigma_{\\mathrm{tot, i}}` are the pixel-wise total
    errors, and :math:`S` are the non-masked pixels in the source
    segment.

    Pixels masked in the input ``data`` (including automatically masked
    non-finite values, i.e. NaN and inf) are masked in the error array
    as well.  Returns `None` if no ``error`` array was provided, and
    NaN when the segment is completely masked.
    """
    if self._error is None:
        return None
    if self._is_completely_masked:
        # NaN must still carry the error unit so table output stays consistent.
        return np.nan * self._error_unit
    return np.sqrt(np.sum(self._error_values ** 2))
constant[
The uncertainty of `~photutils.SourceProperties.source_sum`,
propagated from the input ``error`` array.
``source_sum_err`` is the quadrature sum of the total errors
over the non-masked pixels within the source segment:
.. math:: \Delta F = \sqrt{\sum_{i \in S}
\sigma_{\mathrm{tot}, i}^2}
where :math:`\Delta F` is ``source_sum_err``,
:math:`\sigma_{\mathrm{tot, i}}` are the pixel-wise total
errors, and :math:`S` are the non-masked pixels in the source
segment.
Pixel values that are masked in the input ``data``, including
any non-finite pixel values (i.e. NaN, infs) that are
automatically masked, are also masked in the error array.
]
if compare[name[self]._error is_not constant[None]] begin[:]
if name[self]._is_completely_masked begin[:]
return[binary_operation[name[np].nan * name[self]._error_unit]] | keyword[def] identifier[source_sum_err] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_error] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[self] . identifier[_is_completely_masked] :
keyword[return] identifier[np] . identifier[nan] * identifier[self] . identifier[_error_unit]
keyword[else] :
keyword[return] identifier[np] . identifier[sqrt] ( identifier[np] . identifier[sum] ( identifier[self] . identifier[_error_values] ** literal[int] ))
keyword[else] :
keyword[return] keyword[None] | def source_sum_err(self):
"""
The uncertainty of `~photutils.SourceProperties.source_sum`,
propagated from the input ``error`` array.
``source_sum_err`` is the quadrature sum of the total errors
over the non-masked pixels within the source segment:
.. math:: \\Delta F = \\sqrt{\\sum_{i \\in S}
\\sigma_{\\mathrm{tot}, i}^2}
where :math:`\\Delta F` is ``source_sum_err``,
:math:`\\sigma_{\\mathrm{tot, i}}` are the pixel-wise total
errors, and :math:`S` are the non-masked pixels in the source
segment.
Pixel values that are masked in the input ``data``, including
any non-finite pixel values (i.e. NaN, infs) that are
automatically masked, are also masked in the error array.
"""
if self._error is not None:
if self._is_completely_masked:
return np.nan * self._error_unit # table output needs unit # depends on [control=['if'], data=[]]
else:
return np.sqrt(np.sum(self._error_values ** 2)) # depends on [control=['if'], data=[]]
else:
return None |
def _remote_methodcall(id, method_name, *args, **kwargs):
    """(Executed on remote engine) convert Ids to real objects, call method

    Any ``Id`` argument (or an ``Id`` inside a non-string sequence, one
    level deep) is replaced with the real object from this engine before
    the call.  Proxyable results (and proxyable items of a sequence
    result) are wrapped in ``Ref`` so the caller receives a proxy.
    """
    # ``collections.Sequence`` has been deprecated since Python 3.3 and was
    # removed in Python 3.10; fall back only on very old interpreters.
    try:
        from collections.abc import Sequence
    except ImportError:  # Python < 3.3
        from collections import Sequence

    def _resolve(a):
        # Map an Id (or Ids inside a one-level sequence) to real objects;
        # everything else passes through unchanged.
        if isinstance(a, Id):
            return distob.engine[a]
        if isinstance(a, Sequence) and not isinstance(a, string_types):
            return [distob.engine[b] if isinstance(b, Id) else b for b in a]
        return a

    obj = distob.engine[id]
    nargs = [_resolve(a) for a in args]
    kwargs = dict((k, _resolve(a)) for k, a in kwargs.items())
    result = getattr(obj, method_name)(*nargs, **kwargs)
    if (isinstance(result, Sequence) and
            not isinstance(result, string_types)):
        # We will return any sub-sequences by value, not recurse deeper;
        # but wrap each proxyable item individually.
        return [Ref(r) if type(r) in distob.engine.proxy_types else r
                for r in result]
    elif type(result) in distob.engine.proxy_types:
        return Ref(result)
    else:
        return result
constant[(Executed on remote engine) convert Ids to real objects, call method ]
variable[obj] assign[=] call[name[distob].engine][name[id]]
variable[nargs] assign[=] list[[]]
for taget[name[a]] in starred[name[args]] begin[:]
if call[name[isinstance], parameter[name[a], name[Id]]] begin[:]
call[name[nargs].append, parameter[call[name[distob].engine][name[a]]]]
for taget[tuple[[<ast.Name object at 0x7da1affd51e0>, <ast.Name object at 0x7da1affd4b80>]]] in starred[call[name[kwargs].items, parameter[]]] begin[:]
if call[name[isinstance], parameter[name[a], name[Id]]] begin[:]
call[name[kwargs]][name[k]] assign[=] call[name[distob].engine][name[a]]
variable[result] assign[=] call[call[name[getattr], parameter[name[obj], name[method_name]]], parameter[<ast.Starred object at 0x7da1affd4640>]]
if <ast.BoolOp object at 0x7da1affd6440> begin[:]
variable[results] assign[=] list[[]]
for taget[name[subresult]] in starred[name[result]] begin[:]
if compare[call[name[type], parameter[name[subresult]]] in name[distob].engine.proxy_types] begin[:]
call[name[results].append, parameter[call[name[Ref], parameter[name[subresult]]]]]
return[name[results]] | keyword[def] identifier[_remote_methodcall] ( identifier[id] , identifier[method_name] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[obj] = identifier[distob] . identifier[engine] [ identifier[id] ]
identifier[nargs] =[]
keyword[for] identifier[a] keyword[in] identifier[args] :
keyword[if] identifier[isinstance] ( identifier[a] , identifier[Id] ):
identifier[nargs] . identifier[append] ( identifier[distob] . identifier[engine] [ identifier[a] ])
keyword[elif] ( identifier[isinstance] ( identifier[a] , identifier[collections] . identifier[Sequence] ) keyword[and]
keyword[not] identifier[isinstance] ( identifier[a] , identifier[string_types] )):
identifier[nargs] . identifier[append] (
[ identifier[distob] . identifier[engine] [ identifier[b] ] keyword[if] identifier[isinstance] ( identifier[b] , identifier[Id] ) keyword[else] identifier[b] keyword[for] identifier[b] keyword[in] identifier[a] ])
keyword[else] : identifier[nargs] . identifier[append] ( identifier[a] )
keyword[for] identifier[k] , identifier[a] keyword[in] identifier[kwargs] . identifier[items] ():
keyword[if] identifier[isinstance] ( identifier[a] , identifier[Id] ):
identifier[kwargs] [ identifier[k] ]= identifier[distob] . identifier[engine] [ identifier[a] ]
keyword[elif] ( identifier[isinstance] ( identifier[a] , identifier[collections] . identifier[Sequence] ) keyword[and]
keyword[not] identifier[isinstance] ( identifier[a] , identifier[string_types] )):
identifier[kwargs] [ identifier[k] ]=[
identifier[distob] . identifier[engine] [ identifier[b] ] keyword[if] identifier[isinstance] ( identifier[b] , identifier[Id] ) keyword[else] identifier[b] keyword[for] identifier[b] keyword[in] identifier[a] ]
identifier[result] = identifier[getattr] ( identifier[obj] , identifier[method_name] )(* identifier[nargs] ,** identifier[kwargs] )
keyword[if] ( identifier[isinstance] ( identifier[result] , identifier[collections] . identifier[Sequence] ) keyword[and]
keyword[not] identifier[isinstance] ( identifier[result] , identifier[string_types] )):
identifier[results] =[]
keyword[for] identifier[subresult] keyword[in] identifier[result] :
keyword[if] identifier[type] ( identifier[subresult] ) keyword[in] identifier[distob] . identifier[engine] . identifier[proxy_types] :
identifier[results] . identifier[append] ( identifier[Ref] ( identifier[subresult] ))
keyword[else] :
identifier[results] . identifier[append] ( identifier[subresult] )
keyword[return] identifier[results]
keyword[elif] identifier[type] ( identifier[result] ) keyword[in] identifier[distob] . identifier[engine] . identifier[proxy_types] :
keyword[return] identifier[Ref] ( identifier[result] )
keyword[else] :
keyword[return] identifier[result] | def _remote_methodcall(id, method_name, *args, **kwargs):
"""(Executed on remote engine) convert Ids to real objects, call method """
obj = distob.engine[id]
nargs = []
for a in args:
if isinstance(a, Id):
nargs.append(distob.engine[a]) # depends on [control=['if'], data=[]]
elif isinstance(a, collections.Sequence) and (not isinstance(a, string_types)):
nargs.append([distob.engine[b] if isinstance(b, Id) else b for b in a]) # depends on [control=['if'], data=[]]
else:
nargs.append(a) # depends on [control=['for'], data=['a']]
for (k, a) in kwargs.items():
if isinstance(a, Id):
kwargs[k] = distob.engine[a] # depends on [control=['if'], data=[]]
elif isinstance(a, collections.Sequence) and (not isinstance(a, string_types)):
kwargs[k] = [distob.engine[b] if isinstance(b, Id) else b for b in a] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
result = getattr(obj, method_name)(*nargs, **kwargs)
if isinstance(result, collections.Sequence) and (not isinstance(result, string_types)):
# We will return any sub-sequences by value, not recurse deeper
results = []
for subresult in result:
if type(subresult) in distob.engine.proxy_types:
results.append(Ref(subresult)) # depends on [control=['if'], data=[]]
else:
results.append(subresult) # depends on [control=['for'], data=['subresult']]
return results # depends on [control=['if'], data=[]]
elif type(result) in distob.engine.proxy_types:
return Ref(result) # depends on [control=['if'], data=[]]
else:
return result |
def getSegmentKeys(self, segment):
    """
    Return every aliased key that belongs to one segment.
    :param segment: Segment to find. Ex : PV1, PID
    :return: list of aliased keys whose name starts with ``segment``
    """
    # Keep only the keys prefixed by the requested segment name.
    prefix_matches = [key for key in self.getAliasedKeys()
                      if key.startswith(segment)]
    return prefix_matches
constant[
Get the different keys for 1 defined segment
:param segment: Segment to find. Ex : PV1, PID
]
return[call[name[list], parameter[call[name[filter], parameter[<ast.Lambda object at 0x7da204623fa0>, call[name[self].getAliasedKeys, parameter[]]]]]]] | keyword[def] identifier[getSegmentKeys] ( identifier[self] , identifier[segment] ):
literal[string]
keyword[return] identifier[list] ( identifier[filter] ( keyword[lambda] identifier[x] : identifier[x] . identifier[startswith] ( identifier[segment] ), identifier[self] . identifier[getAliasedKeys] ())) | def getSegmentKeys(self, segment):
"""
Get the different keys for 1 defined segment
:param segment: Segment to find. Ex : PV1, PID
"""
return list(filter(lambda x: x.startswith(segment), self.getAliasedKeys())) |
def split_title(self, title, splitter):
    """\
    Split the title and keep the best (longest) piece.
    :param title: raw title string to clean up
    :param splitter: regex-like object whose ``split`` breaks the title apart
    :return: the longest piece, passed through TITLE_REPLACEMENTS and stripped
    """
    title_pieces = splitter.split(title)
    # max() returns the *first* maximal element, which matches the original
    # strictly-greater scan: ties keep the earliest piece.
    title = max(title_pieces, key=len)
    return TITLE_REPLACEMENTS.replaceAll(title).strip()
constant[ Split the title to best part possible
]
variable[large_text_length] assign[=] constant[0]
variable[large_text_index] assign[=] constant[0]
variable[title_pieces] assign[=] call[name[splitter].split, parameter[name[title]]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[title_pieces]]]]]] begin[:]
variable[current] assign[=] call[name[title_pieces]][name[i]]
if compare[call[name[len], parameter[name[current]]] greater[>] name[large_text_length]] begin[:]
variable[large_text_length] assign[=] call[name[len], parameter[name[current]]]
variable[large_text_index] assign[=] name[i]
variable[title] assign[=] call[name[title_pieces]][name[large_text_index]]
return[call[call[name[TITLE_REPLACEMENTS].replaceAll, parameter[name[title]]].strip, parameter[]]] | keyword[def] identifier[split_title] ( identifier[self] , identifier[title] , identifier[splitter] ):
literal[string]
identifier[large_text_length] = literal[int]
identifier[large_text_index] = literal[int]
identifier[title_pieces] = identifier[splitter] . identifier[split] ( identifier[title] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[title_pieces] )):
identifier[current] = identifier[title_pieces] [ identifier[i] ]
keyword[if] identifier[len] ( identifier[current] )> identifier[large_text_length] :
identifier[large_text_length] = identifier[len] ( identifier[current] )
identifier[large_text_index] = identifier[i]
identifier[title] = identifier[title_pieces] [ identifier[large_text_index] ]
keyword[return] identifier[TITLE_REPLACEMENTS] . identifier[replaceAll] ( identifier[title] ). identifier[strip] () | def split_title(self, title, splitter):
""" Split the title to best part possible
"""
large_text_length = 0
large_text_index = 0
title_pieces = splitter.split(title)
# find the largest title piece
for i in range(len(title_pieces)):
current = title_pieces[i]
if len(current) > large_text_length:
large_text_length = len(current)
large_text_index = i # depends on [control=['if'], data=['large_text_length']] # depends on [control=['for'], data=['i']]
# replace content
title = title_pieces[large_text_index]
return TITLE_REPLACEMENTS.replaceAll(title).strip() |
def skip(self, count):
    '''
    Advance past up to [count] cases by mutating repeatedly; stops early
    as soon as a mutation fails.
    :count: number of cases to skip
    :rtype: int
    :return: number of cases actually skipped
    '''
    skipped = 0
    # Keep mutating until the budget is spent or a mutation fails.
    while skipped < count and self.mutate():
        skipped += 1
    return skipped
constant[
Skip up to [count] cases, default behavior is to just mutate [count] times
:count: number of cases to skip
:rtype: int
:return: number of cases skipped
]
variable[skipped] assign[=] constant[0]
for taget[name[_]] in starred[call[name[range], parameter[name[count]]]] begin[:]
if call[name[self].mutate, parameter[]] begin[:]
<ast.AugAssign object at 0x7da207f98cd0>
return[name[skipped]] | keyword[def] identifier[skip] ( identifier[self] , identifier[count] ):
literal[string]
identifier[skipped] = literal[int]
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[count] ):
keyword[if] identifier[self] . identifier[mutate] ():
identifier[skipped] += literal[int]
keyword[else] :
keyword[break]
keyword[return] identifier[skipped] | def skip(self, count):
"""
Skip up to [count] cases, default behavior is to just mutate [count] times
:count: number of cases to skip
:rtype: int
:return: number of cases skipped
"""
skipped = 0
for _ in range(count):
if self.mutate():
skipped += 1 # depends on [control=['if'], data=[]]
else:
break # depends on [control=['for'], data=[]]
return skipped |
def create_round_trip_tear_sheet(returns, positions, transactions,
                                 sector_mappings=None,
                                 estimate_intraday='infer', return_fig=False):
    """
    Generate a number of figures and plots describing the duration,
    frequency, and profitability of trade "round trips."
    A round trip is started when a new long or short position is
    opened and is only completed when the number of shares in that
    position returns to or crosses zero.
    Parameters
    ----------
    returns : pd.Series
        Daily returns of the strategy, noncumulative.
         - See full explanation in create_full_tear_sheet.
    positions : pd.DataFrame
        Daily net position values.
         - See full explanation in create_full_tear_sheet.
    transactions : pd.DataFrame
        Prices and amounts of executed trades. One row per trade.
         - See full explanation in create_full_tear_sheet.
    sector_mappings : dict or pd.Series, optional
        Security identifier to sector mapping.
        Security ids as keys, sectors as values.
    estimate_intraday: boolean or str, optional
        Approximate returns for intraday strategies.
        See description in create_full_tear_sheet.
    return_fig : boolean, optional
        If True, returns the figure that was plotted on.
    """
    # Rebuild an approximate intraday view of positions when requested
    # ('infer' lets the helper decide from the data itself).
    positions = utils.check_intraday(estimate_intraday, returns,
                                     positions, transactions)
    # Append synthetic closing transactions so every still-open position
    # forms a completed round trip.
    transactions_closed = round_trips.add_closing_transactions(positions,
                                                               transactions)
    # extract_round_trips requires BoD portfolio_value
    trades = round_trips.extract_round_trips(
        transactions_closed,
        portfolio_value=positions.sum(axis='columns') / (1 + returns)
    )
    # Too few trades make the statistics meaningless; warn and bail out.
    if len(trades) < 5:
        warnings.warn(
            """Fewer than 5 round-trip trades made.
               Skipping round trip tearsheet.""", UserWarning)
        return
    round_trips.print_round_trip_stats(trades)
    plotting.show_profit_attribution(trades)
    # Optionally repeat the profit attribution grouped by sector.
    if sector_mappings is not None:
        sector_trades = round_trips.apply_sector_mappings_to_round_trips(
            trades, sector_mappings)
        plotting.show_profit_attribution(sector_trades)
    # Lay the five plots out on a 3x2 grid; the top row spans both columns.
    fig = plt.figure(figsize=(14, 3 * 6))
    gs = gridspec.GridSpec(3, 2, wspace=0.5, hspace=0.5)
    ax_trade_lifetimes = plt.subplot(gs[0, :])
    ax_prob_profit_trade = plt.subplot(gs[1, 0])
    ax_holding_time = plt.subplot(gs[1, 1])
    ax_pnl_per_round_trip_dollars = plt.subplot(gs[2, 0])
    ax_pnl_per_round_trip_pct = plt.subplot(gs[2, 1])
    plotting.plot_round_trip_lifetimes(trades, ax=ax_trade_lifetimes)
    plotting.plot_prob_profit_trade(trades, ax=ax_prob_profit_trade)
    # Histogram of round-trip durations in whole days.
    trade_holding_times = [x.days for x in trades['duration']]
    sns.distplot(trade_holding_times, kde=False, ax=ax_holding_time)
    ax_holding_time.set(xlabel='Holding time in days')
    # Histograms of per-trade PnL, in dollars and in percent.
    sns.distplot(trades.pnl, kde=False, ax=ax_pnl_per_round_trip_dollars)
    ax_pnl_per_round_trip_dollars.set(xlabel='PnL per round-trip trade in $')
    sns.distplot(trades.returns.dropna() * 100, kde=False,
                 ax=ax_pnl_per_round_trip_pct)
    ax_pnl_per_round_trip_pct.set(
        xlabel='Round-trip returns in %')
    gs.tight_layout(fig)
    if return_fig:
        return fig | def function[create_round_trip_tear_sheet, parameter[returns, positions, transactions, sector_mappings, estimate_intraday, return_fig]]:
constant[
Generate a number of figures and plots describing the duration,
frequency, and profitability of trade "round trips."
A round trip is started when a new long or short position is
opened and is only completed when the number of shares in that
position returns to or crosses zero.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in create_full_tear_sheet.
sector_mappings : dict or pd.Series, optional
Security identifier to sector mapping.
Security ids as keys, sectors as values.
estimate_intraday: boolean or str, optional
Approximate returns for intraday strategies.
See description in create_full_tear_sheet.
return_fig : boolean, optional
If True, returns the figure that was plotted on.
]
variable[positions] assign[=] call[name[utils].check_intraday, parameter[name[estimate_intraday], name[returns], name[positions], name[transactions]]]
variable[transactions_closed] assign[=] call[name[round_trips].add_closing_transactions, parameter[name[positions], name[transactions]]]
variable[trades] assign[=] call[name[round_trips].extract_round_trips, parameter[name[transactions_closed]]]
if compare[call[name[len], parameter[name[trades]]] less[<] constant[5]] begin[:]
call[name[warnings].warn, parameter[constant[Fewer than 5 round-trip trades made.
Skipping round trip tearsheet.], name[UserWarning]]]
return[None]
call[name[round_trips].print_round_trip_stats, parameter[name[trades]]]
call[name[plotting].show_profit_attribution, parameter[name[trades]]]
if compare[name[sector_mappings] is_not constant[None]] begin[:]
variable[sector_trades] assign[=] call[name[round_trips].apply_sector_mappings_to_round_trips, parameter[name[trades], name[sector_mappings]]]
call[name[plotting].show_profit_attribution, parameter[name[sector_trades]]]
variable[fig] assign[=] call[name[plt].figure, parameter[]]
variable[gs] assign[=] call[name[gridspec].GridSpec, parameter[constant[3], constant[2]]]
variable[ax_trade_lifetimes] assign[=] call[name[plt].subplot, parameter[call[name[gs]][tuple[[<ast.Constant object at 0x7da1b0028b80>, <ast.Slice object at 0x7da1b0028cd0>]]]]]
variable[ax_prob_profit_trade] assign[=] call[name[plt].subplot, parameter[call[name[gs]][tuple[[<ast.Constant object at 0x7da1b0028ac0>, <ast.Constant object at 0x7da1b0028af0>]]]]]
variable[ax_holding_time] assign[=] call[name[plt].subplot, parameter[call[name[gs]][tuple[[<ast.Constant object at 0x7da1b002a0b0>, <ast.Constant object at 0x7da1b002a230>]]]]]
variable[ax_pnl_per_round_trip_dollars] assign[=] call[name[plt].subplot, parameter[call[name[gs]][tuple[[<ast.Constant object at 0x7da1b002a590>, <ast.Constant object at 0x7da1b002a1d0>]]]]]
variable[ax_pnl_per_round_trip_pct] assign[=] call[name[plt].subplot, parameter[call[name[gs]][tuple[[<ast.Constant object at 0x7da1b002b2e0>, <ast.Constant object at 0x7da1b002afe0>]]]]]
call[name[plotting].plot_round_trip_lifetimes, parameter[name[trades]]]
call[name[plotting].plot_prob_profit_trade, parameter[name[trades]]]
variable[trade_holding_times] assign[=] <ast.ListComp object at 0x7da1b0028550>
call[name[sns].distplot, parameter[name[trade_holding_times]]]
call[name[ax_holding_time].set, parameter[]]
call[name[sns].distplot, parameter[name[trades].pnl]]
call[name[ax_pnl_per_round_trip_dollars].set, parameter[]]
call[name[sns].distplot, parameter[binary_operation[call[name[trades].returns.dropna, parameter[]] * constant[100]]]]
call[name[ax_pnl_per_round_trip_pct].set, parameter[]]
call[name[gs].tight_layout, parameter[name[fig]]]
if name[return_fig] begin[:]
return[name[fig]] | keyword[def] identifier[create_round_trip_tear_sheet] ( identifier[returns] , identifier[positions] , identifier[transactions] ,
identifier[sector_mappings] = keyword[None] ,
identifier[estimate_intraday] = literal[string] , identifier[return_fig] = keyword[False] ):
literal[string]
identifier[positions] = identifier[utils] . identifier[check_intraday] ( identifier[estimate_intraday] , identifier[returns] ,
identifier[positions] , identifier[transactions] )
identifier[transactions_closed] = identifier[round_trips] . identifier[add_closing_transactions] ( identifier[positions] ,
identifier[transactions] )
identifier[trades] = identifier[round_trips] . identifier[extract_round_trips] (
identifier[transactions_closed] ,
identifier[portfolio_value] = identifier[positions] . identifier[sum] ( identifier[axis] = literal[string] )/( literal[int] + identifier[returns] )
)
keyword[if] identifier[len] ( identifier[trades] )< literal[int] :
identifier[warnings] . identifier[warn] (
literal[string] , identifier[UserWarning] )
keyword[return]
identifier[round_trips] . identifier[print_round_trip_stats] ( identifier[trades] )
identifier[plotting] . identifier[show_profit_attribution] ( identifier[trades] )
keyword[if] identifier[sector_mappings] keyword[is] keyword[not] keyword[None] :
identifier[sector_trades] = identifier[round_trips] . identifier[apply_sector_mappings_to_round_trips] (
identifier[trades] , identifier[sector_mappings] )
identifier[plotting] . identifier[show_profit_attribution] ( identifier[sector_trades] )
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[figsize] =( literal[int] , literal[int] * literal[int] ))
identifier[gs] = identifier[gridspec] . identifier[GridSpec] ( literal[int] , literal[int] , identifier[wspace] = literal[int] , identifier[hspace] = literal[int] )
identifier[ax_trade_lifetimes] = identifier[plt] . identifier[subplot] ( identifier[gs] [ literal[int] ,:])
identifier[ax_prob_profit_trade] = identifier[plt] . identifier[subplot] ( identifier[gs] [ literal[int] , literal[int] ])
identifier[ax_holding_time] = identifier[plt] . identifier[subplot] ( identifier[gs] [ literal[int] , literal[int] ])
identifier[ax_pnl_per_round_trip_dollars] = identifier[plt] . identifier[subplot] ( identifier[gs] [ literal[int] , literal[int] ])
identifier[ax_pnl_per_round_trip_pct] = identifier[plt] . identifier[subplot] ( identifier[gs] [ literal[int] , literal[int] ])
identifier[plotting] . identifier[plot_round_trip_lifetimes] ( identifier[trades] , identifier[ax] = identifier[ax_trade_lifetimes] )
identifier[plotting] . identifier[plot_prob_profit_trade] ( identifier[trades] , identifier[ax] = identifier[ax_prob_profit_trade] )
identifier[trade_holding_times] =[ identifier[x] . identifier[days] keyword[for] identifier[x] keyword[in] identifier[trades] [ literal[string] ]]
identifier[sns] . identifier[distplot] ( identifier[trade_holding_times] , identifier[kde] = keyword[False] , identifier[ax] = identifier[ax_holding_time] )
identifier[ax_holding_time] . identifier[set] ( identifier[xlabel] = literal[string] )
identifier[sns] . identifier[distplot] ( identifier[trades] . identifier[pnl] , identifier[kde] = keyword[False] , identifier[ax] = identifier[ax_pnl_per_round_trip_dollars] )
identifier[ax_pnl_per_round_trip_dollars] . identifier[set] ( identifier[xlabel] = literal[string] )
identifier[sns] . identifier[distplot] ( identifier[trades] . identifier[returns] . identifier[dropna] ()* literal[int] , identifier[kde] = keyword[False] ,
identifier[ax] = identifier[ax_pnl_per_round_trip_pct] )
identifier[ax_pnl_per_round_trip_pct] . identifier[set] (
identifier[xlabel] = literal[string] )
identifier[gs] . identifier[tight_layout] ( identifier[fig] )
keyword[if] identifier[return_fig] :
keyword[return] identifier[fig] | def create_round_trip_tear_sheet(returns, positions, transactions, sector_mappings=None, estimate_intraday='infer', return_fig=False):
"""
Generate a number of figures and plots describing the duration,
frequency, and profitability of trade "round trips."
A round trip is started when a new long or short position is
opened and is only completed when the number of shares in that
position returns to or crosses zero.
Parameters
----------
returns : pd.Series
Daily returns of the strategy, noncumulative.
- See full explanation in create_full_tear_sheet.
positions : pd.DataFrame
Daily net position values.
- See full explanation in create_full_tear_sheet.
transactions : pd.DataFrame
Prices and amounts of executed trades. One row per trade.
- See full explanation in create_full_tear_sheet.
sector_mappings : dict or pd.Series, optional
Security identifier to sector mapping.
Security ids as keys, sectors as values.
estimate_intraday: boolean or str, optional
Approximate returns for intraday strategies.
See description in create_full_tear_sheet.
return_fig : boolean, optional
If True, returns the figure that was plotted on.
"""
positions = utils.check_intraday(estimate_intraday, returns, positions, transactions)
transactions_closed = round_trips.add_closing_transactions(positions, transactions)
# extract_round_trips requires BoD portfolio_value
trades = round_trips.extract_round_trips(transactions_closed, portfolio_value=positions.sum(axis='columns') / (1 + returns))
if len(trades) < 5:
warnings.warn('Fewer than 5 round-trip trades made.\n Skipping round trip tearsheet.', UserWarning)
return # depends on [control=['if'], data=[]]
round_trips.print_round_trip_stats(trades)
plotting.show_profit_attribution(trades)
if sector_mappings is not None:
sector_trades = round_trips.apply_sector_mappings_to_round_trips(trades, sector_mappings)
plotting.show_profit_attribution(sector_trades) # depends on [control=['if'], data=['sector_mappings']]
fig = plt.figure(figsize=(14, 3 * 6))
gs = gridspec.GridSpec(3, 2, wspace=0.5, hspace=0.5)
ax_trade_lifetimes = plt.subplot(gs[0, :])
ax_prob_profit_trade = plt.subplot(gs[1, 0])
ax_holding_time = plt.subplot(gs[1, 1])
ax_pnl_per_round_trip_dollars = plt.subplot(gs[2, 0])
ax_pnl_per_round_trip_pct = plt.subplot(gs[2, 1])
plotting.plot_round_trip_lifetimes(trades, ax=ax_trade_lifetimes)
plotting.plot_prob_profit_trade(trades, ax=ax_prob_profit_trade)
trade_holding_times = [x.days for x in trades['duration']]
sns.distplot(trade_holding_times, kde=False, ax=ax_holding_time)
ax_holding_time.set(xlabel='Holding time in days')
sns.distplot(trades.pnl, kde=False, ax=ax_pnl_per_round_trip_dollars)
ax_pnl_per_round_trip_dollars.set(xlabel='PnL per round-trip trade in $')
sns.distplot(trades.returns.dropna() * 100, kde=False, ax=ax_pnl_per_round_trip_pct)
ax_pnl_per_round_trip_pct.set(xlabel='Round-trip returns in %')
gs.tight_layout(fig)
if return_fig:
return fig # depends on [control=['if'], data=[]] |
def add_interval(self, start, end, data=None):
    '''
    Inserts an interval to the tree.
    Note that when inserting we do not maintain appropriate sorting of the
    "mid" data structure; this should be done after all intervals are inserted.
    '''
    # Zero-length and inverted intervals are silently ignored.
    if end - start <= 0:
        return
    if self.single_interval is None:
        # Empty tree: just cache the very first interval as a tuple.
        self.single_interval = (start, end, data)
        return
    if self.single_interval != 0:
        # Tree currently holds exactly one cached interval: push it down
        # into the real tree structure, then mark the cache consumed (0).
        self._add_interval(*self.single_interval)
        self.single_interval = 0
    # Usual tree (sentinel 0): delegate to the standard insertion.
    self._add_interval(start, end, data)
constant[
Inserts an interval to the tree.
Note that when inserting we do not maintain appropriate sorting of the "mid" data structure.
This should be done after all intervals are inserted.
]
if compare[binary_operation[name[end] - name[start]] less_or_equal[<=] constant[0]] begin[:]
return[None]
if compare[name[self].single_interval is constant[None]] begin[:]
name[self].single_interval assign[=] tuple[[<ast.Name object at 0x7da1b26a0550>, <ast.Name object at 0x7da1b26a3d30>, <ast.Name object at 0x7da1b26a1a50>]] | keyword[def] identifier[add_interval] ( identifier[self] , identifier[start] , identifier[end] , identifier[data] = keyword[None] ):
literal[string]
keyword[if] ( identifier[end] - identifier[start] )<= literal[int] :
keyword[return]
keyword[if] identifier[self] . identifier[single_interval] keyword[is] keyword[None] :
identifier[self] . identifier[single_interval] =( identifier[start] , identifier[end] , identifier[data] )
keyword[elif] identifier[self] . identifier[single_interval] == literal[int] :
identifier[self] . identifier[_add_interval] ( identifier[start] , identifier[end] , identifier[data] )
keyword[else] :
identifier[self] . identifier[_add_interval] (* identifier[self] . identifier[single_interval] )
identifier[self] . identifier[single_interval] = literal[int]
identifier[self] . identifier[_add_interval] ( identifier[start] , identifier[end] , identifier[data] ) | def add_interval(self, start, end, data=None):
"""
Inserts an interval to the tree.
Note that when inserting we do not maintain appropriate sorting of the "mid" data structure.
This should be done after all intervals are inserted.
"""
# Ignore intervals of 0 or negative length
if end - start <= 0:
return # depends on [control=['if'], data=[]]
if self.single_interval is None:
# This is an empty tree and we are adding the first interval. Just record it in a field.
self.single_interval = (start, end, data) # depends on [control=['if'], data=[]]
elif self.single_interval == 0:
# This is a usual tree, use standard addition method
self._add_interval(start, end, data) # depends on [control=['if'], data=[]]
else:
# This is a tree with a single interval. Convert to a usual tree.
self._add_interval(*self.single_interval)
self.single_interval = 0
self._add_interval(start, end, data) |
def rm(config, name, bucket):
    """ Delete lambda function, role, and zipfile """
    # CLI options take precedence over values coming from the config file.
    target_name = name if name else config.name
    target_bucket = bucket if bucket else config.bucket
    click.echo('Deleting {} from {}'.format(target_name, target_bucket))
    lambder.delete_function(target_name, target_bucket)
constant[ Delete lambda function, role, and zipfile ]
variable[myname] assign[=] <ast.BoolOp object at 0x7da1b0fda9e0>
variable[mybucket] assign[=] <ast.BoolOp object at 0x7da1b0e0f9a0>
call[name[click].echo, parameter[call[constant[Deleting {} from {}].format, parameter[name[myname], name[mybucket]]]]]
call[name[lambder].delete_function, parameter[name[myname], name[mybucket]]] | keyword[def] identifier[rm] ( identifier[config] , identifier[name] , identifier[bucket] ):
literal[string]
identifier[myname] = identifier[name] keyword[or] identifier[config] . identifier[name]
identifier[mybucket] = identifier[bucket] keyword[or] identifier[config] . identifier[bucket]
identifier[click] . identifier[echo] ( literal[string] . identifier[format] ( identifier[myname] , identifier[mybucket] ))
identifier[lambder] . identifier[delete_function] ( identifier[myname] , identifier[mybucket] ) | def rm(config, name, bucket):
""" Delete lambda function, role, and zipfile """
# options should override config if it is there
myname = name or config.name
mybucket = bucket or config.bucket
click.echo('Deleting {} from {}'.format(myname, mybucket))
lambder.delete_function(myname, mybucket) |
def process_image_field(self, data):
    """
    Localize all image references in a perseus field (question or hint).
    Fields look like::
        {
            "content": "md string with image links like  and ",
            "images": {
                "URL-key": {"width": 425, "height": 425},
                "URL-key2": {"width": 425, "height": 425}
            }
        }
    The ``content`` value of ``data`` is rewritten in place so every image
    URL points at its localized copy.
    :param data: dict with ``content`` (markdown str) and ``images`` (dict)
    :return: tuple ``(images_dict, image_files)`` where ``images_dict``
        replaces the old ``images`` mapping and ``image_files`` lists the
        image files fetched for the URLs found.
    Note: ``content`` may reference images that are absent from ``images``
    (see https://github.com/learningequality/ricecooker/issues/178), so the
    markdown is scanned for extra links as well.
    """
    updated_images = copy.deepcopy(data['images'])
    collected_files = []
    url_map = {}  # old URL -> localized URL

    # Pass 1: URLs explicitly declared under data['images'].
    for original_url in list(data['images']):
        localized_url, fetched = self.set_image(original_url)
        collected_files += fetched
        updated_images[localized_url] = updated_images.pop(original_url)
        url_map[original_url] = localized_url

    # Pass 2: markdown image links embedded directly in the content string.
    md_img_re = re.compile(MARKDOWN_IMAGE_REGEX, flags=re.IGNORECASE)
    for groups in md_img_re.findall(data['content']):
        original_url = groups[1]
        if original_url in url_map:
            continue
        localized_url, fetched = self.set_image(original_url)
        collected_files += fetched
        url_map[original_url] = localized_url

    # Rewrite every known URL inside the content markdown.
    for original_url, localized_url in url_map.items():
        data['content'] = data['content'].replace(original_url, localized_url)
    return updated_images, collected_files
constant[
Process perseus fields like questions and hints, which look like:
.. code-block:: python
{
"content": "md string including imgs like  and ",
"images": {
"URL-key": {"width": 425, "height": 425},
"URL-key2": {"width": 425, "height": 425}
}
}
Replaces `content` attribute and returns (images_dict, image_files), where
- `images_dict` is a replacement for the old `images` key
- `image_files` is a list image files for the URLs found
Note it is possible for assesment items to include images links `content`
that are not listed under `images`, so code must handle that case too,
see https://github.com/learningequality/ricecooker/issues/178 for details.
]
variable[new_images_dict] assign[=] call[name[copy].deepcopy, parameter[call[name[data]][constant[images]]]]
variable[image_files] assign[=] list[[]]
variable[image_replacements] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da207f98730>, <ast.Name object at 0x7da207f99c90>]]] in starred[call[call[name[data]][constant[images]].items, parameter[]]] begin[:]
<ast.Tuple object at 0x7da207f98f70> assign[=] call[name[self].set_image, parameter[name[old_url]]]
<ast.AugAssign object at 0x7da207f9b4c0>
call[name[new_images_dict]][name[new_url]] assign[=] call[name[new_images_dict].pop, parameter[name[old_url]]]
call[name[image_replacements]][name[old_url]] assign[=] name[new_url]
variable[img_link_pat] assign[=] call[name[re].compile, parameter[name[MARKDOWN_IMAGE_REGEX]]]
variable[img_link_matches] assign[=] call[name[img_link_pat].findall, parameter[call[name[data]][constant[content]]]]
for taget[name[match]] in starred[name[img_link_matches]] begin[:]
variable[old_url] assign[=] call[name[match]][constant[1]]
if compare[name[old_url] <ast.NotIn object at 0x7da2590d7190> call[name[image_replacements].keys, parameter[]]] begin[:]
<ast.Tuple object at 0x7da20e960be0> assign[=] call[name[self].set_image, parameter[name[old_url]]]
<ast.AugAssign object at 0x7da20e960640>
call[name[image_replacements]][name[old_url]] assign[=] name[new_url]
for taget[tuple[[<ast.Name object at 0x7da1b0e45cc0>, <ast.Name object at 0x7da1b0e461d0>]]] in starred[call[name[image_replacements].items, parameter[]]] begin[:]
call[name[data]][constant[content]] assign[=] call[call[name[data]][constant[content]].replace, parameter[name[old_url], name[new_url]]]
return[tuple[[<ast.Name object at 0x7da1b0e46140>, <ast.Name object at 0x7da1b0e47670>]]] | keyword[def] identifier[process_image_field] ( identifier[self] , identifier[data] ):
literal[string]
identifier[new_images_dict] = identifier[copy] . identifier[deepcopy] ( identifier[data] [ literal[string] ])
identifier[image_files] =[]
identifier[image_replacements] ={}
keyword[for] identifier[old_url] , identifier[image_settings] keyword[in] identifier[data] [ literal[string] ]. identifier[items] ():
identifier[new_url] , identifier[new_image_files] = identifier[self] . identifier[set_image] ( identifier[old_url] )
identifier[image_files] += identifier[new_image_files]
identifier[new_images_dict] [ identifier[new_url] ]= identifier[new_images_dict] . identifier[pop] ( identifier[old_url] )
identifier[image_replacements] [ identifier[old_url] ]= identifier[new_url]
identifier[img_link_pat] = identifier[re] . identifier[compile] ( identifier[MARKDOWN_IMAGE_REGEX] , identifier[flags] = identifier[re] . identifier[IGNORECASE] )
identifier[img_link_matches] = identifier[img_link_pat] . identifier[findall] ( identifier[data] [ literal[string] ])
keyword[for] identifier[match] keyword[in] identifier[img_link_matches] :
identifier[old_url] = identifier[match] [ literal[int] ]
keyword[if] identifier[old_url] keyword[not] keyword[in] identifier[image_replacements] . identifier[keys] ():
identifier[new_url] , identifier[new_image_files] = identifier[self] . identifier[set_image] ( identifier[old_url] )
identifier[image_files] += identifier[new_image_files]
identifier[image_replacements] [ identifier[old_url] ]= identifier[new_url]
keyword[for] identifier[old_url] , identifier[new_url] keyword[in] identifier[image_replacements] . identifier[items] ():
identifier[data] [ literal[string] ]= identifier[data] [ literal[string] ]. identifier[replace] ( identifier[old_url] , identifier[new_url] )
keyword[return] identifier[new_images_dict] , identifier[image_files] | def process_image_field(self, data):
"""
Process perseus fields like questions and hints, which look like:
.. code-block:: python
{
"content": "md string including imgs like  and ",
"images": {
"URL-key": {"width": 425, "height": 425},
"URL-key2": {"width": 425, "height": 425}
}
}
Replaces `content` attribute and returns (images_dict, image_files), where
- `images_dict` is a replacement for the old `images` key
- `image_files` is a list image files for the URLs found
Note it is possible for assesment items to include images links `content`
that are not listed under `images`, so code must handle that case too,
see https://github.com/learningequality/ricecooker/issues/178 for details.
"""
new_images_dict = copy.deepcopy(data['images'])
image_files = []
# STEP 1. Compile dict of {old_url-->new_url} image URL replacements
image_replacements = {}
# STEP 1A. get all images specified in data['images']
for (old_url, image_settings) in data['images'].items():
(new_url, new_image_files) = self.set_image(old_url)
image_files += new_image_files
new_images_dict[new_url] = new_images_dict.pop(old_url)
image_replacements[old_url] = new_url # depends on [control=['for'], data=[]]
# STEP 1B. look for additional `MARKDOWN_IMAGE_REGEX`-like link in `content` attr.
img_link_pat = re.compile(MARKDOWN_IMAGE_REGEX, flags=re.IGNORECASE)
img_link_matches = img_link_pat.findall(data['content'])
for match in img_link_matches:
old_url = match[1]
if old_url not in image_replacements.keys():
(new_url, new_image_files) = self.set_image(old_url)
image_files += new_image_files
image_replacements[old_url] = new_url # depends on [control=['if'], data=['old_url']] # depends on [control=['for'], data=['match']]
# Performd content replacent for all URLs in image_replacements
for (old_url, new_url) in image_replacements.items():
data['content'] = data['content'].replace(old_url, new_url) # depends on [control=['for'], data=[]]
return (new_images_dict, image_files) |
def _update_templatetype(templatetype, existing_tt=None):
"""
Add or update a templatetype. If an existing template type is passed in,
update that one. Otherwise search for an existing one. If not found, add.
"""
if existing_tt is None:
if "id" in templatetype and templatetype.id is not None:
tmpltype_i = db.DBSession.query(TemplateType).filter(TemplateType.id == templatetype.id).one()
else:
tmpltype_i = TemplateType()
else:
tmpltype_i = existing_tt
tmpltype_i.template_id = templatetype.template_id
tmpltype_i.name = templatetype.name
tmpltype_i.description = templatetype.description
tmpltype_i.alias = templatetype.alias
if templatetype.layout is not None:
tmpltype_i.layout = get_layout_as_string(templatetype.layout)
tmpltype_i.resource_type = templatetype.resource_type
ta_dict = {}
for t in tmpltype_i.typeattrs:
ta_dict[t.attr_id] = t
existing_attrs = []
if templatetype.typeattrs is not None:
for typeattr in templatetype.typeattrs:
if typeattr.attr_id in ta_dict:
ta = _set_typeattr(typeattr, ta_dict[typeattr.attr_id])
existing_attrs.append(ta.attr_id)
else:
ta = _set_typeattr(typeattr)
tmpltype_i.typeattrs.append(ta)
existing_attrs.append(ta.attr_id)
log.debug("Deleting any type attrs not sent")
for ta in ta_dict.values():
if ta.attr_id not in existing_attrs:
delete_typeattr(ta)
if existing_tt is None:
db.DBSession.add(tmpltype_i)
return tmpltype_i | def function[_update_templatetype, parameter[templatetype, existing_tt]]:
constant[
Add or update a templatetype. If an existing template type is passed in,
update that one. Otherwise search for an existing one. If not found, add.
]
if compare[name[existing_tt] is constant[None]] begin[:]
if <ast.BoolOp object at 0x7da18bcca890> begin[:]
variable[tmpltype_i] assign[=] call[call[call[name[db].DBSession.query, parameter[name[TemplateType]]].filter, parameter[compare[name[TemplateType].id equal[==] name[templatetype].id]]].one, parameter[]]
name[tmpltype_i].template_id assign[=] name[templatetype].template_id
name[tmpltype_i].name assign[=] name[templatetype].name
name[tmpltype_i].description assign[=] name[templatetype].description
name[tmpltype_i].alias assign[=] name[templatetype].alias
if compare[name[templatetype].layout is_not constant[None]] begin[:]
name[tmpltype_i].layout assign[=] call[name[get_layout_as_string], parameter[name[templatetype].layout]]
name[tmpltype_i].resource_type assign[=] name[templatetype].resource_type
variable[ta_dict] assign[=] dictionary[[], []]
for taget[name[t]] in starred[name[tmpltype_i].typeattrs] begin[:]
call[name[ta_dict]][name[t].attr_id] assign[=] name[t]
variable[existing_attrs] assign[=] list[[]]
if compare[name[templatetype].typeattrs is_not constant[None]] begin[:]
for taget[name[typeattr]] in starred[name[templatetype].typeattrs] begin[:]
if compare[name[typeattr].attr_id in name[ta_dict]] begin[:]
variable[ta] assign[=] call[name[_set_typeattr], parameter[name[typeattr], call[name[ta_dict]][name[typeattr].attr_id]]]
call[name[existing_attrs].append, parameter[name[ta].attr_id]]
call[name[log].debug, parameter[constant[Deleting any type attrs not sent]]]
for taget[name[ta]] in starred[call[name[ta_dict].values, parameter[]]] begin[:]
if compare[name[ta].attr_id <ast.NotIn object at 0x7da2590d7190> name[existing_attrs]] begin[:]
call[name[delete_typeattr], parameter[name[ta]]]
if compare[name[existing_tt] is constant[None]] begin[:]
call[name[db].DBSession.add, parameter[name[tmpltype_i]]]
return[name[tmpltype_i]] | keyword[def] identifier[_update_templatetype] ( identifier[templatetype] , identifier[existing_tt] = keyword[None] ):
literal[string]
keyword[if] identifier[existing_tt] keyword[is] keyword[None] :
keyword[if] literal[string] keyword[in] identifier[templatetype] keyword[and] identifier[templatetype] . identifier[id] keyword[is] keyword[not] keyword[None] :
identifier[tmpltype_i] = identifier[db] . identifier[DBSession] . identifier[query] ( identifier[TemplateType] ). identifier[filter] ( identifier[TemplateType] . identifier[id] == identifier[templatetype] . identifier[id] ). identifier[one] ()
keyword[else] :
identifier[tmpltype_i] = identifier[TemplateType] ()
keyword[else] :
identifier[tmpltype_i] = identifier[existing_tt]
identifier[tmpltype_i] . identifier[template_id] = identifier[templatetype] . identifier[template_id]
identifier[tmpltype_i] . identifier[name] = identifier[templatetype] . identifier[name]
identifier[tmpltype_i] . identifier[description] = identifier[templatetype] . identifier[description]
identifier[tmpltype_i] . identifier[alias] = identifier[templatetype] . identifier[alias]
keyword[if] identifier[templatetype] . identifier[layout] keyword[is] keyword[not] keyword[None] :
identifier[tmpltype_i] . identifier[layout] = identifier[get_layout_as_string] ( identifier[templatetype] . identifier[layout] )
identifier[tmpltype_i] . identifier[resource_type] = identifier[templatetype] . identifier[resource_type]
identifier[ta_dict] ={}
keyword[for] identifier[t] keyword[in] identifier[tmpltype_i] . identifier[typeattrs] :
identifier[ta_dict] [ identifier[t] . identifier[attr_id] ]= identifier[t]
identifier[existing_attrs] =[]
keyword[if] identifier[templatetype] . identifier[typeattrs] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[typeattr] keyword[in] identifier[templatetype] . identifier[typeattrs] :
keyword[if] identifier[typeattr] . identifier[attr_id] keyword[in] identifier[ta_dict] :
identifier[ta] = identifier[_set_typeattr] ( identifier[typeattr] , identifier[ta_dict] [ identifier[typeattr] . identifier[attr_id] ])
identifier[existing_attrs] . identifier[append] ( identifier[ta] . identifier[attr_id] )
keyword[else] :
identifier[ta] = identifier[_set_typeattr] ( identifier[typeattr] )
identifier[tmpltype_i] . identifier[typeattrs] . identifier[append] ( identifier[ta] )
identifier[existing_attrs] . identifier[append] ( identifier[ta] . identifier[attr_id] )
identifier[log] . identifier[debug] ( literal[string] )
keyword[for] identifier[ta] keyword[in] identifier[ta_dict] . identifier[values] ():
keyword[if] identifier[ta] . identifier[attr_id] keyword[not] keyword[in] identifier[existing_attrs] :
identifier[delete_typeattr] ( identifier[ta] )
keyword[if] identifier[existing_tt] keyword[is] keyword[None] :
identifier[db] . identifier[DBSession] . identifier[add] ( identifier[tmpltype_i] )
keyword[return] identifier[tmpltype_i] | def _update_templatetype(templatetype, existing_tt=None):
"""
Add or update a templatetype. If an existing template type is passed in,
update that one. Otherwise search for an existing one. If not found, add.
"""
if existing_tt is None:
if 'id' in templatetype and templatetype.id is not None:
tmpltype_i = db.DBSession.query(TemplateType).filter(TemplateType.id == templatetype.id).one() # depends on [control=['if'], data=[]]
else:
tmpltype_i = TemplateType() # depends on [control=['if'], data=[]]
else:
tmpltype_i = existing_tt
tmpltype_i.template_id = templatetype.template_id
tmpltype_i.name = templatetype.name
tmpltype_i.description = templatetype.description
tmpltype_i.alias = templatetype.alias
if templatetype.layout is not None:
tmpltype_i.layout = get_layout_as_string(templatetype.layout) # depends on [control=['if'], data=[]]
tmpltype_i.resource_type = templatetype.resource_type
ta_dict = {}
for t in tmpltype_i.typeattrs:
ta_dict[t.attr_id] = t # depends on [control=['for'], data=['t']]
existing_attrs = []
if templatetype.typeattrs is not None:
for typeattr in templatetype.typeattrs:
if typeattr.attr_id in ta_dict:
ta = _set_typeattr(typeattr, ta_dict[typeattr.attr_id])
existing_attrs.append(ta.attr_id) # depends on [control=['if'], data=['ta_dict']]
else:
ta = _set_typeattr(typeattr)
tmpltype_i.typeattrs.append(ta)
existing_attrs.append(ta.attr_id) # depends on [control=['for'], data=['typeattr']] # depends on [control=['if'], data=[]]
log.debug('Deleting any type attrs not sent')
for ta in ta_dict.values():
if ta.attr_id not in existing_attrs:
delete_typeattr(ta) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['ta']]
if existing_tt is None:
db.DBSession.add(tmpltype_i) # depends on [control=['if'], data=[]]
return tmpltype_i |
def bedrooms(self):
"""
This method gets the number of bedrooms.
:return:
"""
try:
if self._data_from_search:
info = self._data_from_search.find(
'ul', {"class": "info"}).text
s = info.split('|')
nb = s[1].strip()
return int(nb.split()[0])
else:
div = self._ad_page_content.find(
'div', {'id': 'smi-summary-items'})
spans = div.find_all('span', {'class': 'header_text'})
for span in spans:
# print(span.text)
if 'bed' in span.text.lower():
return int(''.join([n for n in span.text if n.isdigit()]))
return
except Exception as e:
if self._debug:
logging.error(
"Error getting bedrooms. Error message: " + e.args[0])
return 'N/A' | def function[bedrooms, parameter[self]]:
constant[
This method gets the number of bedrooms.
:return:
]
<ast.Try object at 0x7da1b0625600> | keyword[def] identifier[bedrooms] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[if] identifier[self] . identifier[_data_from_search] :
identifier[info] = identifier[self] . identifier[_data_from_search] . identifier[find] (
literal[string] ,{ literal[string] : literal[string] }). identifier[text]
identifier[s] = identifier[info] . identifier[split] ( literal[string] )
identifier[nb] = identifier[s] [ literal[int] ]. identifier[strip] ()
keyword[return] identifier[int] ( identifier[nb] . identifier[split] ()[ literal[int] ])
keyword[else] :
identifier[div] = identifier[self] . identifier[_ad_page_content] . identifier[find] (
literal[string] ,{ literal[string] : literal[string] })
identifier[spans] = identifier[div] . identifier[find_all] ( literal[string] ,{ literal[string] : literal[string] })
keyword[for] identifier[span] keyword[in] identifier[spans] :
keyword[if] literal[string] keyword[in] identifier[span] . identifier[text] . identifier[lower] ():
keyword[return] identifier[int] ( literal[string] . identifier[join] ([ identifier[n] keyword[for] identifier[n] keyword[in] identifier[span] . identifier[text] keyword[if] identifier[n] . identifier[isdigit] ()]))
keyword[return]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[if] identifier[self] . identifier[_debug] :
identifier[logging] . identifier[error] (
literal[string] + identifier[e] . identifier[args] [ literal[int] ])
keyword[return] literal[string] | def bedrooms(self):
"""
This method gets the number of bedrooms.
:return:
"""
try:
if self._data_from_search:
info = self._data_from_search.find('ul', {'class': 'info'}).text
s = info.split('|')
nb = s[1].strip()
return int(nb.split()[0]) # depends on [control=['if'], data=[]]
else:
div = self._ad_page_content.find('div', {'id': 'smi-summary-items'})
spans = div.find_all('span', {'class': 'header_text'})
for span in spans:
# print(span.text)
if 'bed' in span.text.lower():
return int(''.join([n for n in span.text if n.isdigit()])) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['span']]
return # depends on [control=['try'], data=[]]
except Exception as e:
if self._debug:
logging.error('Error getting bedrooms. Error message: ' + e.args[0]) # depends on [control=['if'], data=[]]
return 'N/A' # depends on [control=['except'], data=['e']] |
def store_initial_k2k_session(auth_url, request, scoped_auth_ref,
unscoped_auth_ref):
"""Stores session variables if there are k2k service providers
This stores variables related to Keystone2Keystone federation. This
function gets skipped if there are no Keystone service providers.
An unscoped token to the identity provider keystone gets stored
so that it can be used to do federated login into the service
providers when switching keystone providers.
The settings file can be configured to set the display name
of the local (identity provider) keystone by setting
KEYSTONE_PROVIDER_IDP_NAME. The KEYSTONE_PROVIDER_IDP_ID settings
variable is used for comparison against the service providers.
It should not conflict with any of the service provider ids.
:param auth_url: base token auth url
:param request: Django http request object
:param scoped_auth_ref: Scoped Keystone access info object
:param unscoped_auth_ref: Unscoped Keystone access info object
"""
keystone_provider_id = request.session.get('keystone_provider_id', None)
if keystone_provider_id:
return None
providers = getattr(scoped_auth_ref, 'service_providers', None)
if providers:
providers = getattr(providers, '_service_providers', None)
if providers:
keystone_idp_name = getattr(settings, 'KEYSTONE_PROVIDER_IDP_NAME',
'Local Keystone')
keystone_idp_id = getattr(
settings, 'KEYSTONE_PROVIDER_IDP_ID', 'localkeystone')
keystone_identity_provider = {'name': keystone_idp_name,
'id': keystone_idp_id}
# (edtubill) We will use the IDs as the display names
# We may want to be able to set display names in the future.
keystone_providers = [
{'name': provider_id, 'id': provider_id}
for provider_id in providers]
keystone_providers.append(keystone_identity_provider)
# We treat the Keystone idp ID as None
request.session['keystone_provider_id'] = keystone_idp_id
request.session['keystone_providers'] = keystone_providers
request.session['k2k_base_unscoped_token'] =\
unscoped_auth_ref.auth_token
request.session['k2k_auth_url'] = auth_url | def function[store_initial_k2k_session, parameter[auth_url, request, scoped_auth_ref, unscoped_auth_ref]]:
constant[Stores session variables if there are k2k service providers
This stores variables related to Keystone2Keystone federation. This
function gets skipped if there are no Keystone service providers.
An unscoped token to the identity provider keystone gets stored
so that it can be used to do federated login into the service
providers when switching keystone providers.
The settings file can be configured to set the display name
of the local (identity provider) keystone by setting
KEYSTONE_PROVIDER_IDP_NAME. The KEYSTONE_PROVIDER_IDP_ID settings
variable is used for comparison against the service providers.
It should not conflict with any of the service provider ids.
:param auth_url: base token auth url
:param request: Django http request object
:param scoped_auth_ref: Scoped Keystone access info object
:param unscoped_auth_ref: Unscoped Keystone access info object
]
variable[keystone_provider_id] assign[=] call[name[request].session.get, parameter[constant[keystone_provider_id], constant[None]]]
if name[keystone_provider_id] begin[:]
return[constant[None]]
variable[providers] assign[=] call[name[getattr], parameter[name[scoped_auth_ref], constant[service_providers], constant[None]]]
if name[providers] begin[:]
variable[providers] assign[=] call[name[getattr], parameter[name[providers], constant[_service_providers], constant[None]]]
if name[providers] begin[:]
variable[keystone_idp_name] assign[=] call[name[getattr], parameter[name[settings], constant[KEYSTONE_PROVIDER_IDP_NAME], constant[Local Keystone]]]
variable[keystone_idp_id] assign[=] call[name[getattr], parameter[name[settings], constant[KEYSTONE_PROVIDER_IDP_ID], constant[localkeystone]]]
variable[keystone_identity_provider] assign[=] dictionary[[<ast.Constant object at 0x7da1b19870a0>, <ast.Constant object at 0x7da1b19874f0>], [<ast.Name object at 0x7da1b1987460>, <ast.Name object at 0x7da1b1986440>]]
variable[keystone_providers] assign[=] <ast.ListComp object at 0x7da1b1985090>
call[name[keystone_providers].append, parameter[name[keystone_identity_provider]]]
call[name[request].session][constant[keystone_provider_id]] assign[=] name[keystone_idp_id]
call[name[request].session][constant[keystone_providers]] assign[=] name[keystone_providers]
call[name[request].session][constant[k2k_base_unscoped_token]] assign[=] name[unscoped_auth_ref].auth_token
call[name[request].session][constant[k2k_auth_url]] assign[=] name[auth_url] | keyword[def] identifier[store_initial_k2k_session] ( identifier[auth_url] , identifier[request] , identifier[scoped_auth_ref] ,
identifier[unscoped_auth_ref] ):
literal[string]
identifier[keystone_provider_id] = identifier[request] . identifier[session] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[keystone_provider_id] :
keyword[return] keyword[None]
identifier[providers] = identifier[getattr] ( identifier[scoped_auth_ref] , literal[string] , keyword[None] )
keyword[if] identifier[providers] :
identifier[providers] = identifier[getattr] ( identifier[providers] , literal[string] , keyword[None] )
keyword[if] identifier[providers] :
identifier[keystone_idp_name] = identifier[getattr] ( identifier[settings] , literal[string] ,
literal[string] )
identifier[keystone_idp_id] = identifier[getattr] (
identifier[settings] , literal[string] , literal[string] )
identifier[keystone_identity_provider] ={ literal[string] : identifier[keystone_idp_name] ,
literal[string] : identifier[keystone_idp_id] }
identifier[keystone_providers] =[
{ literal[string] : identifier[provider_id] , literal[string] : identifier[provider_id] }
keyword[for] identifier[provider_id] keyword[in] identifier[providers] ]
identifier[keystone_providers] . identifier[append] ( identifier[keystone_identity_provider] )
identifier[request] . identifier[session] [ literal[string] ]= identifier[keystone_idp_id]
identifier[request] . identifier[session] [ literal[string] ]= identifier[keystone_providers]
identifier[request] . identifier[session] [ literal[string] ]= identifier[unscoped_auth_ref] . identifier[auth_token]
identifier[request] . identifier[session] [ literal[string] ]= identifier[auth_url] | def store_initial_k2k_session(auth_url, request, scoped_auth_ref, unscoped_auth_ref):
"""Stores session variables if there are k2k service providers
This stores variables related to Keystone2Keystone federation. This
function gets skipped if there are no Keystone service providers.
An unscoped token to the identity provider keystone gets stored
so that it can be used to do federated login into the service
providers when switching keystone providers.
The settings file can be configured to set the display name
of the local (identity provider) keystone by setting
KEYSTONE_PROVIDER_IDP_NAME. The KEYSTONE_PROVIDER_IDP_ID settings
variable is used for comparison against the service providers.
It should not conflict with any of the service provider ids.
:param auth_url: base token auth url
:param request: Django http request object
:param scoped_auth_ref: Scoped Keystone access info object
:param unscoped_auth_ref: Unscoped Keystone access info object
"""
keystone_provider_id = request.session.get('keystone_provider_id', None)
if keystone_provider_id:
return None # depends on [control=['if'], data=[]]
providers = getattr(scoped_auth_ref, 'service_providers', None)
if providers:
providers = getattr(providers, '_service_providers', None) # depends on [control=['if'], data=[]]
if providers:
keystone_idp_name = getattr(settings, 'KEYSTONE_PROVIDER_IDP_NAME', 'Local Keystone')
keystone_idp_id = getattr(settings, 'KEYSTONE_PROVIDER_IDP_ID', 'localkeystone')
keystone_identity_provider = {'name': keystone_idp_name, 'id': keystone_idp_id}
# (edtubill) We will use the IDs as the display names
# We may want to be able to set display names in the future.
keystone_providers = [{'name': provider_id, 'id': provider_id} for provider_id in providers]
keystone_providers.append(keystone_identity_provider)
# We treat the Keystone idp ID as None
request.session['keystone_provider_id'] = keystone_idp_id
request.session['keystone_providers'] = keystone_providers
request.session['k2k_base_unscoped_token'] = unscoped_auth_ref.auth_token
request.session['k2k_auth_url'] = auth_url # depends on [control=['if'], data=[]] |
def get_tree(profile, sha, recursive=True):
"""Fetch a tree.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
sha
The SHA of the tree to fetch.
recursive
If ``True``, traverse all subtrees and their subtrees, all the
way down. That will return a list of all objects in the tree,
all levels deep.
Returns:
A dict with data about the tree.
"""
resource = "/trees/" + sha
if recursive:
resource += "?recursive=1"
data = api.get_request(profile, resource)
return prepare(data) | def function[get_tree, parameter[profile, sha, recursive]]:
constant[Fetch a tree.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
sha
The SHA of the tree to fetch.
recursive
If ``True``, traverse all subtrees and their subtrees, all the
way down. That will return a list of all objects in the tree,
all levels deep.
Returns:
A dict with data about the tree.
]
variable[resource] assign[=] binary_operation[constant[/trees/] + name[sha]]
if name[recursive] begin[:]
<ast.AugAssign object at 0x7da1b133d960>
variable[data] assign[=] call[name[api].get_request, parameter[name[profile], name[resource]]]
return[call[name[prepare], parameter[name[data]]]] | keyword[def] identifier[get_tree] ( identifier[profile] , identifier[sha] , identifier[recursive] = keyword[True] ):
literal[string]
identifier[resource] = literal[string] + identifier[sha]
keyword[if] identifier[recursive] :
identifier[resource] += literal[string]
identifier[data] = identifier[api] . identifier[get_request] ( identifier[profile] , identifier[resource] )
keyword[return] identifier[prepare] ( identifier[data] ) | def get_tree(profile, sha, recursive=True):
"""Fetch a tree.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
sha
The SHA of the tree to fetch.
recursive
If ``True``, traverse all subtrees and their subtrees, all the
way down. That will return a list of all objects in the tree,
all levels deep.
Returns:
A dict with data about the tree.
"""
resource = '/trees/' + sha
if recursive:
resource += '?recursive=1' # depends on [control=['if'], data=[]]
data = api.get_request(profile, resource)
return prepare(data) |
def is_bare(self):
"""
:data:`True` if the repository has no working tree, :data:`False` if it does.
The value of this property is computed by checking whether the
``.bzr/checkout`` directory exists (it doesn't exist in Bazaar
repositories created using ``bzr branch --no-tree ...``).
"""
# Make sure the local repository exists.
self.create()
# Check the existence of the directory.
checkout_directory = os.path.join(self.vcs_directory, 'checkout')
return not self.context.is_directory(checkout_directory) | def function[is_bare, parameter[self]]:
constant[
:data:`True` if the repository has no working tree, :data:`False` if it does.
The value of this property is computed by checking whether the
``.bzr/checkout`` directory exists (it doesn't exist in Bazaar
repositories created using ``bzr branch --no-tree ...``).
]
call[name[self].create, parameter[]]
variable[checkout_directory] assign[=] call[name[os].path.join, parameter[name[self].vcs_directory, constant[checkout]]]
return[<ast.UnaryOp object at 0x7da1b092d9f0>] | keyword[def] identifier[is_bare] ( identifier[self] ):
literal[string]
identifier[self] . identifier[create] ()
identifier[checkout_directory] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[vcs_directory] , literal[string] )
keyword[return] keyword[not] identifier[self] . identifier[context] . identifier[is_directory] ( identifier[checkout_directory] ) | def is_bare(self):
"""
:data:`True` if the repository has no working tree, :data:`False` if it does.
The value of this property is computed by checking whether the
``.bzr/checkout`` directory exists (it doesn't exist in Bazaar
repositories created using ``bzr branch --no-tree ...``).
"""
# Make sure the local repository exists.
self.create()
# Check the existence of the directory.
checkout_directory = os.path.join(self.vcs_directory, 'checkout')
return not self.context.is_directory(checkout_directory) |
async def ListSubnets(self, space_tag, zone):
'''
space_tag : str
zone : str
Returns -> typing.Sequence[~Subnet]
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='DiscoverSpaces',
request='ListSubnets',
version=2,
params=_params)
_params['space-tag'] = space_tag
_params['zone'] = zone
reply = await self.rpc(msg)
return reply | <ast.AsyncFunctionDef object at 0x7da1b0e24b50> | keyword[async] keyword[def] identifier[ListSubnets] ( identifier[self] , identifier[space_tag] , identifier[zone] ):
literal[string]
identifier[_params] = identifier[dict] ()
identifier[msg] = identifier[dict] ( identifier[type] = literal[string] ,
identifier[request] = literal[string] ,
identifier[version] = literal[int] ,
identifier[params] = identifier[_params] )
identifier[_params] [ literal[string] ]= identifier[space_tag]
identifier[_params] [ literal[string] ]= identifier[zone]
identifier[reply] = keyword[await] identifier[self] . identifier[rpc] ( identifier[msg] )
keyword[return] identifier[reply] | async def ListSubnets(self, space_tag, zone):
"""
space_tag : str
zone : str
Returns -> typing.Sequence[~Subnet]
"""
# map input types to rpc msg
_params = dict()
msg = dict(type='DiscoverSpaces', request='ListSubnets', version=2, params=_params)
_params['space-tag'] = space_tag
_params['zone'] = zone
reply = await self.rpc(msg)
return reply |
def spreadDF(symbol, token='', version=''):
'''This returns an array of effective spread, eligible volume, and price improvement of a stock, by market.
Unlike volume-by-venue, this will only return a venue if effective spread is not ‘N/A’. Values are sorted in descending order by effectiveSpread.
Lower effectiveSpread and higher priceImprovement values are generally considered optimal.
Effective spread is designed to measure marketable orders executed in relation to the market center’s
quoted spread and takes into account hidden and midpoint liquidity available at each market center.
Effective Spread is calculated by using eligible trade prices recorded to the consolidated tape and
comparing those trade prices to the National Best Bid and Offer (“NBBO”) at the time of the execution.
View the data disclaimer at the bottom of the stocks app for more information about how these values are calculated.
https://iexcloud.io/docs/api/#earnings-today
8am ET M-F
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result
'''
df = pd.DataFrame(spread(symbol, token, version))
_toDatetime(df)
_reindex(df, 'venue')
return df | def function[spreadDF, parameter[symbol, token, version]]:
constant[This returns an array of effective spread, eligible volume, and price improvement of a stock, by market.
Unlike volume-by-venue, this will only return a venue if effective spread is not ‘N/A’. Values are sorted in descending order by effectiveSpread.
Lower effectiveSpread and higher priceImprovement values are generally considered optimal.
Effective spread is designed to measure marketable orders executed in relation to the market center’s
quoted spread and takes into account hidden and midpoint liquidity available at each market center.
Effective Spread is calculated by using eligible trade prices recorded to the consolidated tape and
comparing those trade prices to the National Best Bid and Offer (“NBBO”) at the time of the execution.
View the data disclaimer at the bottom of the stocks app for more information about how these values are calculated.
https://iexcloud.io/docs/api/#earnings-today
8am ET M-F
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result
]
variable[df] assign[=] call[name[pd].DataFrame, parameter[call[name[spread], parameter[name[symbol], name[token], name[version]]]]]
call[name[_toDatetime], parameter[name[df]]]
call[name[_reindex], parameter[name[df], constant[venue]]]
return[name[df]] | keyword[def] identifier[spreadDF] ( identifier[symbol] , identifier[token] = literal[string] , identifier[version] = literal[string] ):
literal[string]
identifier[df] = identifier[pd] . identifier[DataFrame] ( identifier[spread] ( identifier[symbol] , identifier[token] , identifier[version] ))
identifier[_toDatetime] ( identifier[df] )
identifier[_reindex] ( identifier[df] , literal[string] )
keyword[return] identifier[df] | def spreadDF(symbol, token='', version=''):
"""This returns an array of effective spread, eligible volume, and price improvement of a stock, by market.
Unlike volume-by-venue, this will only return a venue if effective spread is not ‘N/A’. Values are sorted in descending order by effectiveSpread.
Lower effectiveSpread and higher priceImprovement values are generally considered optimal.
Effective spread is designed to measure marketable orders executed in relation to the market center’s
quoted spread and takes into account hidden and midpoint liquidity available at each market center.
Effective Spread is calculated by using eligible trade prices recorded to the consolidated tape and
comparing those trade prices to the National Best Bid and Offer (“NBBO”) at the time of the execution.
View the data disclaimer at the bottom of the stocks app for more information about how these values are calculated.
https://iexcloud.io/docs/api/#earnings-today
8am ET M-F
Args:
symbol (string); Ticker to request
token (string); Access token
version (string); API version
Returns:
DataFrame: result
"""
df = pd.DataFrame(spread(symbol, token, version))
_toDatetime(df)
_reindex(df, 'venue')
return df |
def _handle_ignores(testtag):
"""Checks if the specified test tag has attribute "ignores"; if it
does, a <global ignore="true" /> tag is created for each variable
name in the list.
"""
if "ignores" in testtag.attrib:
from xml.etree.ElementTree import Element
for varname in re.split("[\s,]+", testtag.attrib["ignores"]):
#For ignored variables, the order does not matter. However,
#if the user included the caret, just remove it.
if varname[0] == '^':
varname = varname[1::]
e = Element("global", {"name": varname, "ignore": "true"})
testtag.append(e) | def function[_handle_ignores, parameter[testtag]]:
constant[Checks if the specified test tag has attribute "ignores"; if it
does, a <global ignore="true" /> tag is created for each variable
name in the list.
]
if compare[constant[ignores] in name[testtag].attrib] begin[:]
from relative_module[xml.etree.ElementTree] import module[Element]
for taget[name[varname]] in starred[call[name[re].split, parameter[constant[[\s,]+], call[name[testtag].attrib][constant[ignores]]]]] begin[:]
if compare[call[name[varname]][constant[0]] equal[==] constant[^]] begin[:]
variable[varname] assign[=] call[name[varname]][<ast.Slice object at 0x7da1b26628c0>]
variable[e] assign[=] call[name[Element], parameter[constant[global], dictionary[[<ast.Constant object at 0x7da1b26609d0>, <ast.Constant object at 0x7da1b2660a60>], [<ast.Name object at 0x7da1b2660a30>, <ast.Constant object at 0x7da1b2660a00>]]]]
call[name[testtag].append, parameter[name[e]]] | keyword[def] identifier[_handle_ignores] ( identifier[testtag] ):
literal[string]
keyword[if] literal[string] keyword[in] identifier[testtag] . identifier[attrib] :
keyword[from] identifier[xml] . identifier[etree] . identifier[ElementTree] keyword[import] identifier[Element]
keyword[for] identifier[varname] keyword[in] identifier[re] . identifier[split] ( literal[string] , identifier[testtag] . identifier[attrib] [ literal[string] ]):
keyword[if] identifier[varname] [ literal[int] ]== literal[string] :
identifier[varname] = identifier[varname] [ literal[int] ::]
identifier[e] = identifier[Element] ( literal[string] ,{ literal[string] : identifier[varname] , literal[string] : literal[string] })
identifier[testtag] . identifier[append] ( identifier[e] ) | def _handle_ignores(testtag):
"""Checks if the specified test tag has attribute "ignores"; if it
does, a <global ignore="true" /> tag is created for each variable
name in the list.
"""
if 'ignores' in testtag.attrib:
from xml.etree.ElementTree import Element
for varname in re.split('[\\s,]+', testtag.attrib['ignores']):
#For ignored variables, the order does not matter. However,
#if the user included the caret, just remove it.
if varname[0] == '^':
varname = varname[1:] # depends on [control=['if'], data=[]]
e = Element('global', {'name': varname, 'ignore': 'true'})
testtag.append(e) # depends on [control=['for'], data=['varname']] # depends on [control=['if'], data=[]] |
def GetSOAPPart(self):
    '''Return a StringIO buffer containing the SOAP body part.

    ``self.parts`` entries are (headers, body) pairs; the SOAP envelope is
    always the first part.
    '''
    _head, body = self.parts[0]
    # note: this relies on the file-level (Python 2 style) StringIO import
    return StringIO.StringIO(body.getvalue())
constant[Get the SOAP body part.
]
<ast.Tuple object at 0x7da1b1320610> assign[=] call[name[self].parts][constant[0]]
return[call[name[StringIO].StringIO, parameter[call[name[part].getvalue, parameter[]]]]] | keyword[def] identifier[GetSOAPPart] ( identifier[self] ):
literal[string]
identifier[head] , identifier[part] = identifier[self] . identifier[parts] [ literal[int] ]
keyword[return] identifier[StringIO] . identifier[StringIO] ( identifier[part] . identifier[getvalue] ()) | def GetSOAPPart(self):
"""Get the SOAP body part.
"""
(head, part) = self.parts[0]
return StringIO.StringIO(part.getvalue()) |
def consolidate_tess_fitslc(lclist,
                            normalize=True,
                            filterqualityflags=False,
                            nanfilter=None,
                            timestoignore=None,
                            headerkeys=LCHEADERKEYS,
                            datakeys=LCDATAKEYS,
                            sapkeys=LCSAPKEYS,
                            pdckeys=LCPDCKEYS,
                            topkeys=LCTOPKEYS,
                            apkeys=LCAPERTUREKEYS):
    '''This consolidates a list of LCs for a single TIC object.
    NOTE: if light curve time arrays contain nans, these and their associated
    measurements will be sorted to the end of the final combined arrays.
    Parameters
    ----------
    lclist : list of str, or str
        `lclist` is either a list of actual light curve files or a string that
        is valid for glob.glob to search for and generate a light curve list
        based on the file glob. This is useful for consolidating LC FITS files
        across different TESS sectors for a single TIC ID using a glob like
        `*<TICID>*_lc.fits`.
    normalize : bool
        If True, then the light curve's SAP_FLUX and PDCSAP_FLUX measurements
        will be normalized to 1.0 by dividing out the median flux for the
        component light curve.
    filterqualityflags : bool
        If True, will remove any measurements that have non-zero quality flags
        present. This usually indicates an issue with the instrument or
        spacecraft.
    nanfilter : {'sap','pdc','sap,pdc'} or None
        Indicates the flux measurement type(s) to apply the filtering to.
    timestoignore : list of tuples or None
        This is of the form::
            [(time1_start, time1_end), (time2_start, time2_end), ...]
        and indicates the start and end times to mask out of the final
        lcdict. Use this to remove anything that wasn't caught by the quality
        flags.
    headerkeys : list
        A list of FITS header keys that will be extracted from the FITS light
        curve file. These describe the observations. The default value for this
        is given in `LCHEADERKEYS` above.
    datakeys : list
        A list of FITS column names that correspond to the auxiliary
        measurements in the light curve. The default is `LCDATAKEYS` above.
    sapkeys : list
        A list of FITS column names that correspond to the SAP flux
        measurements in the light curve. The default is `LCSAPKEYS` above.
    pdckeys : list
        A list of FITS column names that correspond to the PDC flux
        measurements in the light curve. The default is `LCPDCKEYS` above.
    topkeys : list
        A list of FITS header keys that describe the object in the light
        curve. The default is `LCTOPKEYS` above.
    apkeys : list
        A list of FITS header keys that describe the flux measurement apertures
        used by the TESS pipeline. The default is `LCAPERTUREKEYS` above.
    Returns
    -------
    lcdict
        Returns an `lcdict` (this is useable by most astrobase functions for LC
        processing). Returns None if no matching light curve files are found.
    '''

    # if the lclist is a string, assume that we're passing in a fileglob
    if isinstance(lclist, str):

        if sys.version_info[:2] > (3, 4):
            # recursive glob is available on Python >= 3.5
            matching = glob.glob(lclist, recursive=True)
            LOGINFO('found %s LCs: %r' % (len(matching), matching))
        else:
            # older Pythons lack recursive glob, so walk the tree manually
            # and glob inside each subdirectory
            lcfitsdir = os.path.dirname(lclist)
            lcfitsfile = os.path.basename(lclist)
            walker = os.walk(lcfitsdir)
            matching = []

            for root, dirs, _files in walker:
                for sdir in dirs:
                    searchpath = os.path.join(root,
                                              sdir,
                                              lcfitsfile)
                    foundfiles = glob.glob(searchpath)

                    if foundfiles:
                        matching.extend(foundfiles)
                        LOGINFO(
                            'found %s in dir: %s' % (repr(foundfiles),
                                                     os.path.join(root, sdir))
                        )

    # if the lclist is an actual list of LCs, then use it directly
    else:
        matching = lclist

    # bail out early if there's nothing to consolidate (this also guards the
    # direct-list case, which previously crashed with an IndexError on an
    # empty list)
    if not matching:
        LOGERROR('could not find any TESS LC files matching glob: %s' %
                 lclist)
        return None

    # get the first file
    # FIX: pass the caller-provided key lists through instead of always
    # using the module-level defaults (the keyword args were previously
    # silently ignored)
    consolidated = read_tess_fitslc(matching[0],
                                    normalize=normalize,
                                    headerkeys=headerkeys,
                                    datakeys=datakeys,
                                    sapkeys=sapkeys,
                                    pdckeys=pdckeys,
                                    topkeys=topkeys,
                                    apkeys=apkeys)

    # append the rest of the files to the first lcdict
    if len(matching) > 1:
        for lcf in matching[1:]:
            consolidated = read_tess_fitslc(lcf,
                                            appendto=consolidated,
                                            normalize=normalize,
                                            headerkeys=headerkeys,
                                            datakeys=datakeys,
                                            sapkeys=sapkeys,
                                            pdckeys=pdckeys,
                                            topkeys=topkeys,
                                            apkeys=apkeys)

    # get the sort indices. we use time for the columns and sectors for the
    # bits in lcinfo and varinfo
    LOGINFO('sorting by time...')

    # NOTE: nans in time will be sorted to the end of the array
    finiteind = np.isfinite(consolidated['time'])
    if np.sum(finiteind) < consolidated['time'].size:
        LOGWARNING('some time values are nan! '
                   'measurements at these times will be '
                   'sorted to the end of the column arrays.')

    # get the time sort index
    column_sort_ind = np.argsort(consolidated['time'])

    # sort the columns by time; dotted column names address nested dicts
    # (e.g. 'sap.sap_flux' -> consolidated['sap']['sap_flux'])
    for col in consolidated['columns']:
        if '.' in col:
            key, subkey = col.split('.')
            consolidated[key][subkey] = (
                consolidated[key][subkey][column_sort_ind]
            )
        else:
            consolidated[col] = consolidated[col][column_sort_ind]

    info_sort_ind = np.argsort(consolidated['lcinfo']['sector'])

    # sort the keys in lcinfo by sector
    for key in consolidated['lcinfo']:
        consolidated['lcinfo'][key] = (
            np.array(consolidated['lcinfo'][key])[info_sort_ind].tolist()
        )

    # sort the keys in varinfo by sector
    for key in consolidated['varinfo']:
        consolidated['varinfo'][key] = (
            np.array(consolidated['varinfo'][key])[info_sort_ind].tolist()
        )

    # filter the LC dict if requested; we do this at the end so the filters
    # operate on the fully consolidated, time-sorted arrays
    if (filterqualityflags is not False or
        nanfilter is not None or
        timestoignore is not None):

        consolidated = filter_tess_lcdict(consolidated,
                                          filterqualityflags,
                                          nanfilter=nanfilter,
                                          timestoignore=timestoignore)

    return consolidated
constant[This consolidates a list of LCs for a single TIC object.
NOTE: if light curve time arrays contain nans, these and their associated
measurements will be sorted to the end of the final combined arrays.
Parameters
----------
lclist : list of str, or str
`lclist` is either a list of actual light curve files or a string that
is valid for glob.glob to search for and generate a light curve list
based on the file glob. This is useful for consolidating LC FITS files
across different TESS sectors for a single TIC ID using a glob like
`*<TICID>*_lc.fits`.
normalize : bool
If True, then the light curve's SAP_FLUX and PDCSAP_FLUX measurements
will be normalized to 1.0 by dividing out the median flux for the
component light curve.
filterqualityflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
nanfilter : {'sap','pdc','sap,pdc'} or None
Indicates the flux measurement type(s) to apply the filtering to.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
headerkeys : list
A list of FITS header keys that will be extracted from the FITS light
curve file. These describe the observations. The default value for this
is given in `LCHEADERKEYS` above.
datakeys : list
A list of FITS column names that correspond to the auxiliary
measurements in the light curve. The default is `LCDATAKEYS` above.
sapkeys : list
A list of FITS column names that correspond to the SAP flux
measurements in the light curve. The default is `LCSAPKEYS` above.
pdckeys : list
A list of FITS column names that correspond to the PDC flux
measurements in the light curve. The default is `LCPDCKEYS` above.
topkeys : list
A list of FITS header keys that describe the object in the light
curve. The default is `LCTOPKEYS` above.
apkeys : list
A list of FITS header keys that describe the flux measurement apertures
used by the TESS pipeline. The default is `LCAPERTUREKEYS` above.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing).
]
if call[name[isinstance], parameter[name[lclist], name[str]]] begin[:]
if compare[call[name[sys].version_info][<ast.Slice object at 0x7da1b004bf40>] greater[>] tuple[[<ast.Constant object at 0x7da1b0048940>, <ast.Constant object at 0x7da1b0049450>]]] begin[:]
variable[matching] assign[=] call[name[glob].glob, parameter[name[lclist]]]
call[name[LOGINFO], parameter[binary_operation[constant[found %s LCs: %r] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b0049ed0>, <ast.Name object at 0x7da1b004a950>]]]]]
if compare[call[name[len], parameter[name[matching]]] equal[==] constant[0]] begin[:]
call[name[LOGERROR], parameter[binary_operation[constant[could not find any TESS LC files matching glob: %s] <ast.Mod object at 0x7da2590d6920> name[lclist]]]]
return[constant[None]]
variable[consolidated] assign[=] call[name[read_tess_fitslc], parameter[call[name[matching]][constant[0]]]]
if compare[call[name[len], parameter[name[matching]]] greater[>] constant[1]] begin[:]
for taget[name[lcf]] in starred[call[name[matching]][<ast.Slice object at 0x7da1b0049b70>]] begin[:]
variable[consolidated] assign[=] call[name[read_tess_fitslc], parameter[name[lcf]]]
call[name[LOGINFO], parameter[constant[sorting by time...]]]
variable[finiteind] assign[=] call[name[np].isfinite, parameter[call[name[consolidated]][constant[time]]]]
if compare[call[name[np].sum, parameter[name[finiteind]]] less[<] call[name[consolidated]][constant[time]].size] begin[:]
call[name[LOGWARNING], parameter[constant[some time values are nan! measurements at these times will be sorted to the end of the column arrays.]]]
variable[column_sort_ind] assign[=] call[name[np].argsort, parameter[call[name[consolidated]][constant[time]]]]
for taget[name[col]] in starred[call[name[consolidated]][constant[columns]]] begin[:]
if compare[constant[.] in name[col]] begin[:]
<ast.Tuple object at 0x7da1b004b730> assign[=] call[name[col].split, parameter[constant[.]]]
call[call[name[consolidated]][name[key]]][name[subkey]] assign[=] call[call[call[name[consolidated]][name[key]]][name[subkey]]][name[column_sort_ind]]
variable[info_sort_ind] assign[=] call[name[np].argsort, parameter[call[call[name[consolidated]][constant[lcinfo]]][constant[sector]]]]
for taget[name[key]] in starred[call[name[consolidated]][constant[lcinfo]]] begin[:]
call[call[name[consolidated]][constant[lcinfo]]][name[key]] assign[=] call[call[call[name[np].array, parameter[call[call[name[consolidated]][constant[lcinfo]]][name[key]]]]][name[info_sort_ind]].tolist, parameter[]]
for taget[name[key]] in starred[call[name[consolidated]][constant[varinfo]]] begin[:]
call[call[name[consolidated]][constant[varinfo]]][name[key]] assign[=] call[call[call[name[np].array, parameter[call[call[name[consolidated]][constant[varinfo]]][name[key]]]]][name[info_sort_ind]].tolist, parameter[]]
if <ast.BoolOp object at 0x7da1b0050c40> begin[:]
variable[consolidated] assign[=] call[name[filter_tess_lcdict], parameter[name[consolidated], name[filterqualityflags]]]
return[name[consolidated]] | keyword[def] identifier[consolidate_tess_fitslc] ( identifier[lclist] ,
identifier[normalize] = keyword[True] ,
identifier[filterqualityflags] = keyword[False] ,
identifier[nanfilter] = keyword[None] ,
identifier[timestoignore] = keyword[None] ,
identifier[headerkeys] = identifier[LCHEADERKEYS] ,
identifier[datakeys] = identifier[LCDATAKEYS] ,
identifier[sapkeys] = identifier[LCSAPKEYS] ,
identifier[pdckeys] = identifier[LCPDCKEYS] ,
identifier[topkeys] = identifier[LCTOPKEYS] ,
identifier[apkeys] = identifier[LCAPERTUREKEYS] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[lclist] , identifier[str] ):
keyword[if] identifier[sys] . identifier[version_info] [: literal[int] ]>( literal[int] , literal[int] ):
identifier[matching] = identifier[glob] . identifier[glob] ( identifier[lclist] ,
identifier[recursive] = keyword[True] )
identifier[LOGINFO] ( literal[string] %( identifier[len] ( identifier[matching] ), identifier[matching] ))
keyword[else] :
identifier[lcfitsdir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[lclist] )
identifier[lcfitsfile] = identifier[os] . identifier[path] . identifier[basename] ( identifier[lclist] )
identifier[walker] = identifier[os] . identifier[walk] ( identifier[lcfitsdir] )
identifier[matching] =[]
keyword[for] identifier[root] , identifier[dirs] , identifier[_files] keyword[in] identifier[walker] :
keyword[for] identifier[sdir] keyword[in] identifier[dirs] :
identifier[searchpath] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] ,
identifier[sdir] ,
identifier[lcfitsfile] )
identifier[foundfiles] = identifier[glob] . identifier[glob] ( identifier[searchpath] )
keyword[if] identifier[foundfiles] :
identifier[matching] . identifier[extend] ( identifier[foundfiles] )
identifier[LOGINFO] (
literal[string] %( identifier[repr] ( identifier[foundfiles] ),
identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[sdir] ))
)
keyword[if] identifier[len] ( identifier[matching] )== literal[int] :
identifier[LOGERROR] ( literal[string] %
identifier[lclist] )
keyword[return] keyword[None]
keyword[else] :
identifier[matching] = identifier[lclist]
identifier[consolidated] = identifier[read_tess_fitslc] ( identifier[matching] [ literal[int] ],
identifier[normalize] = identifier[normalize] ,
identifier[headerkeys] = identifier[LCHEADERKEYS] ,
identifier[datakeys] = identifier[LCDATAKEYS] ,
identifier[sapkeys] = identifier[LCSAPKEYS] ,
identifier[pdckeys] = identifier[LCPDCKEYS] ,
identifier[topkeys] = identifier[LCTOPKEYS] ,
identifier[apkeys] = identifier[LCAPERTUREKEYS] )
keyword[if] identifier[len] ( identifier[matching] )> literal[int] :
keyword[for] identifier[lcf] keyword[in] identifier[matching] [ literal[int] :]:
identifier[consolidated] = identifier[read_tess_fitslc] ( identifier[lcf] ,
identifier[appendto] = identifier[consolidated] ,
identifier[normalize] = identifier[normalize] ,
identifier[headerkeys] = identifier[LCHEADERKEYS] ,
identifier[datakeys] = identifier[LCDATAKEYS] ,
identifier[sapkeys] = identifier[LCSAPKEYS] ,
identifier[pdckeys] = identifier[LCPDCKEYS] ,
identifier[topkeys] = identifier[LCTOPKEYS] ,
identifier[apkeys] = identifier[LCAPERTUREKEYS] )
identifier[LOGINFO] ( literal[string] )
identifier[finiteind] = identifier[np] . identifier[isfinite] ( identifier[consolidated] [ literal[string] ])
keyword[if] identifier[np] . identifier[sum] ( identifier[finiteind] )< identifier[consolidated] [ literal[string] ]. identifier[size] :
identifier[LOGWARNING] ( literal[string]
literal[string]
literal[string] )
identifier[column_sort_ind] = identifier[np] . identifier[argsort] ( identifier[consolidated] [ literal[string] ])
keyword[for] identifier[col] keyword[in] identifier[consolidated] [ literal[string] ]:
keyword[if] literal[string] keyword[in] identifier[col] :
identifier[key] , identifier[subkey] = identifier[col] . identifier[split] ( literal[string] )
identifier[consolidated] [ identifier[key] ][ identifier[subkey] ]=(
identifier[consolidated] [ identifier[key] ][ identifier[subkey] ][ identifier[column_sort_ind] ]
)
keyword[else] :
identifier[consolidated] [ identifier[col] ]= identifier[consolidated] [ identifier[col] ][ identifier[column_sort_ind] ]
identifier[info_sort_ind] = identifier[np] . identifier[argsort] ( identifier[consolidated] [ literal[string] ][ literal[string] ])
keyword[for] identifier[key] keyword[in] identifier[consolidated] [ literal[string] ]:
identifier[consolidated] [ literal[string] ][ identifier[key] ]=(
identifier[np] . identifier[array] ( identifier[consolidated] [ literal[string] ][ identifier[key] ])[ identifier[info_sort_ind] ]. identifier[tolist] ()
)
keyword[for] identifier[key] keyword[in] identifier[consolidated] [ literal[string] ]:
identifier[consolidated] [ literal[string] ][ identifier[key] ]=(
identifier[np] . identifier[array] ( identifier[consolidated] [ literal[string] ][ identifier[key] ])[ identifier[info_sort_ind] ]. identifier[tolist] ()
)
keyword[if] ( identifier[filterqualityflags] keyword[is] keyword[not] keyword[False] keyword[or]
identifier[nanfilter] keyword[is] keyword[not] keyword[None] keyword[or]
identifier[timestoignore] keyword[is] keyword[not] keyword[None] ):
identifier[consolidated] = identifier[filter_tess_lcdict] ( identifier[consolidated] ,
identifier[filterqualityflags] ,
identifier[nanfilter] = identifier[nanfilter] ,
identifier[timestoignore] = identifier[timestoignore] )
keyword[return] identifier[consolidated] | def consolidate_tess_fitslc(lclist, normalize=True, filterqualityflags=False, nanfilter=None, timestoignore=None, headerkeys=LCHEADERKEYS, datakeys=LCDATAKEYS, sapkeys=LCSAPKEYS, pdckeys=LCPDCKEYS, topkeys=LCTOPKEYS, apkeys=LCAPERTUREKEYS):
"""This consolidates a list of LCs for a single TIC object.
NOTE: if light curve time arrays contain nans, these and their associated
measurements will be sorted to the end of the final combined arrays.
Parameters
----------
lclist : list of str, or str
`lclist` is either a list of actual light curve files or a string that
is valid for glob.glob to search for and generate a light curve list
based on the file glob. This is useful for consolidating LC FITS files
across different TESS sectors for a single TIC ID using a glob like
`*<TICID>*_lc.fits`.
normalize : bool
If True, then the light curve's SAP_FLUX and PDCSAP_FLUX measurements
will be normalized to 1.0 by dividing out the median flux for the
component light curve.
filterqualityflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
nanfilter : {'sap','pdc','sap,pdc'} or None
Indicates the flux measurement type(s) to apply the filtering to.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
headerkeys : list
A list of FITS header keys that will be extracted from the FITS light
curve file. These describe the observations. The default value for this
is given in `LCHEADERKEYS` above.
datakeys : list
A list of FITS column names that correspond to the auxiliary
measurements in the light curve. The default is `LCDATAKEYS` above.
sapkeys : list
A list of FITS column names that correspond to the SAP flux
measurements in the light curve. The default is `LCSAPKEYS` above.
pdckeys : list
A list of FITS column names that correspond to the PDC flux
measurements in the light curve. The default is `LCPDCKEYS` above.
topkeys : list
A list of FITS header keys that describe the object in the light
curve. The default is `LCTOPKEYS` above.
apkeys : list
A list of FITS header keys that describe the flux measurement apertures
used by the TESS pipeline. The default is `LCAPERTUREKEYS` above.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing).
"""
# if the lclist is a string, assume that we're passing in a fileglob
if isinstance(lclist, str):
if sys.version_info[:2] > (3, 4):
matching = glob.glob(lclist, recursive=True)
LOGINFO('found %s LCs: %r' % (len(matching), matching)) # depends on [control=['if'], data=[]]
else:
lcfitsdir = os.path.dirname(lclist)
lcfitsfile = os.path.basename(lclist)
walker = os.walk(lcfitsdir)
matching = []
for (root, dirs, _files) in walker:
for sdir in dirs:
searchpath = os.path.join(root, sdir, lcfitsfile)
foundfiles = glob.glob(searchpath)
if foundfiles:
matching.extend(foundfiles)
LOGINFO('found %s in dir: %s' % (repr(foundfiles), os.path.join(root, sdir))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['sdir']] # depends on [control=['for'], data=[]]
if len(matching) == 0:
LOGERROR('could not find any TESS LC files matching glob: %s' % lclist)
return None # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# if the lclist is an actual list of LCs, then use it directly
matching = lclist
# get the first file
consolidated = read_tess_fitslc(matching[0], normalize=normalize, headerkeys=LCHEADERKEYS, datakeys=LCDATAKEYS, sapkeys=LCSAPKEYS, pdckeys=LCPDCKEYS, topkeys=LCTOPKEYS, apkeys=LCAPERTUREKEYS)
# get the rest of the files
if len(matching) > 1:
for lcf in matching[1:]:
consolidated = read_tess_fitslc(lcf, appendto=consolidated, normalize=normalize, headerkeys=LCHEADERKEYS, datakeys=LCDATAKEYS, sapkeys=LCSAPKEYS, pdckeys=LCPDCKEYS, topkeys=LCTOPKEYS, apkeys=LCAPERTUREKEYS) # depends on [control=['for'], data=['lcf']] # depends on [control=['if'], data=[]]
# get the sort indices. we use time for the columns and sectors for the
# bits in lcinfo and varinfo
LOGINFO('sorting by time...')
# NOTE: nans in time will be sorted to the end of the array
finiteind = np.isfinite(consolidated['time'])
if np.sum(finiteind) < consolidated['time'].size:
LOGWARNING('some time values are nan! measurements at these times will be sorted to the end of the column arrays.') # depends on [control=['if'], data=[]]
# get the time sort index
column_sort_ind = np.argsort(consolidated['time'])
# sort the columns by time
for col in consolidated['columns']:
if '.' in col:
(key, subkey) = col.split('.')
consolidated[key][subkey] = consolidated[key][subkey][column_sort_ind] # depends on [control=['if'], data=['col']]
else:
consolidated[col] = consolidated[col][column_sort_ind] # depends on [control=['for'], data=['col']]
info_sort_ind = np.argsort(consolidated['lcinfo']['sector'])
# sort the keys in lcinfo
for key in consolidated['lcinfo']:
consolidated['lcinfo'][key] = np.array(consolidated['lcinfo'][key])[info_sort_ind].tolist() # depends on [control=['for'], data=['key']]
# sort the keys in varinfo
for key in consolidated['varinfo']:
consolidated['varinfo'][key] = np.array(consolidated['varinfo'][key])[info_sort_ind].tolist() # depends on [control=['for'], data=['key']]
# filter the LC dict if requested
# we do this at the end
if filterqualityflags is not False or nanfilter is not None or timestoignore is not None:
consolidated = filter_tess_lcdict(consolidated, filterqualityflags, nanfilter=nanfilter, timestoignore=timestoignore) # depends on [control=['if'], data=[]]
return consolidated |
def infer_declared_from_conditions(conds, namespace=None):
    '''Infer declared attributes from first-party caveat conditions.

    Behaves like infer_declared, except that it takes the first party
    caveat conditions directly as a list of strings rather than a set of
    macaroons.
    '''
    if namespace is None:
        namespace = Namespace()
    prefix = namespace.resolve(STD_NAMESPACE)
    # When the standard namespace cannot be resolved, fall back to bare
    # "declared" caveats so legacy macaroons without a namespace still work.
    if prefix is None:
        prefix = ''
    declared_cond = prefix + COND_DECLARED

    info = {}
    conflicted = set()
    for cond in conds:
        try:
            name, rest = parse_caveat(cond)
        except ValueError:
            name, rest = '', ''
        if name != declared_cond:
            continue
        fields = rest.split(' ', 1)
        if len(fields) != 2:
            continue
        key, val = fields
        previous = info.get(key)
        if previous is not None and previous != val:
            # two different values were declared for the same key;
            # remember it so the key is dropped from the result below
            conflicted.add(key)
        else:
            info[key] = val
    for key in conflicted:
        del info[key]
    return info
constant[ like infer_declared except that it is passed a set of first party
caveat conditions as a list of string rather than a set of macaroons.
]
variable[conflicts] assign[=] list[[]]
if compare[name[namespace] is constant[None]] begin[:]
variable[namespace] assign[=] call[name[Namespace], parameter[]]
variable[prefix] assign[=] call[name[namespace].resolve, parameter[name[STD_NAMESPACE]]]
if compare[name[prefix] is constant[None]] begin[:]
variable[prefix] assign[=] constant[]
variable[declared_cond] assign[=] binary_operation[name[prefix] + name[COND_DECLARED]]
variable[info] assign[=] dictionary[[], []]
for taget[name[cond]] in starred[name[conds]] begin[:]
<ast.Try object at 0x7da1b2539c00>
if compare[name[name] not_equal[!=] name[declared_cond]] begin[:]
continue
variable[parts] assign[=] call[name[rest].split, parameter[constant[ ], constant[1]]]
if compare[call[name[len], parameter[name[parts]]] not_equal[!=] constant[2]] begin[:]
continue
<ast.Tuple object at 0x7da1b2539750> assign[=] tuple[[<ast.Subscript object at 0x7da1b25394b0>, <ast.Subscript object at 0x7da1b2595450>]]
variable[old_val] assign[=] call[name[info].get, parameter[name[key]]]
if <ast.BoolOp object at 0x7da1b2594910> begin[:]
call[name[conflicts].append, parameter[name[key]]]
continue
call[name[info]][name[key]] assign[=] name[val]
for taget[name[key]] in starred[call[name[set], parameter[name[conflicts]]]] begin[:]
<ast.Delete object at 0x7da1b2596cb0>
return[name[info]] | keyword[def] identifier[infer_declared_from_conditions] ( identifier[conds] , identifier[namespace] = keyword[None] ):
literal[string]
identifier[conflicts] =[]
keyword[if] identifier[namespace] keyword[is] keyword[None] :
identifier[namespace] = identifier[Namespace] ()
identifier[prefix] = identifier[namespace] . identifier[resolve] ( identifier[STD_NAMESPACE] )
keyword[if] identifier[prefix] keyword[is] keyword[None] :
identifier[prefix] = literal[string]
identifier[declared_cond] = identifier[prefix] + identifier[COND_DECLARED]
identifier[info] ={}
keyword[for] identifier[cond] keyword[in] identifier[conds] :
keyword[try] :
identifier[name] , identifier[rest] = identifier[parse_caveat] ( identifier[cond] )
keyword[except] identifier[ValueError] :
identifier[name] , identifier[rest] = literal[string] , literal[string]
keyword[if] identifier[name] != identifier[declared_cond] :
keyword[continue]
identifier[parts] = identifier[rest] . identifier[split] ( literal[string] , literal[int] )
keyword[if] identifier[len] ( identifier[parts] )!= literal[int] :
keyword[continue]
identifier[key] , identifier[val] = identifier[parts] [ literal[int] ], identifier[parts] [ literal[int] ]
identifier[old_val] = identifier[info] . identifier[get] ( identifier[key] )
keyword[if] identifier[old_val] keyword[is] keyword[not] keyword[None] keyword[and] identifier[old_val] != identifier[val] :
identifier[conflicts] . identifier[append] ( identifier[key] )
keyword[continue]
identifier[info] [ identifier[key] ]= identifier[val]
keyword[for] identifier[key] keyword[in] identifier[set] ( identifier[conflicts] ):
keyword[del] identifier[info] [ identifier[key] ]
keyword[return] identifier[info] | def infer_declared_from_conditions(conds, namespace=None):
""" like infer_declared except that it is passed a set of first party
caveat conditions as a list of string rather than a set of macaroons.
"""
conflicts = []
# If we can't resolve that standard namespace, then we'll look for
# just bare "declared" caveats which will work OK for legacy
# macaroons with no namespace.
if namespace is None:
namespace = Namespace() # depends on [control=['if'], data=['namespace']]
prefix = namespace.resolve(STD_NAMESPACE)
if prefix is None:
prefix = '' # depends on [control=['if'], data=['prefix']]
declared_cond = prefix + COND_DECLARED
info = {}
for cond in conds:
try:
(name, rest) = parse_caveat(cond) # depends on [control=['try'], data=[]]
except ValueError:
(name, rest) = ('', '') # depends on [control=['except'], data=[]]
if name != declared_cond:
continue # depends on [control=['if'], data=[]]
parts = rest.split(' ', 1)
if len(parts) != 2:
continue # depends on [control=['if'], data=[]]
(key, val) = (parts[0], parts[1])
old_val = info.get(key)
if old_val is not None and old_val != val:
conflicts.append(key)
continue # depends on [control=['if'], data=[]]
info[key] = val # depends on [control=['for'], data=['cond']]
for key in set(conflicts):
del info[key] # depends on [control=['for'], data=['key']]
return info |
def discover_upnp_devices(
    self, st="upnp:rootdevice", timeout=2, mx=1, retries=1
):
    """
    Send an SSDP discovery (M-SEARCH) packet to the multicast group and
    collect the devices that reply to it.

    :param st: the SSDP search target string
    :param timeout: socket timeout in seconds; a receive timeout is the
        normal way a collection round ends
    :param mx: maximum response delay advertised to devices (MX header)
    :param retries: number of discovery attempts made while no device has
        answered yet
    :return: dict mapping each device's unique USN to its parsed response
    """
    # prepare UDP socket to transfer the SSDP packets
    s = socket.socket(
        socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP
    )
    try:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
        s.settimeout(timeout)

        # prepare SSDP discover message
        msg = SSDPDiscoveryMessage(mx=mx, st=st)

        # try to get devices with multiple retries in case of failure
        devices = {}
        for _ in range(retries):
            # send SSDP discovery message
            s.sendto(msg.bytes, SSDP_MULTICAST_ADDR)
            devices = {}
            try:
                while True:
                    # parse response and store it in dict, keyed by USN
                    r = SSDPResponse(s.recvfrom(65507))
                    devices[r.usn] = r
            except socket.timeout:
                # no more replies within the timeout: this round is done
                pass
            # FIX: the previous version broke out of the retry loop on the
            # first timeout, so `retries` was effectively dead code; only
            # stop early once at least one device has answered
            if devices:
                break
    finally:
        # FIX: the socket was previously leaked; always release it
        s.close()

    return devices
constant[
sends an SSDP discovery packet to the network and collects
the devices that replies to it. A dictionary is returned
using the devices unique usn as key
]
variable[s] assign[=] call[name[socket].socket, parameter[name[socket].AF_INET, name[socket].SOCK_DGRAM, name[socket].IPPROTO_UDP]]
call[name[s].setsockopt, parameter[name[socket].SOL_SOCKET, name[socket].SO_REUSEADDR, constant[1]]]
call[name[s].setsockopt, parameter[name[socket].IPPROTO_IP, name[socket].IP_MULTICAST_TTL, constant[2]]]
call[name[s].settimeout, parameter[name[timeout]]]
variable[msg] assign[=] call[name[SSDPDiscoveryMessage], parameter[]]
variable[devices] assign[=] dictionary[[], []]
for taget[name[_]] in starred[call[name[range], parameter[name[retries]]]] begin[:]
call[name[s].sendto, parameter[name[msg].bytes, name[SSDP_MULTICAST_ADDR]]]
variable[devices] assign[=] dictionary[[], []]
<ast.Try object at 0x7da1b11ef0a0>
return[name[devices]] | keyword[def] identifier[discover_upnp_devices] (
identifier[self] , identifier[st] = literal[string] , identifier[timeout] = literal[int] , identifier[mx] = literal[int] , identifier[retries] = literal[int]
):
literal[string]
identifier[s] = identifier[socket] . identifier[socket] (
identifier[socket] . identifier[AF_INET] , identifier[socket] . identifier[SOCK_DGRAM] , identifier[socket] . identifier[IPPROTO_UDP]
)
identifier[s] . identifier[setsockopt] ( identifier[socket] . identifier[SOL_SOCKET] , identifier[socket] . identifier[SO_REUSEADDR] , literal[int] )
identifier[s] . identifier[setsockopt] ( identifier[socket] . identifier[IPPROTO_IP] , identifier[socket] . identifier[IP_MULTICAST_TTL] , literal[int] )
identifier[s] . identifier[settimeout] ( identifier[timeout] )
identifier[msg] = identifier[SSDPDiscoveryMessage] ( identifier[mx] = identifier[mx] , identifier[st] = identifier[st] )
identifier[devices] ={}
keyword[for] identifier[_] keyword[in] identifier[range] ( identifier[retries] ):
identifier[s] . identifier[sendto] ( identifier[msg] . identifier[bytes] , identifier[SSDP_MULTICAST_ADDR] )
identifier[devices] ={}
keyword[try] :
keyword[while] keyword[True] :
identifier[r] = identifier[SSDPResponse] ( identifier[s] . identifier[recvfrom] ( literal[int] ))
identifier[devices] [ identifier[r] . identifier[usn] ]= identifier[r]
keyword[except] identifier[socket] . identifier[timeout] :
keyword[break]
keyword[return] identifier[devices] | def discover_upnp_devices(self, st='upnp:rootdevice', timeout=2, mx=1, retries=1):
"""
sends an SSDP discovery packet to the network and collects
the devices that replies to it. A dictionary is returned
using the devices unique usn as key
"""
# prepare UDP socket to transfer the SSDP packets
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2)
s.settimeout(timeout)
# prepare SSDP discover message
msg = SSDPDiscoveryMessage(mx=mx, st=st)
# try to get devices with multiple retries in case of failure
devices = {}
for _ in range(retries):
# send SSDP discovery message
s.sendto(msg.bytes, SSDP_MULTICAST_ADDR)
devices = {}
try:
while True:
# parse response and store it in dict
r = SSDPResponse(s.recvfrom(65507))
devices[r.usn] = r # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
except socket.timeout:
break # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
return devices |
def extract_plain_text(self, reduce: bool = False) -> str:
    """
    Extract text segments from the message, joined by single space.
    :param reduce: reduce the message before extracting
    :return: the joined string
    """
    if reduce:
        self.reduce()
    # Collect the text payload of every 'text' segment, then join with
    # a single space — equivalent to accumulating ' ' + text and stripping
    # the leading space afterwards.
    pieces = [seg.data['text'] for seg in self if seg.type == 'text']
    return ' '.join(pieces)
constant[
Extract text segments from the message, joined by single space.
:param reduce: reduce the message before extracting
:return: the joined string
]
if name[reduce] begin[:]
call[name[self].reduce, parameter[]]
variable[result] assign[=] constant[]
for taget[name[seg]] in starred[name[self]] begin[:]
if compare[name[seg].type equal[==] constant[text]] begin[:]
<ast.AugAssign object at 0x7da20c6aaa40>
if name[result] begin[:]
variable[result] assign[=] call[name[result]][<ast.Slice object at 0x7da18c4cc820>]
return[name[result]] | keyword[def] identifier[extract_plain_text] ( identifier[self] , identifier[reduce] : identifier[bool] = keyword[False] )-> identifier[str] :
literal[string]
keyword[if] identifier[reduce] :
identifier[self] . identifier[reduce] ()
identifier[result] = literal[string]
keyword[for] identifier[seg] keyword[in] identifier[self] :
keyword[if] identifier[seg] . identifier[type] == literal[string] :
identifier[result] += literal[string] + identifier[seg] . identifier[data] [ literal[string] ]
keyword[if] identifier[result] :
identifier[result] = identifier[result] [ literal[int] :]
keyword[return] identifier[result] | def extract_plain_text(self, reduce: bool=False) -> str:
"""
Extract text segments from the message, joined by single space.
:param reduce: reduce the message before extracting
:return: the joined string
"""
if reduce:
self.reduce() # depends on [control=['if'], data=[]]
result = ''
for seg in self:
if seg.type == 'text':
result += ' ' + seg.data['text'] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['seg']]
if result:
result = result[1:] # depends on [control=['if'], data=[]]
return result |
def _from_dict(cls, _dict):
    """Initialize a Voices object from a json dictionary."""
    # Guard clause: 'voices' is the only required property.
    if 'voices' not in _dict:
        raise ValueError(
            "Required property 'voices' not present in Voices JSON")
    # Deserialize every entry through the Voice model.
    voices = [Voice._from_dict(item) for item in _dict.get('voices')]
    return cls(voices=voices)
return cls(**args) | def function[_from_dict, parameter[cls, _dict]]:
constant[Initialize a Voices object from a json dictionary.]
variable[args] assign[=] dictionary[[], []]
if compare[constant[voices] in name[_dict]] begin[:]
call[name[args]][constant[voices]] assign[=] <ast.ListComp object at 0x7da204623070>
return[call[name[cls], parameter[]]] | keyword[def] identifier[_from_dict] ( identifier[cls] , identifier[_dict] ):
literal[string]
identifier[args] ={}
keyword[if] literal[string] keyword[in] identifier[_dict] :
identifier[args] [ literal[string] ]=[
identifier[Voice] . identifier[_from_dict] ( identifier[x] ) keyword[for] identifier[x] keyword[in] ( identifier[_dict] . identifier[get] ( literal[string] ))
]
keyword[else] :
keyword[raise] identifier[ValueError] (
literal[string] )
keyword[return] identifier[cls] (** identifier[args] ) | def _from_dict(cls, _dict):
"""Initialize a Voices object from a json dictionary."""
args = {}
if 'voices' in _dict:
args['voices'] = [Voice._from_dict(x) for x in _dict.get('voices')] # depends on [control=['if'], data=['_dict']]
else:
raise ValueError("Required property 'voices' not present in Voices JSON")
return cls(**args) |
def add(self, name, obj=None):
    """Add the view named `name` to the report text"""
    # Either render the given object as a literal block, or look the
    # view up in the datastore.
    text = '\n::\n\n' + indent(str(obj)) if obj else views.view(name, self.dstore)
    if not text:
        return
    header = self.title[name]
    underline = '-' * len(header)
    # Append "<blank line><title><underline><body>" to the report.
    self.text += '\n\n' + header + '\n' + underline + '\n' + text
constant[Add the view named `name` to the report text]
if name[obj] begin[:]
variable[text] assign[=] binary_operation[constant[
::
] + call[name[indent], parameter[call[name[str], parameter[name[obj]]]]]]
if name[text] begin[:]
variable[title] assign[=] call[name[self].title][name[name]]
variable[line] assign[=] binary_operation[constant[-] * call[name[len], parameter[name[title]]]]
<ast.AugAssign object at 0x7da18eb55de0> | keyword[def] identifier[add] ( identifier[self] , identifier[name] , identifier[obj] = keyword[None] ):
literal[string]
keyword[if] identifier[obj] :
identifier[text] = literal[string] + identifier[indent] ( identifier[str] ( identifier[obj] ))
keyword[else] :
identifier[text] = identifier[views] . identifier[view] ( identifier[name] , identifier[self] . identifier[dstore] )
keyword[if] identifier[text] :
identifier[title] = identifier[self] . identifier[title] [ identifier[name] ]
identifier[line] = literal[string] * identifier[len] ( identifier[title] )
identifier[self] . identifier[text] += literal[string] . identifier[join] ([ literal[string] + identifier[title] , identifier[line] , identifier[text] ]) | def add(self, name, obj=None):
"""Add the view named `name` to the report text"""
if obj:
text = '\n::\n\n' + indent(str(obj)) # depends on [control=['if'], data=[]]
else:
text = views.view(name, self.dstore)
if text:
title = self.title[name]
line = '-' * len(title)
self.text += '\n'.join(['\n\n' + title, line, text]) # depends on [control=['if'], data=[]] |
def log_in(self):
    """Perform the `log_in` task to setup the API session for future data requests."""
    if not self.password:
        # No password was supplied up front; prompt for it interactively.
        self.password = getpass.getpass('Password: ')
    utils.pending_message('Performing login...')
    result = self.client.login(account=self.account, password=self.password)
    if 'error' in result:
        # Delegate failure reporting/exit handling.
        self.handle_failed_login(result)
    utils.info_message('Login successful')
constant[Perform the `log_in` task to setup the API session for future data requests.]
if <ast.UnaryOp object at 0x7da1b0de5900> begin[:]
name[self].password assign[=] call[name[getpass].getpass, parameter[constant[Password: ]]]
call[name[utils].pending_message, parameter[constant[Performing login...]]]
variable[login_result] assign[=] call[name[self].client.login, parameter[]]
if compare[constant[error] in name[login_result]] begin[:]
call[name[self].handle_failed_login, parameter[name[login_result]]]
call[name[utils].info_message, parameter[constant[Login successful]]] | keyword[def] identifier[log_in] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[password] :
identifier[self] . identifier[password] = identifier[getpass] . identifier[getpass] ( literal[string] )
identifier[utils] . identifier[pending_message] ( literal[string] )
identifier[login_result] = identifier[self] . identifier[client] . identifier[login] (
identifier[account] = identifier[self] . identifier[account] ,
identifier[password] = identifier[self] . identifier[password]
)
keyword[if] literal[string] keyword[in] identifier[login_result] :
identifier[self] . identifier[handle_failed_login] ( identifier[login_result] )
identifier[utils] . identifier[info_message] ( literal[string] ) | def log_in(self):
"""Perform the `log_in` task to setup the API session for future data requests."""
if not self.password:
# Password wasn't give, ask for it now
self.password = getpass.getpass('Password: ') # depends on [control=['if'], data=[]]
utils.pending_message('Performing login...')
login_result = self.client.login(account=self.account, password=self.password)
if 'error' in login_result:
self.handle_failed_login(login_result) # depends on [control=['if'], data=['login_result']]
utils.info_message('Login successful') |
def unregister_editorstack(self, editorstack):
    """Removing editorstack only if it's not the last remaining"""
    self.remove_last_focus_editorstack(editorstack)
    # Never remove the final remaining editorstack.
    if len(self.editorstacks) <= 1:
        # editorstack was not removed!
        return False
    self.editorstacks.remove(editorstack)
    return True
constant[Removing editorstack only if it's not the last remaining]
call[name[self].remove_last_focus_editorstack, parameter[name[editorstack]]]
if compare[call[name[len], parameter[name[self].editorstacks]] greater[>] constant[1]] begin[:]
variable[index] assign[=] call[name[self].editorstacks.index, parameter[name[editorstack]]]
call[name[self].editorstacks.pop, parameter[name[index]]]
return[constant[True]] | keyword[def] identifier[unregister_editorstack] ( identifier[self] , identifier[editorstack] ):
literal[string]
identifier[self] . identifier[remove_last_focus_editorstack] ( identifier[editorstack] )
keyword[if] identifier[len] ( identifier[self] . identifier[editorstacks] )> literal[int] :
identifier[index] = identifier[self] . identifier[editorstacks] . identifier[index] ( identifier[editorstack] )
identifier[self] . identifier[editorstacks] . identifier[pop] ( identifier[index] )
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False] | def unregister_editorstack(self, editorstack):
"""Removing editorstack only if it's not the last remaining"""
self.remove_last_focus_editorstack(editorstack)
if len(self.editorstacks) > 1:
index = self.editorstacks.index(editorstack)
self.editorstacks.pop(index)
return True # depends on [control=['if'], data=[]]
else: # editorstack was not removed!
return False |
def apply_exclusions(self,exclusions):
    """ Trim sky catalog to remove any sources within regions specified by
    exclusions file.

    Parameters
    ----------
    exclusions : str
        Name of the exclusions file understood by
        ``tweakutils.parse_exclusions``; each region provides a position,
        a distance, and a 'units' flag ('sky' or pixel-based).

    Notes
    -----
    Updates ``self.radec`` and ``self.xypos`` in place when any source
    falls inside an exclusion region; otherwise only logs.
    """
    # parse exclusion file into list of positions and distances
    exclusion_coords = tweakutils.parse_exclusions(exclusions)
    if exclusion_coords is None:
        # Nothing parseable in the exclusions file: leave catalog untouched.
        return
    excluded_list = []
    # Index list over all catalog sources; radec[0]=RA array, radec[1]=Dec array.
    radec_indx = list(range(len(self.radec[0])))
    for ra,dec,indx in zip(self.radec[0],self.radec[1],radec_indx):
        src_pos = coords.SkyCoord(ra=ra,dec=dec,unit=(u.hourangle,u.deg))
        # check to see whether this source is within an exclusion region
        for reg in exclusion_coords:
            if reg['units'] == 'sky':
                # Region already given in sky coordinates / arcsec.
                regpos = reg['pos']
                regdist = reg['distance'] # units: arcsec
            else:
                # Pixel-based region: convert position to RA/Dec and
                # distance to arcsec via the image plate scale.
                regradec = self.wcs.all_pix2world([reg['pos']],1)[0]
                regpos = (regradec[0],regradec[1])
                regdist = reg['distance']*self.wcs.pscale # units: arcsec
            epos = coords.SkyCoord(ra=regpos[0],dec=regpos[1],unit=(u.hourangle,u.deg))
            # Compare angular separation (arcsec) against the region radius;
            # a source inside any region is excluded, so stop checking regions.
            if float(epos.separation(src_pos).to_string(unit=u.arcsec,decimal=True)) <= regdist:
                excluded_list.append(indx)
                break
    # create a list of all 'good' sources outside all exclusion regions
    for e in excluded_list: radec_indx.remove(e)
    radec_indx = np.array(radec_indx,dtype=int)
    num_excluded = len(excluded_list)
    if num_excluded > 0:
        # Trim every parallel array in radec/xypos down to the surviving sources.
        radec_trimmed = []
        xypos_trimmed = []
        for arr in self.radec:
            radec_trimmed.append(arr[radec_indx])
        for arr in self.xypos:
            xypos_trimmed.append(arr[radec_indx])
        # Last xypos column appears to be a running source id; renumber it
        # after trimming — TODO confirm against catalog writer.
        xypos_trimmed[-1] = np.arange(len(xypos_trimmed[0]))
        self.radec = radec_trimmed
        self.xypos = xypos_trimmed
    log.info('Excluded %d sources from catalog.'%num_excluded)
constant[ Trim sky catalog to remove any sources within regions specified by
exclusions file.
]
variable[exclusion_coords] assign[=] call[name[tweakutils].parse_exclusions, parameter[name[exclusions]]]
if compare[name[exclusion_coords] is constant[None]] begin[:]
return[None]
variable[excluded_list] assign[=] list[[]]
variable[radec_indx] assign[=] call[name[list], parameter[call[name[range], parameter[call[name[len], parameter[call[name[self].radec][constant[0]]]]]]]]
for taget[tuple[[<ast.Name object at 0x7da1b1a2b6d0>, <ast.Name object at 0x7da1b1a2b670>, <ast.Name object at 0x7da1b1a2b6a0>]]] in starred[call[name[zip], parameter[call[name[self].radec][constant[0]], call[name[self].radec][constant[1]], name[radec_indx]]]] begin[:]
variable[src_pos] assign[=] call[name[coords].SkyCoord, parameter[]]
for taget[name[reg]] in starred[name[exclusion_coords]] begin[:]
if compare[call[name[reg]][constant[units]] equal[==] constant[sky]] begin[:]
variable[regpos] assign[=] call[name[reg]][constant[pos]]
variable[regdist] assign[=] call[name[reg]][constant[distance]]
variable[epos] assign[=] call[name[coords].SkyCoord, parameter[]]
if compare[call[name[float], parameter[call[call[name[epos].separation, parameter[name[src_pos]]].to_string, parameter[]]]] less_or_equal[<=] name[regdist]] begin[:]
call[name[excluded_list].append, parameter[name[indx]]]
break
for taget[name[e]] in starred[name[excluded_list]] begin[:]
call[name[radec_indx].remove, parameter[name[e]]]
variable[radec_indx] assign[=] call[name[np].array, parameter[name[radec_indx]]]
variable[num_excluded] assign[=] call[name[len], parameter[name[excluded_list]]]
if compare[name[num_excluded] greater[>] constant[0]] begin[:]
variable[radec_trimmed] assign[=] list[[]]
variable[xypos_trimmed] assign[=] list[[]]
for taget[name[arr]] in starred[name[self].radec] begin[:]
call[name[radec_trimmed].append, parameter[call[name[arr]][name[radec_indx]]]]
for taget[name[arr]] in starred[name[self].xypos] begin[:]
call[name[xypos_trimmed].append, parameter[call[name[arr]][name[radec_indx]]]]
call[name[xypos_trimmed]][<ast.UnaryOp object at 0x7da1b1b60fa0>] assign[=] call[name[np].arange, parameter[call[name[len], parameter[call[name[xypos_trimmed]][constant[0]]]]]]
name[self].radec assign[=] name[radec_trimmed]
name[self].xypos assign[=] name[xypos_trimmed]
call[name[log].info, parameter[binary_operation[constant[Excluded %d sources from catalog.] <ast.Mod object at 0x7da2590d6920> name[num_excluded]]]] | keyword[def] identifier[apply_exclusions] ( identifier[self] , identifier[exclusions] ):
literal[string]
identifier[exclusion_coords] = identifier[tweakutils] . identifier[parse_exclusions] ( identifier[exclusions] )
keyword[if] identifier[exclusion_coords] keyword[is] keyword[None] :
keyword[return]
identifier[excluded_list] =[]
identifier[radec_indx] = identifier[list] ( identifier[range] ( identifier[len] ( identifier[self] . identifier[radec] [ literal[int] ])))
keyword[for] identifier[ra] , identifier[dec] , identifier[indx] keyword[in] identifier[zip] ( identifier[self] . identifier[radec] [ literal[int] ], identifier[self] . identifier[radec] [ literal[int] ], identifier[radec_indx] ):
identifier[src_pos] = identifier[coords] . identifier[SkyCoord] ( identifier[ra] = identifier[ra] , identifier[dec] = identifier[dec] , identifier[unit] =( identifier[u] . identifier[hourangle] , identifier[u] . identifier[deg] ))
keyword[for] identifier[reg] keyword[in] identifier[exclusion_coords] :
keyword[if] identifier[reg] [ literal[string] ]== literal[string] :
identifier[regpos] = identifier[reg] [ literal[string] ]
identifier[regdist] = identifier[reg] [ literal[string] ]
keyword[else] :
identifier[regradec] = identifier[self] . identifier[wcs] . identifier[all_pix2world] ([ identifier[reg] [ literal[string] ]], literal[int] )[ literal[int] ]
identifier[regpos] =( identifier[regradec] [ literal[int] ], identifier[regradec] [ literal[int] ])
identifier[regdist] = identifier[reg] [ literal[string] ]* identifier[self] . identifier[wcs] . identifier[pscale]
identifier[epos] = identifier[coords] . identifier[SkyCoord] ( identifier[ra] = identifier[regpos] [ literal[int] ], identifier[dec] = identifier[regpos] [ literal[int] ], identifier[unit] =( identifier[u] . identifier[hourangle] , identifier[u] . identifier[deg] ))
keyword[if] identifier[float] ( identifier[epos] . identifier[separation] ( identifier[src_pos] ). identifier[to_string] ( identifier[unit] = identifier[u] . identifier[arcsec] , identifier[decimal] = keyword[True] ))<= identifier[regdist] :
identifier[excluded_list] . identifier[append] ( identifier[indx] )
keyword[break]
keyword[for] identifier[e] keyword[in] identifier[excluded_list] : identifier[radec_indx] . identifier[remove] ( identifier[e] )
identifier[radec_indx] = identifier[np] . identifier[array] ( identifier[radec_indx] , identifier[dtype] = identifier[int] )
identifier[num_excluded] = identifier[len] ( identifier[excluded_list] )
keyword[if] identifier[num_excluded] > literal[int] :
identifier[radec_trimmed] =[]
identifier[xypos_trimmed] =[]
keyword[for] identifier[arr] keyword[in] identifier[self] . identifier[radec] :
identifier[radec_trimmed] . identifier[append] ( identifier[arr] [ identifier[radec_indx] ])
keyword[for] identifier[arr] keyword[in] identifier[self] . identifier[xypos] :
identifier[xypos_trimmed] . identifier[append] ( identifier[arr] [ identifier[radec_indx] ])
identifier[xypos_trimmed] [- literal[int] ]= identifier[np] . identifier[arange] ( identifier[len] ( identifier[xypos_trimmed] [ literal[int] ]))
identifier[self] . identifier[radec] = identifier[radec_trimmed]
identifier[self] . identifier[xypos] = identifier[xypos_trimmed]
identifier[log] . identifier[info] ( literal[string] % identifier[num_excluded] ) | def apply_exclusions(self, exclusions):
""" Trim sky catalog to remove any sources within regions specified by
exclusions file.
"""
# parse exclusion file into list of positions and distances
exclusion_coords = tweakutils.parse_exclusions(exclusions)
if exclusion_coords is None:
return # depends on [control=['if'], data=[]]
excluded_list = []
radec_indx = list(range(len(self.radec[0])))
for (ra, dec, indx) in zip(self.radec[0], self.radec[1], radec_indx):
src_pos = coords.SkyCoord(ra=ra, dec=dec, unit=(u.hourangle, u.deg))
# check to see whether this source is within an exclusion region
for reg in exclusion_coords:
if reg['units'] == 'sky':
regpos = reg['pos']
regdist = reg['distance'] # units: arcsec # depends on [control=['if'], data=[]]
else:
regradec = self.wcs.all_pix2world([reg['pos']], 1)[0]
regpos = (regradec[0], regradec[1])
regdist = reg['distance'] * self.wcs.pscale # units: arcsec
epos = coords.SkyCoord(ra=regpos[0], dec=regpos[1], unit=(u.hourangle, u.deg))
if float(epos.separation(src_pos).to_string(unit=u.arcsec, decimal=True)) <= regdist:
excluded_list.append(indx)
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['reg']] # depends on [control=['for'], data=[]]
# create a list of all 'good' sources outside all exclusion regions
for e in excluded_list:
radec_indx.remove(e) # depends on [control=['for'], data=['e']]
radec_indx = np.array(radec_indx, dtype=int)
num_excluded = len(excluded_list)
if num_excluded > 0:
radec_trimmed = []
xypos_trimmed = []
for arr in self.radec:
radec_trimmed.append(arr[radec_indx]) # depends on [control=['for'], data=['arr']]
for arr in self.xypos:
xypos_trimmed.append(arr[radec_indx]) # depends on [control=['for'], data=['arr']]
xypos_trimmed[-1] = np.arange(len(xypos_trimmed[0]))
self.radec = radec_trimmed
self.xypos = xypos_trimmed
log.info('Excluded %d sources from catalog.' % num_excluded) # depends on [control=['if'], data=['num_excluded']] |
def asterisk_to_min_max(field, time_filter, search_engine_endpoint, actual_params=None):
    """
    traduce [* TO *] to something like [MIN-INDEXED-DATE TO MAX-INDEXED-DATE]
    :param field: map the stats to this field.
    :param time_filter: this is the value to be translated. think in "[* TO 2000]"
    :param search_engine_endpoint: solr core
    :param actual_params: (not implemented) to merge with other params.
    :return: translated time filter
    :raises NotImplementedError: if ``actual_params`` is given.
    """
    if actual_params:
        # BUG FIX: the original ``raise NotImplemented("actual_params")``
        # raised a TypeError (NotImplemented is a constant, not callable);
        # the intended exception class is NotImplementedError.
        raise NotImplementedError("actual_params")
    start, end = parse_solr_time_range_as_pair(time_filter)
    if start == '*' or end == '*':
        # Ask Solr for min/max statistics of the field to resolve wildcards.
        params_stats = {
            "q": "*:*",
            "rows": 0,
            "stats.field": field,
            "stats": "true",
            "wt": "json"
        }
        res_stats = requests.get(search_engine_endpoint, params=params_stats)
        if res_stats.ok:
            stats_date_field = res_stats.json()["stats"]["stats_fields"][field]
            date_min = stats_date_field["min"]
            date_max = stats_date_field["max"]
            # Keep any explicit bound the caller supplied.
            if start != '*':
                date_min = start
            if end != '*':
                date_max = end
            time_filter = "[{0} TO {1}]".format(date_min, date_max)
        # NOTE(review): when the stats request fails, the original filter is
        # returned unchanged (wildcards intact) — confirm callers expect this.
    return time_filter
constant[
traduce [* TO *] to something like [MIN-INDEXED-DATE TO MAX-INDEXED-DATE]
:param field: map the stats to this field.
:param time_filter: this is the value to be translated. think in "[* TO 2000]"
:param search_engine_endpoint: solr core
:param actual_params: (not implemented) to merge with other params.
:return: translated time filter
]
if name[actual_params] begin[:]
<ast.Raise object at 0x7da18dc9a1d0>
<ast.Tuple object at 0x7da18dc9a410> assign[=] call[name[parse_solr_time_range_as_pair], parameter[name[time_filter]]]
if <ast.BoolOp object at 0x7da18dc9a8c0> begin[:]
variable[params_stats] assign[=] dictionary[[<ast.Constant object at 0x7da18dc987f0>, <ast.Constant object at 0x7da18dc9a320>, <ast.Constant object at 0x7da18dc990c0>, <ast.Constant object at 0x7da18dc99e40>, <ast.Constant object at 0x7da18dc98f70>], [<ast.Constant object at 0x7da18dc9b550>, <ast.Constant object at 0x7da18dc98d90>, <ast.Name object at 0x7da18dc9ad70>, <ast.Constant object at 0x7da18dc9b2b0>, <ast.Constant object at 0x7da18dc9bbb0>]]
variable[res_stats] assign[=] call[name[requests].get, parameter[name[search_engine_endpoint]]]
if name[res_stats].ok begin[:]
variable[stats_date_field] assign[=] call[call[call[call[name[res_stats].json, parameter[]]][constant[stats]]][constant[stats_fields]]][name[field]]
variable[date_min] assign[=] call[name[stats_date_field]][constant[min]]
variable[date_max] assign[=] call[name[stats_date_field]][constant[max]]
if compare[name[start] not_equal[!=] constant[*]] begin[:]
variable[date_min] assign[=] name[start]
if compare[name[end] not_equal[!=] constant[*]] begin[:]
variable[date_max] assign[=] name[end]
variable[time_filter] assign[=] call[constant[[{0} TO {1}]].format, parameter[name[date_min], name[date_max]]]
return[name[time_filter]] | keyword[def] identifier[asterisk_to_min_max] ( identifier[field] , identifier[time_filter] , identifier[search_engine_endpoint] , identifier[actual_params] = keyword[None] ):
literal[string]
keyword[if] identifier[actual_params] :
keyword[raise] identifier[NotImplemented] ( literal[string] )
identifier[start] , identifier[end] = identifier[parse_solr_time_range_as_pair] ( identifier[time_filter] )
keyword[if] identifier[start] == literal[string] keyword[or] identifier[end] == literal[string] :
identifier[params_stats] ={
literal[string] : literal[string] ,
literal[string] : literal[int] ,
literal[string] : identifier[field] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}
identifier[res_stats] = identifier[requests] . identifier[get] ( identifier[search_engine_endpoint] , identifier[params] = identifier[params_stats] )
keyword[if] identifier[res_stats] . identifier[ok] :
identifier[stats_date_field] = identifier[res_stats] . identifier[json] ()[ literal[string] ][ literal[string] ][ identifier[field] ]
identifier[date_min] = identifier[stats_date_field] [ literal[string] ]
identifier[date_max] = identifier[stats_date_field] [ literal[string] ]
keyword[if] identifier[start] != literal[string] :
identifier[date_min] = identifier[start]
keyword[if] identifier[end] != literal[string] :
identifier[date_max] = identifier[end]
identifier[time_filter] = literal[string] . identifier[format] ( identifier[date_min] , identifier[date_max] )
keyword[return] identifier[time_filter] | def asterisk_to_min_max(field, time_filter, search_engine_endpoint, actual_params=None):
"""
traduce [* TO *] to something like [MIN-INDEXED-DATE TO MAX-INDEXED-DATE]
:param field: map the stats to this field.
:param time_filter: this is the value to be translated. think in "[* TO 2000]"
:param search_engine_endpoint: solr core
:param actual_params: (not implemented) to merge with other params.
:return: translated time filter
"""
if actual_params:
raise NotImplemented('actual_params') # depends on [control=['if'], data=[]]
(start, end) = parse_solr_time_range_as_pair(time_filter)
if start == '*' or end == '*':
params_stats = {'q': '*:*', 'rows': 0, 'stats.field': field, 'stats': 'true', 'wt': 'json'}
res_stats = requests.get(search_engine_endpoint, params=params_stats)
if res_stats.ok:
stats_date_field = res_stats.json()['stats']['stats_fields'][field]
date_min = stats_date_field['min']
date_max = stats_date_field['max']
if start != '*':
date_min = start # depends on [control=['if'], data=['start']]
if end != '*':
date_max = end # depends on [control=['if'], data=['end']]
time_filter = '[{0} TO {1}]'.format(date_min, date_max) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return time_filter |
def delete_blobs(self, blobs, on_error=None, client=None):
    """Deletes a list of blobs from the current bucket.
    Uses :meth:`delete_blob` to delete each individual blob.
    If :attr:`user_project` is set, bills the API request to that project.
    :type blobs: list
    :param blobs: A list of :class:`~google.cloud.storage.blob.Blob`-s or
                  blob names to delete.
    :type on_error: callable
    :param on_error: (Optional) Takes single argument: ``blob``. Called
                     called once for each blob raising
                     :class:`~google.cloud.exceptions.NotFound`;
                     otherwise, the exception is propagated.
    :type client: :class:`~google.cloud.storage.client.Client`
    :param client: (Optional) The client to use. If not passed, falls back
                   to the ``client`` stored on the current bucket.
    :raises: :class:`~google.cloud.exceptions.NotFound` (if
             `on_error` is not passed).
    """
    for item in blobs:
        try:
            # Accept either Blob objects or plain blob-name strings.
            name = item if isinstance(item, six.string_types) else item.name
            self.delete_blob(name, client=client)
        except NotFound:
            # Without an on_error handler, missing blobs abort the batch.
            if on_error is None:
                raise
            on_error(item)
constant[Deletes a list of blobs from the current bucket.
Uses :meth:`delete_blob` to delete each individual blob.
If :attr:`user_project` is set, bills the API request to that project.
:type blobs: list
:param blobs: A list of :class:`~google.cloud.storage.blob.Blob`-s or
blob names to delete.
:type on_error: callable
:param on_error: (Optional) Takes single argument: ``blob``. Called
called once for each blob raising
:class:`~google.cloud.exceptions.NotFound`;
otherwise, the exception is propagated.
:type client: :class:`~google.cloud.storage.client.Client`
:param client: (Optional) The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:raises: :class:`~google.cloud.exceptions.NotFound` (if
`on_error` is not passed).
]
for taget[name[blob]] in starred[name[blobs]] begin[:]
<ast.Try object at 0x7da18f00e830> | keyword[def] identifier[delete_blobs] ( identifier[self] , identifier[blobs] , identifier[on_error] = keyword[None] , identifier[client] = keyword[None] ):
literal[string]
keyword[for] identifier[blob] keyword[in] identifier[blobs] :
keyword[try] :
identifier[blob_name] = identifier[blob]
keyword[if] keyword[not] identifier[isinstance] ( identifier[blob_name] , identifier[six] . identifier[string_types] ):
identifier[blob_name] = identifier[blob] . identifier[name]
identifier[self] . identifier[delete_blob] ( identifier[blob_name] , identifier[client] = identifier[client] )
keyword[except] identifier[NotFound] :
keyword[if] identifier[on_error] keyword[is] keyword[not] keyword[None] :
identifier[on_error] ( identifier[blob] )
keyword[else] :
keyword[raise] | def delete_blobs(self, blobs, on_error=None, client=None):
"""Deletes a list of blobs from the current bucket.
Uses :meth:`delete_blob` to delete each individual blob.
If :attr:`user_project` is set, bills the API request to that project.
:type blobs: list
:param blobs: A list of :class:`~google.cloud.storage.blob.Blob`-s or
blob names to delete.
:type on_error: callable
:param on_error: (Optional) Takes single argument: ``blob``. Called
called once for each blob raising
:class:`~google.cloud.exceptions.NotFound`;
otherwise, the exception is propagated.
:type client: :class:`~google.cloud.storage.client.Client`
:param client: (Optional) The client to use. If not passed, falls back
to the ``client`` stored on the current bucket.
:raises: :class:`~google.cloud.exceptions.NotFound` (if
`on_error` is not passed).
"""
for blob in blobs:
try:
blob_name = blob
if not isinstance(blob_name, six.string_types):
blob_name = blob.name # depends on [control=['if'], data=[]]
self.delete_blob(blob_name, client=client) # depends on [control=['try'], data=[]]
except NotFound:
if on_error is not None:
on_error(blob) # depends on [control=['if'], data=['on_error']]
else:
raise # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['blob']] |
def Pdew_mixture(T=None, zs=None, Psats=None, CASRNs=None,
                 AvailableMethods=False, Method=None):  # pragma: no cover
    '''Dew-point pressure of an ideal mixture from mole fractions and
    pure-component vapor pressures.

    >>> Pdew_mixture(zs=[0.5, 0.5], Psats=[1400, 7000])
    2333.3333333333335
    '''
    def list_methods():
        # 'IDEAL_VLE' is only usable when Psats and zs are present and
        # length-consistent; 'NONE' is always an option of last resort.
        available = ['IDEAL_VLE'] if none_and_length_check((Psats, zs)) else []
        available.append('NONE')
        return available
    if AvailableMethods:
        return list_methods()
    # Default to the best available method when none was requested.
    Method = Method or list_methods()[0]
    if Method == 'IDEAL_VLE':
        return dew_at_T(zs, Psats)
    if Method == 'NONE':
        return None
    raise Exception('Failure in in function')
constant[
>>> Pdew_mixture(zs=[0.5, 0.5], Psats=[1400, 7000])
2333.3333333333335
]
def function[list_methods, parameter[]]:
variable[methods] assign[=] list[[]]
if call[name[none_and_length_check], parameter[tuple[[<ast.Name object at 0x7da18fe925f0>, <ast.Name object at 0x7da18fe90ee0>]]]] begin[:]
call[name[methods].append, parameter[constant[IDEAL_VLE]]]
call[name[methods].append, parameter[constant[NONE]]]
return[name[methods]]
if name[AvailableMethods] begin[:]
return[call[name[list_methods], parameter[]]]
if <ast.UnaryOp object at 0x7da18fe91090> begin[:]
variable[Method] assign[=] call[call[name[list_methods], parameter[]]][constant[0]]
if compare[name[Method] equal[==] constant[IDEAL_VLE]] begin[:]
variable[Pdew] assign[=] call[name[dew_at_T], parameter[name[zs], name[Psats]]]
return[name[Pdew]] | keyword[def] identifier[Pdew_mixture] ( identifier[T] = keyword[None] , identifier[zs] = keyword[None] , identifier[Psats] = keyword[None] , identifier[CASRNs] = keyword[None] ,
identifier[AvailableMethods] = keyword[False] , identifier[Method] = keyword[None] ):
literal[string]
keyword[def] identifier[list_methods] ():
identifier[methods] =[]
keyword[if] identifier[none_and_length_check] (( identifier[Psats] , identifier[zs] )):
identifier[methods] . identifier[append] ( literal[string] )
identifier[methods] . identifier[append] ( literal[string] )
keyword[return] identifier[methods]
keyword[if] identifier[AvailableMethods] :
keyword[return] identifier[list_methods] ()
keyword[if] keyword[not] identifier[Method] :
identifier[Method] = identifier[list_methods] ()[ literal[int] ]
keyword[if] identifier[Method] == literal[string] :
identifier[Pdew] = identifier[dew_at_T] ( identifier[zs] , identifier[Psats] )
keyword[elif] identifier[Method] == literal[string] :
identifier[Pdew] = keyword[None]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[return] identifier[Pdew] | def Pdew_mixture(T=None, zs=None, Psats=None, CASRNs=None, AvailableMethods=False, Method=None): # pragma: no cover
'\n >>> Pdew_mixture(zs=[0.5, 0.5], Psats=[1400, 7000])\n 2333.3333333333335\n '
def list_methods():
methods = []
if none_and_length_check((Psats, zs)):
methods.append('IDEAL_VLE') # depends on [control=['if'], data=[]]
methods.append('NONE')
return methods
if AvailableMethods:
return list_methods() # depends on [control=['if'], data=[]]
if not Method:
Method = list_methods()[0] # depends on [control=['if'], data=[]]
# This is the calculate, given the method section
if Method == 'IDEAL_VLE':
Pdew = dew_at_T(zs, Psats) # depends on [control=['if'], data=[]]
elif Method == 'NONE':
Pdew = None # depends on [control=['if'], data=[]]
else:
raise Exception('Failure in in function')
return Pdew |
def get_device_configs():
    '''
    Return a `pandas.DataFrame`, where each row corresponds to an available
    device configuration, including the `device` (i.e., the name of the
    device).
    '''
    frames = []
    # Query each video source twice; any duplicate rows produced by the
    # repeated scan are dropped below.
    for _ in range(2):
        for device in get_video_sources():
            configs = get_configs(device)
            configs.insert(0, 'device', str(device))
            frames.append(configs)
    device_configs = pd.concat(frames).drop_duplicates()

    def _label(device_name):
        # e.g. '.../some-foo_bar-...' -> 'foo'
        return device_name.split('/')[-1].split('-')[1].split('_')[0]

    device_configs['label'] = device_configs.device.map(_label)
    device_configs['bitrate'] = device_configs.height.map(get_bitrate)
    return device_configs
constant[
Return a `pandas.DataFrame`, where each row corresponds to an available
device configuration, including the `device` (i.e., the name of the
device).
]
variable[frames] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[2]]]] begin[:]
for taget[name[device]] in starred[call[name[get_video_sources], parameter[]]] begin[:]
variable[df_device_i] assign[=] call[name[get_configs], parameter[name[device]]]
call[name[df_device_i].insert, parameter[constant[0], constant[device], call[name[str], parameter[name[device]]]]]
call[name[frames].append, parameter[name[df_device_i]]]
variable[device_configs] assign[=] call[call[name[pd].concat, parameter[name[frames]]].drop_duplicates, parameter[]]
call[name[device_configs]][constant[label]] assign[=] call[name[device_configs].device.map, parameter[<ast.Lambda object at 0x7da2041d8790>]]
call[name[device_configs]][constant[bitrate]] assign[=] call[name[device_configs].height.map, parameter[name[get_bitrate]]]
return[name[device_configs]] | keyword[def] identifier[get_device_configs] ():
literal[string]
identifier[frames] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ):
keyword[for] identifier[device] keyword[in] identifier[get_video_sources] ():
identifier[df_device_i] = identifier[get_configs] ( identifier[device] )
identifier[df_device_i] . identifier[insert] ( literal[int] , literal[string] , identifier[str] ( identifier[device] ))
identifier[frames] . identifier[append] ( identifier[df_device_i] )
identifier[device_configs] = identifier[pd] . identifier[concat] ( identifier[frames] ). identifier[drop_duplicates] ()
identifier[device_configs] [ literal[string] ]= identifier[device_configs] . identifier[device] . identifier[map] (
keyword[lambda] identifier[x] : identifier[x] . identifier[split] ( literal[string] )[- literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ]. identifier[split] ( literal[string] )[ literal[int] ])
identifier[device_configs] [ literal[string] ]= identifier[device_configs] . identifier[height] . identifier[map] ( identifier[get_bitrate] )
keyword[return] identifier[device_configs] | def get_device_configs():
"""
Return a `pandas.DataFrame`, where each row corresponds to an available
device configuration, including the `device` (i.e., the name of the
device).
"""
frames = []
for i in range(2):
for device in get_video_sources():
df_device_i = get_configs(device)
df_device_i.insert(0, 'device', str(device))
frames.append(df_device_i) # depends on [control=['for'], data=['device']] # depends on [control=['for'], data=[]]
device_configs = pd.concat(frames).drop_duplicates()
device_configs['label'] = device_configs.device.map(lambda x: x.split('/')[-1].split('-')[1].split('_')[0])
device_configs['bitrate'] = device_configs.height.map(get_bitrate)
return device_configs |
def _as_document(self, partition):
    """Convert a partition to a document for the FTS index.

    Extends the base class conversion by also carrying the partition's
    time coverage through to the indexed document.

    Args:
        partition (orm.Partition): partition to convert.

    Returns:
        dict whose structure matches ``BasePartitionIndex._schema``,
        with an extra ``'time_coverage'`` key.
    """
    # NOTE(review): `super(self.__class__, self)` recurses infinitely if a
    # subclass inherits this method; naming the class explicitly (or using
    # zero-argument `super()` on Python 3) would be safer — confirm intent.
    doc = super(self.__class__, self)._as_document(partition)
    # pass time_coverage to the _index_document.
    doc['time_coverage'] = partition.time_coverage
    return doc
constant[ Converts partition to document indexed by to FTS index.
Args:
partition (orm.Partition): partition to convert.
Returns:
dict with structure matches to BasePartitionIndex._schema.
]
variable[doc] assign[=] call[call[name[super], parameter[name[self].__class__, name[self]]]._as_document, parameter[name[partition]]]
call[name[doc]][constant[time_coverage]] assign[=] name[partition].time_coverage
return[name[doc]] | keyword[def] identifier[_as_document] ( identifier[self] , identifier[partition] ):
literal[string]
identifier[doc] = identifier[super] ( identifier[self] . identifier[__class__] , identifier[self] ). identifier[_as_document] ( identifier[partition] )
identifier[doc] [ literal[string] ]= identifier[partition] . identifier[time_coverage]
keyword[return] identifier[doc] | def _as_document(self, partition):
""" Converts partition to document indexed by to FTS index.
Args:
partition (orm.Partition): partition to convert.
Returns:
dict with structure matches to BasePartitionIndex._schema.
"""
doc = super(self.__class__, self)._as_document(partition)
# pass time_coverage to the _index_document.
doc['time_coverage'] = partition.time_coverage
return doc |
def to_placeholder(self, name=None, db_type=None):
    """Build a placeholder string from this instance's format strings.

    :name: if None an unamed placeholder is returned, otherwise a named
        placeholder is returned.
    :db_type: if not None the placeholder is typecast.
    """
    placeholder = (self.unnamed_placeholder if name is None
                   else self.named_placeholder.format(name))
    if not db_type:
        return placeholder
    return self.typecast(placeholder, db_type)
constant[Returns a placeholder for the specified name, by applying the instance's format strings.
:name: if None an unamed placeholder is returned, otherwise a named placeholder is returned.
:db_type: if not None the placeholder is typecast.
]
if compare[name[name] is constant[None]] begin[:]
variable[placeholder] assign[=] name[self].unnamed_placeholder
if name[db_type] begin[:]
return[call[name[self].typecast, parameter[name[placeholder], name[db_type]]]] | keyword[def] identifier[to_placeholder] ( identifier[self] , identifier[name] = keyword[None] , identifier[db_type] = keyword[None] ):
literal[string]
keyword[if] identifier[name] keyword[is] keyword[None] :
identifier[placeholder] = identifier[self] . identifier[unnamed_placeholder]
keyword[else] :
identifier[placeholder] = identifier[self] . identifier[named_placeholder] . identifier[format] ( identifier[name] )
keyword[if] identifier[db_type] :
keyword[return] identifier[self] . identifier[typecast] ( identifier[placeholder] , identifier[db_type] )
keyword[else] :
keyword[return] identifier[placeholder] | def to_placeholder(self, name=None, db_type=None):
"""Returns a placeholder for the specified name, by applying the instance's format strings.
:name: if None an unamed placeholder is returned, otherwise a named placeholder is returned.
:db_type: if not None the placeholder is typecast.
"""
if name is None:
placeholder = self.unnamed_placeholder # depends on [control=['if'], data=[]]
else:
placeholder = self.named_placeholder.format(name)
if db_type:
return self.typecast(placeholder, db_type) # depends on [control=['if'], data=[]]
else:
return placeholder |
def _discard_config(self):
"""Set candidate_cfg to current running-config. Erase the merge_cfg file."""
discard_candidate = "copy running-config {}".format(
self._gen_full_path(self.candidate_cfg)
)
discard_merge = "copy null: {}".format(self._gen_full_path(self.merge_cfg))
self.device.send_command_expect(discard_candidate)
self.device.send_command_expect(discard_merge) | def function[_discard_config, parameter[self]]:
constant[Set candidate_cfg to current running-config. Erase the merge_cfg file.]
variable[discard_candidate] assign[=] call[constant[copy running-config {}].format, parameter[call[name[self]._gen_full_path, parameter[name[self].candidate_cfg]]]]
variable[discard_merge] assign[=] call[constant[copy null: {}].format, parameter[call[name[self]._gen_full_path, parameter[name[self].merge_cfg]]]]
call[name[self].device.send_command_expect, parameter[name[discard_candidate]]]
call[name[self].device.send_command_expect, parameter[name[discard_merge]]] | keyword[def] identifier[_discard_config] ( identifier[self] ):
literal[string]
identifier[discard_candidate] = literal[string] . identifier[format] (
identifier[self] . identifier[_gen_full_path] ( identifier[self] . identifier[candidate_cfg] )
)
identifier[discard_merge] = literal[string] . identifier[format] ( identifier[self] . identifier[_gen_full_path] ( identifier[self] . identifier[merge_cfg] ))
identifier[self] . identifier[device] . identifier[send_command_expect] ( identifier[discard_candidate] )
identifier[self] . identifier[device] . identifier[send_command_expect] ( identifier[discard_merge] ) | def _discard_config(self):
"""Set candidate_cfg to current running-config. Erase the merge_cfg file."""
discard_candidate = 'copy running-config {}'.format(self._gen_full_path(self.candidate_cfg))
discard_merge = 'copy null: {}'.format(self._gen_full_path(self.merge_cfg))
self.device.send_command_expect(discard_candidate)
self.device.send_command_expect(discard_merge) |
def upload(self, thread_uuid, file_path, description=None):
    """Upload a file to LinShare through its REST API.

    The uuid of the uploaded document is returned.
    """
    entries_url = "threads/%s/entries" % thread_uuid
    return self.core.upload(file_path, entries_url, description)
constant[ Upload a file to LinShare using its rest api.
The uploaded document uuid will be returned]
variable[url] assign[=] binary_operation[constant[threads/%s/entries] <ast.Mod object at 0x7da2590d6920> name[thread_uuid]]
return[call[name[self].core.upload, parameter[name[file_path], name[url], name[description]]]] | keyword[def] identifier[upload] ( identifier[self] , identifier[thread_uuid] , identifier[file_path] , identifier[description] = keyword[None] ):
literal[string]
identifier[url] = literal[string] % identifier[thread_uuid]
keyword[return] identifier[self] . identifier[core] . identifier[upload] ( identifier[file_path] , identifier[url] , identifier[description] ) | def upload(self, thread_uuid, file_path, description=None):
""" Upload a file to LinShare using its rest api.
The uploaded document uuid will be returned"""
url = 'threads/%s/entries' % thread_uuid
return self.core.upload(file_path, url, description) |
def peered(name):
    '''
    Check if node is peered.
    name
        The remote host with which to peer.
    .. code-block:: yaml
    peer-cluster:
        glusterfs.peered:
        - name: two
    peer-clusters:
        glusterfs.peered:
        - names:
            - one
            - two
            - three
            - four
    '''
    # Standard Salt state return structure; 'result' starts False and is
    # only flipped to True (or None for test mode) on a confirmed outcome.
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    # Peer names are restricted to hostname-safe characters.
    try:
        suc.check_name(name, 'a-zA-Z0-9._-')
    except SaltCloudException:
        ret['comment'] = 'Invalid characters in peer name.'
        return ret
    # Check if the name resolves to one of this minion IP addresses
    name_ips = salt.utils.network.host_to_ips(name)
    if name_ips is not None:
        # if it is None, it means resolution fails, let's not hide
        # it from the user.
        this_ips = set(salt.utils.network.ip_addrs())
        this_ips.update(salt.utils.network.ip_addrs6())
        # Any overlap between the target's IPs and ours means 'name' is
        # this host itself; gluster does not peer with localhost.
        if this_ips.intersection(name_ips):
            ret['result'] = True
            ret['comment'] = 'Peering with localhost is not needed'
            return ret
    # Short-circuit if the host is already in the peer list.
    peers = __salt__['glusterfs.peer_status']()
    if peers and any(name in v['hostnames'] for v in peers.values()):
        ret['result'] = True
        ret['comment'] = 'Host {0} already peered'.format(name)
        return ret
    # Test (dry-run) mode: report the pending change without applying it.
    if __opts__['test']:
        ret['comment'] = 'Peer {0} will be added.'.format(name)
        ret['result'] = None
        return ret
    # Attempt the peer operation; a falsy return indicates failure.
    if not __salt__['glusterfs.peer'](name):
        ret['comment'] = 'Failed to peer with {0}, please check logs for errors'.format(name)
        return ret
    # Double check that the action succeeded
    newpeers = __salt__['glusterfs.peer_status']()
    if newpeers and any(name in v['hostnames'] for v in newpeers.values()):
        ret['result'] = True
        ret['comment'] = 'Host {0} successfully peered'.format(name)
        ret['changes'] = {'new': newpeers, 'old': peers}
    else:
        # The peer command reported success, but the host never showed up
        # in the peer list; leave 'result' False so the state fails loudly.
        ret['comment'] = 'Host {0} was successfully peered but did not appear in the list of peers'.format(name)
    return ret
constant[
Check if node is peered.
name
The remote host with which to peer.
.. code-block:: yaml
peer-cluster:
glusterfs.peered:
- name: two
peer-clusters:
glusterfs.peered:
- names:
- one
- two
- three
- four
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da2041d8ac0>, <ast.Constant object at 0x7da2041db880>, <ast.Constant object at 0x7da2041daec0>, <ast.Constant object at 0x7da2041d8fa0>], [<ast.Name object at 0x7da2041dbc10>, <ast.Dict object at 0x7da2041d9c30>, <ast.Constant object at 0x7da2041da9e0>, <ast.Constant object at 0x7da2041d9900>]]
<ast.Try object at 0x7da2041d8e50>
variable[name_ips] assign[=] call[name[salt].utils.network.host_to_ips, parameter[name[name]]]
if compare[name[name_ips] is_not constant[None]] begin[:]
variable[this_ips] assign[=] call[name[set], parameter[call[name[salt].utils.network.ip_addrs, parameter[]]]]
call[name[this_ips].update, parameter[call[name[salt].utils.network.ip_addrs6, parameter[]]]]
if call[name[this_ips].intersection, parameter[name[name_ips]]] begin[:]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[comment]] assign[=] constant[Peering with localhost is not needed]
return[name[ret]]
variable[peers] assign[=] call[call[name[__salt__]][constant[glusterfs.peer_status]], parameter[]]
if <ast.BoolOp object at 0x7da2041dabf0> begin[:]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[comment]] assign[=] call[constant[Host {0} already peered].format, parameter[name[name]]]
return[name[ret]]
if call[name[__opts__]][constant[test]] begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[Peer {0} will be added.].format, parameter[name[name]]]
call[name[ret]][constant[result]] assign[=] constant[None]
return[name[ret]]
if <ast.UnaryOp object at 0x7da2041da8f0> begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[Failed to peer with {0}, please check logs for errors].format, parameter[name[name]]]
return[name[ret]]
variable[newpeers] assign[=] call[call[name[__salt__]][constant[glusterfs.peer_status]], parameter[]]
if <ast.BoolOp object at 0x7da2041da860> begin[:]
call[name[ret]][constant[result]] assign[=] constant[True]
call[name[ret]][constant[comment]] assign[=] call[constant[Host {0} successfully peered].format, parameter[name[name]]]
call[name[ret]][constant[changes]] assign[=] dictionary[[<ast.Constant object at 0x7da2041d8d90>, <ast.Constant object at 0x7da2041dae60>], [<ast.Name object at 0x7da2041db400>, <ast.Name object at 0x7da2041d8e80>]]
return[name[ret]] | keyword[def] identifier[peered] ( identifier[name] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : literal[string] ,
literal[string] : keyword[False] }
keyword[try] :
identifier[suc] . identifier[check_name] ( identifier[name] , literal[string] )
keyword[except] identifier[SaltCloudException] :
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
identifier[name_ips] = identifier[salt] . identifier[utils] . identifier[network] . identifier[host_to_ips] ( identifier[name] )
keyword[if] identifier[name_ips] keyword[is] keyword[not] keyword[None] :
identifier[this_ips] = identifier[set] ( identifier[salt] . identifier[utils] . identifier[network] . identifier[ip_addrs] ())
identifier[this_ips] . identifier[update] ( identifier[salt] . identifier[utils] . identifier[network] . identifier[ip_addrs6] ())
keyword[if] identifier[this_ips] . identifier[intersection] ( identifier[name_ips] ):
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
identifier[peers] = identifier[__salt__] [ literal[string] ]()
keyword[if] identifier[peers] keyword[and] identifier[any] ( identifier[name] keyword[in] identifier[v] [ literal[string] ] keyword[for] identifier[v] keyword[in] identifier[peers] . identifier[values] ()):
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[return] identifier[ret]
keyword[if] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ]= keyword[None]
keyword[return] identifier[ret]
keyword[if] keyword[not] identifier[__salt__] [ literal[string] ]( identifier[name] ):
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[return] identifier[ret]
identifier[newpeers] = identifier[__salt__] [ literal[string] ]()
keyword[if] identifier[newpeers] keyword[and] identifier[any] ( identifier[name] keyword[in] identifier[v] [ literal[string] ] keyword[for] identifier[v] keyword[in] identifier[newpeers] . identifier[values] ()):
identifier[ret] [ literal[string] ]= keyword[True]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
identifier[ret] [ literal[string] ]={ literal[string] : identifier[newpeers] , literal[string] : identifier[peers] }
keyword[else] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[name] )
keyword[return] identifier[ret] | def peered(name):
"""
Check if node is peered.
name
The remote host with which to peer.
.. code-block:: yaml
peer-cluster:
glusterfs.peered:
- name: two
peer-clusters:
glusterfs.peered:
- names:
- one
- two
- three
- four
"""
ret = {'name': name, 'changes': {}, 'comment': '', 'result': False}
try:
suc.check_name(name, 'a-zA-Z0-9._-') # depends on [control=['try'], data=[]]
except SaltCloudException:
ret['comment'] = 'Invalid characters in peer name.'
return ret # depends on [control=['except'], data=[]]
# Check if the name resolves to one of this minion IP addresses
name_ips = salt.utils.network.host_to_ips(name)
if name_ips is not None:
# if it is None, it means resolution fails, let's not hide
# it from the user.
this_ips = set(salt.utils.network.ip_addrs())
this_ips.update(salt.utils.network.ip_addrs6())
if this_ips.intersection(name_ips):
ret['result'] = True
ret['comment'] = 'Peering with localhost is not needed'
return ret # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['name_ips']]
peers = __salt__['glusterfs.peer_status']()
if peers and any((name in v['hostnames'] for v in peers.values())):
ret['result'] = True
ret['comment'] = 'Host {0} already peered'.format(name)
return ret # depends on [control=['if'], data=[]]
if __opts__['test']:
ret['comment'] = 'Peer {0} will be added.'.format(name)
ret['result'] = None
return ret # depends on [control=['if'], data=[]]
if not __salt__['glusterfs.peer'](name):
ret['comment'] = 'Failed to peer with {0}, please check logs for errors'.format(name)
return ret # depends on [control=['if'], data=[]]
# Double check that the action succeeded
newpeers = __salt__['glusterfs.peer_status']()
if newpeers and any((name in v['hostnames'] for v in newpeers.values())):
ret['result'] = True
ret['comment'] = 'Host {0} successfully peered'.format(name)
ret['changes'] = {'new': newpeers, 'old': peers} # depends on [control=['if'], data=[]]
else:
ret['comment'] = 'Host {0} was successfully peered but did not appear in the list of peers'.format(name)
return ret |
def write_hypergraph(hgr, colored=False):
    """
    Return a string specifying the given hypergraph in DOT Language.
    @type hgr: hypergraph
    @param hgr: Hypergraph.
    @type colored: boolean
    @param colored: Whether hyperedges should be colored.
    @rtype: string
    @return: String specifying the hypergraph in DOT Language.
    """
    graph = pydot.Dot()
    graph.set_name(hgr.name if 'name' in dir(hgr) else 'hypergraph')

    # Emit every regular node first.
    for node in hgr.nodes():
        graph.add_node(pydot.Node(str(node), hyper_node_type='hypernode'))

    # Each hyperedge becomes its own DOT node, linked to its member nodes.
    edge_colors = {}
    next_color = 0
    for hyperedge in hgr.hyperedges():
        if colored:
            # Cycle through the module-level color palette.
            edge_colors[hyperedge] = colors[next_color % len(colors)]
            next_color += 1
            edge_node = pydot.Node(str(hyperedge),
                                   hyper_node_type='hyperedge',
                                   color=str(edge_colors[hyperedge]),
                                   shape='point')
        else:
            edge_node = pydot.Node(str(hyperedge), hyper_node_type='hyperedge')
        graph.add_node(edge_node)
        for link in hgr.links(hyperedge):
            graph.add_edge(pydot.Edge(str(hyperedge), str(link)))
    return graph.to_string()
constant[
Return a string specifying the given hypergraph in DOT Language.
@type hgr: hypergraph
@param hgr: Hypergraph.
@type colored: boolean
@param colored: Whether hyperedges should be colored.
@rtype: string
@return: String specifying the hypergraph in DOT Language.
]
variable[dotG] assign[=] call[name[pydot].Dot, parameter[]]
if <ast.UnaryOp object at 0x7da1b17e17b0> begin[:]
call[name[dotG].set_name, parameter[constant[hypergraph]]]
variable[colortable] assign[=] dictionary[[], []]
variable[colorcount] assign[=] constant[0]
for taget[name[node]] in starred[call[name[hgr].nodes, parameter[]]] begin[:]
variable[newNode] assign[=] call[name[pydot].Node, parameter[call[name[str], parameter[name[node]]]]]
call[name[dotG].add_node, parameter[name[newNode]]]
for taget[name[hyperedge]] in starred[call[name[hgr].hyperedges, parameter[]]] begin[:]
if name[colored] begin[:]
call[name[colortable]][name[hyperedge]] assign[=] call[name[colors]][binary_operation[name[colorcount] <ast.Mod object at 0x7da2590d6920> call[name[len], parameter[name[colors]]]]]
<ast.AugAssign object at 0x7da1b17e3160>
variable[newNode] assign[=] call[name[pydot].Node, parameter[call[name[str], parameter[name[hyperedge]]]]]
call[name[dotG].add_node, parameter[name[newNode]]]
for taget[name[link]] in starred[call[name[hgr].links, parameter[name[hyperedge]]]] begin[:]
variable[newEdge] assign[=] call[name[pydot].Edge, parameter[call[name[str], parameter[name[hyperedge]]], call[name[str], parameter[name[link]]]]]
call[name[dotG].add_edge, parameter[name[newEdge]]]
return[call[name[dotG].to_string, parameter[]]] | keyword[def] identifier[write_hypergraph] ( identifier[hgr] , identifier[colored] = keyword[False] ):
literal[string]
identifier[dotG] = identifier[pydot] . identifier[Dot] ()
keyword[if] keyword[not] literal[string] keyword[in] identifier[dir] ( identifier[hgr] ):
identifier[dotG] . identifier[set_name] ( literal[string] )
keyword[else] :
identifier[dotG] . identifier[set_name] ( identifier[hgr] . identifier[name] )
identifier[colortable] ={}
identifier[colorcount] = literal[int]
keyword[for] identifier[node] keyword[in] identifier[hgr] . identifier[nodes] ():
identifier[newNode] = identifier[pydot] . identifier[Node] ( identifier[str] ( identifier[node] ), identifier[hyper_node_type] = literal[string] )
identifier[dotG] . identifier[add_node] ( identifier[newNode] )
keyword[for] identifier[hyperedge] keyword[in] identifier[hgr] . identifier[hyperedges] ():
keyword[if] ( identifier[colored] ):
identifier[colortable] [ identifier[hyperedge] ]= identifier[colors] [ identifier[colorcount] % identifier[len] ( identifier[colors] )]
identifier[colorcount] += literal[int]
identifier[newNode] = identifier[pydot] . identifier[Node] ( identifier[str] ( identifier[hyperedge] ), identifier[hyper_node_type] = literal[string] , identifier[color] = identifier[str] ( identifier[colortable] [ identifier[hyperedge] ]), identifier[shape] = literal[string] )
keyword[else] :
identifier[newNode] = identifier[pydot] . identifier[Node] ( identifier[str] ( identifier[hyperedge] ), identifier[hyper_node_type] = literal[string] )
identifier[dotG] . identifier[add_node] ( identifier[newNode] )
keyword[for] identifier[link] keyword[in] identifier[hgr] . identifier[links] ( identifier[hyperedge] ):
identifier[newEdge] = identifier[pydot] . identifier[Edge] ( identifier[str] ( identifier[hyperedge] ), identifier[str] ( identifier[link] ))
identifier[dotG] . identifier[add_edge] ( identifier[newEdge] )
keyword[return] identifier[dotG] . identifier[to_string] () | def write_hypergraph(hgr, colored=False):
"""
Return a string specifying the given hypergraph in DOT Language.
@type hgr: hypergraph
@param hgr: Hypergraph.
@type colored: boolean
@param colored: Whether hyperedges should be colored.
@rtype: string
@return: String specifying the hypergraph in DOT Language.
"""
dotG = pydot.Dot()
if not 'name' in dir(hgr):
dotG.set_name('hypergraph') # depends on [control=['if'], data=[]]
else:
dotG.set_name(hgr.name)
colortable = {}
colorcount = 0
# Add all of the nodes first
for node in hgr.nodes():
newNode = pydot.Node(str(node), hyper_node_type='hypernode')
dotG.add_node(newNode) # depends on [control=['for'], data=['node']]
for hyperedge in hgr.hyperedges():
if colored:
colortable[hyperedge] = colors[colorcount % len(colors)]
colorcount += 1
newNode = pydot.Node(str(hyperedge), hyper_node_type='hyperedge', color=str(colortable[hyperedge]), shape='point') # depends on [control=['if'], data=[]]
else:
newNode = pydot.Node(str(hyperedge), hyper_node_type='hyperedge')
dotG.add_node(newNode)
for link in hgr.links(hyperedge):
newEdge = pydot.Edge(str(hyperedge), str(link))
dotG.add_edge(newEdge) # depends on [control=['for'], data=['link']] # depends on [control=['for'], data=['hyperedge']]
return dotG.to_string() |
def initialize_parse_state(self, build_file):
    """Creates a fresh parse state for the given build file.

    Builds the set of type aliases and parse globals (call proxies for
    target types, context-aware objects, and macros) that BUILD file
    parsing will expose, bound to a new ParseContext.

    :param build_file: The BUILD file to set up a new ParseState for.
    :type build_file: :class:`pants.base.build_file.BuildFile`
    :returns: A fresh ParseState for parsing the given `build_file` with.
    :rtype: :class:`BuildConfiguration.ParseState`
    """
    # TODO(John Sirois): Introduce a factory method to seal the BuildConfiguration and add a check
    # there that all anonymous types are covered by context aware object factories that are
    # Macro instances. Without this, we could have non-Macro context aware object factories being
    # asked to be a BuildFileTargetFactory when they are not (in SourceRoot registration context).
    # See: https://github.com/pantsbuild/pants/issues/2125
    # NOTE: type_aliases is shared (not copied) into the ParseContext, so
    # entries added below are visible through parse_context as well.
    type_aliases = self._exposed_object_by_alias.copy()
    parse_context = ParseContext(rel_path=build_file.spec_path, type_aliases=type_aliases)
    def create_call_proxy(tgt_type, tgt_alias=None):
      # Each proxy registers the addressables it creates into the shared
      # parse_context storage under the declared target name.
      def registration_callback(address, addressable):
        parse_context._storage.add(addressable, name=address.target_name)
      addressable_factory = self._get_addressable_factory(tgt_type, tgt_alias)
      return AddressableCallProxy(addressable_factory=addressable_factory,
                                  build_file=build_file,
                                  registration_callback=registration_callback)
    # Expose all aliased Target types.
    for alias, target_type in self._target_by_alias.items():
      proxy = create_call_proxy(target_type, alias)
      type_aliases[alias] = proxy
    # Expose aliases for exposed objects and targets in the BUILD file.
    parse_globals = type_aliases.copy()
    # Now its safe to add mappings from both the directly exposed and macro-created target types to
    # their call proxies for context awares and macros to use to manufacture targets by type
    # instead of by alias.
    for alias, target_type in self._target_by_alias.items():
      proxy = type_aliases[alias]
      type_aliases[target_type] = proxy
    for target_macro_factory in self._target_macro_factory_by_alias.values():
      for target_type in target_macro_factory.target_types:
        proxy = create_call_proxy(target_type)
        type_aliases[target_type] = proxy
    # Context-aware object factories and macros are instantiated per parse,
    # bound to this parse_context.
    for alias, object_factory in self._exposed_context_aware_object_factory_by_alias.items():
      parse_globals[alias] = object_factory(parse_context)
    for alias, target_macro_factory in self._target_macro_factory_by_alias.items():
      parse_globals[alias] = target_macro_factory.target_macro(parse_context)
    return self.ParseState(parse_context, parse_globals)
constant[Creates a fresh parse state for the given build file.
:param build_file: The BUILD file to set up a new ParseState for.
:type build_file: :class:`pants.base.build_file.BuildFile`
:returns: A fresh ParseState for parsing the given `build_file` with.
:rtype: :class:`BuildConfiguration.ParseState`
]
variable[type_aliases] assign[=] call[name[self]._exposed_object_by_alias.copy, parameter[]]
variable[parse_context] assign[=] call[name[ParseContext], parameter[]]
def function[create_call_proxy, parameter[tgt_type, tgt_alias]]:
def function[registration_callback, parameter[address, addressable]]:
call[name[parse_context]._storage.add, parameter[name[addressable]]]
variable[addressable_factory] assign[=] call[name[self]._get_addressable_factory, parameter[name[tgt_type], name[tgt_alias]]]
return[call[name[AddressableCallProxy], parameter[]]]
for taget[tuple[[<ast.Name object at 0x7da1b224b490>, <ast.Name object at 0x7da1b2248550>]]] in starred[call[name[self]._target_by_alias.items, parameter[]]] begin[:]
variable[proxy] assign[=] call[name[create_call_proxy], parameter[name[target_type], name[alias]]]
call[name[type_aliases]][name[alias]] assign[=] name[proxy]
variable[parse_globals] assign[=] call[name[type_aliases].copy, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b2249e10>, <ast.Name object at 0x7da1b224be80>]]] in starred[call[name[self]._target_by_alias.items, parameter[]]] begin[:]
variable[proxy] assign[=] call[name[type_aliases]][name[alias]]
call[name[type_aliases]][name[target_type]] assign[=] name[proxy]
for taget[name[target_macro_factory]] in starred[call[name[self]._target_macro_factory_by_alias.values, parameter[]]] begin[:]
for taget[name[target_type]] in starred[name[target_macro_factory].target_types] begin[:]
variable[proxy] assign[=] call[name[create_call_proxy], parameter[name[target_type]]]
call[name[type_aliases]][name[target_type]] assign[=] name[proxy]
for taget[tuple[[<ast.Name object at 0x7da1b2248400>, <ast.Name object at 0x7da1b22488e0>]]] in starred[call[name[self]._exposed_context_aware_object_factory_by_alias.items, parameter[]]] begin[:]
call[name[parse_globals]][name[alias]] assign[=] call[name[object_factory], parameter[name[parse_context]]]
for taget[tuple[[<ast.Name object at 0x7da1b2248df0>, <ast.Name object at 0x7da1b2249600>]]] in starred[call[name[self]._target_macro_factory_by_alias.items, parameter[]]] begin[:]
call[name[parse_globals]][name[alias]] assign[=] call[name[target_macro_factory].target_macro, parameter[name[parse_context]]]
return[call[name[self].ParseState, parameter[name[parse_context], name[parse_globals]]]] | keyword[def] identifier[initialize_parse_state] ( identifier[self] , identifier[build_file] ):
literal[string]
identifier[type_aliases] = identifier[self] . identifier[_exposed_object_by_alias] . identifier[copy] ()
identifier[parse_context] = identifier[ParseContext] ( identifier[rel_path] = identifier[build_file] . identifier[spec_path] , identifier[type_aliases] = identifier[type_aliases] )
keyword[def] identifier[create_call_proxy] ( identifier[tgt_type] , identifier[tgt_alias] = keyword[None] ):
keyword[def] identifier[registration_callback] ( identifier[address] , identifier[addressable] ):
identifier[parse_context] . identifier[_storage] . identifier[add] ( identifier[addressable] , identifier[name] = identifier[address] . identifier[target_name] )
identifier[addressable_factory] = identifier[self] . identifier[_get_addressable_factory] ( identifier[tgt_type] , identifier[tgt_alias] )
keyword[return] identifier[AddressableCallProxy] ( identifier[addressable_factory] = identifier[addressable_factory] ,
identifier[build_file] = identifier[build_file] ,
identifier[registration_callback] = identifier[registration_callback] )
keyword[for] identifier[alias] , identifier[target_type] keyword[in] identifier[self] . identifier[_target_by_alias] . identifier[items] ():
identifier[proxy] = identifier[create_call_proxy] ( identifier[target_type] , identifier[alias] )
identifier[type_aliases] [ identifier[alias] ]= identifier[proxy]
identifier[parse_globals] = identifier[type_aliases] . identifier[copy] ()
keyword[for] identifier[alias] , identifier[target_type] keyword[in] identifier[self] . identifier[_target_by_alias] . identifier[items] ():
identifier[proxy] = identifier[type_aliases] [ identifier[alias] ]
identifier[type_aliases] [ identifier[target_type] ]= identifier[proxy]
keyword[for] identifier[target_macro_factory] keyword[in] identifier[self] . identifier[_target_macro_factory_by_alias] . identifier[values] ():
keyword[for] identifier[target_type] keyword[in] identifier[target_macro_factory] . identifier[target_types] :
identifier[proxy] = identifier[create_call_proxy] ( identifier[target_type] )
identifier[type_aliases] [ identifier[target_type] ]= identifier[proxy]
keyword[for] identifier[alias] , identifier[object_factory] keyword[in] identifier[self] . identifier[_exposed_context_aware_object_factory_by_alias] . identifier[items] ():
identifier[parse_globals] [ identifier[alias] ]= identifier[object_factory] ( identifier[parse_context] )
keyword[for] identifier[alias] , identifier[target_macro_factory] keyword[in] identifier[self] . identifier[_target_macro_factory_by_alias] . identifier[items] ():
identifier[parse_globals] [ identifier[alias] ]= identifier[target_macro_factory] . identifier[target_macro] ( identifier[parse_context] )
keyword[return] identifier[self] . identifier[ParseState] ( identifier[parse_context] , identifier[parse_globals] ) | def initialize_parse_state(self, build_file):
"""Creates a fresh parse state for the given build file.
:param build_file: The BUILD file to set up a new ParseState for.
:type build_file: :class:`pants.base.build_file.BuildFile`
:returns: A fresh ParseState for parsing the given `build_file` with.
:rtype: :class:`BuildConfiguration.ParseState`
"""
# TODO(John Sirois): Introduce a factory method to seal the BuildConfiguration and add a check
# there that all anonymous types are covered by context aware object factories that are
# Macro instances. Without this, we could have non-Macro context aware object factories being
# asked to be a BuildFileTargetFactory when they are not (in SourceRoot registration context).
# See: https://github.com/pantsbuild/pants/issues/2125
type_aliases = self._exposed_object_by_alias.copy()
parse_context = ParseContext(rel_path=build_file.spec_path, type_aliases=type_aliases)
def create_call_proxy(tgt_type, tgt_alias=None):
def registration_callback(address, addressable):
parse_context._storage.add(addressable, name=address.target_name)
addressable_factory = self._get_addressable_factory(tgt_type, tgt_alias)
return AddressableCallProxy(addressable_factory=addressable_factory, build_file=build_file, registration_callback=registration_callback)
# Expose all aliased Target types.
for (alias, target_type) in self._target_by_alias.items():
proxy = create_call_proxy(target_type, alias)
type_aliases[alias] = proxy # depends on [control=['for'], data=[]]
# Expose aliases for exposed objects and targets in the BUILD file.
parse_globals = type_aliases.copy()
# Now its safe to add mappings from both the directly exposed and macro-created target types to
# their call proxies for context awares and macros to use to manufacture targets by type
# instead of by alias.
for (alias, target_type) in self._target_by_alias.items():
proxy = type_aliases[alias]
type_aliases[target_type] = proxy # depends on [control=['for'], data=[]]
for target_macro_factory in self._target_macro_factory_by_alias.values():
for target_type in target_macro_factory.target_types:
proxy = create_call_proxy(target_type)
type_aliases[target_type] = proxy # depends on [control=['for'], data=['target_type']] # depends on [control=['for'], data=['target_macro_factory']]
for (alias, object_factory) in self._exposed_context_aware_object_factory_by_alias.items():
parse_globals[alias] = object_factory(parse_context) # depends on [control=['for'], data=[]]
for (alias, target_macro_factory) in self._target_macro_factory_by_alias.items():
parse_globals[alias] = target_macro_factory.target_macro(parse_context) # depends on [control=['for'], data=[]]
return self.ParseState(parse_context, parse_globals) |
def _get_codes(self):
"""
Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v | def function[_get_codes, parameter[self]]:
constant[
Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
]
variable[v] assign[=] call[name[self]._codes.view, parameter[]]
name[v].flags.writeable assign[=] constant[False]
return[name[v]] | keyword[def] identifier[_get_codes] ( identifier[self] ):
literal[string]
identifier[v] = identifier[self] . identifier[_codes] . identifier[view] ()
identifier[v] . identifier[flags] . identifier[writeable] = keyword[False]
keyword[return] identifier[v] | def _get_codes(self):
"""
Get the codes.
Returns
-------
codes : integer array view
A non writable view of the `codes` array.
"""
v = self._codes.view()
v.flags.writeable = False
return v |
def get_ec_numbers(cls, entry):
        """
        Get the list of models.ECNumber objects from an XML node entry.
        :param entry: XML node entry (ElementTree-style element supporting
            ``iterfind``)
        :return: list of models.ECNumber objects, one per
            ``protein/recommendedName/ecNumber`` child, in document order
        """
        # A list comprehension replaces the manual append loop: same
        # elements, same order, idiomatic and slightly faster.
        return [
            models.ECNumber(ec_number=ec.text)
            for ec in entry.iterfind("./protein/recommendedName/ecNumber")
        ]
constant[
get list of models.ECNumber objects from XML node entry
:param entry: XML node entry
:return: list of models.ECNumber objects
]
variable[ec_numbers] assign[=] list[[]]
for taget[name[ec]] in starred[call[name[entry].iterfind, parameter[constant[./protein/recommendedName/ecNumber]]]] begin[:]
call[name[ec_numbers].append, parameter[call[name[models].ECNumber, parameter[]]]]
return[name[ec_numbers]] | keyword[def] identifier[get_ec_numbers] ( identifier[cls] , identifier[entry] ):
literal[string]
identifier[ec_numbers] =[]
keyword[for] identifier[ec] keyword[in] identifier[entry] . identifier[iterfind] ( literal[string] ):
identifier[ec_numbers] . identifier[append] ( identifier[models] . identifier[ECNumber] ( identifier[ec_number] = identifier[ec] . identifier[text] ))
keyword[return] identifier[ec_numbers] | def get_ec_numbers(cls, entry):
"""
get list of models.ECNumber objects from XML node entry
:param entry: XML node entry
:return: list of models.ECNumber objects
"""
ec_numbers = []
for ec in entry.iterfind('./protein/recommendedName/ecNumber'):
ec_numbers.append(models.ECNumber(ec_number=ec.text)) # depends on [control=['for'], data=['ec']]
return ec_numbers |
def init_logger(self):
        """Initialize the result logger lazily.
        Ensures ``self.local_dir`` (and ``self.logdir``, creating a unique
        temp directory under ``local_dir`` if none is configured) exists,
        then builds the :class:`UnifiedLogger`. Subsequent calls are no-ops
        because ``self.result_logger`` is already set.
        """
        if self.result_logger:
            # Already initialized -- nothing to do.
            return
        # exist_ok=True avoids the check-then-create race of the previous
        # os.path.exists()/os.makedirs() pairs.
        os.makedirs(self.local_dir, exist_ok=True)
        if not self.logdir:
            # No log dir configured: create a unique one under local_dir,
            # prefixed with a truncated identifier and a timestamp.
            self.logdir = tempfile.mkdtemp(
                prefix="{}_{}".format(
                    str(self)[:MAX_LEN_IDENTIFIER], date_str()),
                dir=self.local_dir)
        else:
            os.makedirs(self.logdir, exist_ok=True)
        self.result_logger = UnifiedLogger(
            self.config,
            self.logdir,
            upload_uri=self.upload_dir,
            loggers=self.loggers,
            sync_function=self.sync_function)
constant[Init logger.]
if <ast.UnaryOp object at 0x7da2044c1060> begin[:]
if <ast.UnaryOp object at 0x7da20cabe890> begin[:]
call[name[os].makedirs, parameter[name[self].local_dir]]
if <ast.UnaryOp object at 0x7da20cabe4d0> begin[:]
name[self].logdir assign[=] call[name[tempfile].mkdtemp, parameter[]]
name[self].result_logger assign[=] call[name[UnifiedLogger], parameter[name[self].config, name[self].logdir]] | keyword[def] identifier[init_logger] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[result_logger] :
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[local_dir] ):
identifier[os] . identifier[makedirs] ( identifier[self] . identifier[local_dir] )
keyword[if] keyword[not] identifier[self] . identifier[logdir] :
identifier[self] . identifier[logdir] = identifier[tempfile] . identifier[mkdtemp] (
identifier[prefix] = literal[string] . identifier[format] (
identifier[str] ( identifier[self] )[: identifier[MAX_LEN_IDENTIFIER] ], identifier[date_str] ()),
identifier[dir] = identifier[self] . identifier[local_dir] )
keyword[elif] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[self] . identifier[logdir] ):
identifier[os] . identifier[makedirs] ( identifier[self] . identifier[logdir] )
identifier[self] . identifier[result_logger] = identifier[UnifiedLogger] (
identifier[self] . identifier[config] ,
identifier[self] . identifier[logdir] ,
identifier[upload_uri] = identifier[self] . identifier[upload_dir] ,
identifier[loggers] = identifier[self] . identifier[loggers] ,
identifier[sync_function] = identifier[self] . identifier[sync_function] ) | def init_logger(self):
"""Init logger."""
if not self.result_logger:
if not os.path.exists(self.local_dir):
os.makedirs(self.local_dir) # depends on [control=['if'], data=[]]
if not self.logdir:
self.logdir = tempfile.mkdtemp(prefix='{}_{}'.format(str(self)[:MAX_LEN_IDENTIFIER], date_str()), dir=self.local_dir) # depends on [control=['if'], data=[]]
elif not os.path.exists(self.logdir):
os.makedirs(self.logdir) # depends on [control=['if'], data=[]]
self.result_logger = UnifiedLogger(self.config, self.logdir, upload_uri=self.upload_dir, loggers=self.loggers, sync_function=self.sync_function) # depends on [control=['if'], data=[]] |
def set_proxy(self, host, port, user, password):
        '''
        Configure the proxy server used for HTTP CONNECT tunnelling.
        host:
            Address of the proxy. Ex: '192.168.0.100'
        port:
            Port of the proxy. Ex: 6000
        user:
            User for proxy authorization.
        password:
            Password for proxy authorization.
        '''
        # Plain attribute assignment; no validation is performed here,
        # mirroring the simple-setter contract.
        self.proxy_host, self.proxy_port = host, port
        self.proxy_user, self.proxy_password = user, password
constant[
Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
host:
Address of the proxy. Ex: '192.168.0.100'
port:
Port of the proxy. Ex: 6000
user:
User for proxy authorization.
password:
Password for proxy authorization.
]
name[self].proxy_host assign[=] name[host]
name[self].proxy_port assign[=] name[port]
name[self].proxy_user assign[=] name[user]
name[self].proxy_password assign[=] name[password] | keyword[def] identifier[set_proxy] ( identifier[self] , identifier[host] , identifier[port] , identifier[user] , identifier[password] ):
literal[string]
identifier[self] . identifier[proxy_host] = identifier[host]
identifier[self] . identifier[proxy_port] = identifier[port]
identifier[self] . identifier[proxy_user] = identifier[user]
identifier[self] . identifier[proxy_password] = identifier[password] | def set_proxy(self, host, port, user, password):
"""
Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
host:
Address of the proxy. Ex: '192.168.0.100'
port:
Port of the proxy. Ex: 6000
user:
User for proxy authorization.
password:
Password for proxy authorization.
"""
self.proxy_host = host
self.proxy_port = port
self.proxy_user = user
self.proxy_password = password |
def findRequirements(platform):
    """
    Read the requirements.txt file and parse into requirements for setup's
    install_requirements option.
    :param platform: platform identifier string; pycapnp is excluded when
        the platform is one of WINDOWS_PLATFORMS.
    :returns: list of stripped requirement lines, with comment lines (and
        pycapnp on Windows) filtered out.
    """
    includePycapnp = platform not in WINDOWS_PLATFORMS
    requirementsPath = fixPath(os.path.join(PY_BINDINGS, "requirements.txt"))
    # Use a context manager so the file handle is closed deterministically;
    # the previous open(...).readlines() leaked the handle until GC.
    with open(requirementsPath) as requirementsFile:
        return [
            line.strip()
            for line in requirementsFile
            if not line.startswith("#")
            and (not line.startswith("pycapnp") or includePycapnp)
        ]
constant[
Read the requirements.txt file and parse into requirements for setup's
install_requirements option.
]
variable[includePycapnp] assign[=] compare[name[platform] <ast.NotIn object at 0x7da2590d7190> name[WINDOWS_PLATFORMS]]
variable[requirementsPath] assign[=] call[name[fixPath], parameter[call[name[os].path.join, parameter[name[PY_BINDINGS], constant[requirements.txt]]]]]
return[<ast.ListComp object at 0x7da1b2314e50>] | keyword[def] identifier[findRequirements] ( identifier[platform] ):
literal[string]
identifier[includePycapnp] = identifier[platform] keyword[not] keyword[in] identifier[WINDOWS_PLATFORMS]
identifier[requirementsPath] = identifier[fixPath] ( identifier[os] . identifier[path] . identifier[join] ( identifier[PY_BINDINGS] , literal[string] ))
keyword[return] [
identifier[line] . identifier[strip] ()
keyword[for] identifier[line] keyword[in] identifier[open] ( identifier[requirementsPath] ). identifier[readlines] ()
keyword[if] keyword[not] identifier[line] . identifier[startswith] ( literal[string] ) keyword[and] ( keyword[not] identifier[line] . identifier[startswith] ( literal[string] ) keyword[or] identifier[includePycapnp] )
] | def findRequirements(platform):
"""
Read the requirements.txt file and parse into requirements for setup's
install_requirements option.
"""
includePycapnp = platform not in WINDOWS_PLATFORMS
requirementsPath = fixPath(os.path.join(PY_BINDINGS, 'requirements.txt'))
return [line.strip() for line in open(requirementsPath).readlines() if not line.startswith('#') and (not line.startswith('pycapnp') or includePycapnp)] |
def merge(self, other):
        """
        Combine this region with other.
        Returns a new region whose coordinates are the union of this
        region's coordinates and ``other``'s, with duplicate coordinate
        rows removed.
        """
        # Coerce raw coordinate data into a region first.
        if not isinstance(other, one):
            other = one(other)
        combined = concatenate((self.coordinates, other.coordinates))
        # Deduplicate rows by round-tripping them through hashable tuples.
        distinct = set(map(tuple, combined.tolist()))
        merged = asarray([list(row) for row in distinct])
        return one(merged)
constant[
Combine this region with other.
]
if <ast.UnaryOp object at 0x7da1b095d090> begin[:]
variable[other] assign[=] call[name[one], parameter[name[other]]]
variable[new] assign[=] call[name[concatenate], parameter[tuple[[<ast.Attribute object at 0x7da1b095c6a0>, <ast.Attribute object at 0x7da1b095f7c0>]]]]
variable[unique] assign[=] call[name[set], parameter[<ast.ListComp object at 0x7da1b095ee30>]]
variable[final] assign[=] call[name[asarray], parameter[<ast.ListComp object at 0x7da1b095f580>]]
return[call[name[one], parameter[name[final]]]] | keyword[def] identifier[merge] ( identifier[self] , identifier[other] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[other] , identifier[one] ):
identifier[other] = identifier[one] ( identifier[other] )
identifier[new] = identifier[concatenate] (( identifier[self] . identifier[coordinates] , identifier[other] . identifier[coordinates] ))
identifier[unique] = identifier[set] ([ identifier[tuple] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[new] . identifier[tolist] ()])
identifier[final] = identifier[asarray] ([ identifier[list] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[unique] ])
keyword[return] identifier[one] ( identifier[final] ) | def merge(self, other):
"""
Combine this region with other.
"""
if not isinstance(other, one):
other = one(other) # depends on [control=['if'], data=[]]
new = concatenate((self.coordinates, other.coordinates))
unique = set([tuple(x) for x in new.tolist()])
final = asarray([list(x) for x in unique])
return one(final) |
def plot_directory(directory, recursive=False, regex='*',
                   save_prefix='', save_suffix='', axis=None, **kwargs):
    """
    Create and save an ANTsPy plot for every image matching a given regular
    expression in a directory, optionally recursively. This is a good function
    for quick visualize exploration of all of images in a directory
    ANTsR function: N/A
    Arguments
    ---------
    directory : string
        directory in which to search for images and plot them
    recursive : boolean
        If true, this function will search through all directories under
        the given directory recursively to make plots.
        If false, this function will only create plots for images in the
        given directory
    regex : string
        regular expression used to filter out certain filenames or suffixes
    save_prefix : string
        sub-string that will be appended to the beginning of all saved plot filenames.
        Default is to add nothing.
    save_suffix : string
        sub-string that will be appended to the end of all saved plot filenames.
        Default is add nothing.
    axis : int or list/tuple of int, optional
        axis (or axes) along which to slice; defaults to all axes of the image.
    kwargs : keyword arguments
        any additional arguments to pass onto the `ants.plot` function.
        e.g. overlay, alpha, cmap, etc. See `ants.plot` for more options.
    Raises
    ------
    ValueError
        if `directory` does not exist.
    Example
    -------
    >>> import ants
    >>> ants.plot_directory(directory='~/desktop/testdir',
                            recursive=False, regex='*')
    """
    def has_acceptable_suffix(fname):
        # Only filenames ending in one of these suffixes are plotted.
        suffixes = {'.nii.gz'}
        return sum([fname.endswith(sx) for sx in suffixes]) > 0
    if directory.startswith('~'):
        directory = os.path.expanduser(directory)
    if not os.path.isdir(directory):
        raise ValueError('directory %s does not exist!' % directory)
    for root, dirnames, fnames in os.walk(directory):
        if not recursive:
            # BUG FIX: `recursive` was previously ignored and os.walk always
            # descended into subdirectories. Clearing `dirnames` in place
            # prunes the walk so only the top-level directory is processed.
            dirnames[:] = []
        for fname in fnames:
            if fnmatch.fnmatch(fname, regex) and has_acceptable_suffix(fname):
                load_fname = os.path.join(root, fname)
                # Swap everything after the first '.' for 'png', then splice
                # in the suffix/prefix, e.g. 'img.nii.gz' -> 'preimgsuf.png'.
                fname = fname.replace('.'.join(fname.split('.')[1:]), 'png')
                fname = fname.replace('.png', '%s.png' % save_suffix)
                fname = '%s%s' % (save_prefix, fname)
                save_fname = os.path.join(root, fname)
                img = iio2.image_read(load_fname)
                if axis is None:
                    # Default to plotting along every axis of the image.
                    axis_range = [i for i in range(img.dimension)]
                else:
                    axis_range = axis if isinstance(axis, (list, tuple)) else [axis]
                if img.dimension > 2:
                    # One plot per requested axis; grid is roughly square.
                    for axis_idx in axis_range:
                        filename = save_fname.replace('.png', '_axis%i.png' % axis_idx)
                        ncol = int(math.sqrt(img.shape[axis_idx]))
                        plot(img, axis=axis_idx, nslices=img.shape[axis_idx], ncol=ncol,
                             filename=filename, **kwargs)
                else:
                    filename = save_fname
                    plot(img, filename=filename, **kwargs)
constant[
Create and save an ANTsPy plot for every image matching a given regular
expression in a directory, optionally recursively. This is a good function
for quick visualize exploration of all of images in a directory
ANTsR function: N/A
Arguments
---------
directory : string
directory in which to search for images and plot them
recursive : boolean
If true, this function will search through all directories under
the given directory recursively to make plots.
If false, this function will only create plots for images in the
given directory
regex : string
regular expression used to filter out certain filenames or suffixes
save_prefix : string
sub-string that will be appended to the beginning of all saved plot filenames.
Default is to add nothing.
save_suffix : string
sub-string that will be appended to the end of all saved plot filenames.
Default is add nothing.
kwargs : keyword arguments
any additional arguments to pass onto the `ants.plot` function.
e.g. overlay, alpha, cmap, etc. See `ants.plot` for more options.
Example
-------
>>> import ants
>>> ants.plot_directory(directory='~/desktop/testdir',
recursive=False, regex='*')
]
def function[has_acceptable_suffix, parameter[fname]]:
variable[suffixes] assign[=] <ast.Set object at 0x7da1b140b8e0>
return[compare[call[name[sum], parameter[<ast.ListComp object at 0x7da1b140b790>]] greater[>] constant[0]]]
if call[name[directory].startswith, parameter[constant[~]]] begin[:]
variable[directory] assign[=] call[name[os].path.expanduser, parameter[name[directory]]]
if <ast.UnaryOp object at 0x7da1b15f3190> begin[:]
<ast.Raise object at 0x7da1b15f3070>
for taget[tuple[[<ast.Name object at 0x7da1b15f2ef0>, <ast.Name object at 0x7da1b15f2ec0>, <ast.Name object at 0x7da1b15f2e90>]]] in starred[call[name[os].walk, parameter[name[directory]]]] begin[:]
for taget[name[fname]] in starred[name[fnames]] begin[:]
if <ast.BoolOp object at 0x7da1b15f2ce0> begin[:]
variable[load_fname] assign[=] call[name[os].path.join, parameter[name[root], name[fname]]]
variable[fname] assign[=] call[name[fname].replace, parameter[call[constant[.].join, parameter[call[call[name[fname].split, parameter[constant[.]]]][<ast.Slice object at 0x7da1b15f2740>]]], constant[png]]]
variable[fname] assign[=] call[name[fname].replace, parameter[constant[.png], binary_operation[constant[%s.png] <ast.Mod object at 0x7da2590d6920> name[save_suffix]]]]
variable[fname] assign[=] binary_operation[constant[%s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b15f2410>, <ast.Name object at 0x7da1b15f23e0>]]]
variable[save_fname] assign[=] call[name[os].path.join, parameter[name[root], name[fname]]]
variable[img] assign[=] call[name[iio2].image_read, parameter[name[load_fname]]]
if compare[name[axis] is constant[None]] begin[:]
variable[axis_range] assign[=] <ast.ListComp object at 0x7da1b15f1ff0>
if compare[name[img].dimension greater[>] constant[2]] begin[:]
for taget[name[axis_idx]] in starred[name[axis_range]] begin[:]
variable[filename] assign[=] call[name[save_fname].replace, parameter[constant[.png], binary_operation[constant[_axis%i.png] <ast.Mod object at 0x7da2590d6920> name[axis_idx]]]]
variable[ncol] assign[=] call[name[int], parameter[call[name[math].sqrt, parameter[call[name[img].shape][name[axis_idx]]]]]]
call[name[plot], parameter[name[img]]] | keyword[def] identifier[plot_directory] ( identifier[directory] , identifier[recursive] = keyword[False] , identifier[regex] = literal[string] ,
identifier[save_prefix] = literal[string] , identifier[save_suffix] = literal[string] , identifier[axis] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[def] identifier[has_acceptable_suffix] ( identifier[fname] ):
identifier[suffixes] ={ literal[string] }
keyword[return] identifier[sum] ([ identifier[fname] . identifier[endswith] ( identifier[sx] ) keyword[for] identifier[sx] keyword[in] identifier[suffixes] ])> literal[int]
keyword[if] identifier[directory] . identifier[startswith] ( literal[string] ):
identifier[directory] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[directory] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isdir] ( identifier[directory] ):
keyword[raise] identifier[ValueError] ( literal[string] % identifier[directory] )
keyword[for] identifier[root] , identifier[dirnames] , identifier[fnames] keyword[in] identifier[os] . identifier[walk] ( identifier[directory] ):
keyword[for] identifier[fname] keyword[in] identifier[fnames] :
keyword[if] identifier[fnmatch] . identifier[fnmatch] ( identifier[fname] , identifier[regex] ) keyword[and] identifier[has_acceptable_suffix] ( identifier[fname] ):
identifier[load_fname] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[fname] )
identifier[fname] = identifier[fname] . identifier[replace] ( literal[string] . identifier[join] ( identifier[fname] . identifier[split] ( literal[string] )[ literal[int] :]), literal[string] )
identifier[fname] = identifier[fname] . identifier[replace] ( literal[string] , literal[string] % identifier[save_suffix] )
identifier[fname] = literal[string] %( identifier[save_prefix] , identifier[fname] )
identifier[save_fname] = identifier[os] . identifier[path] . identifier[join] ( identifier[root] , identifier[fname] )
identifier[img] = identifier[iio2] . identifier[image_read] ( identifier[load_fname] )
keyword[if] identifier[axis] keyword[is] keyword[None] :
identifier[axis_range] =[ identifier[i] keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[img] . identifier[dimension] )]
keyword[else] :
identifier[axis_range] = identifier[axis] keyword[if] identifier[isinstance] ( identifier[axis] ,( identifier[list] , identifier[tuple] )) keyword[else] [ identifier[axis] ]
keyword[if] identifier[img] . identifier[dimension] > literal[int] :
keyword[for] identifier[axis_idx] keyword[in] identifier[axis_range] :
identifier[filename] = identifier[save_fname] . identifier[replace] ( literal[string] , literal[string] % identifier[axis_idx] )
identifier[ncol] = identifier[int] ( identifier[math] . identifier[sqrt] ( identifier[img] . identifier[shape] [ identifier[axis_idx] ]))
identifier[plot] ( identifier[img] , identifier[axis] = identifier[axis_idx] , identifier[nslices] = identifier[img] . identifier[shape] [ identifier[axis_idx] ], identifier[ncol] = identifier[ncol] ,
identifier[filename] = identifier[filename] ,** identifier[kwargs] )
keyword[else] :
identifier[filename] = identifier[save_fname]
identifier[plot] ( identifier[img] , identifier[filename] = identifier[filename] ,** identifier[kwargs] ) | def plot_directory(directory, recursive=False, regex='*', save_prefix='', save_suffix='', axis=None, **kwargs):
"""
Create and save an ANTsPy plot for every image matching a given regular
expression in a directory, optionally recursively. This is a good function
for quick visualize exploration of all of images in a directory
ANTsR function: N/A
Arguments
---------
directory : string
directory in which to search for images and plot them
recursive : boolean
If true, this function will search through all directories under
the given directory recursively to make plots.
If false, this function will only create plots for images in the
given directory
regex : string
regular expression used to filter out certain filenames or suffixes
save_prefix : string
sub-string that will be appended to the beginning of all saved plot filenames.
Default is to add nothing.
save_suffix : string
sub-string that will be appended to the end of all saved plot filenames.
Default is add nothing.
kwargs : keyword arguments
any additional arguments to pass onto the `ants.plot` function.
e.g. overlay, alpha, cmap, etc. See `ants.plot` for more options.
Example
-------
>>> import ants
>>> ants.plot_directory(directory='~/desktop/testdir',
recursive=False, regex='*')
"""
def has_acceptable_suffix(fname):
suffixes = {'.nii.gz'}
return sum([fname.endswith(sx) for sx in suffixes]) > 0
if directory.startswith('~'):
directory = os.path.expanduser(directory) # depends on [control=['if'], data=[]]
if not os.path.isdir(directory):
raise ValueError('directory %s does not exist!' % directory) # depends on [control=['if'], data=[]]
for (root, dirnames, fnames) in os.walk(directory):
for fname in fnames:
if fnmatch.fnmatch(fname, regex) and has_acceptable_suffix(fname):
load_fname = os.path.join(root, fname)
fname = fname.replace('.'.join(fname.split('.')[1:]), 'png')
fname = fname.replace('.png', '%s.png' % save_suffix)
fname = '%s%s' % (save_prefix, fname)
save_fname = os.path.join(root, fname)
img = iio2.image_read(load_fname)
if axis is None:
axis_range = [i for i in range(img.dimension)] # depends on [control=['if'], data=[]]
else:
axis_range = axis if isinstance(axis, (list, tuple)) else [axis]
if img.dimension > 2:
for axis_idx in axis_range:
filename = save_fname.replace('.png', '_axis%i.png' % axis_idx)
ncol = int(math.sqrt(img.shape[axis_idx]))
plot(img, axis=axis_idx, nslices=img.shape[axis_idx], ncol=ncol, filename=filename, **kwargs) # depends on [control=['for'], data=['axis_idx']] # depends on [control=['if'], data=[]]
else:
filename = save_fname
plot(img, filename=filename, **kwargs) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['fname']] # depends on [control=['for'], data=[]] |
def callback(msg, _):
    """Handle one kernel netlink message delivered by libnl.

    Positional arguments:
    msg -- nl_msg class instance containing the data sent by the kernel.

    Returns:
    An integer, value of NL_OK. It tells libnl to proceed with processing the next kernel message.
    """
    # Unpack the raw message: netlink header -> ifinfomsg payload -> rtattr list.
    header = nlmsg_hdr(msg)
    info = ifinfomsg(nlmsg_data(header))
    attr = IFLA_RTA(info)
    bytes_left = ctypes.c_int(header.nlmsg_len - NLMSG_LENGTH(info.SIZEOF))
    # Walk every rtattr in the payload; each rtattr carries exactly one type,
    # and only IFLA_IFNAME entries are of interest here.
    while RTA_OK(attr, bytes_left):
        if attr.rta_type == IFLA_IFNAME:
            ifname = get_string(RTA_DATA(attr)).decode('ascii')
            print('Found network interface {0}: {1}'.format(info.ifi_index, ifname))
        attr = RTA_NEXT(attr, bytes_left)
    return NL_OK
constant[Callback function called by libnl upon receiving messages from the kernel.
Positional arguments:
msg -- nl_msg class instance containing the data sent by the kernel.
Returns:
An integer, value of NL_OK. It tells libnl to proceed with processing the next kernel message.
]
variable[nlh] assign[=] call[name[nlmsg_hdr], parameter[name[msg]]]
variable[iface] assign[=] call[name[ifinfomsg], parameter[call[name[nlmsg_data], parameter[name[nlh]]]]]
variable[hdr] assign[=] call[name[IFLA_RTA], parameter[name[iface]]]
variable[remaining] assign[=] call[name[ctypes].c_int, parameter[binary_operation[name[nlh].nlmsg_len - call[name[NLMSG_LENGTH], parameter[name[iface].SIZEOF]]]]]
while call[name[RTA_OK], parameter[name[hdr], name[remaining]]] begin[:]
if compare[name[hdr].rta_type equal[==] name[IFLA_IFNAME]] begin[:]
call[name[print], parameter[call[constant[Found network interface {0}: {1}].format, parameter[name[iface].ifi_index, call[call[name[get_string], parameter[call[name[RTA_DATA], parameter[name[hdr]]]]].decode, parameter[constant[ascii]]]]]]]
variable[hdr] assign[=] call[name[RTA_NEXT], parameter[name[hdr], name[remaining]]]
return[name[NL_OK]] | keyword[def] identifier[callback] ( identifier[msg] , identifier[_] ):
literal[string]
identifier[nlh] = identifier[nlmsg_hdr] ( identifier[msg] )
identifier[iface] = identifier[ifinfomsg] ( identifier[nlmsg_data] ( identifier[nlh] ))
identifier[hdr] = identifier[IFLA_RTA] ( identifier[iface] )
identifier[remaining] = identifier[ctypes] . identifier[c_int] ( identifier[nlh] . identifier[nlmsg_len] - identifier[NLMSG_LENGTH] ( identifier[iface] . identifier[SIZEOF] ))
keyword[while] identifier[RTA_OK] ( identifier[hdr] , identifier[remaining] ):
keyword[if] identifier[hdr] . identifier[rta_type] == identifier[IFLA_IFNAME] :
identifier[print] ( literal[string] . identifier[format] ( identifier[iface] . identifier[ifi_index] , identifier[get_string] ( identifier[RTA_DATA] ( identifier[hdr] )). identifier[decode] ( literal[string] )))
identifier[hdr] = identifier[RTA_NEXT] ( identifier[hdr] , identifier[remaining] )
keyword[return] identifier[NL_OK] | def callback(msg, _):
"""Callback function called by libnl upon receiving messages from the kernel.
Positional arguments:
msg -- nl_msg class instance containing the data sent by the kernel.
Returns:
An integer, value of NL_OK. It tells libnl to proceed with processing the next kernel message.
"""
# First convert `msg` into something more manageable.
nlh = nlmsg_hdr(msg)
iface = ifinfomsg(nlmsg_data(nlh))
hdr = IFLA_RTA(iface)
remaining = ctypes.c_int(nlh.nlmsg_len - NLMSG_LENGTH(iface.SIZEOF))
# Now iterate through each rtattr stored in `iface`.
while RTA_OK(hdr, remaining):
# Each rtattr (which is what hdr is) instance is only one type. Looping through all of them until we run into
# the ones we care about.
if hdr.rta_type == IFLA_IFNAME:
print('Found network interface {0}: {1}'.format(iface.ifi_index, get_string(RTA_DATA(hdr)).decode('ascii'))) # depends on [control=['if'], data=[]]
hdr = RTA_NEXT(hdr, remaining) # depends on [control=['while'], data=[]]
return NL_OK |
def _process_messages(self, messages, ignore_unknown_message_types=False):
    """Performs message processing.

    Resolves the registered message type for every message, makes sure each
    dispatch has compiled message text cached, and finally hands each batch
    over to ``send()``.

    :param dict messages: indexed by message id dict with messages data;
        each value is a ``(message_model, dispatch_models)`` pair
    :param bool ignore_unknown_message_types: whether to silence exceptions
        raised for message types that are not registered
    :raises UnknownMessageTypeError: if a message type is not registered and
        ``ignore_unknown_message_types`` is False
    """
    with self.before_after_send_handling():
        for message_id, message_data in messages.items():
            message_model, dispatch_models = message_data
            try:
                message_cls = get_registered_message_type(message_model.cls)
            except UnknownMessageTypeError:
                if ignore_unknown_message_types:
                    # Skip this message entirely; its dispatches are not sent.
                    continue
                raise
            # Compiled text shared by all dispatches of this message when the
            # message class does not use per-dispatch (dynamic) context.
            message_type_cache = None
            for dispatch in dispatch_models:
                if not dispatch.message_cache:  # Create actual message text for further usage.
                    try:
                        if message_type_cache is None and not message_cls.has_dynamic_context:
                            # If a message class doesn't depend upon a dispatch data for message compilation,
                            # we'd compile a message just once.
                            message_type_cache = message_cls.compile(message_model, self, dispatch=dispatch)
                        dispatch.message_cache = message_type_cache or message_cls.compile(
                            message_model, self, dispatch=dispatch)
                    except Exception as e:
                        # Record the failure on the dispatch rather than
                        # aborting the whole batch.
                        self.mark_error(dispatch, e, message_cls)
            self.send(message_cls, message_model, dispatch_models)
constant[Performs message processing.
:param dict messages: indexed by message id dict with messages data
:param bool ignore_unknown_message_types: whether to silence exceptions
:raises UnknownMessageTypeError:
]
with call[name[self].before_after_send_handling, parameter[]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18ede6a70>, <ast.Name object at 0x7da18ede5c60>]]] in starred[call[name[messages].items, parameter[]]] begin[:]
<ast.Tuple object at 0x7da18ede6f50> assign[=] name[message_data]
<ast.Try object at 0x7da18ede4100>
variable[message_type_cache] assign[=] constant[None]
for taget[name[dispatch]] in starred[name[dispatch_models]] begin[:]
if <ast.UnaryOp object at 0x7da18ede6c50> begin[:]
<ast.Try object at 0x7da18ede6260>
call[name[self].send, parameter[name[message_cls], name[message_model], name[dispatch_models]]] | keyword[def] identifier[_process_messages] ( identifier[self] , identifier[messages] , identifier[ignore_unknown_message_types] = keyword[False] ):
literal[string]
keyword[with] identifier[self] . identifier[before_after_send_handling] ():
keyword[for] identifier[message_id] , identifier[message_data] keyword[in] identifier[messages] . identifier[items] ():
identifier[message_model] , identifier[dispatch_models] = identifier[message_data]
keyword[try] :
identifier[message_cls] = identifier[get_registered_message_type] ( identifier[message_model] . identifier[cls] )
keyword[except] identifier[UnknownMessageTypeError] :
keyword[if] identifier[ignore_unknown_message_types] :
keyword[continue]
keyword[raise]
identifier[message_type_cache] = keyword[None]
keyword[for] identifier[dispatch] keyword[in] identifier[dispatch_models] :
keyword[if] keyword[not] identifier[dispatch] . identifier[message_cache] :
keyword[try] :
keyword[if] identifier[message_type_cache] keyword[is] keyword[None] keyword[and] keyword[not] identifier[message_cls] . identifier[has_dynamic_context] :
identifier[message_type_cache] = identifier[message_cls] . identifier[compile] ( identifier[message_model] , identifier[self] , identifier[dispatch] = identifier[dispatch] )
identifier[dispatch] . identifier[message_cache] = identifier[message_type_cache] keyword[or] identifier[message_cls] . identifier[compile] (
identifier[message_model] , identifier[self] , identifier[dispatch] = identifier[dispatch] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[mark_error] ( identifier[dispatch] , identifier[e] , identifier[message_cls] )
identifier[self] . identifier[send] ( identifier[message_cls] , identifier[message_model] , identifier[dispatch_models] ) | def _process_messages(self, messages, ignore_unknown_message_types=False):
"""Performs message processing.
:param dict messages: indexed by message id dict with messages data
:param bool ignore_unknown_message_types: whether to silence exceptions
:raises UnknownMessageTypeError:
"""
with self.before_after_send_handling():
for (message_id, message_data) in messages.items():
(message_model, dispatch_models) = message_data
try:
message_cls = get_registered_message_type(message_model.cls) # depends on [control=['try'], data=[]]
except UnknownMessageTypeError:
if ignore_unknown_message_types:
continue # depends on [control=['if'], data=[]]
raise # depends on [control=['except'], data=[]]
message_type_cache = None
for dispatch in dispatch_models:
if not dispatch.message_cache: # Create actual message text for further usage.
try:
if message_type_cache is None and (not message_cls.has_dynamic_context):
# If a message class doesn't depend upon a dispatch data for message compilation,
# we'd compile a message just once.
message_type_cache = message_cls.compile(message_model, self, dispatch=dispatch) # depends on [control=['if'], data=[]]
dispatch.message_cache = message_type_cache or message_cls.compile(message_model, self, dispatch=dispatch) # depends on [control=['try'], data=[]]
except Exception as e:
self.mark_error(dispatch, e, message_cls) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['dispatch']]
self.send(message_cls, message_model, dispatch_models) # depends on [control=['for'], data=[]] # depends on [control=['with'], data=[]] |
def update_tenant(
    self,
    tenant,
    update_mask=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """
    Updates specified tenant.

    Example:
        >>> from google.cloud import talent_v4beta1
        >>>
        >>> client = talent_v4beta1.TenantServiceClient()
        >>>
        >>> # TODO: Initialize `tenant`:
        >>> tenant = {}
        >>>
        >>> response = client.update_tenant(tenant)

    Args:
        tenant (Union[dict, ~google.cloud.talent_v4beta1.types.Tenant]): Required.
            The tenant resource that replaces the current resource in the system.
            A dict, if given, must match the shape of the protobuf message
            :class:`~google.cloud.talent_v4beta1.types.Tenant`.
        update_mask (Union[dict, ~google.cloud.talent_v4beta1.types.FieldMask]): Optional
            but strongly recommended. When provided, only the named top-level
            fields of ``Tenant`` are updated; otherwise every field is updated.
            A dict, if given, must match the shape of the protobuf message
            :class:`~google.cloud.talent_v4beta1.types.FieldMask`.
        retry (Optional[google.api_core.retry.Retry]): Retry object used for
            the request; ``None`` disables retries.
        timeout (Optional[float]): Seconds to wait for the request. With
            ``retry`` set, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            sent with the method call.

    Returns:
        A :class:`~google.cloud.talent_v4beta1.types.Tenant` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Build (and memoize) the retry/timeout-aware transport wrapper the
    # first time this RPC is invoked.
    if "update_tenant" not in self._inner_api_calls:
        method_config = self._method_configs["UpdateTenant"]
        self._inner_api_calls["update_tenant"] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.update_tenant,
            default_retry=method_config.retry,
            default_timeout=method_config.timeout,
            client_info=self._client_info,
        )

    request = tenant_service_pb2.UpdateTenantRequest(
        tenant=tenant,
        update_mask=update_mask,
    )
    return self._inner_api_calls["update_tenant"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
constant[
Updates specified tenant.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.TenantServiceClient()
>>>
>>> # TODO: Initialize `tenant`:
>>> tenant = {}
>>>
>>> response = client.update_tenant(tenant)
Args:
tenant (Union[dict, ~google.cloud.talent_v4beta1.types.Tenant]): Required.
The tenant resource to replace the current resource in the system.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.Tenant`
update_mask (Union[dict, ~google.cloud.talent_v4beta1.types.FieldMask]): Optional but strongly recommended for the best service experience.
If ``update_mask`` is provided, only the specified fields in ``tenant``
are updated. Otherwise all the fields are updated.
A field mask to specify the tenant fields to be updated. Only top level
fields of ``Tenant`` are supported.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.Tenant` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
]
if compare[constant[update_tenant] <ast.NotIn object at 0x7da2590d7190> name[self]._inner_api_calls] begin[:]
call[name[self]._inner_api_calls][constant[update_tenant]] assign[=] call[name[google].api_core.gapic_v1.method.wrap_method, parameter[name[self].transport.update_tenant]]
variable[request] assign[=] call[name[tenant_service_pb2].UpdateTenantRequest, parameter[]]
return[call[call[name[self]._inner_api_calls][constant[update_tenant]], parameter[name[request]]]] | keyword[def] identifier[update_tenant] (
identifier[self] ,
identifier[tenant] ,
identifier[update_mask] = keyword[None] ,
identifier[retry] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[timeout] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[metadata] = keyword[None] ,
):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_inner_api_calls] :
identifier[self] . identifier[_inner_api_calls] [
literal[string]
]= identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[wrap_method] (
identifier[self] . identifier[transport] . identifier[update_tenant] ,
identifier[default_retry] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[retry] ,
identifier[default_timeout] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[timeout] ,
identifier[client_info] = identifier[self] . identifier[_client_info] ,
)
identifier[request] = identifier[tenant_service_pb2] . identifier[UpdateTenantRequest] (
identifier[tenant] = identifier[tenant] , identifier[update_mask] = identifier[update_mask]
)
keyword[return] identifier[self] . identifier[_inner_api_calls] [ literal[string] ](
identifier[request] , identifier[retry] = identifier[retry] , identifier[timeout] = identifier[timeout] , identifier[metadata] = identifier[metadata]
) | def update_tenant(self, tenant, update_mask=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None):
"""
Updates specified tenant.
Example:
>>> from google.cloud import talent_v4beta1
>>>
>>> client = talent_v4beta1.TenantServiceClient()
>>>
>>> # TODO: Initialize `tenant`:
>>> tenant = {}
>>>
>>> response = client.update_tenant(tenant)
Args:
tenant (Union[dict, ~google.cloud.talent_v4beta1.types.Tenant]): Required.
The tenant resource to replace the current resource in the system.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.Tenant`
update_mask (Union[dict, ~google.cloud.talent_v4beta1.types.FieldMask]): Optional but strongly recommended for the best service experience.
If ``update_mask`` is provided, only the specified fields in ``tenant``
are updated. Otherwise all the fields are updated.
A field mask to specify the tenant fields to be updated. Only top level
fields of ``Tenant`` are supported.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.talent_v4beta1.types.FieldMask`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.talent_v4beta1.types.Tenant` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'update_tenant' not in self._inner_api_calls:
self._inner_api_calls['update_tenant'] = google.api_core.gapic_v1.method.wrap_method(self.transport.update_tenant, default_retry=self._method_configs['UpdateTenant'].retry, default_timeout=self._method_configs['UpdateTenant'].timeout, client_info=self._client_info) # depends on [control=['if'], data=[]]
request = tenant_service_pb2.UpdateTenantRequest(tenant=tenant, update_mask=update_mask)
return self._inner_api_calls['update_tenant'](request, retry=retry, timeout=timeout, metadata=metadata) |
def mdot(*args):
    """Multiply an arbitrary number of ndarrays into a single matrix product.

    Convenience wrapper so that ``np.dot(A, np.dot(B, np.dot(C, D)))`` can be
    written as ``mdot(A, B, C, D)``.  The product is folded from the right, so
    the floating-point associativity matches the nested ``np.dot`` calls.

    Parameters
    ----------
    *args : an arbitrarily long list of ndarrays that must be compatible for
        multiplication, i.e. args[i].shape[1] = args[i+1].shape[0].

    Raises
    ------
    ValueError
        If called with no arguments.
    """
    if not args:
        raise ValueError('need at least one argument')
    # Fold right-to-left: start with the last factor and prepend each earlier
    # one, reproducing np.dot(A, np.dot(B, ...)) exactly.
    product = args[-1]
    for factor in reversed(args[:-1]):
        product = np.dot(factor, product)
    return product
constant[Computes a matrix product of multiple ndarrays
This is a convenience function to avoid constructs such as np.dot(A, np.dot(B, np.dot(C, D))) and instead
use mdot(A, B, C, D).
Parameters
----------
*args : an arbitrarily long list of ndarrays that must be compatible for multiplication,
i.e. args[i].shape[1] = args[i+1].shape[0].
]
if compare[call[name[len], parameter[name[args]]] less[<] constant[1]] begin[:]
<ast.Raise object at 0x7da18bcc90c0> | keyword[def] identifier[mdot] (* identifier[args] ):
literal[string]
keyword[if] identifier[len] ( identifier[args] )< literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[elif] identifier[len] ( identifier[args] )== literal[int] :
keyword[return] identifier[args] [ literal[int] ]
keyword[elif] identifier[len] ( identifier[args] )== literal[int] :
keyword[return] identifier[np] . identifier[dot] ( identifier[args] [ literal[int] ], identifier[args] [ literal[int] ])
keyword[else] :
keyword[return] identifier[np] . identifier[dot] ( identifier[args] [ literal[int] ], identifier[mdot] (* identifier[args] [ literal[int] :])) | def mdot(*args):
"""Computes a matrix product of multiple ndarrays
This is a convenience function to avoid constructs such as np.dot(A, np.dot(B, np.dot(C, D))) and instead
use mdot(A, B, C, D).
Parameters
----------
*args : an arbitrarily long list of ndarrays that must be compatible for multiplication,
i.e. args[i].shape[1] = args[i+1].shape[0].
"""
if len(args) < 1:
raise ValueError('need at least one argument') # depends on [control=['if'], data=[]]
elif len(args) == 1:
return args[0] # depends on [control=['if'], data=[]]
elif len(args) == 2:
return np.dot(args[0], args[1]) # depends on [control=['if'], data=[]]
else:
return np.dot(args[0], mdot(*args[1:])) |
def best_fit_distribution(data, bins=200, ax=None, distributions=None):
    """Model data by finding best fit distribution to data.

    Builds an empirical density from a histogram of ``data``, fits each
    candidate ``scipy.stats`` distribution to the raw samples, and keeps the
    candidate whose fitted PDF has the smallest sum of squared errors (SSE)
    against the empirical density.

    Parameters
    ----------
    data : array-like
        Observed samples to model.
    bins : int, optional
        Number of histogram bins used for the empirical density (default 200).
    ax : matplotlib axes, optional
        If given, the fitted PDF of every candidate is plotted onto it.
    distributions : list of scipy.stats distributions, optional
        Candidate distributions to try.  Defaults to a broad list of
        continuous distributions available in the installed scipy.

    Returns
    -------
    tuple
        ``(name, params)`` -- the name of the best-fitting distribution and
        its fitted parameters (shape args, then loc, then scale).
    """
    # Get histogram of original data.  `density=True` is the modern spelling
    # of the `normed=True` keyword that was removed in numpy 1.24.
    y, x = np.histogram(data, bins=bins, density=True)
    # Convert bin edges to bin centers (midpoint of consecutive edges).
    x = (x + np.roll(x, -1))[:-1] / 2.0

    if distributions is None:
        # Resolve candidates by name so entries removed from newer scipy
        # releases (e.g. frechet_r/frechet_l, gilbrat) are skipped instead of
        # raising AttributeError and crashing the whole function.
        names = [
            'alpha', 'anglit', 'arcsine', 'beta', 'betaprime', 'bradford', 'burr',
            'cauchy', 'chi', 'chi2', 'cosine', 'dgamma', 'dweibull', 'erlang',
            'expon', 'exponnorm', 'exponweib', 'exponpow', 'f', 'fatiguelife',
            'fisk', 'foldcauchy', 'foldnorm', 'frechet_r', 'frechet_l',
            'genlogistic', 'genpareto', 'gennorm', 'genexpon', 'genextreme',
            'gausshyper', 'gamma', 'gengamma', 'genhalflogistic', 'gilbrat',
            'gompertz', 'gumbel_r', 'gumbel_l', 'halfcauchy', 'halflogistic',
            'halfnorm', 'halfgennorm', 'hypsecant', 'invgamma', 'invgauss',
            'invweibull', 'johnsonsb', 'johnsonsu', 'ksone', 'kstwobign',
            'laplace', 'levy', 'levy_l', 'levy_stable', 'logistic', 'loggamma',
            'loglaplace', 'lognorm', 'lomax', 'maxwell', 'mielke', 'nakagami',
            'ncx2', 'ncf', 'nct', 'norm', 'pareto', 'pearson3', 'powerlaw',
            'powerlognorm', 'powernorm', 'rdist', 'reciprocal', 'rayleigh',
            'rice', 'recipinvgauss', 'semicircular', 't', 'triang', 'truncexpon',
            'truncnorm', 'tukeylambda', 'uniform', 'vonmises', 'vonmises_line',
            'wald', 'weibull_min', 'weibull_max', 'wrapcauchy',
        ]
        distributions = [getattr(st, name) for name in names if hasattr(st, name)]

    # Best holders; the normal distribution is the fallback answer.
    best_distribution = st.norm
    best_params = (0.0, 1.0)
    best_sse = np.inf

    # Estimate distribution parameters from data.
    for distribution in distributions:
        # fit() can fail in many ways on arbitrary data; failing candidates
        # are simply skipped so one bad fit never aborts the search.
        try:
            # Ignore warnings from data that can't be fit.
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore')
                # Fit dist to data.
                params = distribution.fit(data)
                # Separate shape arguments from loc/scale (always the last two).
                arg = params[:-2]
                loc = params[-2]
                scale = params[-1]
                # Calculate fitted PDF and its error against the empirical density.
                pdf = distribution.pdf(x, *arg, loc=loc, scale=scale)
                sse = np.sum(np.power(y - pdf, 2.0))
                # If an axis was passed in, add this candidate's PDF to the plot.
                # (The original code had a stray `end` statement here -- a
                # leftover that raised a silently-swallowed NameError; removed.)
                try:
                    if ax:
                        pd.Series(pdf, x).plot(ax=ax)
                except Exception:
                    # Plotting is best-effort; never let it break the fitting.
                    pass
                # Keep this candidate if it improves on the best SSE so far.
                if best_sse > sse > 0:
                    best_distribution = distribution
                    best_params = params
                    best_sse = sse
        except Exception:
            pass

    return (best_distribution.name, best_params)
constant[Model data by finding best fit distribution to data]
<ast.Tuple object at 0x7da18f58fe80> assign[=] call[name[np].histogram, parameter[name[data]]]
variable[x] assign[=] binary_operation[call[binary_operation[name[x] + call[name[np].roll, parameter[name[x], <ast.UnaryOp object at 0x7da18f58fcd0>]]]][<ast.Slice object at 0x7da18f58cd00>] / constant[2.0]]
variable[DISTRIBUTIONS] assign[=] list[[<ast.Attribute object at 0x7da18f58dc00>, <ast.Attribute object at 0x7da18f58ea10>, <ast.Attribute object at 0x7da18f58d300>, <ast.Attribute object at 0x7da18f58dde0>, <ast.Attribute object at 0x7da18f58d090>, <ast.Attribute object at 0x7da18f58ed40>, <ast.Attribute object at 0x7da18f58f5b0>, <ast.Attribute object at 0x7da18f58d2d0>, <ast.Attribute object at 0x7da18f58fb20>, <ast.Attribute object at 0x7da18f58c820>, <ast.Attribute object at 0x7da18f58ceb0>, <ast.Attribute object at 0x7da18f58cfd0>, <ast.Attribute object at 0x7da18f58e980>, <ast.Attribute object at 0x7da18f58f010>, <ast.Attribute object at 0x7da18f58fe50>, <ast.Attribute object at 0x7da18f58d510>, <ast.Attribute object at 0x7da18f58c8e0>, <ast.Attribute object at 0x7da18f58ebf0>, <ast.Attribute object at 0x7da18f58c2b0>, <ast.Attribute object at 0x7da18f58cac0>, <ast.Attribute object at 0x7da18f58de40>, <ast.Attribute object at 0x7da18f58eb30>, <ast.Attribute object at 0x7da18f58e500>, <ast.Attribute object at 0x7da18f58ce80>, <ast.Attribute object at 0x7da18f58d840>, <ast.Attribute object at 0x7da18f58c670>, <ast.Attribute object at 0x7da18f58c0d0>, <ast.Attribute object at 0x7da18f58eaa0>, <ast.Attribute object at 0x7da18f58fa90>, <ast.Attribute object at 0x7da18f58ccd0>, <ast.Attribute object at 0x7da18f58c220>, <ast.Attribute object at 0x7da18f58ff70>, <ast.Attribute object at 0x7da18f58d570>, <ast.Attribute object at 0x7da18f58d0f0>, <ast.Attribute object at 0x7da18f58d930>, <ast.Attribute object at 0x7da18f58e1a0>, <ast.Attribute object at 0x7da18f58c940>, <ast.Attribute object at 0x7da18f58d9c0>, <ast.Attribute object at 0x7da18f58cd90>, <ast.Attribute object at 0x7da18f58d600>, <ast.Attribute object at 0x7da18f58fd30>, <ast.Attribute object at 0x7da18f58eec0>, <ast.Attribute object at 0x7da18f58ee60>, <ast.Attribute object at 0x7da18f58f8e0>, <ast.Attribute object at 0x7da18f58d3c0>, <ast.Attribute object at 0x7da18f58fdc0>, <ast.Attribute object at 
0x7da18f58e9b0>, <ast.Attribute object at 0x7da18f58ed10>, <ast.Attribute object at 0x7da18f58d6c0>, <ast.Attribute object at 0x7da18f58c6a0>, <ast.Attribute object at 0x7da18f58c4c0>, <ast.Attribute object at 0x7da18f58c880>, <ast.Attribute object at 0x7da18f58c070>, <ast.Attribute object at 0x7da18f58fc70>, <ast.Attribute object at 0x7da18f58fc10>, <ast.Attribute object at 0x7da18f58f670>, <ast.Attribute object at 0x7da18f58ef20>, <ast.Attribute object at 0x7da18f58d990>, <ast.Attribute object at 0x7da18f58ded0>, <ast.Attribute object at 0x7da18f58f280>, <ast.Attribute object at 0x7da18f58d3f0>, <ast.Attribute object at 0x7da18f58e6b0>, <ast.Attribute object at 0x7da18f58fdf0>, <ast.Attribute object at 0x7da18f58f760>, <ast.Attribute object at 0x7da18f58f490>, <ast.Attribute object at 0x7da18f58dd20>, <ast.Attribute object at 0x7da18f58dd80>, <ast.Attribute object at 0x7da18f58f340>, <ast.Attribute object at 0x7da18f58ca00>, <ast.Attribute object at 0x7da18f58f070>, <ast.Attribute object at 0x7da18f58e590>, <ast.Attribute object at 0x7da18f58e2f0>, <ast.Attribute object at 0x7da18f58dab0>, <ast.Attribute object at 0x7da18f58cd30>, <ast.Attribute object at 0x7da18f58dc90>, <ast.Attribute object at 0x7da18f58d0c0>, <ast.Attribute object at 0x7da18f58df00>, <ast.Attribute object at 0x7da18f58e530>, <ast.Attribute object at 0x7da18f58e620>, <ast.Attribute object at 0x7da18f58d4e0>, <ast.Attribute object at 0x7da18f58df30>, <ast.Attribute object at 0x7da18f58c700>, <ast.Attribute object at 0x7da18f58e050>, <ast.Attribute object at 0x7da18f58e0e0>, <ast.Attribute object at 0x7da18f58d360>, <ast.Attribute object at 0x7da18f58e920>, <ast.Attribute object at 0x7da18f58e410>, <ast.Attribute object at 0x7da18f58cc40>, <ast.Attribute object at 0x7da18f58d480>]]
variable[best_distribution] assign[=] name[st].norm
variable[best_params] assign[=] tuple[[<ast.Constant object at 0x7da18f58c730>, <ast.Constant object at 0x7da18f58dc60>]]
variable[best_sse] assign[=] name[np].inf
for taget[name[distribution]] in starred[name[DISTRIBUTIONS]] begin[:]
<ast.Try object at 0x7da18f58e3e0>
return[tuple[[<ast.Attribute object at 0x7da20e9b3820>, <ast.Name object at 0x7da20e9b3f70>]]] | keyword[def] identifier[best_fit_distribution] ( identifier[data] , identifier[bins] = literal[int] , identifier[ax] = keyword[None] ):
literal[string]
identifier[y] , identifier[x] = identifier[np] . identifier[histogram] ( identifier[data] , identifier[bins] = identifier[bins] , identifier[normed] = keyword[True] )
identifier[x] =( identifier[x] + identifier[np] . identifier[roll] ( identifier[x] ,- literal[int] ))[:- literal[int] ]/ literal[int]
identifier[DISTRIBUTIONS] =[
identifier[st] . identifier[alpha] , identifier[st] . identifier[anglit] , identifier[st] . identifier[arcsine] , identifier[st] . identifier[beta] , identifier[st] . identifier[betaprime] , identifier[st] . identifier[bradford] , identifier[st] . identifier[burr] , identifier[st] . identifier[cauchy] , identifier[st] . identifier[chi] , identifier[st] . identifier[chi2] , identifier[st] . identifier[cosine] ,
identifier[st] . identifier[dgamma] , identifier[st] . identifier[dweibull] , identifier[st] . identifier[erlang] , identifier[st] . identifier[expon] , identifier[st] . identifier[exponnorm] , identifier[st] . identifier[exponweib] , identifier[st] . identifier[exponpow] , identifier[st] . identifier[f] , identifier[st] . identifier[fatiguelife] , identifier[st] . identifier[fisk] ,
identifier[st] . identifier[foldcauchy] , identifier[st] . identifier[foldnorm] , identifier[st] . identifier[frechet_r] , identifier[st] . identifier[frechet_l] , identifier[st] . identifier[genlogistic] , identifier[st] . identifier[genpareto] , identifier[st] . identifier[gennorm] , identifier[st] . identifier[genexpon] ,
identifier[st] . identifier[genextreme] , identifier[st] . identifier[gausshyper] , identifier[st] . identifier[gamma] , identifier[st] . identifier[gengamma] , identifier[st] . identifier[genhalflogistic] , identifier[st] . identifier[gilbrat] , identifier[st] . identifier[gompertz] , identifier[st] . identifier[gumbel_r] ,
identifier[st] . identifier[gumbel_l] , identifier[st] . identifier[halfcauchy] , identifier[st] . identifier[halflogistic] , identifier[st] . identifier[halfnorm] , identifier[st] . identifier[halfgennorm] , identifier[st] . identifier[hypsecant] , identifier[st] . identifier[invgamma] , identifier[st] . identifier[invgauss] ,
identifier[st] . identifier[invweibull] , identifier[st] . identifier[johnsonsb] , identifier[st] . identifier[johnsonsu] , identifier[st] . identifier[ksone] , identifier[st] . identifier[kstwobign] , identifier[st] . identifier[laplace] , identifier[st] . identifier[levy] , identifier[st] . identifier[levy_l] , identifier[st] . identifier[levy_stable] ,
identifier[st] . identifier[logistic] , identifier[st] . identifier[loggamma] , identifier[st] . identifier[loglaplace] , identifier[st] . identifier[lognorm] , identifier[st] . identifier[lomax] , identifier[st] . identifier[maxwell] , identifier[st] . identifier[mielke] , identifier[st] . identifier[nakagami] , identifier[st] . identifier[ncx2] , identifier[st] . identifier[ncf] ,
identifier[st] . identifier[nct] , identifier[st] . identifier[norm] , identifier[st] . identifier[pareto] , identifier[st] . identifier[pearson3] , identifier[st] . identifier[powerlaw] , identifier[st] . identifier[powerlognorm] , identifier[st] . identifier[powernorm] , identifier[st] . identifier[rdist] , identifier[st] . identifier[reciprocal] ,
identifier[st] . identifier[rayleigh] , identifier[st] . identifier[rice] , identifier[st] . identifier[recipinvgauss] , identifier[st] . identifier[semicircular] , identifier[st] . identifier[t] , identifier[st] . identifier[triang] , identifier[st] . identifier[truncexpon] , identifier[st] . identifier[truncnorm] , identifier[st] . identifier[tukeylambda] ,
identifier[st] . identifier[uniform] , identifier[st] . identifier[vonmises] , identifier[st] . identifier[vonmises_line] , identifier[st] . identifier[wald] , identifier[st] . identifier[weibull_min] , identifier[st] . identifier[weibull_max] , identifier[st] . identifier[wrapcauchy]
]
identifier[best_distribution] = identifier[st] . identifier[norm]
identifier[best_params] =( literal[int] , literal[int] )
identifier[best_sse] = identifier[np] . identifier[inf]
keyword[for] identifier[distribution] keyword[in] identifier[DISTRIBUTIONS] :
keyword[try] :
keyword[with] identifier[warnings] . identifier[catch_warnings] ():
identifier[warnings] . identifier[filterwarnings] ( literal[string] )
identifier[params] = identifier[distribution] . identifier[fit] ( identifier[data] )
identifier[arg] = identifier[params] [:- literal[int] ]
identifier[loc] = identifier[params] [- literal[int] ]
identifier[scale] = identifier[params] [- literal[int] ]
identifier[pdf] = identifier[distribution] . identifier[pdf] ( identifier[x] , identifier[loc] = identifier[loc] , identifier[scale] = identifier[scale] ,* identifier[arg] )
identifier[sse] = identifier[np] . identifier[sum] ( identifier[np] . identifier[power] ( identifier[y] - identifier[pdf] , literal[int] ))
keyword[try] :
keyword[if] identifier[ax] :
identifier[pd] . identifier[Series] ( identifier[pdf] , identifier[x] ). identifier[plot] ( identifier[ax] = identifier[ax] )
identifier[end]
keyword[except] identifier[Exception] :
keyword[pass]
keyword[if] identifier[best_sse] > identifier[sse] > literal[int] :
identifier[best_distribution] = identifier[distribution]
identifier[best_params] = identifier[params]
identifier[best_sse] = identifier[sse]
keyword[except] identifier[Exception] :
keyword[pass]
keyword[return] ( identifier[best_distribution] . identifier[name] , identifier[best_params] ) | def best_fit_distribution(data, bins=200, ax=None):
"""Model data by finding best fit distribution to data"""
# Get histogram of original data
(y, x) = np.histogram(data, bins=bins, normed=True)
x = (x + np.roll(x, -1))[:-1] / 2.0
# Distributions to check
DISTRIBUTIONS = [st.alpha, st.anglit, st.arcsine, st.beta, st.betaprime, st.bradford, st.burr, st.cauchy, st.chi, st.chi2, st.cosine, st.dgamma, st.dweibull, st.erlang, st.expon, st.exponnorm, st.exponweib, st.exponpow, st.f, st.fatiguelife, st.fisk, st.foldcauchy, st.foldnorm, st.frechet_r, st.frechet_l, st.genlogistic, st.genpareto, st.gennorm, st.genexpon, st.genextreme, st.gausshyper, st.gamma, st.gengamma, st.genhalflogistic, st.gilbrat, st.gompertz, st.gumbel_r, st.gumbel_l, st.halfcauchy, st.halflogistic, st.halfnorm, st.halfgennorm, st.hypsecant, st.invgamma, st.invgauss, st.invweibull, st.johnsonsb, st.johnsonsu, st.ksone, st.kstwobign, st.laplace, st.levy, st.levy_l, st.levy_stable, st.logistic, st.loggamma, st.loglaplace, st.lognorm, st.lomax, st.maxwell, st.mielke, st.nakagami, st.ncx2, st.ncf, st.nct, st.norm, st.pareto, st.pearson3, st.powerlaw, st.powerlognorm, st.powernorm, st.rdist, st.reciprocal, st.rayleigh, st.rice, st.recipinvgauss, st.semicircular, st.t, st.triang, st.truncexpon, st.truncnorm, st.tukeylambda, st.uniform, st.vonmises, st.vonmises_line, st.wald, st.weibull_min, st.weibull_max, st.wrapcauchy]
# Best holders
best_distribution = st.norm
best_params = (0.0, 1.0)
best_sse = np.inf
# Estimate distribution parameters from data
for distribution in DISTRIBUTIONS:
# Try to fit the distribution
try:
# Ignore warnings from data that can't be fit
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
# fit dist to data
params = distribution.fit(data)
# Separate parts of parameters
arg = params[:-2]
loc = params[-2]
scale = params[-1]
# Calculate fitted PDF and error with fit in distribution
pdf = distribution.pdf(x, *arg, loc=loc, scale=scale)
sse = np.sum(np.power(y - pdf, 2.0))
# if axis pass in add to plot
try:
if ax:
pd.Series(pdf, x).plot(ax=ax) # depends on [control=['if'], data=[]]
end # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]]
# identify if this distribution is better
if best_sse > sse > 0:
best_distribution = distribution
best_params = params
best_sse = sse # depends on [control=['if'], data=['best_sse', 'sse']] # depends on [control=['with'], data=[]] # depends on [control=['try'], data=[]]
except Exception:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['distribution']]
return (best_distribution.name, best_params) |
def delete(self, params=None):
    """Delete this resource from the server, passing the specified query parameters.

    If this resource doesn't support ``DELETE``, a :py:exc:`.JIRAError`
    will be raised; subclasses that specialize this method will only raise
    errors in case of user error.

    :param params: Parameters for the delete request.
    :type params: Optional[Dict[str, Any]]
    :rtype: Response
    """
    if not self._options['async']:
        # Synchronous path: issue the DELETE immediately and return the
        # HTTP response to the caller.
        return self._session.delete(url=self.self, params=params)
    # Asynchronous path: fire the request on a worker and remember the job
    # handle so pending deletes can be tracked; create the tracking set
    # lazily on first use.
    session = self._session
    if not hasattr(session, '_async_jobs'):
        session._async_jobs = set()
    session._async_jobs.add(
        threaded_requests.delete(url=self.self, params=params))
constant[Delete this resource from the server, passing the specified query parameters.
If this resource doesn't support ``DELETE``, a :py:exc:`.JIRAError`
will be raised; subclasses that specialize this method will only raise errors
in case of user error.
:param params: Parameters for the delete request.
:type params: Optional[Dict[str, Any]]
:rtype: Response
]
if call[name[self]._options][constant[async]] begin[:]
if <ast.UnaryOp object at 0x7da1b1c2b850> begin[:]
name[self]._session._async_jobs assign[=] call[name[set], parameter[]]
call[name[self]._session._async_jobs.add, parameter[call[name[threaded_requests].delete, parameter[]]]] | keyword[def] identifier[delete] ( identifier[self] , identifier[params] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[_options] [ literal[string] ]:
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] . identifier[_session] , literal[string] ):
identifier[self] . identifier[_session] . identifier[_async_jobs] = identifier[set] ()
identifier[self] . identifier[_session] . identifier[_async_jobs] . identifier[add] (
identifier[threaded_requests] . identifier[delete] ( identifier[url] = identifier[self] . identifier[self] , identifier[params] = identifier[params] ))
keyword[else] :
keyword[return] identifier[self] . identifier[_session] . identifier[delete] ( identifier[url] = identifier[self] . identifier[self] , identifier[params] = identifier[params] ) | def delete(self, params=None):
"""Delete this resource from the server, passing the specified query parameters.
If this resource doesn't support ``DELETE``, a :py:exc:`.JIRAError`
will be raised; subclasses that specialize this method will only raise errors
in case of user error.
:param params: Parameters for the delete request.
:type params: Optional[Dict[str, Any]]
:rtype: Response
"""
if self._options['async']:
if not hasattr(self._session, '_async_jobs'):
self._session._async_jobs = set() # depends on [control=['if'], data=[]]
self._session._async_jobs.add(threaded_requests.delete(url=self.self, params=params)) # depends on [control=['if'], data=[]]
else:
return self._session.delete(url=self.self, params=params) |
def get_grade_system_admin_session(self):
    """Gets the ``OsidSession`` associated with the grade system administration service.

    return: (osid.grading.GradeSystemAdminSession) - a
        ``GradeSystemAdminSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_grade_system_admin()`` is
        ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_system_admin()`` is ``true``.*
    """
    if self.supports_grade_system_admin():
        # pylint: disable=no-member
        return sessions.GradeSystemAdminSession(runtime=self._runtime)
    raise errors.Unimplemented()
constant[Gets the ``OsidSession`` associated with the grade system administration service.
return: (osid.grading.GradeSystemAdminSession) - a
``GradeSystemAdminSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_system_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_admin()`` is ``true``.*
]
if <ast.UnaryOp object at 0x7da20e9573a0> begin[:]
<ast.Raise object at 0x7da20e955840>
return[call[name[sessions].GradeSystemAdminSession, parameter[]]] | keyword[def] identifier[get_grade_system_admin_session] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[supports_grade_system_admin] ():
keyword[raise] identifier[errors] . identifier[Unimplemented] ()
keyword[return] identifier[sessions] . identifier[GradeSystemAdminSession] ( identifier[runtime] = identifier[self] . identifier[_runtime] ) | def get_grade_system_admin_session(self):
"""Gets the ``OsidSession`` associated with the grade system administration service.
return: (osid.grading.GradeSystemAdminSession) - a
``GradeSystemAdminSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_system_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_admin()`` is ``true``.*
"""
if not self.supports_grade_system_admin():
raise errors.Unimplemented() # depends on [control=['if'], data=[]]
# pylint: disable=no-member
return sessions.GradeSystemAdminSession(runtime=self._runtime) |
def __parse_dict(self):
    '''Parse the loaded dictionary lines, applying the affix/compound rules.

    Each line has the form ``word`` or ``word/FLAGS``.  For every flag the
    word is either fed into the matching compound rules or expanded via the
    affix rules whose condition it meets; results accumulate in
    ``self.words`` (base word -> list of derivatives, add/sub strings, or
    key indices, depending on ``self.format``/``self.key``).  Finally a
    regular expression for each compound rule is appended to
    ``self.regex_compounds``.
    '''
    i = 0  # NOTE(review): appears unused; shadowed by the inner ``for i in range(...)`` below
    lines = self.lines
    for line in lines:
        # Split "word/FLAGS"; anything after a second '/' is ignored.
        line = line.split('/')
        word = line[0]
        flags = line[1] if len(line) > 1 else None
        # Base Word
        self.num_words += 1
        if flags != None:
            # Derivatives possible
            for flag in flags:
                # Compound?
                if flag in self.aff.compound_flags or flag == self.aff.only_in_compound_flag:
                    # Register the word with every compound rule.
                    for rule in self.aff.compound_rules:
                        rule.add_flag_values(word, flag)
                else:
                    # No Suggest flags
                    if self.aff.no_suggest_flag == flag:
                        pass
                    else:
                        affix_rule_entries = self.aff.affix_rules[flag]
                        # Get flag that meets condition
                        for i in range(len(affix_rule_entries)):
                            rule = affix_rule_entries[i]
                            if rule.meets_condition(word):
                                # Add word to list if does not already exist
                                if word not in self.words:
                                    self.words[word] = []
                                # Derivatives
                                self.num_words += 1
                                if self.format == "addsub":
                                    add_sub = rule.generate_add_sub()
                                    # Add to list of keys
                                    if add_sub not in self.keys:
                                        self.keys.append(add_sub)
                                    # Check if key is to be generated
                                    if self.key:
                                        # Store the index of the shared add/sub key.
                                        self.words[word].append(str(self.keys.index(add_sub)))
                                    else:
                                        # Generate addsub next to base word
                                        self.words[word].append(rule.generate_add_sub())
                                else:
                                    # Default, insert complete derivative word
                                    self.words[word].append(rule.create_derivative(word))
        else:
            # No derivatives.
            # NOTE(review): this unconditionally resets the entry, so a
            # flagless duplicate of a word seen earlier with flags would
            # discard its derivatives -- confirm this is intended.
            self.words[word] = []
    # Create regular expression from compounds
    for rule in self.aff.compound_rules:
        # Add to list
        self.regex_compounds.append(rule.get_regex())
constant[ Parses dictionary with according rules ]
variable[i] assign[=] constant[0]
variable[lines] assign[=] name[self].lines
for taget[name[line]] in starred[name[lines]] begin[:]
variable[line] assign[=] call[name[line].split, parameter[constant[/]]]
variable[word] assign[=] call[name[line]][constant[0]]
variable[flags] assign[=] <ast.IfExp object at 0x7da20c6c61d0>
<ast.AugAssign object at 0x7da20c6c5570>
if compare[name[flags] not_equal[!=] constant[None]] begin[:]
for taget[name[flag]] in starred[name[flags]] begin[:]
if <ast.BoolOp object at 0x7da20c6c4f70> begin[:]
for taget[name[rule]] in starred[name[self].aff.compound_rules] begin[:]
call[name[rule].add_flag_values, parameter[name[word], name[flag]]]
for taget[name[rule]] in starred[name[self].aff.compound_rules] begin[:]
call[name[self].regex_compounds.append, parameter[call[name[rule].get_regex, parameter[]]]] | keyword[def] identifier[__parse_dict] ( identifier[self] ):
literal[string]
identifier[i] = literal[int]
identifier[lines] = identifier[self] . identifier[lines]
keyword[for] identifier[line] keyword[in] identifier[lines] :
identifier[line] = identifier[line] . identifier[split] ( literal[string] )
identifier[word] = identifier[line] [ literal[int] ]
identifier[flags] = identifier[line] [ literal[int] ] keyword[if] identifier[len] ( identifier[line] )> literal[int] keyword[else] keyword[None]
identifier[self] . identifier[num_words] += literal[int]
keyword[if] identifier[flags] != keyword[None] :
keyword[for] identifier[flag] keyword[in] identifier[flags] :
keyword[if] identifier[flag] keyword[in] identifier[self] . identifier[aff] . identifier[compound_flags] keyword[or] identifier[flag] == identifier[self] . identifier[aff] . identifier[only_in_compound_flag] :
keyword[for] identifier[rule] keyword[in] identifier[self] . identifier[aff] . identifier[compound_rules] :
identifier[rule] . identifier[add_flag_values] ( identifier[word] , identifier[flag] )
keyword[else] :
keyword[if] identifier[self] . identifier[aff] . identifier[no_suggest_flag] == identifier[flag] :
keyword[pass]
keyword[else] :
identifier[affix_rule_entries] = identifier[self] . identifier[aff] . identifier[affix_rules] [ identifier[flag] ]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[affix_rule_entries] )):
identifier[rule] = identifier[affix_rule_entries] [ identifier[i] ]
keyword[if] identifier[rule] . identifier[meets_condition] ( identifier[word] ):
keyword[if] identifier[word] keyword[not] keyword[in] identifier[self] . identifier[words] :
identifier[self] . identifier[words] [ identifier[word] ]=[]
identifier[self] . identifier[num_words] += literal[int]
keyword[if] identifier[self] . identifier[format] == literal[string] :
identifier[add_sub] = identifier[rule] . identifier[generate_add_sub] ()
keyword[if] identifier[add_sub] keyword[not] keyword[in] identifier[self] . identifier[keys] :
identifier[self] . identifier[keys] . identifier[append] ( identifier[add_sub] )
keyword[if] identifier[self] . identifier[key] :
identifier[self] . identifier[words] [ identifier[word] ]. identifier[append] ( identifier[str] ( identifier[self] . identifier[keys] . identifier[index] ( identifier[add_sub] )))
keyword[else] :
identifier[self] . identifier[words] [ identifier[word] ]. identifier[append] ( identifier[rule] . identifier[generate_add_sub] ())
keyword[else] :
identifier[self] . identifier[words] [ identifier[word] ]. identifier[append] ( identifier[rule] . identifier[create_derivative] ( identifier[word] ))
keyword[else] :
identifier[self] . identifier[words] [ identifier[word] ]=[]
keyword[for] identifier[rule] keyword[in] identifier[self] . identifier[aff] . identifier[compound_rules] :
identifier[self] . identifier[regex_compounds] . identifier[append] ( identifier[rule] . identifier[get_regex] ()) | def __parse_dict(self):
""" Parses dictionary with according rules """
i = 0
lines = self.lines
for line in lines:
line = line.split('/')
word = line[0]
flags = line[1] if len(line) > 1 else None
# Base Word
self.num_words += 1
if flags != None:
# Derivatives possible
for flag in flags:
# Compound?
if flag in self.aff.compound_flags or flag == self.aff.only_in_compound_flag:
for rule in self.aff.compound_rules:
rule.add_flag_values(word, flag) # depends on [control=['for'], data=['rule']] # depends on [control=['if'], data=[]]
# No Suggest flags
elif self.aff.no_suggest_flag == flag:
pass # depends on [control=['if'], data=[]]
else:
affix_rule_entries = self.aff.affix_rules[flag]
# Get flag that meets condition
for i in range(len(affix_rule_entries)):
rule = affix_rule_entries[i]
if rule.meets_condition(word):
# Add word to list if does not already exist
if word not in self.words:
self.words[word] = [] # depends on [control=['if'], data=['word']]
# Derivatives
self.num_words += 1
if self.format == 'addsub':
add_sub = rule.generate_add_sub()
# Add to list of keys
if add_sub not in self.keys:
self.keys.append(add_sub) # depends on [control=['if'], data=['add_sub']]
# Check if key is to be generated
if self.key:
self.words[word].append(str(self.keys.index(add_sub))) # depends on [control=['if'], data=[]]
else:
# Generate addsub next to base word
self.words[word].append(rule.generate_add_sub()) # depends on [control=['if'], data=[]]
else:
# Default, insert complete derivative word
self.words[word].append(rule.create_derivative(word)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=['flag']] # depends on [control=['if'], data=['flags']]
else:
# No derivatives.
self.words[word] = [] # depends on [control=['for'], data=['line']]
# Create regular expression from compounds
for rule in self.aff.compound_rules:
# Add to list
self.regex_compounds.append(rule.get_regex()) # depends on [control=['for'], data=['rule']] |
def find_guest(name, quiet=False, path=None):
    '''
    Returns the host for a container.

    path
        path to the container parent
        default: /var/lib/lxc (system default)

        .. versionadded:: 2015.8.0

    .. code-block:: bash

        salt-run lxc.find_guest name
    '''
    if quiet:
        log.warning("'quiet' argument is being deprecated."
                    ' Please migrate to --quiet')
    states = ('running', 'frozen', 'stopped')
    for entry in _list_iter(path=path):
        host, containers = next(six.iteritems(entry))
        # The container may be listed under any lifecycle state.
        if any(name in containers[state] for state in states):
            if not quiet:
                __jid_event__.fire_event(
                    {'data': host,
                     'outputter': 'lxc_find_host'},
                    'progress')
            return host
    return None
constant[
Returns the host for a container.
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
.. code-block:: bash
salt-run lxc.find_guest name
]
if name[quiet] begin[:]
call[name[log].warning, parameter[constant['quiet' argument is being deprecated. Please migrate to --quiet]]]
for taget[name[data]] in starred[call[name[_list_iter], parameter[]]] begin[:]
<ast.Tuple object at 0x7da1b2136020> assign[=] call[name[next], parameter[call[name[six].iteritems, parameter[name[data]]]]]
for taget[name[x]] in starred[tuple[[<ast.Constant object at 0x7da1b1f35930>, <ast.Constant object at 0x7da1b1f35420>, <ast.Constant object at 0x7da1b1f35a80>]]] begin[:]
if compare[name[name] in call[name[l]][name[x]]] begin[:]
if <ast.UnaryOp object at 0x7da1b1f34df0> begin[:]
call[name[__jid_event__].fire_event, parameter[dictionary[[<ast.Constant object at 0x7da1b1f36fb0>, <ast.Constant object at 0x7da1b1f34370>], [<ast.Name object at 0x7da1b1f36230>, <ast.Constant object at 0x7da1b1f34340>]], constant[progress]]]
return[name[host]]
return[constant[None]] | keyword[def] identifier[find_guest] ( identifier[name] , identifier[quiet] = keyword[False] , identifier[path] = keyword[None] ):
literal[string]
keyword[if] identifier[quiet] :
identifier[log] . identifier[warning] ( literal[string]
literal[string] )
keyword[for] identifier[data] keyword[in] identifier[_list_iter] ( identifier[path] = identifier[path] ):
identifier[host] , identifier[l] = identifier[next] ( identifier[six] . identifier[iteritems] ( identifier[data] ))
keyword[for] identifier[x] keyword[in] literal[string] , literal[string] , literal[string] :
keyword[if] identifier[name] keyword[in] identifier[l] [ identifier[x] ]:
keyword[if] keyword[not] identifier[quiet] :
identifier[__jid_event__] . identifier[fire_event] (
{ literal[string] : identifier[host] ,
literal[string] : literal[string] },
literal[string] )
keyword[return] identifier[host]
keyword[return] keyword[None] | def find_guest(name, quiet=False, path=None):
"""
Returns the host for a container.
path
path to the container parent
default: /var/lib/lxc (system default)
.. versionadded:: 2015.8.0
.. code-block:: bash
salt-run lxc.find_guest name
"""
if quiet:
log.warning("'quiet' argument is being deprecated. Please migrate to --quiet") # depends on [control=['if'], data=[]]
for data in _list_iter(path=path):
(host, l) = next(six.iteritems(data))
for x in ('running', 'frozen', 'stopped'):
if name in l[x]:
if not quiet:
__jid_event__.fire_event({'data': host, 'outputter': 'lxc_find_host'}, 'progress') # depends on [control=['if'], data=[]]
return host # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['x']] # depends on [control=['for'], data=['data']]
return None |
def get_policies(self):
    """Returns all the Policies under the Identity namespace.

    Returns:
        (list): A list containing all the Policies under the Identity
            namespace, sorted by policy name.
    """
    prefix = _IDENTITY_NS + _POLICY_NS
    policies = []
    # Each leaf under the prefix holds a serialized PolicyList; flatten
    # every contained policy into a single list.
    for _, leaf_data in self._state_view.leaves(prefix=prefix):
        policy_list = _create_from_bytes(leaf_data, identity_pb2.PolicyList)
        policies.extend(policy_list.policies)
    return sorted(policies, key=lambda policy: policy.name)
constant[Returns all the Policies under the Identity namespace.
Returns:
(list): A list containing all the Policies under the Identity
namespace.
]
variable[prefix] assign[=] binary_operation[name[_IDENTITY_NS] + name[_POLICY_NS]]
variable[policylist_list] assign[=] <ast.ListComp object at 0x7da20c6e6e60>
variable[policies] assign[=] list[[]]
for taget[name[policy_list]] in starred[name[policylist_list]] begin[:]
for taget[name[policy]] in starred[name[policy_list].policies] begin[:]
call[name[policies].append, parameter[name[policy]]]
return[call[name[sorted], parameter[name[policies]]]] | keyword[def] identifier[get_policies] ( identifier[self] ):
literal[string]
identifier[prefix] = identifier[_IDENTITY_NS] + identifier[_POLICY_NS]
identifier[policylist_list] =[
identifier[_create_from_bytes] ( identifier[d] , identifier[identity_pb2] . identifier[PolicyList] )
keyword[for] identifier[_] , identifier[d] keyword[in] identifier[self] . identifier[_state_view] . identifier[leaves] ( identifier[prefix] = identifier[prefix] )
]
identifier[policies] =[]
keyword[for] identifier[policy_list] keyword[in] identifier[policylist_list] :
keyword[for] identifier[policy] keyword[in] identifier[policy_list] . identifier[policies] :
identifier[policies] . identifier[append] ( identifier[policy] )
keyword[return] identifier[sorted] ( identifier[policies] , identifier[key] = keyword[lambda] identifier[p] : identifier[p] . identifier[name] ) | def get_policies(self):
"""Returns all the Policies under the Identity namespace.
Returns:
(list): A list containing all the Policies under the Identity
namespace.
"""
prefix = _IDENTITY_NS + _POLICY_NS
policylist_list = [_create_from_bytes(d, identity_pb2.PolicyList) for (_, d) in self._state_view.leaves(prefix=prefix)]
policies = []
for policy_list in policylist_list:
for policy in policy_list.policies:
policies.append(policy) # depends on [control=['for'], data=['policy']] # depends on [control=['for'], data=['policy_list']]
return sorted(policies, key=lambda p: p.name) |
def get_indelcaller(d_or_c):
    """Retrieve string for indelcaller to use, or empty string if not specified.

    :param d_or_c: dictionary with an ``["algorithm"]["indelcaller"]``
        entry (either a configuration dict or a data dict carrying one).
    :returns: the configured indel caller name; the first entry when a
        list/tuple is configured; ``""`` when unset or falsy.
    """
    # NOTE(review): the original guard
    #     d_or_c if isinstance(d_or_c, dict) and "config" in d_or_c else d_or_c
    # evaluated to d_or_c on BOTH branches (a no-op); presumably
    # ``d_or_c["config"]`` was intended for data dictionaries -- confirm with
    # callers before changing the lookup. Behavior is preserved here.
    config = d_or_c
    indelcaller = config["algorithm"].get("indelcaller", "")
    # Normalize falsy values (None, empty containers, 0) to the empty string.
    if not indelcaller:
        indelcaller = ""
    # A list/tuple of callers collapses to its first entry.
    if isinstance(indelcaller, (list, tuple)):
        indelcaller = indelcaller[0] if indelcaller else ""
    return indelcaller
constant[Retrieve string for indelcaller to use, or empty string if not specified.
]
variable[config] assign[=] <ast.IfExp object at 0x7da1b18fb700>
variable[indelcaller] assign[=] call[call[name[config]][constant[algorithm]].get, parameter[constant[indelcaller], constant[]]]
if <ast.UnaryOp object at 0x7da1b18f99c0> begin[:]
variable[indelcaller] assign[=] constant[]
if call[name[isinstance], parameter[name[indelcaller], tuple[[<ast.Name object at 0x7da1b18489d0>, <ast.Name object at 0x7da1b184b370>]]]] begin[:]
variable[indelcaller] assign[=] <ast.IfExp object at 0x7da1b18493f0>
return[name[indelcaller]] | keyword[def] identifier[get_indelcaller] ( identifier[d_or_c] ):
literal[string]
identifier[config] = identifier[d_or_c] keyword[if] identifier[isinstance] ( identifier[d_or_c] , identifier[dict] ) keyword[and] literal[string] keyword[in] identifier[d_or_c] keyword[else] identifier[d_or_c]
identifier[indelcaller] = identifier[config] [ literal[string] ]. identifier[get] ( literal[string] , literal[string] )
keyword[if] keyword[not] identifier[indelcaller] :
identifier[indelcaller] = literal[string]
keyword[if] identifier[isinstance] ( identifier[indelcaller] ,( identifier[list] , identifier[tuple] )):
identifier[indelcaller] = identifier[indelcaller] [ literal[int] ] keyword[if] ( identifier[len] ( identifier[indelcaller] )> literal[int] ) keyword[else] literal[string]
keyword[return] identifier[indelcaller] | def get_indelcaller(d_or_c):
"""Retrieve string for indelcaller to use, or empty string if not specified.
"""
config = d_or_c if isinstance(d_or_c, dict) and 'config' in d_or_c else d_or_c
indelcaller = config['algorithm'].get('indelcaller', '')
if not indelcaller:
indelcaller = '' # depends on [control=['if'], data=[]]
if isinstance(indelcaller, (list, tuple)):
indelcaller = indelcaller[0] if len(indelcaller) > 0 else '' # depends on [control=['if'], data=[]]
return indelcaller |
def elekta_icon_fbp(ray_transform,
                    padding=False, filter_type='Hann', frequency_scaling=0.6,
                    parker_weighting=True):
    """Approximation of the FDK reconstruction used in the Elekta Icon.

    Parameters
    ----------
    ray_transform : `RayTransform`
        The ray transform to be used, should have an Elekta Icon geometry.
    padding : bool, optional
        Whether the FBP filter should use padding, increases memory use
        significantly.
    filter_type : str, optional
        Type of filter to apply in the FBP filter.
    frequency_scaling : float, optional
        Frequency scaling for FBP filter.
    parker_weighting : bool, optional
        Whether Parker weighting should be applied to compensate for partial
        scan.

    Returns
    -------
    elekta_icon_fbp : `DiscreteLp`

    Examples
    --------
    Create default FBP for default geometry:

    >>> from odl.contrib import tomo
    >>> geometry = tomo.elekta_icon_geometry()
    >>> space = tomo.elekta_icon_space()
    >>> ray_transform = odl.tomo.RayTransform(space, geometry)
    >>> fbp_op = tomo.elekta_icon_fbp(ray_transform)
    """
    reconstruction_op = odl.tomo.fbp_op(ray_transform,
                                        padding=padding,
                                        filter_type=filter_type,
                                        frequency_scaling=frequency_scaling)
    if parker_weighting:
        # Compose with Parker weights to compensate for the partial scan.
        weighting_op = odl.tomo.parker_weighting(ray_transform)
        reconstruction_op = reconstruction_op * weighting_op
    return reconstruction_op
constant[Approximation of the FDK reconstruction used in the Elekta Icon.
Parameters
----------
ray_transform : `RayTransform`
The ray transform to be used, should have an Elekta Icon geometry.
padding : bool, optional
Whether the FBP filter should use padding, increases memory use
significantly.
filter_type : str, optional
Type of filter to apply in the FBP filter.
frequency_scaling : float, optional
Frequency scaling for FBP filter.
parker_weighting : bool, optional
Whether Parker weighting should be applied to compensate for partial
scan.
Returns
-------
elekta_icon_fbp : `DiscreteLp`
Examples
--------
Create default FBP for default geometry:
>>> from odl.contrib import tomo
>>> geometry = tomo.elekta_icon_geometry()
>>> space = tomo.elekta_icon_space()
>>> ray_transform = odl.tomo.RayTransform(space, geometry)
>>> fbp_op = tomo.elekta_icon_fbp(ray_transform)
]
variable[fbp_op] assign[=] call[name[odl].tomo.fbp_op, parameter[name[ray_transform]]]
if name[parker_weighting] begin[:]
variable[parker_weighting] assign[=] call[name[odl].tomo.parker_weighting, parameter[name[ray_transform]]]
variable[fbp_op] assign[=] binary_operation[name[fbp_op] * name[parker_weighting]]
return[name[fbp_op]] | keyword[def] identifier[elekta_icon_fbp] ( identifier[ray_transform] ,
identifier[padding] = keyword[False] , identifier[filter_type] = literal[string] , identifier[frequency_scaling] = literal[int] ,
identifier[parker_weighting] = keyword[True] ):
literal[string]
identifier[fbp_op] = identifier[odl] . identifier[tomo] . identifier[fbp_op] ( identifier[ray_transform] ,
identifier[padding] = identifier[padding] ,
identifier[filter_type] = identifier[filter_type] ,
identifier[frequency_scaling] = identifier[frequency_scaling] )
keyword[if] identifier[parker_weighting] :
identifier[parker_weighting] = identifier[odl] . identifier[tomo] . identifier[parker_weighting] ( identifier[ray_transform] )
identifier[fbp_op] = identifier[fbp_op] * identifier[parker_weighting]
keyword[return] identifier[fbp_op] | def elekta_icon_fbp(ray_transform, padding=False, filter_type='Hann', frequency_scaling=0.6, parker_weighting=True):
"""Approximation of the FDK reconstruction used in the Elekta Icon.
Parameters
----------
ray_transform : `RayTransform`
The ray transform to be used, should have an Elekta Icon geometry.
padding : bool, optional
Whether the FBP filter should use padding, increases memory use
significantly.
filter_type : str, optional
Type of filter to apply in the FBP filter.
frequency_scaling : float, optional
Frequency scaling for FBP filter.
parker_weighting : bool, optional
Whether Parker weighting should be applied to compensate for partial
scan.
Returns
-------
elekta_icon_fbp : `DiscreteLp`
Examples
--------
Create default FBP for default geometry:
>>> from odl.contrib import tomo
>>> geometry = tomo.elekta_icon_geometry()
>>> space = tomo.elekta_icon_space()
>>> ray_transform = odl.tomo.RayTransform(space, geometry)
>>> fbp_op = tomo.elekta_icon_fbp(ray_transform)
"""
fbp_op = odl.tomo.fbp_op(ray_transform, padding=padding, filter_type=filter_type, frequency_scaling=frequency_scaling)
if parker_weighting:
parker_weighting = odl.tomo.parker_weighting(ray_transform)
fbp_op = fbp_op * parker_weighting # depends on [control=['if'], data=[]]
return fbp_op |
def _parse_float_vec(vec):
    """
    Parse a vector of float values representing IBM 8 byte floats into
    native 8 byte floats.

    Parameters
    ----------
    vec : ndarray
        Buffer whose bytes are reinterpreted below as pairs of big-endian
        uint32 words, one IBM double per pair.

    Returns
    -------
    ndarray
        The decoded values as native 8 byte (float64) values.
    """
    # Reinterpret each 8-byte IBM float as two big-endian 32-bit words.
    dtype = np.dtype('>u4,>u4')
    vec1 = vec.view(dtype=dtype)
    xport1 = vec1['f0']
    xport2 = vec1['f1']
    # Start by setting first half of ieee number to first half of IBM
    # number sans exponent
    ieee1 = xport1 & 0x00ffffff
    # The fraction bit to the left of the binary point in the ieee
    # format was set and the number was shifted 0, 1, 2, or 3
    # places. This will tell us how to adjust the ibm exponent to be a
    # power of 2 ieee exponent and how to shift the fraction bits to
    # restore the correct magnitude.
    shift = np.zeros(len(vec), dtype=np.uint8)
    shift[np.where(xport1 & 0x00200000)] = 1
    shift[np.where(xport1 & 0x00400000)] = 2
    shift[np.where(xport1 & 0x00800000)] = 3
    # shift the ieee number down the correct number of places then
    # set the second half of the ieee number to be the second half
    # of the ibm number shifted appropriately, ored with the bits
    # from the first half that would have been shifted in if we
    # could shift a double. All we are worried about are the low
    # order 3 bits of the first half since we're only shifting by
    # 1, 2, or 3.
    ieee1 >>= shift
    ieee2 = (xport2 >> shift) | ((xport1 & 0x00000007) << (29 + (3 - shift)))
    # clear the 1 bit to the left of the binary point
    ieee1 &= 0xffefffff
    # set the exponent of the ieee number to be the actual exponent
    # plus the shift count + 1023. Or this into the first half of the
    # ieee number. The ibm exponent is excess 64 but is adjusted by 65
    # since during conversion to ibm format the exponent is
    # incremented by 1 and the fraction bits left 4 positions to the
    # right of the radix point. (had to add >> 24 because C treats &
    # 0x7f as 0x7f000000 and Python doesn't)
    ieee1 |= ((((((xport1 >> 24) & 0x7f) - 65) << 2) +
               shift + 1023) << 20) | (xport1 & 0x80000000)
    # Reassemble the two 32-bit halves and reinterpret as big-endian float64,
    # then convert to the native byte order.
    ieee = np.empty((len(ieee1),), dtype='>u4,>u4')
    ieee['f0'] = ieee1
    ieee['f1'] = ieee2
    ieee = ieee.view(dtype='>f8')
    ieee = ieee.astype('f8')
    return ieee | def function[_parse_float_vec, parameter[vec]]:
constant[
Parse a vector of float values representing IBM 8 byte floats into
native 8 byte floats.
]
variable[dtype] assign[=] call[name[np].dtype, parameter[constant[>u4,>u4]]]
variable[vec1] assign[=] call[name[vec].view, parameter[]]
variable[xport1] assign[=] call[name[vec1]][constant[f0]]
variable[xport2] assign[=] call[name[vec1]][constant[f1]]
variable[ieee1] assign[=] binary_operation[name[xport1] <ast.BitAnd object at 0x7da2590d6b60> constant[16777215]]
variable[shift] assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[vec]]]]]
call[name[shift]][call[name[np].where, parameter[binary_operation[name[xport1] <ast.BitAnd object at 0x7da2590d6b60> constant[2097152]]]]] assign[=] constant[1]
call[name[shift]][call[name[np].where, parameter[binary_operation[name[xport1] <ast.BitAnd object at 0x7da2590d6b60> constant[4194304]]]]] assign[=] constant[2]
call[name[shift]][call[name[np].where, parameter[binary_operation[name[xport1] <ast.BitAnd object at 0x7da2590d6b60> constant[8388608]]]]] assign[=] constant[3]
<ast.AugAssign object at 0x7da207f98af0>
variable[ieee2] assign[=] binary_operation[binary_operation[name[xport2] <ast.RShift object at 0x7da2590d6a40> name[shift]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[binary_operation[name[xport1] <ast.BitAnd object at 0x7da2590d6b60> constant[7]] <ast.LShift object at 0x7da2590d69e0> binary_operation[constant[29] + binary_operation[constant[3] - name[shift]]]]]
<ast.AugAssign object at 0x7da2045660e0>
<ast.AugAssign object at 0x7da204565330>
variable[ieee] assign[=] call[name[np].empty, parameter[tuple[[<ast.Call object at 0x7da204566560>]]]]
call[name[ieee]][constant[f0]] assign[=] name[ieee1]
call[name[ieee]][constant[f1]] assign[=] name[ieee2]
variable[ieee] assign[=] call[name[ieee].view, parameter[]]
variable[ieee] assign[=] call[name[ieee].astype, parameter[constant[f8]]]
return[name[ieee]] | keyword[def] identifier[_parse_float_vec] ( identifier[vec] ):
literal[string]
identifier[dtype] = identifier[np] . identifier[dtype] ( literal[string] )
identifier[vec1] = identifier[vec] . identifier[view] ( identifier[dtype] = identifier[dtype] )
identifier[xport1] = identifier[vec1] [ literal[string] ]
identifier[xport2] = identifier[vec1] [ literal[string] ]
identifier[ieee1] = identifier[xport1] & literal[int]
identifier[shift] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[vec] ), identifier[dtype] = identifier[np] . identifier[uint8] )
identifier[shift] [ identifier[np] . identifier[where] ( identifier[xport1] & literal[int] )]= literal[int]
identifier[shift] [ identifier[np] . identifier[where] ( identifier[xport1] & literal[int] )]= literal[int]
identifier[shift] [ identifier[np] . identifier[where] ( identifier[xport1] & literal[int] )]= literal[int]
identifier[ieee1] >>= identifier[shift]
identifier[ieee2] =( identifier[xport2] >> identifier[shift] )|(( identifier[xport1] & literal[int] )<<( literal[int] +( literal[int] - identifier[shift] )))
identifier[ieee1] &= literal[int]
identifier[ieee1] |=(((((( identifier[xport1] >> literal[int] )& literal[int] )- literal[int] )<< literal[int] )+
identifier[shift] + literal[int] )<< literal[int] )|( identifier[xport1] & literal[int] )
identifier[ieee] = identifier[np] . identifier[empty] (( identifier[len] ( identifier[ieee1] ),), identifier[dtype] = literal[string] )
identifier[ieee] [ literal[string] ]= identifier[ieee1]
identifier[ieee] [ literal[string] ]= identifier[ieee2]
identifier[ieee] = identifier[ieee] . identifier[view] ( identifier[dtype] = literal[string] )
identifier[ieee] = identifier[ieee] . identifier[astype] ( literal[string] )
keyword[return] identifier[ieee] | def _parse_float_vec(vec):
    """
    Parse a vector of float values representing IBM 8 byte floats into
    native 8 byte floats.
    """
    # NOTE: the decimal constants in this copy are the hex masks of the
    # original source: 16777215 == 0x00ffffff, 2097152 == 0x00200000,
    # 4194304 == 0x00400000, 8388608 == 0x00800000, 4293918719 == 0xffefffff,
    # 127 == 0x7f, 2147483648 == 0x80000000.
    dtype = np.dtype('>u4,>u4')
    vec1 = vec.view(dtype=dtype)
    xport1 = vec1['f0']
    xport2 = vec1['f1']
    # Start by setting first half of ieee number to first half of IBM
    # number sans exponent
    ieee1 = xport1 & 16777215
    # The fraction bit to the left of the binary point in the ieee
    # format was set and the number was shifted 0, 1, 2, or 3
    # places. This will tell us how to adjust the ibm exponent to be a
    # power of 2 ieee exponent and how to shift the fraction bits to
    # restore the correct magnitude.
    shift = np.zeros(len(vec), dtype=np.uint8)
    shift[np.where(xport1 & 2097152)] = 1
    shift[np.where(xport1 & 4194304)] = 2
    shift[np.where(xport1 & 8388608)] = 3
    # shift the ieee number down the correct number of places then
    # set the second half of the ieee number to be the second half
    # of the ibm number shifted appropriately, ored with the bits
    # from the first half that would have been shifted in if we
    # could shift a double. All we are worried about are the low
    # order 3 bits of the first half since we're only shifting by
    # 1, 2, or 3.
    ieee1 >>= shift
    ieee2 = xport2 >> shift | (xport1 & 7) << 29 + (3 - shift)
    # clear the 1 bit to the left of the binary point
    ieee1 &= 4293918719
    # set the exponent of the ieee number to be the actual exponent
    # plus the shift count + 1023. Or this into the first half of the
    # ieee number. The ibm exponent is excess 64 but is adjusted by 65
    # since during conversion to ibm format the exponent is
    # incremented by 1 and the fraction bits left 4 positions to the
    # right of the radix point. (had to add >> 24 because C treats &
    # 0x7f as 0x7f000000 and Python doesn't)
    ieee1 |= ((xport1 >> 24 & 127) - 65 << 2) + shift + 1023 << 20 | xport1 & 2147483648
    ieee = np.empty((len(ieee1),), dtype='>u4,>u4')
    ieee['f0'] = ieee1
    ieee['f1'] = ieee2
    ieee = ieee.view(dtype='>f8')
    ieee = ieee.astype('f8')
    return ieee
def _createStatsDict(self, headers, rows):
    """Utility method that returns database stats as a nested dictionary.
    @param headers: List of columns in query result.
    @param rows: List of rows in query result.
    @return: Nested dictionary of values.
        First key is the database name and the second key is the
        statistics counter name.
    """
    dbstats = {}
    for row in rows:
        # row[0] is the database name; the remaining cells are paired with
        # the matching header names (zip stops at the shorter sequence).
        dbstats[row[0]] = dict(zip(headers[1:], row[1:]))
    return dbstats | def function[_createStatsDict, parameter[self, headers, rows]]:
constant[Utility method that returns database stats as a nested dictionary.
@param headers: List of columns in query result.
@param rows: List of rows in query result.
@return: Nested dictionary of values.
First key is the database name and the second key is the
statistics counter name.
]
variable[dbstats] assign[=] dictionary[[], []]
for taget[name[row]] in starred[name[rows]] begin[:]
call[name[dbstats]][call[name[row]][constant[0]]] assign[=] call[name[dict], parameter[call[name[zip], parameter[call[name[headers]][<ast.Slice object at 0x7da1b0fa54b0>], call[name[row]][<ast.Slice object at 0x7da1b0fa5330>]]]]]
return[name[dbstats]] | keyword[def] identifier[_createStatsDict] ( identifier[self] , identifier[headers] , identifier[rows] ):
literal[string]
identifier[dbstats] ={}
keyword[for] identifier[row] keyword[in] identifier[rows] :
identifier[dbstats] [ identifier[row] [ literal[int] ]]= identifier[dict] ( identifier[zip] ( identifier[headers] [ literal[int] :], identifier[row] [ literal[int] :]))
keyword[return] identifier[dbstats] | def _createStatsDict(self, headers, rows):
    """Utility method that returns database stats as a nested dictionary.
    @param headers: List of columns in query result.
    @param rows: List of rows in query result.
    @return: Nested dictionary of values.
        First key is the database name and the second key is the
        statistics counter name.
    """
    dbstats = {}
    for row in rows:
        # Pair every non-leading header with the matching cell of this row.
        dbstats[row[0]] = dict(zip(headers[1:], row[1:])) # depends on [control=['for'], data=['row']]
    return dbstats
def prepare_everything(self):
    """Convenience method to make the actual |HydPy| instance runable."""
    # Order matters: the network must exist before the models are
    # initialised, and conditions/series handling builds on both.
    self.prepare_network()
    self.init_models()
    self.load_conditions()
    # Disable the `warnmissingobsfile` option while preparing/loading the
    # series, so missing observation files do not trigger warnings here.
    with hydpy.pub.options.warnmissingobsfile(False):
        self.prepare_nodeseries()
        self.prepare_modelseries()
        self.load_inputseries() | def function[prepare_everything, parameter[self]]:
constant[Convenience method to make the actual |HydPy| instance runable.]
call[name[self].prepare_network, parameter[]]
call[name[self].init_models, parameter[]]
call[name[self].load_conditions, parameter[]]
with call[name[hydpy].pub.options.warnmissingobsfile, parameter[constant[False]]] begin[:]
call[name[self].prepare_nodeseries, parameter[]]
call[name[self].prepare_modelseries, parameter[]]
call[name[self].load_inputseries, parameter[]] | keyword[def] identifier[prepare_everything] ( identifier[self] ):
literal[string]
identifier[self] . identifier[prepare_network] ()
identifier[self] . identifier[init_models] ()
identifier[self] . identifier[load_conditions] ()
keyword[with] identifier[hydpy] . identifier[pub] . identifier[options] . identifier[warnmissingobsfile] ( keyword[False] ):
identifier[self] . identifier[prepare_nodeseries] ()
identifier[self] . identifier[prepare_modelseries] ()
identifier[self] . identifier[load_inputseries] () | def prepare_everything(self):
    """Convenience method to make the actual |HydPy| instance runable."""
    self.prepare_network()
    self.init_models()
    self.load_conditions()
    # Series preparation/loading runs with the missing-obs-file warning
    # disabled (see the option name passed False below).
    with hydpy.pub.options.warnmissingobsfile(False):
        self.prepare_nodeseries() # depends on [control=['with'], data=[]]
        self.prepare_modelseries()
        self.load_inputseries()
def which(program):
    """Locate *program* like the Unix ``which`` command.

    If *program* contains a directory part, it is returned only when it is
    executable itself (no ``PATH`` fallback); otherwise every directory on
    ``PATH`` is searched.  Returns the resolved path or ``None``.
    """
    head, _ = op.split(program)
    if head:
        # An explicit directory was given -- do not fall back to PATH.
        if is_exe(program):
            return program
    else:
        for path in environ["PATH"].split(pathsep):
            exe_file = op.join(path, program)
            if is_exe(exe_file):
                return exe_file
    return None
constant[ Check program is exists. ]
<ast.Tuple object at 0x7da1b0af8550> assign[=] call[name[op].split, parameter[name[program]]]
if name[head] begin[:]
if call[name[is_exe], parameter[name[program]]] begin[:]
return[name[program]]
return[constant[None]] | keyword[def] identifier[which] ( identifier[program] ):
literal[string]
identifier[head] , identifier[_] = identifier[op] . identifier[split] ( identifier[program] )
keyword[if] identifier[head] :
keyword[if] identifier[is_exe] ( identifier[program] ):
keyword[return] identifier[program]
keyword[else] :
keyword[for] identifier[path] keyword[in] identifier[environ] [ literal[string] ]. identifier[split] ( identifier[pathsep] ):
identifier[exe_file] = identifier[op] . identifier[join] ( identifier[path] , identifier[program] )
keyword[if] identifier[is_exe] ( identifier[exe_file] ):
keyword[return] identifier[exe_file]
keyword[return] keyword[None] | def which(program):
    """Locate *program* like the Unix ``which`` command: return its path
    when executable (directly, or found via a ``PATH`` search), else
    ``None``."""
    (head, _) = op.split(program)
    if head:
        # Explicit directory part given -- no PATH fallback.
        if is_exe(program):
            return program # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
    else:
        for path in environ['PATH'].split(pathsep):
            exe_file = op.join(path, program)
            if is_exe(exe_file):
                return exe_file # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['path']]
    return None
def build_model(self, n_features, n_classes):
    """Create the computational graph of the model.
    :param n_features: Number of features.
    :param n_classes: number of classes.
    :return: None -- the graph pieces are stored on ``self``
        (``cost``, ``train_step``, ``accuracy``).
    """
    self._create_placeholders(n_features, n_classes)
    self._create_layers(n_classes)
    # Wire the loss, the optimizer step and the accuracy metric on top of
    # the layers created above.
    self.cost = self.loss.compile(self.mod_y, self.input_labels)
    self.train_step = self.trainer.compile(self.cost)
    self.accuracy = Evaluation.accuracy(self.mod_y, self.input_labels) | def function[build_model, parameter[self, n_features, n_classes]]:
constant[Create the computational graph of the model.
:param n_features: Number of features.
:param n_classes: number of classes.
:return: self
]
call[name[self]._create_placeholders, parameter[name[n_features], name[n_classes]]]
call[name[self]._create_layers, parameter[name[n_classes]]]
name[self].cost assign[=] call[name[self].loss.compile, parameter[name[self].mod_y, name[self].input_labels]]
name[self].train_step assign[=] call[name[self].trainer.compile, parameter[name[self].cost]]
name[self].accuracy assign[=] call[name[Evaluation].accuracy, parameter[name[self].mod_y, name[self].input_labels]] | keyword[def] identifier[build_model] ( identifier[self] , identifier[n_features] , identifier[n_classes] ):
literal[string]
identifier[self] . identifier[_create_placeholders] ( identifier[n_features] , identifier[n_classes] )
identifier[self] . identifier[_create_layers] ( identifier[n_classes] )
identifier[self] . identifier[cost] = identifier[self] . identifier[loss] . identifier[compile] ( identifier[self] . identifier[mod_y] , identifier[self] . identifier[input_labels] )
identifier[self] . identifier[train_step] = identifier[self] . identifier[trainer] . identifier[compile] ( identifier[self] . identifier[cost] )
identifier[self] . identifier[accuracy] = identifier[Evaluation] . identifier[accuracy] ( identifier[self] . identifier[mod_y] , identifier[self] . identifier[input_labels] ) | def build_model(self, n_features, n_classes):
    """Create the computational graph of the model.
    :param n_features: Number of features.
    :param n_classes: number of classes.
    :return: None -- results are stored on ``self``
        (``cost``, ``train_step``, ``accuracy``).
    """
    self._create_placeholders(n_features, n_classes)
    self._create_layers(n_classes)
    self.cost = self.loss.compile(self.mod_y, self.input_labels)
    self.train_step = self.trainer.compile(self.cost)
    self.accuracy = Evaluation.accuracy(self.mod_y, self.input_labels)
def properties(self):
    """All reaction properties as a dict.

    Always contains ``id``, ``reversible`` and ``equation``; ``name`` and
    the flux bounds are included only when present on this reaction.
    """
    properties = {'id': self._id,
                  'reversible': self._rev,
                  'equation': self._equation}
    # Optional entries -- only added when actually defined.
    if 'name' in self._root.attrib:
        properties['name'] = self._root.get('name')
    if self._lower_flux is not None:
        properties['lower_flux'] = self._lower_flux
    if self._upper_flux is not None:
        properties['upper_flux'] = self._upper_flux
    return properties | def function[properties, parameter[self]]:
constant[All reaction properties as a dict]
variable[properties] assign[=] dictionary[[<ast.Constant object at 0x7da20c6c6080>, <ast.Constant object at 0x7da20c6c5e40>, <ast.Constant object at 0x7da20c6c58d0>], [<ast.Attribute object at 0x7da20c6c7d00>, <ast.Attribute object at 0x7da20c6c4700>, <ast.Attribute object at 0x7da20c6c4550>]]
if compare[constant[name] in name[self]._root.attrib] begin[:]
call[name[properties]][constant[name]] assign[=] call[name[self]._root.get, parameter[constant[name]]]
if compare[name[self]._lower_flux is_not constant[None]] begin[:]
call[name[properties]][constant[lower_flux]] assign[=] name[self]._lower_flux
if compare[name[self]._upper_flux is_not constant[None]] begin[:]
call[name[properties]][constant[upper_flux]] assign[=] name[self]._upper_flux
return[name[properties]] | keyword[def] identifier[properties] ( identifier[self] ):
literal[string]
identifier[properties] ={ literal[string] : identifier[self] . identifier[_id] ,
literal[string] : identifier[self] . identifier[_rev] ,
literal[string] : identifier[self] . identifier[_equation] }
keyword[if] literal[string] keyword[in] identifier[self] . identifier[_root] . identifier[attrib] :
identifier[properties] [ literal[string] ]= identifier[self] . identifier[_root] . identifier[get] ( literal[string] )
keyword[if] identifier[self] . identifier[_lower_flux] keyword[is] keyword[not] keyword[None] :
identifier[properties] [ literal[string] ]= identifier[self] . identifier[_lower_flux]
keyword[if] identifier[self] . identifier[_upper_flux] keyword[is] keyword[not] keyword[None] :
identifier[properties] [ literal[string] ]= identifier[self] . identifier[_upper_flux]
keyword[return] identifier[properties] | def properties(self):
    """All reaction properties as a dict"""
    properties = {'id': self._id, 'reversible': self._rev, 'equation': self._equation}
    # Optional keys below are only added when defined on this reaction.
    if 'name' in self._root.attrib:
        properties['name'] = self._root.get('name') # depends on [control=['if'], data=[]]
    if self._lower_flux is not None:
        properties['lower_flux'] = self._lower_flux # depends on [control=['if'], data=[]]
    if self._upper_flux is not None:
        properties['upper_flux'] = self._upper_flux # depends on [control=['if'], data=[]]
    return properties
def _record_extension(self, bank_id, key, value):
    """
    To structure a record extension property bean:
    ``key`` is rendered (via ``self._text_bean``) into the three display
    fields, ``value`` is stored verbatim, and ``bank_id`` is stringified
    as the associated id.
    """
    record_bean = {
        'value': value,
        'displayName': self._text_bean(key),
        'description': self._text_bean(key),
        'displayLabel': self._text_bean(key),
        'associatedId': str(bank_id)
    }
    return record_bean | def function[_record_extension, parameter[self, bank_id, key, value]]:
constant[
To structure a record extension property bean
]
variable[record_bean] assign[=] dictionary[[<ast.Constant object at 0x7da18f812ef0>, <ast.Constant object at 0x7da18f8131f0>, <ast.Constant object at 0x7da18f8127d0>, <ast.Constant object at 0x7da18f8103a0>, <ast.Constant object at 0x7da18f810d00>], [<ast.Name object at 0x7da18f812da0>, <ast.Call object at 0x7da18f812920>, <ast.Call object at 0x7da18f812a10>, <ast.Call object at 0x7da18f812b00>, <ast.Call object at 0x7da204621b10>]]
return[name[record_bean]] | keyword[def] identifier[_record_extension] ( identifier[self] , identifier[bank_id] , identifier[key] , identifier[value] ):
literal[string]
identifier[record_bean] ={
literal[string] : identifier[value] ,
literal[string] : identifier[self] . identifier[_text_bean] ( identifier[key] ),
literal[string] : identifier[self] . identifier[_text_bean] ( identifier[key] ),
literal[string] : identifier[self] . identifier[_text_bean] ( identifier[key] ),
literal[string] : identifier[str] ( identifier[bank_id] )
}
keyword[return] identifier[record_bean] | def _record_extension(self, bank_id, key, value):
    """
    To structure a record extension property bean
    """
    # `key` feeds the three display fields; `bank_id` is stringified.
    record_bean = {'value': value, 'displayName': self._text_bean(key), 'description': self._text_bean(key), 'displayLabel': self._text_bean(key), 'associatedId': str(bank_id)}
    return record_bean
def mroc(adjacency_matrix, alpha):
    """
    Extracts hierarchical community features using the MROC method.
    Introduced in: Wang, X., Tang, L., Liu, H., & Wang, L. (2013).
    Learning with multi-resolution overlapping communities.
    Knowledge and information systems, 36(2), 517-535.
    Inputs: - A in R^(nxn): Adjacency matrix of an undirected network represented as a SciPy Sparse COOrdinate matrix.
            - alpha: A maximum community size stopping threshold.
    Outputs: - X in R^(nxC_n): The latent space embedding represented as a SciPy Sparse COOrdinate matrix.
    """
    # Find number of nodes
    number_of_nodes = adjacency_matrix.shape[0]
    ####################################################################################################################
    # Base community calculation
    ####################################################################################################################
    # Initialize empty lists
    base_list = list()
    base_row = list()
    base_col = list()
    # Save function handles for speed
    append_base_list = base_list.append
    append_base_row = base_row.append
    append_base_col = base_col.append
    # Find base communities
    adjacency_matrix = adjacency_matrix.tocsc()
    number_of_base_communities = 0
    for i in range(number_of_nodes):
        # Calculate base community: column i's nonzeros (the node's
        # neighbors, assuming a symmetric adjacency) plus the node itself.
        base_community = set(adjacency_matrix.getcol(i).indices)
        base_community.add(i)
        # Deduplicate: keep the community only if no identical set was seen.
        # NOTE(review): this is an O(#communities) linear scan per node.
        flag = True
        for c in base_list:
            if c == base_community:
                flag = False
                break
        if flag:
            append_base_list(base_community)
            for n in base_community:
                append_base_row(n)
                append_base_col(number_of_base_communities)
            number_of_base_communities += 1
    # Form sparse matrices
    base_row = np.array(base_row)
    base_col = np.array(base_col)
    base_data = np.ones(base_row.size, dtype=np.float64)
    features = sparse.coo_matrix((base_data, (base_row, base_col)),
                                 shape=(number_of_nodes, number_of_base_communities))
    features = features.tocsr()
    base_community_number = features.shape[1]
    print('Base communities calculated.')
    # Build reverse indices: rows -> communities containing each node,
    # cols -> member nodes of each community.
    reverse_index_csr = copy.copy(features)
    reverse_index_csc = reverse_index_csr.tocsc()
    reverse_index_csr = reverse_index_csr.tocsr()
    reverse_index_rows = np.ndarray(number_of_nodes, dtype=np.ndarray)
    reverse_index_cols = np.ndarray(number_of_nodes, dtype=np.ndarray)
    for n in range(number_of_nodes):
        reverse_index_row = reverse_index_csr.getrow(n)
        reverse_index_rows[n] = reverse_index_row.indices
        if n < base_community_number:
            reverse_index_col = reverse_index_csc.getcol(n)
            reverse_index_cols[n] = reverse_index_col.indices
    flag = True
    print('Start merge iterations.')
    iteration = 0
    # NOTE(review): `flag` is never assigned False inside this loop, so the
    # only exits are the two `break` statements below.
    while flag:
        level_row = list()
        level_col = list()
        append_level_row = level_row.append
        append_level_col = level_col.append
        unavailable_communities = -1*np.ones(reverse_index_csc.shape[1])
        unavailable_communities_counter = 0
        next_level_communities = list()
        append_next_level_community = next_level_communities.append
        number_of_communities = 0
        for j in range(reverse_index_csr.shape[1]):
            # NOTE(review): `in` on an ndarray scans the whole array -- O(n)
            # per membership test.
            if j in unavailable_communities:
                continue
            must_break = reverse_index_csr.shape[1] - unavailable_communities_counter
            print(must_break)
            if must_break < 1:
                break
            # Mark community j as consumed for this level.
            unavailable_communities[unavailable_communities_counter] = j
            unavailable_communities_counter += 1
            c_j = reverse_index_cols[j]
            indices = community_neighbors(c_j, reverse_index_rows, unavailable_communities, unavailable_communities_counter)
            # Pick the still-available neighbor with maximum Jaccard overlap.
            max_similarity = -1
            community_index = 0
            for jj in indices:
                c_jj = reverse_index_cols[jj]
                similarity = jaccard(c_j, c_jj)
                if similarity > max_similarity:
                    max_similarity = similarity
                    community_index = jj
            jj = community_index
            if max_similarity > 0:
                # Merge two communities
                c_jj = reverse_index_cols[jj]
                c_new = np.union1d(c_j, c_jj)
                flag_1 = np.setdiff1d(c_new, c_j)
                flag_2 = np.setdiff1d(c_new, c_jj)
                # Only keep the merge if neither side subsumes the other.
                if (flag_1.size != 0) and (flag_2.size != 0):
                    for n in c_new:
                        append_level_row(n)
                        append_level_col(number_of_communities)
                    # Communities still below the size threshold alpha may be
                    # merged again at the next level.
                    if c_new.size < alpha:
                        append_next_level_community(number_of_communities)
                    number_of_communities += 1
                    unavailable_communities[unavailable_communities_counter] = jj
                    unavailable_communities_counter += 1
        level_row = np.array(level_row)
        level_col = np.array(level_col)
        level_data = np.ones(level_row.size, dtype=np.float64)
        communities = sparse.coo_matrix((level_data, (level_row, level_col)),
                                        shape=(number_of_nodes, number_of_communities))
        # No merges happened at this level -- done.
        if communities.getnnz() == 0:
            break
        # Accumulate this level's communities as additional feature columns.
        features = sparse.hstack([features, communities])
        # Rebuild the reverse indices over the communities that may still be
        # merged at the next level.
        reverse_index_csc = copy.copy(communities)
        reverse_index_csc = reverse_index_csc.tocsc()
        reverse_index_csc = reverse_index_csc[:, np.array(next_level_communities)]
        reverse_index_csr = reverse_index_csc.tocsr()
        reverse_index_rows = np.ndarray(number_of_nodes, dtype=np.ndarray)
        reverse_index_cols = np.ndarray(len(next_level_communities), dtype=np.ndarray)
        # NOTE(review): assumes number_of_nodes >= len(next_level_communities)
        # so every cols slot below is actually filled -- TODO confirm.
        for n in range(number_of_nodes):
            reverse_index_row = reverse_index_csr.getrow(n)
            reverse_index_rows[n] = reverse_index_row.indices
            if n < len(next_level_communities):
                reverse_index_col = reverse_index_csc.getcol(n)
                reverse_index_cols[n] = reverse_index_col.indices
        # NOTE(review): no-op -- `flag` is already True and is never reset to
        # False, so this condition does not control termination.
        if len(next_level_communities) > 1:
            flag = True
        iteration += 1
        print('Iteration: ', iteration)
        print('List length', len(next_level_communities))
    return features | def function[mroc, parameter[adjacency_matrix, alpha]]:
constant[
Extracts hierarchical community features using the MROC method.
Introduced in: Wang, X., Tang, L., Liu, H., & Wang, L. (2013).
Learning with multi-resolution overlapping communities.
Knowledge and information systems, 36(2), 517-535.
Inputs: - A in R^(nxn): Adjacency matrix of an undirected network represented as a SciPy Sparse COOrdinate matrix.
- alpha: A maximum community size stopping threshold.
Outputs: - X in R^(nxC_n): The latent space embedding represented as a SciPy Sparse COOrdinate matrix.
]
variable[number_of_nodes] assign[=] call[name[adjacency_matrix].shape][constant[0]]
variable[base_list] assign[=] call[name[list], parameter[]]
variable[base_row] assign[=] call[name[list], parameter[]]
variable[base_col] assign[=] call[name[list], parameter[]]
variable[append_base_list] assign[=] name[base_list].append
variable[append_base_row] assign[=] name[base_row].append
variable[append_base_col] assign[=] name[base_col].append
variable[adjacency_matrix] assign[=] call[name[adjacency_matrix].tocsc, parameter[]]
variable[number_of_base_communities] assign[=] constant[0]
for taget[name[i]] in starred[call[name[range], parameter[name[number_of_nodes]]]] begin[:]
variable[base_community] assign[=] call[name[set], parameter[call[name[adjacency_matrix].getcol, parameter[name[i]]].indices]]
call[name[base_community].add, parameter[name[i]]]
variable[flag] assign[=] constant[True]
for taget[name[c]] in starred[name[base_list]] begin[:]
if compare[name[c] equal[==] name[base_community]] begin[:]
variable[flag] assign[=] constant[False]
break
if name[flag] begin[:]
call[name[append_base_list], parameter[name[base_community]]]
for taget[name[n]] in starred[name[base_community]] begin[:]
call[name[append_base_row], parameter[name[n]]]
call[name[append_base_col], parameter[name[number_of_base_communities]]]
<ast.AugAssign object at 0x7da1b1a9acb0>
variable[base_row] assign[=] call[name[np].array, parameter[name[base_row]]]
variable[base_col] assign[=] call[name[np].array, parameter[name[base_col]]]
variable[base_data] assign[=] call[name[np].ones, parameter[name[base_row].size]]
variable[features] assign[=] call[name[sparse].coo_matrix, parameter[tuple[[<ast.Name object at 0x7da1b1a9a6e0>, <ast.Tuple object at 0x7da1b1a9a6b0>]]]]
variable[features] assign[=] call[name[features].tocsr, parameter[]]
variable[base_community_number] assign[=] call[name[features].shape][constant[1]]
call[name[print], parameter[constant[Base communities calculated.]]]
variable[reverse_index_csr] assign[=] call[name[copy].copy, parameter[name[features]]]
variable[reverse_index_csc] assign[=] call[name[reverse_index_csr].tocsc, parameter[]]
variable[reverse_index_csr] assign[=] call[name[reverse_index_csr].tocsr, parameter[]]
variable[reverse_index_rows] assign[=] call[name[np].ndarray, parameter[name[number_of_nodes]]]
variable[reverse_index_cols] assign[=] call[name[np].ndarray, parameter[name[number_of_nodes]]]
for taget[name[n]] in starred[call[name[range], parameter[name[number_of_nodes]]]] begin[:]
variable[reverse_index_row] assign[=] call[name[reverse_index_csr].getrow, parameter[name[n]]]
call[name[reverse_index_rows]][name[n]] assign[=] name[reverse_index_row].indices
if compare[name[n] less[<] name[base_community_number]] begin[:]
variable[reverse_index_col] assign[=] call[name[reverse_index_csc].getcol, parameter[name[n]]]
call[name[reverse_index_cols]][name[n]] assign[=] name[reverse_index_col].indices
variable[flag] assign[=] constant[True]
call[name[print], parameter[constant[Start merge iterations.]]]
variable[iteration] assign[=] constant[0]
while name[flag] begin[:]
variable[level_row] assign[=] call[name[list], parameter[]]
variable[level_col] assign[=] call[name[list], parameter[]]
variable[append_level_row] assign[=] name[level_row].append
variable[append_level_col] assign[=] name[level_col].append
variable[unavailable_communities] assign[=] binary_operation[<ast.UnaryOp object at 0x7da1b1a984c0> * call[name[np].ones, parameter[call[name[reverse_index_csc].shape][constant[1]]]]]
variable[unavailable_communities_counter] assign[=] constant[0]
variable[next_level_communities] assign[=] call[name[list], parameter[]]
variable[append_next_level_community] assign[=] name[next_level_communities].append
variable[number_of_communities] assign[=] constant[0]
for taget[name[j]] in starred[call[name[range], parameter[call[name[reverse_index_csr].shape][constant[1]]]]] begin[:]
if compare[name[j] in name[unavailable_communities]] begin[:]
continue
variable[must_break] assign[=] binary_operation[call[name[reverse_index_csr].shape][constant[1]] - name[unavailable_communities_counter]]
call[name[print], parameter[name[must_break]]]
if compare[name[must_break] less[<] constant[1]] begin[:]
break
call[name[unavailable_communities]][name[unavailable_communities_counter]] assign[=] name[j]
<ast.AugAssign object at 0x7da1b1ad3910>
variable[c_j] assign[=] call[name[reverse_index_cols]][name[j]]
variable[indices] assign[=] call[name[community_neighbors], parameter[name[c_j], name[reverse_index_rows], name[unavailable_communities], name[unavailable_communities_counter]]]
variable[max_similarity] assign[=] <ast.UnaryOp object at 0x7da1b1ad2740>
variable[community_index] assign[=] constant[0]
for taget[name[jj]] in starred[name[indices]] begin[:]
variable[c_jj] assign[=] call[name[reverse_index_cols]][name[jj]]
variable[similarity] assign[=] call[name[jaccard], parameter[name[c_j], name[c_jj]]]
if compare[name[similarity] greater[>] name[max_similarity]] begin[:]
variable[max_similarity] assign[=] name[similarity]
variable[community_index] assign[=] name[jj]
variable[jj] assign[=] name[community_index]
if compare[name[max_similarity] greater[>] constant[0]] begin[:]
variable[c_jj] assign[=] call[name[reverse_index_cols]][name[jj]]
variable[c_new] assign[=] call[name[np].union1d, parameter[name[c_j], name[c_jj]]]
variable[flag_1] assign[=] call[name[np].setdiff1d, parameter[name[c_new], name[c_j]]]
variable[flag_2] assign[=] call[name[np].setdiff1d, parameter[name[c_new], name[c_jj]]]
if <ast.BoolOp object at 0x7da1b1ad1b70> begin[:]
for taget[name[n]] in starred[name[c_new]] begin[:]
call[name[append_level_row], parameter[name[n]]]
call[name[append_level_col], parameter[name[number_of_communities]]]
if compare[name[c_new].size less[<] name[alpha]] begin[:]
call[name[append_next_level_community], parameter[name[number_of_communities]]]
<ast.AugAssign object at 0x7da1b1ad0dc0>
call[name[unavailable_communities]][name[unavailable_communities_counter]] assign[=] name[jj]
<ast.AugAssign object at 0x7da1b1ad0c40>
variable[level_row] assign[=] call[name[np].array, parameter[name[level_row]]]
variable[level_col] assign[=] call[name[np].array, parameter[name[level_col]]]
variable[level_data] assign[=] call[name[np].ones, parameter[name[level_row].size]]
variable[communities] assign[=] call[name[sparse].coo_matrix, parameter[tuple[[<ast.Name object at 0x7da1b1ad0220>, <ast.Tuple object at 0x7da1b1ad0250>]]]]
if compare[call[name[communities].getnnz, parameter[]] equal[==] constant[0]] begin[:]
break
variable[features] assign[=] call[name[sparse].hstack, parameter[list[[<ast.Name object at 0x7da1b1ad0640>, <ast.Name object at 0x7da1b1ad0670>]]]]
variable[reverse_index_csc] assign[=] call[name[copy].copy, parameter[name[communities]]]
variable[reverse_index_csc] assign[=] call[name[reverse_index_csc].tocsc, parameter[]]
variable[reverse_index_csc] assign[=] call[name[reverse_index_csc]][tuple[[<ast.Slice object at 0x7da1b1ad3040>, <ast.Call object at 0x7da1b1ad3070>]]]
variable[reverse_index_csr] assign[=] call[name[reverse_index_csc].tocsr, parameter[]]
variable[reverse_index_rows] assign[=] call[name[np].ndarray, parameter[name[number_of_nodes]]]
variable[reverse_index_cols] assign[=] call[name[np].ndarray, parameter[call[name[len], parameter[name[next_level_communities]]]]]
for taget[name[n]] in starred[call[name[range], parameter[name[number_of_nodes]]]] begin[:]
variable[reverse_index_row] assign[=] call[name[reverse_index_csr].getrow, parameter[name[n]]]
call[name[reverse_index_rows]][name[n]] assign[=] name[reverse_index_row].indices
if compare[name[n] less[<] call[name[len], parameter[name[next_level_communities]]]] begin[:]
variable[reverse_index_col] assign[=] call[name[reverse_index_csc].getcol, parameter[name[n]]]
call[name[reverse_index_cols]][name[n]] assign[=] name[reverse_index_col].indices
if compare[call[name[len], parameter[name[next_level_communities]]] greater[>] constant[1]] begin[:]
variable[flag] assign[=] constant[True]
<ast.AugAssign object at 0x7da1b1a1d330>
call[name[print], parameter[constant[Iteration: ], name[iteration]]]
call[name[print], parameter[constant[List length], call[name[len], parameter[name[next_level_communities]]]]]
return[name[features]] | keyword[def] identifier[mroc] ( identifier[adjacency_matrix] , identifier[alpha] ):
literal[string]
identifier[number_of_nodes] = identifier[adjacency_matrix] . identifier[shape] [ literal[int] ]
identifier[base_list] = identifier[list] ()
identifier[base_row] = identifier[list] ()
identifier[base_col] = identifier[list] ()
identifier[append_base_list] = identifier[base_list] . identifier[append]
identifier[append_base_row] = identifier[base_row] . identifier[append]
identifier[append_base_col] = identifier[base_col] . identifier[append]
identifier[adjacency_matrix] = identifier[adjacency_matrix] . identifier[tocsc] ()
identifier[number_of_base_communities] = literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[number_of_nodes] ):
identifier[base_community] = identifier[set] ( identifier[adjacency_matrix] . identifier[getcol] ( identifier[i] ). identifier[indices] )
identifier[base_community] . identifier[add] ( identifier[i] )
identifier[flag] = keyword[True]
keyword[for] identifier[c] keyword[in] identifier[base_list] :
keyword[if] identifier[c] == identifier[base_community] :
identifier[flag] = keyword[False]
keyword[break]
keyword[if] identifier[flag] :
identifier[append_base_list] ( identifier[base_community] )
keyword[for] identifier[n] keyword[in] identifier[base_community] :
identifier[append_base_row] ( identifier[n] )
identifier[append_base_col] ( identifier[number_of_base_communities] )
identifier[number_of_base_communities] += literal[int]
identifier[base_row] = identifier[np] . identifier[array] ( identifier[base_row] )
identifier[base_col] = identifier[np] . identifier[array] ( identifier[base_col] )
identifier[base_data] = identifier[np] . identifier[ones] ( identifier[base_row] . identifier[size] , identifier[dtype] = identifier[np] . identifier[float64] )
identifier[features] = identifier[sparse] . identifier[coo_matrix] (( identifier[base_data] ,( identifier[base_row] , identifier[base_col] )),
identifier[shape] =( identifier[number_of_nodes] , identifier[number_of_base_communities] ))
identifier[features] = identifier[features] . identifier[tocsr] ()
identifier[base_community_number] = identifier[features] . identifier[shape] [ literal[int] ]
identifier[print] ( literal[string] )
identifier[reverse_index_csr] = identifier[copy] . identifier[copy] ( identifier[features] )
identifier[reverse_index_csc] = identifier[reverse_index_csr] . identifier[tocsc] ()
identifier[reverse_index_csr] = identifier[reverse_index_csr] . identifier[tocsr] ()
identifier[reverse_index_rows] = identifier[np] . identifier[ndarray] ( identifier[number_of_nodes] , identifier[dtype] = identifier[np] . identifier[ndarray] )
identifier[reverse_index_cols] = identifier[np] . identifier[ndarray] ( identifier[number_of_nodes] , identifier[dtype] = identifier[np] . identifier[ndarray] )
keyword[for] identifier[n] keyword[in] identifier[range] ( identifier[number_of_nodes] ):
identifier[reverse_index_row] = identifier[reverse_index_csr] . identifier[getrow] ( identifier[n] )
identifier[reverse_index_rows] [ identifier[n] ]= identifier[reverse_index_row] . identifier[indices]
keyword[if] identifier[n] < identifier[base_community_number] :
identifier[reverse_index_col] = identifier[reverse_index_csc] . identifier[getcol] ( identifier[n] )
identifier[reverse_index_cols] [ identifier[n] ]= identifier[reverse_index_col] . identifier[indices]
identifier[flag] = keyword[True]
identifier[print] ( literal[string] )
identifier[iteration] = literal[int]
keyword[while] identifier[flag] :
identifier[level_row] = identifier[list] ()
identifier[level_col] = identifier[list] ()
identifier[append_level_row] = identifier[level_row] . identifier[append]
identifier[append_level_col] = identifier[level_col] . identifier[append]
identifier[unavailable_communities] =- literal[int] * identifier[np] . identifier[ones] ( identifier[reverse_index_csc] . identifier[shape] [ literal[int] ])
identifier[unavailable_communities_counter] = literal[int]
identifier[next_level_communities] = identifier[list] ()
identifier[append_next_level_community] = identifier[next_level_communities] . identifier[append]
identifier[number_of_communities] = literal[int]
keyword[for] identifier[j] keyword[in] identifier[range] ( identifier[reverse_index_csr] . identifier[shape] [ literal[int] ]):
keyword[if] identifier[j] keyword[in] identifier[unavailable_communities] :
keyword[continue]
identifier[must_break] = identifier[reverse_index_csr] . identifier[shape] [ literal[int] ]- identifier[unavailable_communities_counter]
identifier[print] ( identifier[must_break] )
keyword[if] identifier[must_break] < literal[int] :
keyword[break]
identifier[unavailable_communities] [ identifier[unavailable_communities_counter] ]= identifier[j]
identifier[unavailable_communities_counter] += literal[int]
identifier[c_j] = identifier[reverse_index_cols] [ identifier[j] ]
identifier[indices] = identifier[community_neighbors] ( identifier[c_j] , identifier[reverse_index_rows] , identifier[unavailable_communities] , identifier[unavailable_communities_counter] )
identifier[max_similarity] =- literal[int]
identifier[community_index] = literal[int]
keyword[for] identifier[jj] keyword[in] identifier[indices] :
identifier[c_jj] = identifier[reverse_index_cols] [ identifier[jj] ]
identifier[similarity] = identifier[jaccard] ( identifier[c_j] , identifier[c_jj] )
keyword[if] identifier[similarity] > identifier[max_similarity] :
identifier[max_similarity] = identifier[similarity]
identifier[community_index] = identifier[jj]
identifier[jj] = identifier[community_index]
keyword[if] identifier[max_similarity] > literal[int] :
identifier[c_jj] = identifier[reverse_index_cols] [ identifier[jj] ]
identifier[c_new] = identifier[np] . identifier[union1d] ( identifier[c_j] , identifier[c_jj] )
identifier[flag_1] = identifier[np] . identifier[setdiff1d] ( identifier[c_new] , identifier[c_j] )
identifier[flag_2] = identifier[np] . identifier[setdiff1d] ( identifier[c_new] , identifier[c_jj] )
keyword[if] ( identifier[flag_1] . identifier[size] != literal[int] ) keyword[and] ( identifier[flag_2] . identifier[size] != literal[int] ):
keyword[for] identifier[n] keyword[in] identifier[c_new] :
identifier[append_level_row] ( identifier[n] )
identifier[append_level_col] ( identifier[number_of_communities] )
keyword[if] identifier[c_new] . identifier[size] < identifier[alpha] :
identifier[append_next_level_community] ( identifier[number_of_communities] )
identifier[number_of_communities] += literal[int]
identifier[unavailable_communities] [ identifier[unavailable_communities_counter] ]= identifier[jj]
identifier[unavailable_communities_counter] += literal[int]
identifier[level_row] = identifier[np] . identifier[array] ( identifier[level_row] )
identifier[level_col] = identifier[np] . identifier[array] ( identifier[level_col] )
identifier[level_data] = identifier[np] . identifier[ones] ( identifier[level_row] . identifier[size] , identifier[dtype] = identifier[np] . identifier[float64] )
identifier[communities] = identifier[sparse] . identifier[coo_matrix] (( identifier[level_data] ,( identifier[level_row] , identifier[level_col] )),
identifier[shape] =( identifier[number_of_nodes] , identifier[number_of_communities] ))
keyword[if] identifier[communities] . identifier[getnnz] ()== literal[int] :
keyword[break]
identifier[features] = identifier[sparse] . identifier[hstack] ([ identifier[features] , identifier[communities] ])
identifier[reverse_index_csc] = identifier[copy] . identifier[copy] ( identifier[communities] )
identifier[reverse_index_csc] = identifier[reverse_index_csc] . identifier[tocsc] ()
identifier[reverse_index_csc] = identifier[reverse_index_csc] [:, identifier[np] . identifier[array] ( identifier[next_level_communities] )]
identifier[reverse_index_csr] = identifier[reverse_index_csc] . identifier[tocsr] ()
identifier[reverse_index_rows] = identifier[np] . identifier[ndarray] ( identifier[number_of_nodes] , identifier[dtype] = identifier[np] . identifier[ndarray] )
identifier[reverse_index_cols] = identifier[np] . identifier[ndarray] ( identifier[len] ( identifier[next_level_communities] ), identifier[dtype] = identifier[np] . identifier[ndarray] )
keyword[for] identifier[n] keyword[in] identifier[range] ( identifier[number_of_nodes] ):
identifier[reverse_index_row] = identifier[reverse_index_csr] . identifier[getrow] ( identifier[n] )
identifier[reverse_index_rows] [ identifier[n] ]= identifier[reverse_index_row] . identifier[indices]
keyword[if] identifier[n] < identifier[len] ( identifier[next_level_communities] ):
identifier[reverse_index_col] = identifier[reverse_index_csc] . identifier[getcol] ( identifier[n] )
identifier[reverse_index_cols] [ identifier[n] ]= identifier[reverse_index_col] . identifier[indices]
keyword[if] identifier[len] ( identifier[next_level_communities] )> literal[int] :
identifier[flag] = keyword[True]
identifier[iteration] += literal[int]
identifier[print] ( literal[string] , identifier[iteration] )
identifier[print] ( literal[string] , identifier[len] ( identifier[next_level_communities] ))
keyword[return] identifier[features] | def mroc(adjacency_matrix, alpha):
"""
Extracts hierarchical community features using the MROC method.
Introduced in: Wang, X., Tang, L., Liu, H., & Wang, L. (2013).
Learning with multi-resolution overlapping communities.
Knowledge and information systems, 36(2), 517-535.
Inputs: - A in R^(nxn): Adjacency matrix of an undirected network represented as a SciPy Sparse COOrdinate matrix.
- alpha: A maximum community size stopping threshold.
Outputs: - X in R^(nxC_n): The latent space embedding represented as a SciPy Sparse COOrdinate matrix.
"""
# Find number of nodes
number_of_nodes = adjacency_matrix.shape[0]
####################################################################################################################
# Base community calculation
####################################################################################################################
# Initialize empty lists
base_list = list()
base_row = list()
base_col = list()
# Save function handles for speed
append_base_list = base_list.append
append_base_row = base_row.append
append_base_col = base_col.append
# Find base communities
adjacency_matrix = adjacency_matrix.tocsc()
number_of_base_communities = 0
for i in range(number_of_nodes):
# Calculate base community
base_community = set(adjacency_matrix.getcol(i).indices)
base_community.add(i)
flag = True
for c in base_list:
if c == base_community:
flag = False
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']]
if flag:
append_base_list(base_community)
for n in base_community:
append_base_row(n)
append_base_col(number_of_base_communities) # depends on [control=['for'], data=['n']]
number_of_base_communities += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
# Form sparse matrices
base_row = np.array(base_row)
base_col = np.array(base_col)
base_data = np.ones(base_row.size, dtype=np.float64)
features = sparse.coo_matrix((base_data, (base_row, base_col)), shape=(number_of_nodes, number_of_base_communities))
features = features.tocsr()
base_community_number = features.shape[1]
print('Base communities calculated.')
reverse_index_csr = copy.copy(features)
reverse_index_csc = reverse_index_csr.tocsc()
reverse_index_csr = reverse_index_csr.tocsr()
reverse_index_rows = np.ndarray(number_of_nodes, dtype=np.ndarray)
reverse_index_cols = np.ndarray(number_of_nodes, dtype=np.ndarray)
for n in range(number_of_nodes):
reverse_index_row = reverse_index_csr.getrow(n)
reverse_index_rows[n] = reverse_index_row.indices
if n < base_community_number:
reverse_index_col = reverse_index_csc.getcol(n)
reverse_index_cols[n] = reverse_index_col.indices # depends on [control=['if'], data=['n']] # depends on [control=['for'], data=['n']]
flag = True
print('Start merge iterations.')
iteration = 0
while flag:
level_row = list()
level_col = list()
append_level_row = level_row.append
append_level_col = level_col.append
unavailable_communities = -1 * np.ones(reverse_index_csc.shape[1])
unavailable_communities_counter = 0
next_level_communities = list()
append_next_level_community = next_level_communities.append
number_of_communities = 0
for j in range(reverse_index_csr.shape[1]):
if j in unavailable_communities:
continue # depends on [control=['if'], data=[]]
must_break = reverse_index_csr.shape[1] - unavailable_communities_counter
print(must_break)
if must_break < 1:
break # depends on [control=['if'], data=[]]
unavailable_communities[unavailable_communities_counter] = j
unavailable_communities_counter += 1
c_j = reverse_index_cols[j]
indices = community_neighbors(c_j, reverse_index_rows, unavailable_communities, unavailable_communities_counter)
max_similarity = -1
community_index = 0
for jj in indices:
c_jj = reverse_index_cols[jj]
similarity = jaccard(c_j, c_jj)
if similarity > max_similarity:
max_similarity = similarity
community_index = jj # depends on [control=['if'], data=['similarity', 'max_similarity']] # depends on [control=['for'], data=['jj']]
jj = community_index
if max_similarity > 0:
# Merge two communities
c_jj = reverse_index_cols[jj]
c_new = np.union1d(c_j, c_jj)
flag_1 = np.setdiff1d(c_new, c_j)
flag_2 = np.setdiff1d(c_new, c_jj)
if flag_1.size != 0 and flag_2.size != 0:
for n in c_new:
append_level_row(n)
append_level_col(number_of_communities) # depends on [control=['for'], data=['n']]
if c_new.size < alpha:
append_next_level_community(number_of_communities) # depends on [control=['if'], data=[]]
number_of_communities += 1 # depends on [control=['if'], data=[]]
unavailable_communities[unavailable_communities_counter] = jj
unavailable_communities_counter += 1 # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['j']]
level_row = np.array(level_row)
level_col = np.array(level_col)
level_data = np.ones(level_row.size, dtype=np.float64)
communities = sparse.coo_matrix((level_data, (level_row, level_col)), shape=(number_of_nodes, number_of_communities))
if communities.getnnz() == 0:
break # depends on [control=['if'], data=[]]
features = sparse.hstack([features, communities])
reverse_index_csc = copy.copy(communities)
reverse_index_csc = reverse_index_csc.tocsc()
reverse_index_csc = reverse_index_csc[:, np.array(next_level_communities)]
reverse_index_csr = reverse_index_csc.tocsr()
reverse_index_rows = np.ndarray(number_of_nodes, dtype=np.ndarray)
reverse_index_cols = np.ndarray(len(next_level_communities), dtype=np.ndarray)
for n in range(number_of_nodes):
reverse_index_row = reverse_index_csr.getrow(n)
reverse_index_rows[n] = reverse_index_row.indices
if n < len(next_level_communities):
reverse_index_col = reverse_index_csc.getcol(n)
reverse_index_cols[n] = reverse_index_col.indices # depends on [control=['if'], data=['n']] # depends on [control=['for'], data=['n']]
if len(next_level_communities) > 1:
flag = True # depends on [control=['if'], data=[]]
iteration += 1
print('Iteration: ', iteration)
print('List length', len(next_level_communities)) # depends on [control=['while'], data=[]]
return features |
def end_body(self):
    """Write the closing HTML for the document.

    Must be the last call on this writer: it optionally emits the hidden
    clipboard-helper markup and then closes the content div, injects the
    accumulated script block and closes body/html.
    """
    if self.write_copy_script:
        # Hidden textarea + cp() helper: copies arbitrary text to the
        # clipboard via document.execCommand("copy").
        helper_parts = (
            '<textarea id="c" class="invisible"></textarea>',
            '<script>',
            'function cp(t){',
            'var c=document.getElementById("c");',
            'c.value=t;',
            'c.select();',
            'try{document.execCommand("copy")}',
            'catch(e){}}',
            '</script>',
        )
        self.write(''.join(helper_parts))
    # Close the page, splicing in any script collected during rendering.
    self.write('</div>{}</body></html>', self._script)
constant[Ends the whole document. This should be called the last]
if name[self].write_copy_script begin[:]
call[name[self].write, parameter[constant[<textarea id="c" class="invisible"></textarea><script>function cp(t){var c=document.getElementById("c");c.value=t;c.select();try{document.execCommand("copy")}catch(e){}}</script>]]]
call[name[self].write, parameter[constant[</div>{}</body></html>], name[self]._script]] | keyword[def] identifier[end_body] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[write_copy_script] :
identifier[self] . identifier[write] (
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
)
identifier[self] . identifier[write] ( literal[string] , identifier[self] . identifier[_script] ) | def end_body(self):
"""Ends the whole document. This should be called the last"""
if self.write_copy_script:
self.write('<textarea id="c" class="invisible"></textarea><script>function cp(t){var c=document.getElementById("c");c.value=t;c.select();try{document.execCommand("copy")}catch(e){}}</script>') # depends on [control=['if'], data=[]]
self.write('</div>{}</body></html>', self._script) |
def _constraint_lb_and_ub_to_gurobi_sense_rhs_and_range_value(lb, ub):
"""Helper function used by Constraint and Model"""
if lb is None and ub is None:
raise Exception("Free constraint ...")
elif lb is None:
sense = '<'
rhs = float(ub)
range_value = 0.
elif ub is None:
sense = '>'
rhs = float(lb)
range_value = 0.
elif lb == ub:
sense = '='
rhs = float(lb)
range_value = 0.
elif lb > ub:
raise ValueError("Lower bound is larger than upper bound.")
else:
sense = '='
rhs = float(lb)
range_value = float(ub - lb)
return sense, rhs, range_value | def function[_constraint_lb_and_ub_to_gurobi_sense_rhs_and_range_value, parameter[lb, ub]]:
constant[Helper function used by Constraint and Model]
if <ast.BoolOp object at 0x7da1b0ccafe0> begin[:]
<ast.Raise object at 0x7da1b0cca1a0>
return[tuple[[<ast.Name object at 0x7da1b0ccace0>, <ast.Name object at 0x7da1b0cc8340>, <ast.Name object at 0x7da1b0cc9630>]]] | keyword[def] identifier[_constraint_lb_and_ub_to_gurobi_sense_rhs_and_range_value] ( identifier[lb] , identifier[ub] ):
literal[string]
keyword[if] identifier[lb] keyword[is] keyword[None] keyword[and] identifier[ub] keyword[is] keyword[None] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[elif] identifier[lb] keyword[is] keyword[None] :
identifier[sense] = literal[string]
identifier[rhs] = identifier[float] ( identifier[ub] )
identifier[range_value] = literal[int]
keyword[elif] identifier[ub] keyword[is] keyword[None] :
identifier[sense] = literal[string]
identifier[rhs] = identifier[float] ( identifier[lb] )
identifier[range_value] = literal[int]
keyword[elif] identifier[lb] == identifier[ub] :
identifier[sense] = literal[string]
identifier[rhs] = identifier[float] ( identifier[lb] )
identifier[range_value] = literal[int]
keyword[elif] identifier[lb] > identifier[ub] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] :
identifier[sense] = literal[string]
identifier[rhs] = identifier[float] ( identifier[lb] )
identifier[range_value] = identifier[float] ( identifier[ub] - identifier[lb] )
keyword[return] identifier[sense] , identifier[rhs] , identifier[range_value] | def _constraint_lb_and_ub_to_gurobi_sense_rhs_and_range_value(lb, ub):
"""Helper function used by Constraint and Model"""
if lb is None and ub is None:
raise Exception('Free constraint ...') # depends on [control=['if'], data=[]]
elif lb is None:
sense = '<'
rhs = float(ub)
range_value = 0.0 # depends on [control=['if'], data=[]]
elif ub is None:
sense = '>'
rhs = float(lb)
range_value = 0.0 # depends on [control=['if'], data=[]]
elif lb == ub:
sense = '='
rhs = float(lb)
range_value = 0.0 # depends on [control=['if'], data=['lb']]
elif lb > ub:
raise ValueError('Lower bound is larger than upper bound.') # depends on [control=['if'], data=[]]
else:
sense = '='
rhs = float(lb)
range_value = float(ub - lb)
return (sense, rhs, range_value) |
def put_motion_detection_xml(self, xml):
    """ Put request with xml Motion Detection

    Sends *xml* to the camera's motion-detection endpoint
    (``self.motion_url``) with HTTP basic auth and logs whether the camera
    reported the update as successful (``statusString`` == ``OK``).

    :param xml: XML payload describing the motion-detection configuration
    """
    _LOGGING.debug('xml:')
    _LOGGING.debug("%s", xml)
    # BUG FIX: the original did `headers = DEFAULT_HEADERS`, which aliases
    # the module-level dict; the Content-Length/Host entries added below
    # then leaked into DEFAULT_HEADERS and every subsequent request.
    # Work on a copy instead.
    headers = dict(DEFAULT_HEADERS)
    # Header values must be strings (requests rejects non-str/bytes values).
    # NOTE(review): len(xml) is a character count; assumes an ASCII payload
    # where char count == byte count — confirm against the callers.
    headers['Content-Length'] = str(len(xml))
    headers['Host'] = self._host
    # NOTE(review): no timeout= is given, so this can block indefinitely.
    response = requests.put(self.motion_url, auth=HTTPBasicAuth(
        self._username, self._password), data=xml, headers=headers)
    _LOGGING.debug('request.headers:')
    _LOGGING.debug('%s', response.request.headers)
    _LOGGING.debug('Response:')
    _LOGGING.debug('%s', response.text)
    if response.status_code != 200:
        _LOGGING.error(
            "There was an error connecting to %s", self.motion_url)
        _LOGGING.error("status_code %s", response.status_code)
        return
    try:
        # Parse the namespaced reply and look for the status element.
        tree = ElementTree.fromstring(response.text)
        enabled_element = tree.findall(
            './/{%s}statusString' % self._xml_namespace)
        if len(enabled_element) == 0:
            _LOGGING.error("Problem getting motion detection status")
            return
        if enabled_element[0].text.strip() == 'OK':
            _LOGGING.info('Updated successfully')
    except AttributeError as attib_err:
        _LOGGING.error(
            'There was a problem parsing the response: %s', attib_err)
        return
constant[ Put request with xml Motion Detection ]
call[name[_LOGGING].debug, parameter[constant[xml:]]]
call[name[_LOGGING].debug, parameter[constant[%s], name[xml]]]
variable[headers] assign[=] name[DEFAULT_HEADERS]
call[name[headers]][constant[Content-Length]] assign[=] call[name[len], parameter[name[xml]]]
call[name[headers]][constant[Host]] assign[=] name[self]._host
variable[response] assign[=] call[name[requests].put, parameter[name[self].motion_url]]
call[name[_LOGGING].debug, parameter[constant[request.headers:]]]
call[name[_LOGGING].debug, parameter[constant[%s], name[response].request.headers]]
call[name[_LOGGING].debug, parameter[constant[Response:]]]
call[name[_LOGGING].debug, parameter[constant[%s], name[response].text]]
if compare[name[response].status_code not_equal[!=] constant[200]] begin[:]
call[name[_LOGGING].error, parameter[constant[There was an error connecting to %s], name[self].motion_url]]
call[name[_LOGGING].error, parameter[constant[status_code %s], name[response].status_code]]
return[None]
<ast.Try object at 0x7da20e961a20> | keyword[def] identifier[put_motion_detection_xml] ( identifier[self] , identifier[xml] ):
literal[string]
identifier[_LOGGING] . identifier[debug] ( literal[string] )
identifier[_LOGGING] . identifier[debug] ( literal[string] , identifier[xml] )
identifier[headers] = identifier[DEFAULT_HEADERS]
identifier[headers] [ literal[string] ]= identifier[len] ( identifier[xml] )
identifier[headers] [ literal[string] ]= identifier[self] . identifier[_host]
identifier[response] = identifier[requests] . identifier[put] ( identifier[self] . identifier[motion_url] , identifier[auth] = identifier[HTTPBasicAuth] (
identifier[self] . identifier[_username] , identifier[self] . identifier[_password] ), identifier[data] = identifier[xml] , identifier[headers] = identifier[headers] )
identifier[_LOGGING] . identifier[debug] ( literal[string] )
identifier[_LOGGING] . identifier[debug] ( literal[string] , identifier[response] . identifier[request] . identifier[headers] )
identifier[_LOGGING] . identifier[debug] ( literal[string] )
identifier[_LOGGING] . identifier[debug] ( literal[string] , identifier[response] . identifier[text] )
keyword[if] identifier[response] . identifier[status_code] != literal[int] :
identifier[_LOGGING] . identifier[error] (
literal[string] , identifier[self] . identifier[motion_url] )
identifier[_LOGGING] . identifier[error] ( literal[string] , identifier[response] . identifier[status_code] )
keyword[return]
keyword[try] :
identifier[tree] = identifier[ElementTree] . identifier[fromstring] ( identifier[response] . identifier[text] )
identifier[enabled_element] = identifier[tree] . identifier[findall] (
literal[string] % identifier[self] . identifier[_xml_namespace] )
keyword[if] identifier[len] ( identifier[enabled_element] )== literal[int] :
identifier[_LOGGING] . identifier[error] ( literal[string] )
keyword[return]
keyword[if] identifier[enabled_element] [ literal[int] ]. identifier[text] . identifier[strip] ()== literal[string] :
identifier[_LOGGING] . identifier[info] ( literal[string] )
keyword[except] identifier[AttributeError] keyword[as] identifier[attib_err] :
identifier[_LOGGING] . identifier[error] (
literal[string] , identifier[attib_err] )
keyword[return] | def put_motion_detection_xml(self, xml):
""" Put request with xml Motion Detection """
_LOGGING.debug('xml:')
_LOGGING.debug('%s', xml)
headers = DEFAULT_HEADERS
headers['Content-Length'] = len(xml)
headers['Host'] = self._host
response = requests.put(self.motion_url, auth=HTTPBasicAuth(self._username, self._password), data=xml, headers=headers)
_LOGGING.debug('request.headers:')
_LOGGING.debug('%s', response.request.headers)
_LOGGING.debug('Response:')
_LOGGING.debug('%s', response.text)
if response.status_code != 200:
_LOGGING.error('There was an error connecting to %s', self.motion_url)
_LOGGING.error('status_code %s', response.status_code)
return # depends on [control=['if'], data=[]]
try:
tree = ElementTree.fromstring(response.text)
enabled_element = tree.findall('.//{%s}statusString' % self._xml_namespace)
if len(enabled_element) == 0:
_LOGGING.error('Problem getting motion detection status')
return # depends on [control=['if'], data=[]]
if enabled_element[0].text.strip() == 'OK':
_LOGGING.info('Updated successfully') # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except AttributeError as attib_err:
_LOGGING.error('There was a problem parsing the response: %s', attib_err)
return # depends on [control=['except'], data=['attib_err']] |
def setup(self):
    """
    Configures the actor before execution.
    :return: None if successful, otherwise error message
    :rtype: str
    """
    # Let the base class configure itself first; abort on its error.
    result = super(ActorHandler, self).setup()
    if result is not None:
        return result
    self.update_parent()
    try:
        self.check_actors(self.actors)
    except Exception as e:
        return str(e)
    # Ensure every sub-actor carries a unique name within this handler.
    for sub_actor in self.actors:
        candidate = sub_actor.unique_name(sub_actor.name)
        if candidate != sub_actor.name:
            sub_actor.name = candidate
    # Configure each non-skipped sub-actor, stopping at the first error.
    for sub_actor in self.actors:
        if sub_actor.skip:
            continue
        result = sub_actor.setup()
        if result is not None:
            return result
    # Finally let the director configure itself.
    return self._director.setup()
constant[
Configures the actor before execution.
:return: None if successful, otherwise error message
:rtype: str
]
variable[result] assign[=] call[call[name[super], parameter[name[ActorHandler], name[self]]].setup, parameter[]]
if compare[name[result] is constant[None]] begin[:]
call[name[self].update_parent, parameter[]]
<ast.Try object at 0x7da1b2345300>
if compare[name[result] is constant[None]] begin[:]
for taget[name[actor]] in starred[name[self].actors] begin[:]
variable[name] assign[=] name[actor].name
variable[newname] assign[=] call[name[actor].unique_name, parameter[name[actor].name]]
if compare[name[name] not_equal[!=] name[newname]] begin[:]
name[actor].name assign[=] name[newname]
if compare[name[result] is constant[None]] begin[:]
for taget[name[actor]] in starred[name[self].actors] begin[:]
if name[actor].skip begin[:]
continue
variable[result] assign[=] call[name[actor].setup, parameter[]]
if compare[name[result] is_not constant[None]] begin[:]
break
if compare[name[result] is constant[None]] begin[:]
variable[result] assign[=] call[name[self]._director.setup, parameter[]]
return[name[result]] | keyword[def] identifier[setup] ( identifier[self] ):
literal[string]
identifier[result] = identifier[super] ( identifier[ActorHandler] , identifier[self] ). identifier[setup] ()
keyword[if] identifier[result] keyword[is] keyword[None] :
identifier[self] . identifier[update_parent] ()
keyword[try] :
identifier[self] . identifier[check_actors] ( identifier[self] . identifier[actors] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[result] = identifier[str] ( identifier[e] )
keyword[if] identifier[result] keyword[is] keyword[None] :
keyword[for] identifier[actor] keyword[in] identifier[self] . identifier[actors] :
identifier[name] = identifier[actor] . identifier[name]
identifier[newname] = identifier[actor] . identifier[unique_name] ( identifier[actor] . identifier[name] )
keyword[if] identifier[name] != identifier[newname] :
identifier[actor] . identifier[name] = identifier[newname]
keyword[if] identifier[result] keyword[is] keyword[None] :
keyword[for] identifier[actor] keyword[in] identifier[self] . identifier[actors] :
keyword[if] identifier[actor] . identifier[skip] :
keyword[continue]
identifier[result] = identifier[actor] . identifier[setup] ()
keyword[if] identifier[result] keyword[is] keyword[not] keyword[None] :
keyword[break]
keyword[if] identifier[result] keyword[is] keyword[None] :
identifier[result] = identifier[self] . identifier[_director] . identifier[setup] ()
keyword[return] identifier[result] | def setup(self):
"""
Configures the actor before execution.
:return: None if successful, otherwise error message
:rtype: str
"""
result = super(ActorHandler, self).setup()
if result is None:
self.update_parent()
try:
self.check_actors(self.actors) # depends on [control=['try'], data=[]]
except Exception as e:
result = str(e) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=['result']]
if result is None:
for actor in self.actors:
name = actor.name
newname = actor.unique_name(actor.name)
if name != newname:
actor.name = newname # depends on [control=['if'], data=['newname']] # depends on [control=['for'], data=['actor']] # depends on [control=['if'], data=[]]
if result is None:
for actor in self.actors:
if actor.skip:
continue # depends on [control=['if'], data=[]]
result = actor.setup()
if result is not None:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['actor']] # depends on [control=['if'], data=['result']]
if result is None:
result = self._director.setup() # depends on [control=['if'], data=['result']]
return result |
def create_dbsecurity_group(self, name, description=None):
    """
    Create a new DB security group for this account, within the region
    the connection currently points at.

    :type name: string
    :param name: Name for the new security group.

    :type description: string
    :param description: Optional description of the new security group.

    :rtype: :class:`boto.rds.dbsecuritygroup.DBSecurityGroup`
    :return: The newly created DBSecurityGroup
    """
    params = {'DBSecurityGroupName': name}
    if description:
        params['DBSecurityGroupDescription'] = description
    sec_group = self.get_object('CreateDBSecurityGroup', params,
                                DBSecurityGroup)
    # Echo the request values onto the returned object so callers can
    # read them back without another service round trip.
    sec_group.name = name
    sec_group.description = description
    return sec_group
constant[
Create a new security group for your account.
This will create the security group within the region you
are currently connected to.
:type name: string
:param name: The name of the new security group
:type description: string
:param description: The description of the new security group
:rtype: :class:`boto.rds.dbsecuritygroup.DBSecurityGroup`
:return: The newly created DBSecurityGroup
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b2616aa0>], [<ast.Name object at 0x7da1b26170d0>]]
if name[description] begin[:]
call[name[params]][constant[DBSecurityGroupDescription]] assign[=] name[description]
variable[group] assign[=] call[name[self].get_object, parameter[constant[CreateDBSecurityGroup], name[params], name[DBSecurityGroup]]]
name[group].name assign[=] name[name]
name[group].description assign[=] name[description]
return[name[group]] | keyword[def] identifier[create_dbsecurity_group] ( identifier[self] , identifier[name] , identifier[description] = keyword[None] ):
literal[string]
identifier[params] ={ literal[string] : identifier[name] }
keyword[if] identifier[description] :
identifier[params] [ literal[string] ]= identifier[description]
identifier[group] = identifier[self] . identifier[get_object] ( literal[string] , identifier[params] ,
identifier[DBSecurityGroup] )
identifier[group] . identifier[name] = identifier[name]
identifier[group] . identifier[description] = identifier[description]
keyword[return] identifier[group] | def create_dbsecurity_group(self, name, description=None):
"""
Create a new security group for your account.
This will create the security group within the region you
are currently connected to.
:type name: string
:param name: The name of the new security group
:type description: string
:param description: The description of the new security group
:rtype: :class:`boto.rds.dbsecuritygroup.DBSecurityGroup`
:return: The newly created DBSecurityGroup
"""
params = {'DBSecurityGroupName': name}
if description:
params['DBSecurityGroupDescription'] = description # depends on [control=['if'], data=[]]
group = self.get_object('CreateDBSecurityGroup', params, DBSecurityGroup)
group.name = name
group.description = description
return group |
def _get_digraph_char(self, cli):
" Return `False`, or the Digraph symbol to be used. "
if cli.quoted_insert:
return '^'
if cli.vi_state.waiting_for_digraph:
if cli.vi_state.digraph_symbol1:
return cli.vi_state.digraph_symbol1
return '?'
return False | def function[_get_digraph_char, parameter[self, cli]]:
constant[ Return `False`, or the Digraph symbol to be used. ]
if name[cli].quoted_insert begin[:]
return[constant[^]]
if name[cli].vi_state.waiting_for_digraph begin[:]
if name[cli].vi_state.digraph_symbol1 begin[:]
return[name[cli].vi_state.digraph_symbol1]
return[constant[?]]
return[constant[False]] | keyword[def] identifier[_get_digraph_char] ( identifier[self] , identifier[cli] ):
literal[string]
keyword[if] identifier[cli] . identifier[quoted_insert] :
keyword[return] literal[string]
keyword[if] identifier[cli] . identifier[vi_state] . identifier[waiting_for_digraph] :
keyword[if] identifier[cli] . identifier[vi_state] . identifier[digraph_symbol1] :
keyword[return] identifier[cli] . identifier[vi_state] . identifier[digraph_symbol1]
keyword[return] literal[string]
keyword[return] keyword[False] | def _get_digraph_char(self, cli):
""" Return `False`, or the Digraph symbol to be used. """
if cli.quoted_insert:
return '^' # depends on [control=['if'], data=[]]
if cli.vi_state.waiting_for_digraph:
if cli.vi_state.digraph_symbol1:
return cli.vi_state.digraph_symbol1 # depends on [control=['if'], data=[]]
return '?' # depends on [control=['if'], data=[]]
return False |
def play(self, source, *, after=None):
    """Plays an :class:`AudioSource`.

    The finalizer, ``after`` is called after the source has been exhausted
    or an error occurred.

    If an error happens while the audio player is running, the exception is
    caught and the audio player is then stopped.

    Parameters
    -----------
    source: :class:`AudioSource`
        The audio source we're reading from.
    after
        The finalizer that is called after the stream is exhausted.
        All exceptions it throws are silently discarded. This function
        must have a single parameter, ``error``, that denotes an
        optional exception that was raised during playing.

    Raises
    -------
    ClientException
        Already playing audio or not connected.
    TypeError
        source is not a :class:`AudioSource` or after is not a callable.
    """
    if not self.is_connected():
        raise ClientException('Not connected to voice.')

    if self.is_playing():
        raise ClientException('Already playing audio.')

    if not isinstance(source, AudioSource):
        # Fixed message grammar: was "source must an AudioSource".
        raise TypeError('source must be an AudioSource not {0.__class__.__name__}'.format(source))

    if after is not None and not callable(after):
        # The docstring promises a TypeError for a non-callable finalizer;
        # raise it here rather than failing later inside the player thread.
        raise TypeError('after must be a callable')

    self._player = AudioPlayer(source, self, after=after)
    self._player.start()
constant[Plays an :class:`AudioSource`.
The finalizer, ``after`` is called after the source has been exhausted
or an error occurred.
If an error happens while the audio player is running, the exception is
caught and the audio player is then stopped.
Parameters
-----------
source: :class:`AudioSource`
The audio source we're reading from.
after
The finalizer that is called after the stream is exhausted.
All exceptions it throws are silently discarded. This function
must have a single parameter, ``error``, that denotes an
optional exception that was raised during playing.
Raises
-------
ClientException
Already playing audio or not connected.
TypeError
source is not a :class:`AudioSource` or after is not a callable.
]
if <ast.UnaryOp object at 0x7da1b1f26a70> begin[:]
<ast.Raise object at 0x7da1b1f25120>
if call[name[self].is_playing, parameter[]] begin[:]
<ast.Raise object at 0x7da1b1ff1f00>
if <ast.UnaryOp object at 0x7da1b1ff01c0> begin[:]
<ast.Raise object at 0x7da1b1ff0c40>
name[self]._player assign[=] call[name[AudioPlayer], parameter[name[source], name[self]]]
call[name[self]._player.start, parameter[]] | keyword[def] identifier[play] ( identifier[self] , identifier[source] ,*, identifier[after] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[is_connected] ():
keyword[raise] identifier[ClientException] ( literal[string] )
keyword[if] identifier[self] . identifier[is_playing] ():
keyword[raise] identifier[ClientException] ( literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[source] , identifier[AudioSource] ):
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[source] ))
identifier[self] . identifier[_player] = identifier[AudioPlayer] ( identifier[source] , identifier[self] , identifier[after] = identifier[after] )
identifier[self] . identifier[_player] . identifier[start] () | def play(self, source, *, after=None):
"""Plays an :class:`AudioSource`.
The finalizer, ``after`` is called after the source has been exhausted
or an error occurred.
If an error happens while the audio player is running, the exception is
caught and the audio player is then stopped.
Parameters
-----------
source: :class:`AudioSource`
The audio source we're reading from.
after
The finalizer that is called after the stream is exhausted.
All exceptions it throws are silently discarded. This function
must have a single parameter, ``error``, that denotes an
optional exception that was raised during playing.
Raises
-------
ClientException
Already playing audio or not connected.
TypeError
source is not a :class:`AudioSource` or after is not a callable.
"""
if not self.is_connected():
raise ClientException('Not connected to voice.') # depends on [control=['if'], data=[]]
if self.is_playing():
raise ClientException('Already playing audio.') # depends on [control=['if'], data=[]]
if not isinstance(source, AudioSource):
raise TypeError('source must an AudioSource not {0.__class__.__name__}'.format(source)) # depends on [control=['if'], data=[]]
self._player = AudioPlayer(source, self, after=after)
self._player.start() |
def getmacbyip6(ip6, chainCC=0):
    """Resolve an IPv6 address to the corresponding MAC address.

    Resolution order: the fixed multicast mapping, the routing table
    (loopback / next-hop handling), the instantiated neighbor cache, and
    finally an on-the-wire Neighbor Solicitation via ``neighsol`` (which
    receives the ``chainCC`` value).
    """
    if isinstance(ip6, Net6):
        ip6 = str(ip6)

    # Multicast addresses map to a MAC algorithmically -- no lookup needed.
    if in6_ismaddr(ip6):
        return in6_getnsmac(inet_pton(socket.AF_INET6, ip6))

    iff, a, nh = conf.route6.route(ip6)

    if iff == scapy.consts.LOOPBACK_INTERFACE:
        return "ff:ff:ff:ff:ff:ff"

    if nh != '::':
        # Off-link destination: resolve the next hop instead.
        ip6 = nh

    cached = conf.netcache.in6_neighbor.get(ip6)
    if cached:
        return cached

    res = neighsol(ip6, a, iff, chainCC=chainCC)
    if res is None:
        return None

    if ICMPv6NDOptDstLLAddr in res:
        mac = res[ICMPv6NDOptDstLLAddr].lladdr
    else:
        mac = res.src
    conf.netcache.in6_neighbor[ip6] = mac
    return mac
return None | def function[getmacbyip6, parameter[ip6, chainCC]]:
constant[Returns the MAC address corresponding to an IPv6 address
neighborCache.get() method is used on instantiated neighbor cache.
Resolution mechanism is described in associated doc string.
(chainCC parameter value ends up being passed to sending function
used to perform the resolution, if needed)
]
if call[name[isinstance], parameter[name[ip6], name[Net6]]] begin[:]
variable[ip6] assign[=] call[name[str], parameter[name[ip6]]]
if call[name[in6_ismaddr], parameter[name[ip6]]] begin[:]
variable[mac] assign[=] call[name[in6_getnsmac], parameter[call[name[inet_pton], parameter[name[socket].AF_INET6, name[ip6]]]]]
return[name[mac]]
<ast.Tuple object at 0x7da1b1fc9900> assign[=] call[name[conf].route6.route, parameter[name[ip6]]]
if compare[name[iff] equal[==] name[scapy].consts.LOOPBACK_INTERFACE] begin[:]
return[constant[ff:ff:ff:ff:ff:ff]]
if compare[name[nh] not_equal[!=] constant[::]] begin[:]
variable[ip6] assign[=] name[nh]
variable[mac] assign[=] call[name[conf].netcache.in6_neighbor.get, parameter[name[ip6]]]
if name[mac] begin[:]
return[name[mac]]
variable[res] assign[=] call[name[neighsol], parameter[name[ip6], name[a], name[iff]]]
if compare[name[res] is_not constant[None]] begin[:]
if compare[name[ICMPv6NDOptDstLLAddr] in name[res]] begin[:]
variable[mac] assign[=] call[name[res]][name[ICMPv6NDOptDstLLAddr]].lladdr
call[name[conf].netcache.in6_neighbor][name[ip6]] assign[=] name[mac]
return[name[mac]]
return[constant[None]] | keyword[def] identifier[getmacbyip6] ( identifier[ip6] , identifier[chainCC] = literal[int] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[ip6] , identifier[Net6] ):
identifier[ip6] = identifier[str] ( identifier[ip6] )
keyword[if] identifier[in6_ismaddr] ( identifier[ip6] ):
identifier[mac] = identifier[in6_getnsmac] ( identifier[inet_pton] ( identifier[socket] . identifier[AF_INET6] , identifier[ip6] ))
keyword[return] identifier[mac]
identifier[iff] , identifier[a] , identifier[nh] = identifier[conf] . identifier[route6] . identifier[route] ( identifier[ip6] )
keyword[if] identifier[iff] == identifier[scapy] . identifier[consts] . identifier[LOOPBACK_INTERFACE] :
keyword[return] literal[string]
keyword[if] identifier[nh] != literal[string] :
identifier[ip6] = identifier[nh]
identifier[mac] = identifier[conf] . identifier[netcache] . identifier[in6_neighbor] . identifier[get] ( identifier[ip6] )
keyword[if] identifier[mac] :
keyword[return] identifier[mac]
identifier[res] = identifier[neighsol] ( identifier[ip6] , identifier[a] , identifier[iff] , identifier[chainCC] = identifier[chainCC] )
keyword[if] identifier[res] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[ICMPv6NDOptDstLLAddr] keyword[in] identifier[res] :
identifier[mac] = identifier[res] [ identifier[ICMPv6NDOptDstLLAddr] ]. identifier[lladdr]
keyword[else] :
identifier[mac] = identifier[res] . identifier[src]
identifier[conf] . identifier[netcache] . identifier[in6_neighbor] [ identifier[ip6] ]= identifier[mac]
keyword[return] identifier[mac]
keyword[return] keyword[None] | def getmacbyip6(ip6, chainCC=0):
"""Returns the MAC address corresponding to an IPv6 address
neighborCache.get() method is used on instantiated neighbor cache.
Resolution mechanism is described in associated doc string.
(chainCC parameter value ends up being passed to sending function
used to perform the resolution, if needed)
"""
if isinstance(ip6, Net6):
ip6 = str(ip6) # depends on [control=['if'], data=[]]
if in6_ismaddr(ip6): # Multicast
mac = in6_getnsmac(inet_pton(socket.AF_INET6, ip6))
return mac # depends on [control=['if'], data=[]]
(iff, a, nh) = conf.route6.route(ip6)
if iff == scapy.consts.LOOPBACK_INTERFACE:
return 'ff:ff:ff:ff:ff:ff' # depends on [control=['if'], data=[]]
if nh != '::':
ip6 = nh # Found next hop # depends on [control=['if'], data=['nh']]
mac = conf.netcache.in6_neighbor.get(ip6)
if mac:
return mac # depends on [control=['if'], data=[]]
res = neighsol(ip6, a, iff, chainCC=chainCC)
if res is not None:
if ICMPv6NDOptDstLLAddr in res:
mac = res[ICMPv6NDOptDstLLAddr].lladdr # depends on [control=['if'], data=['ICMPv6NDOptDstLLAddr', 'res']]
else:
mac = res.src
conf.netcache.in6_neighbor[ip6] = mac
return mac # depends on [control=['if'], data=['res']]
return None |
def add_function(self, function_id=None, function=None, inputs=None,
                 outputs=None, input_domain=None, weight=None,
                 inp_weight=None, out_weight=None, description=None,
                 filters=None, await_domain=None, await_result=None,
                 **kwargs):
    """
    Add a single function node to dispatcher.

    :param function_id:
        Function node id.
        If None will be assigned as <fun.__name__>.
    :type function_id: str, optional

    :param function:
        Data node estimation function.
    :type function: callable, optional

    :param inputs:
        Ordered arguments (i.e., data node ids) needed by the function.
    :type inputs: list, optional

    :param outputs:
        Ordered results (i.e., data node ids) returned by the function.
    :type outputs: list, optional

    :param input_domain:
        A function that checks if input values satisfy the function domain.
        This can be any function that takes the same inputs of the function
        and returns True if input values satisfy the domain, otherwise
        False. In this case the dispatch algorithm doesn't pass on the node.
    :type input_domain: callable, optional

    :param weight:
        Node weight. It is a weight coefficient that is used by the dispatch
        algorithm to estimate the minimum workflow.
    :type weight: float, int, optional

    :param inp_weight:
        Edge weights from data nodes to the function node.
        It is a dictionary (key=data node id) with the weight coefficients
        used by the dispatch algorithm to estimate the minimum workflow.
    :type inp_weight: dict[str, float | int], optional

    :param out_weight:
        Edge weights from the function node to data nodes.
        It is a dictionary (key=data node id) with the weight coefficients
        used by the dispatch algorithm to estimate the minimum workflow.
    :type out_weight: dict[str, float | int], optional

    :param description:
        Function node's description.
    :type description: str, optional

    :param filters:
        A list of functions that are invoked after the invocation of the
        main function.
    :type filters: list[function], optional

    :param await_domain:
        If True the Dispatcher waits all input results before executing the
        `input_domain` function. If a number is defined this is used as
        `timeout` for `Future.result` method [default: True]. Note this is
        used when asynchronous or parallel execution is enable.
    :type await_domain: bool|int|float, optional

    :param await_result:
        If True the Dispatcher waits output results before assigning them to
        the workflow. If a number is defined this is used as `timeout` for
        `Future.result` method [default: False]. Note this is used when
        asynchronous or parallel execution is enable.
    :type await_result: bool|int|float, optional

    :param kwargs:
        Set additional node attributes using key=value.
    :type kwargs: keyword arguments, optional

    :return:
        Self, to allow method chaining.
    """
    # Fold every named argument of this call into ``kwargs`` via a
    # ``locals()`` snapshot.  NOTE(review): ``_call_kw`` presumably drops
    # the ``self``/``kwargs`` entries -- confirm against its definition.
    # Do not introduce extra local variables before this line: they would
    # be captured by ``locals()`` as well.
    kwargs.update(_call_kw(locals()))
    # Record the call for deferred execution; presumably replayed later
    # against the real dispatcher by the consumer of ``self.deferred``.
    self.deferred.append(('add_function', kwargs))
    return self
constant[
Add a single function node to dispatcher.
:param function_id:
Function node id.
If None will be assigned as <fun.__name__>.
:type function_id: str, optional
:param function:
Data node estimation function.
:type function: callable, optional
:param inputs:
Ordered arguments (i.e., data node ids) needed by the function.
:type inputs: list, optional
:param outputs:
Ordered results (i.e., data node ids) returned by the function.
:type outputs: list, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes the same inputs of the function
and returns True if input values satisfy the domain, otherwise
False. In this case the dispatch algorithm doesn't pass on the node.
:type input_domain: callable, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the function node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, float | int], optional
:param out_weight:
Edge weights from the function node to data nodes.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type out_weight: dict[str, float | int], optional
:param description:
Function node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enable.
:type await_domain: bool|int|float, optional
:param await_result:
If True the Dispatcher waits output results before assigning them to
the workflow. If a number is defined this is used as `timeout` for
`Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enable.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
]
call[name[kwargs].update, parameter[call[name[_call_kw], parameter[call[name[locals], parameter[]]]]]]
call[name[self].deferred.append, parameter[tuple[[<ast.Constant object at 0x7da207f02c50>, <ast.Name object at 0x7da207f00460>]]]]
return[name[self]] | keyword[def] identifier[add_function] ( identifier[self] , identifier[function_id] = keyword[None] , identifier[function] = keyword[None] , identifier[inputs] = keyword[None] ,
identifier[outputs] = keyword[None] , identifier[input_domain] = keyword[None] , identifier[weight] = keyword[None] ,
identifier[inp_weight] = keyword[None] , identifier[out_weight] = keyword[None] , identifier[description] = keyword[None] ,
identifier[filters] = keyword[None] , identifier[await_domain] = keyword[None] , identifier[await_result] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
identifier[kwargs] . identifier[update] ( identifier[_call_kw] ( identifier[locals] ()))
identifier[self] . identifier[deferred] . identifier[append] (( literal[string] , identifier[kwargs] ))
keyword[return] identifier[self] | def add_function(self, function_id=None, function=None, inputs=None, outputs=None, input_domain=None, weight=None, inp_weight=None, out_weight=None, description=None, filters=None, await_domain=None, await_result=None, **kwargs):
"""
Add a single function node to dispatcher.
:param function_id:
Function node id.
If None will be assigned as <fun.__name__>.
:type function_id: str, optional
:param function:
Data node estimation function.
:type function: callable, optional
:param inputs:
Ordered arguments (i.e., data node ids) needed by the function.
:type inputs: list, optional
:param outputs:
Ordered results (i.e., data node ids) returned by the function.
:type outputs: list, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes the same inputs of the function
and returns True if input values satisfy the domain, otherwise
False. In this case the dispatch algorithm doesn't pass on the node.
:type input_domain: callable, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the function node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, float | int], optional
:param out_weight:
Edge weights from the function node to data nodes.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type out_weight: dict[str, float | int], optional
:param description:
Function node's description.
:type description: str, optional
:param filters:
A list of functions that are invoked after the invocation of the
main function.
:type filters: list[function], optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enable.
:type await_domain: bool|int|float, optional
:param await_result:
If True the Dispatcher waits output results before assigning them to
the workflow. If a number is defined this is used as `timeout` for
`Future.result` method [default: False]. Note this is used when
asynchronous or parallel execution is enable.
:type await_result: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
"""
kwargs.update(_call_kw(locals()))
self.deferred.append(('add_function', kwargs))
return self |
def accuracy(mod_y, ref_y, summary=True, name="accuracy"):
    """Build the accuracy computation op.

    Parameters
    ----------
    mod_y : tf.Tensor
        Model output tensor.
    ref_y : tf.Tensor
        Reference input tensor.
    summary : bool, optional (default = True)
        Whether to save a tf summary for the op.

    Returns
    -------
    tf.Tensor : accuracy op. tensor
    """
    with tf.name_scope(name):
        predicted = tf.argmax(mod_y, 1)
        matches = tf.equal(predicted, tf.argmax(ref_y, 1))
        acc_op = tf.reduce_mean(tf.cast(matches, tf.float32))
        if summary:
            tf.summary.scalar('accuracy', acc_op)
        return acc_op
constant[Accuracy computation op.
Parameters
----------
mod_y : tf.Tensor
Model output tensor.
ref_y : tf.Tensor
Reference input tensor.
summary : bool, optional (default = True)
Whether to save tf summary for the op.
Returns
-------
tf.Tensor : accuracy op. tensor
]
with call[name[tf].name_scope, parameter[name[name]]] begin[:]
variable[mod_pred] assign[=] call[name[tf].argmax, parameter[name[mod_y], constant[1]]]
variable[correct_pred] assign[=] call[name[tf].equal, parameter[name[mod_pred], call[name[tf].argmax, parameter[name[ref_y], constant[1]]]]]
variable[accuracy] assign[=] call[name[tf].reduce_mean, parameter[call[name[tf].cast, parameter[name[correct_pred], name[tf].float32]]]]
if name[summary] begin[:]
call[name[tf].summary.scalar, parameter[constant[accuracy], name[accuracy]]]
return[name[accuracy]] | keyword[def] identifier[accuracy] ( identifier[mod_y] , identifier[ref_y] , identifier[summary] = keyword[True] , identifier[name] = literal[string] ):
literal[string]
keyword[with] identifier[tf] . identifier[name_scope] ( identifier[name] ):
identifier[mod_pred] = identifier[tf] . identifier[argmax] ( identifier[mod_y] , literal[int] )
identifier[correct_pred] = identifier[tf] . identifier[equal] ( identifier[mod_pred] , identifier[tf] . identifier[argmax] ( identifier[ref_y] , literal[int] ))
identifier[accuracy] = identifier[tf] . identifier[reduce_mean] ( identifier[tf] . identifier[cast] ( identifier[correct_pred] , identifier[tf] . identifier[float32] ))
keyword[if] identifier[summary] :
identifier[tf] . identifier[summary] . identifier[scalar] ( literal[string] , identifier[accuracy] )
keyword[return] identifier[accuracy] | def accuracy(mod_y, ref_y, summary=True, name='accuracy'):
"""Accuracy computation op.
Parameters
----------
mod_y : tf.Tensor
Model output tensor.
ref_y : tf.Tensor
Reference input tensor.
summary : bool, optional (default = True)
Whether to save tf summary for the op.
Returns
-------
tf.Tensor : accuracy op. tensor
"""
with tf.name_scope(name):
mod_pred = tf.argmax(mod_y, 1)
correct_pred = tf.equal(mod_pred, tf.argmax(ref_y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
if summary:
tf.summary.scalar('accuracy', accuracy) # depends on [control=['if'], data=[]]
return accuracy # depends on [control=['with'], data=[]] |
def from_triples(cls, triples):
    """
    Decode triples, as from :meth:`to_triples`, into an Eds object.
    """
    order = []          # node ids in first-seen order; order[0] becomes top
    data = {}           # per-node accumulated attributes
    edges = []          # (source, relation, target) argument links
    for src, rel, tgt in triples:
        if src not in data:
            order.append(src)
            data[src] = {'pred': None, 'lnk': None, 'carg': None, 'si': []}
        entry = data[src]
        if rel == 'predicate':
            entry['pred'] = Pred.surface_or_abstract(tgt)
        elif rel == 'lnk':
            start, end = tgt.strip('"<>').split(':')
            entry['lnk'] = Lnk.charspan(int(start), int(end))
        elif rel == 'carg':
            # Strip one pair of surrounding double quotes, if present.
            if (tgt[0], tgt[-1]) == ('"', '"'):
                tgt = tgt[1:-1]
            entry['carg'] = tgt
        elif rel == 'type':
            entry['si'].append((CVARSORT, tgt))
        elif rel.islower():
            # Lowercase relations are property (sortinfo) features.
            entry['si'].append((rel, tgt))
        else:
            edges.append((src, rel, tgt))
    nodes = []
    for nid in order:
        entry = data[nid]
        nodes.append(Node(nodeid=nid,
                          pred=entry['pred'],
                          sortinfo=entry['si'],
                          lnk=entry['lnk'],
                          carg=entry['carg']))
    top = order[0] if order else None
    return cls(top=top, nodes=nodes, edges=edges)
constant[
Decode triples, as from :meth:`to_triples`, into an Eds object.
]
<ast.Tuple object at 0x7da1b0471870> assign[=] tuple[[<ast.List object at 0x7da1b0472890>, <ast.Dict object at 0x7da1b0472980>, <ast.List object at 0x7da1b04707c0>]]
for taget[tuple[[<ast.Name object at 0x7da1b0472fb0>, <ast.Name object at 0x7da1b0470c40>, <ast.Name object at 0x7da1b04717b0>]]] in starred[name[triples]] begin[:]
if compare[name[src] <ast.NotIn object at 0x7da2590d7190> name[nd]] begin[:]
call[name[nids].append, parameter[name[src]]]
call[name[nd]][name[src]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0472590>, <ast.Constant object at 0x7da1b0472230>, <ast.Constant object at 0x7da1b0472f50>, <ast.Constant object at 0x7da1b0473a30>], [<ast.Constant object at 0x7da1b04724a0>, <ast.Constant object at 0x7da1b04705b0>, <ast.Constant object at 0x7da1b0472e90>, <ast.List object at 0x7da1b0473370>]]
if compare[name[rel] equal[==] constant[predicate]] begin[:]
call[call[name[nd]][name[src]]][constant[pred]] assign[=] call[name[Pred].surface_or_abstract, parameter[name[tgt]]]
variable[nodes] assign[=] <ast.ListComp object at 0x7da1b0431c60>
variable[top] assign[=] <ast.IfExp object at 0x7da1b06cb010>
return[call[name[cls], parameter[]]] | keyword[def] identifier[from_triples] ( identifier[cls] , identifier[triples] ):
literal[string]
identifier[nids] , identifier[nd] , identifier[edges] =[],{},[]
keyword[for] identifier[src] , identifier[rel] , identifier[tgt] keyword[in] identifier[triples] :
keyword[if] identifier[src] keyword[not] keyword[in] identifier[nd] :
identifier[nids] . identifier[append] ( identifier[src] )
identifier[nd] [ identifier[src] ]={ literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] :[]}
keyword[if] identifier[rel] == literal[string] :
identifier[nd] [ identifier[src] ][ literal[string] ]= identifier[Pred] . identifier[surface_or_abstract] ( identifier[tgt] )
keyword[elif] identifier[rel] == literal[string] :
identifier[cfrom] , identifier[cto] = identifier[tgt] . identifier[strip] ( literal[string] ). identifier[split] ( literal[string] )
identifier[nd] [ identifier[src] ][ literal[string] ]= identifier[Lnk] . identifier[charspan] ( identifier[int] ( identifier[cfrom] ), identifier[int] ( identifier[cto] ))
keyword[elif] identifier[rel] == literal[string] :
keyword[if] ( identifier[tgt] [ literal[int] ], identifier[tgt] [- literal[int] ])==( literal[string] , literal[string] ):
identifier[tgt] = identifier[tgt] [ literal[int] :- literal[int] ]
identifier[nd] [ identifier[src] ][ literal[string] ]= identifier[tgt]
keyword[elif] identifier[rel] == literal[string] :
identifier[nd] [ identifier[src] ][ literal[string] ]. identifier[append] (( identifier[CVARSORT] , identifier[tgt] ))
keyword[elif] identifier[rel] . identifier[islower] ():
identifier[nd] [ identifier[src] ][ literal[string] ]. identifier[append] (( identifier[rel] , identifier[tgt] ))
keyword[else] :
identifier[edges] . identifier[append] (( identifier[src] , identifier[rel] , identifier[tgt] ))
identifier[nodes] =[
identifier[Node] (
identifier[nodeid] = identifier[nid] ,
identifier[pred] = identifier[nd] [ identifier[nid] ][ literal[string] ],
identifier[sortinfo] = identifier[nd] [ identifier[nid] ][ literal[string] ],
identifier[lnk] = identifier[nd] [ identifier[nid] ][ literal[string] ],
identifier[carg] = identifier[nd] [ identifier[nid] ][ literal[string] ]
) keyword[for] identifier[nid] keyword[in] identifier[nids]
]
identifier[top] = identifier[nids] [ literal[int] ] keyword[if] identifier[nids] keyword[else] keyword[None]
keyword[return] identifier[cls] ( identifier[top] = identifier[top] , identifier[nodes] = identifier[nodes] , identifier[edges] = identifier[edges] ) | def from_triples(cls, triples):
"""
Decode triples, as from :meth:`to_triples`, into an Eds object.
"""
(nids, nd, edges) = ([], {}, [])
for (src, rel, tgt) in triples:
if src not in nd:
nids.append(src)
nd[src] = {'pred': None, 'lnk': None, 'carg': None, 'si': []} # depends on [control=['if'], data=['src', 'nd']]
if rel == 'predicate':
nd[src]['pred'] = Pred.surface_or_abstract(tgt) # depends on [control=['if'], data=[]]
elif rel == 'lnk':
(cfrom, cto) = tgt.strip('"<>').split(':')
nd[src]['lnk'] = Lnk.charspan(int(cfrom), int(cto)) # depends on [control=['if'], data=[]]
elif rel == 'carg':
if (tgt[0], tgt[-1]) == ('"', '"'):
tgt = tgt[1:-1] # depends on [control=['if'], data=[]]
nd[src]['carg'] = tgt # depends on [control=['if'], data=[]]
elif rel == 'type':
nd[src]['si'].append((CVARSORT, tgt)) # depends on [control=['if'], data=[]]
elif rel.islower():
nd[src]['si'].append((rel, tgt)) # depends on [control=['if'], data=[]]
else:
edges.append((src, rel, tgt)) # depends on [control=['for'], data=[]]
nodes = [Node(nodeid=nid, pred=nd[nid]['pred'], sortinfo=nd[nid]['si'], lnk=nd[nid]['lnk'], carg=nd[nid]['carg']) for nid in nids]
top = nids[0] if nids else None
return cls(top=top, nodes=nodes, edges=edges) |
def find_entity(self, x):
    """
    Get the entity that has the specified name (or synonym).

    Parameters
    ----------
    x : string
        Name or synonym for the target entity.

    Returns
    -------
    str or None
        The matching entity (first query result, converted via
        ``toPython()``), or None if no entity has that name.
    """
    # NOTE(review): `x` is interpolated directly into the SPARQL text, so a
    # name containing a double quote would break (or inject into) the query.
    # Presumably names come from a trusted source -- confirm with callers.
    qstr = self.prefixes + """
        SELECT ?x WHERE {{
            ?x rn:hasName "{0}" .
        }}
        """.format(x)
    # Materialize the result set once instead of calling list(res) twice.
    rows = list(self.graph.query(qstr))
    if rows:
        return rows[0][0].toPython()
    return None
constant[
Get the entity that has the specified name (or synonym).
Parameters
----------
x : string
Name or synonym for the target entity.
]
variable[qstr] assign[=] binary_operation[name[self].prefixes + call[constant[
SELECT ?x WHERE {{
?x rn:hasName "{0}" .
}}
].format, parameter[name[x]]]]
variable[res] assign[=] call[name[self].graph.query, parameter[name[qstr]]]
if call[name[list], parameter[name[res]]] begin[:]
variable[en] assign[=] call[call[call[call[name[list], parameter[name[res]]]][constant[0]]][constant[0]].toPython, parameter[]]
return[name[en]] | keyword[def] identifier[find_entity] ( identifier[self] , identifier[x] ):
literal[string]
identifier[qstr] = identifier[self] . identifier[prefixes] + literal[string] . identifier[format] ( identifier[x] )
identifier[res] = identifier[self] . identifier[graph] . identifier[query] ( identifier[qstr] )
keyword[if] identifier[list] ( identifier[res] ):
identifier[en] = identifier[list] ( identifier[res] )[ literal[int] ][ literal[int] ]. identifier[toPython] ()
keyword[return] identifier[en]
keyword[else] :
keyword[return] keyword[None] | def find_entity(self, x):
"""
Get the entity that has the specified name (or synonym).
Parameters
----------
x : string
Name or synonym for the target entity.
"""
qstr = self.prefixes + '\n SELECT ?x WHERE {{\n ?x rn:hasName "{0}" .\n }}\n '.format(x)
res = self.graph.query(qstr)
if list(res):
en = list(res)[0][0].toPython()
return en # depends on [control=['if'], data=[]]
else:
return None |
def fork_procs_insanity_check(p_string):
    """
    This function checks if the pipeline string contains a process between
    the fork start token or end token and the separator (lane) token. Checks for
    the absence of processes in one of the branches of the fork ['|)' and '(|']
    and for the existence of a process before starting a fork (in an inner fork)
    ['|('].
    Parameters
    ----------
    p_string: str
        String with the definition of the pipeline, e.g.::
            'processA processB processC(ProcessD | ProcessE)'
    """
    # Adjacent token pairs that would mean an empty fork branch ('(|', '|)')
    # or a fork opened right after a separator with no process ('|(').
    forbidden_pairs = (
        FORK_TOKEN + LANE_TOKEN,
        LANE_TOKEN + CLOSE_TOKEN,
        LANE_TOKEN + FORK_TOKEN,
    )
    if any(pair in p_string for pair in forbidden_pairs):
        raise SanityError("There must be a process between the fork "
                          "start character '(' or end ')' and the separator of "
                          "processes character '|'")
constant[
This function checks if the pipeline string contains a process between
the fork start token or end token and the separator (lane) token. Checks for
the absence of processes in one of the branches of the fork ['|)' and '(|']
and for the existence of a process before starting a fork (in an inner fork)
['|('].
Parameters
----------
p_string: str
String with the definition of the pipeline, e.g.::
'processA processB processC(ProcessD | ProcessE)'
]
if <ast.BoolOp object at 0x7da1b03bb730> begin[:]
<ast.Raise object at 0x7da1b03ba830> | keyword[def] identifier[fork_procs_insanity_check] ( identifier[p_string] ):
literal[string]
keyword[if] identifier[FORK_TOKEN] + identifier[LANE_TOKEN] keyword[in] identifier[p_string] keyword[or] identifier[LANE_TOKEN] + identifier[CLOSE_TOKEN] keyword[in] identifier[p_string] keyword[or] identifier[LANE_TOKEN] + identifier[FORK_TOKEN] keyword[in] identifier[p_string] :
keyword[raise] identifier[SanityError] ( literal[string]
literal[string]
literal[string] ) | def fork_procs_insanity_check(p_string):
"""
This function checks if the pipeline string contains a process between
the fork start token or end token and the separator (lane) token. Checks for
the absence of processes in one of the branches of the fork ['|)' and '(|']
and for the existence of a process before starting a fork (in an inner fork)
['|('].
Parameters
----------
p_string: str
String with the definition of the pipeline, e.g.::
'processA processB processC(ProcessD | ProcessE)'
"""
# Check for the absence of processes in one of the branches of the fork
# ['|)' and '(|'] and for the existence of a process before starting a fork
# (in an inner fork) ['|('].
if FORK_TOKEN + LANE_TOKEN in p_string or LANE_TOKEN + CLOSE_TOKEN in p_string or LANE_TOKEN + FORK_TOKEN in p_string:
raise SanityError("There must be a process between the fork start character '(' or end ')' and the separator of processes character '|'") # depends on [control=['if'], data=[]] |
def create_pod(
        name,
        namespace,
        metadata,
        spec,
        source,
        template,
        saltenv,
        **kwargs):
    '''
    Creates the kubernetes pod as defined by the user.

    Builds a V1Pod body from the given metadata/spec (or from ``source``/
    ``template``), submits it to the API server in ``namespace``, and
    returns the created object as a dict. Returns None when the API
    responds 404; any other API or HTTP failure is re-raised as
    CommandExecutionError. The connection set up from ``kwargs`` is
    always cleaned up.
    '''
    body = __create_object_body(
        kind='Pod',
        obj_class=kubernetes.client.V1Pod,
        spec_creator=__dict_to_pod_spec,
        name=name,
        namespace=namespace,
        metadata=metadata,
        spec=spec,
        source=source,
        template=template,
        saltenv=saltenv)
    cfg = _setup_conn(**kwargs)
    try:
        api_instance = kubernetes.client.CoreV1Api()
        api_response = api_instance.create_namespaced_pod(
            namespace, body)
        return api_response.to_dict()
    except (ApiException, HTTPError) as exc:
        # A 404 is an expected "not found" answer, not an error condition.
        if isinstance(exc, ApiException) and exc.status == 404:
            return None
        log.exception('Exception when calling CoreV1Api->create_namespaced_pod')
        raise CommandExecutionError(exc)
    finally:
        # Always tear down the connection config, success or failure.
        _cleanup(**cfg)
constant[
Creates the kubernetes deployment as defined by the user.
]
variable[body] assign[=] call[name[__create_object_body], parameter[]]
variable[cfg] assign[=] call[name[_setup_conn], parameter[]]
<ast.Try object at 0x7da1b21a7a60> | keyword[def] identifier[create_pod] (
identifier[name] ,
identifier[namespace] ,
identifier[metadata] ,
identifier[spec] ,
identifier[source] ,
identifier[template] ,
identifier[saltenv] ,
** identifier[kwargs] ):
literal[string]
identifier[body] = identifier[__create_object_body] (
identifier[kind] = literal[string] ,
identifier[obj_class] = identifier[kubernetes] . identifier[client] . identifier[V1Pod] ,
identifier[spec_creator] = identifier[__dict_to_pod_spec] ,
identifier[name] = identifier[name] ,
identifier[namespace] = identifier[namespace] ,
identifier[metadata] = identifier[metadata] ,
identifier[spec] = identifier[spec] ,
identifier[source] = identifier[source] ,
identifier[template] = identifier[template] ,
identifier[saltenv] = identifier[saltenv] )
identifier[cfg] = identifier[_setup_conn] (** identifier[kwargs] )
keyword[try] :
identifier[api_instance] = identifier[kubernetes] . identifier[client] . identifier[CoreV1Api] ()
identifier[api_response] = identifier[api_instance] . identifier[create_namespaced_pod] (
identifier[namespace] , identifier[body] )
keyword[return] identifier[api_response] . identifier[to_dict] ()
keyword[except] ( identifier[ApiException] , identifier[HTTPError] ) keyword[as] identifier[exc] :
keyword[if] identifier[isinstance] ( identifier[exc] , identifier[ApiException] ) keyword[and] identifier[exc] . identifier[status] == literal[int] :
keyword[return] keyword[None]
keyword[else] :
identifier[log] . identifier[exception] (
literal[string]
literal[string]
)
keyword[raise] identifier[CommandExecutionError] ( identifier[exc] )
keyword[finally] :
identifier[_cleanup] (** identifier[cfg] ) | def create_pod(name, namespace, metadata, spec, source, template, saltenv, **kwargs):
"""
Creates the kubernetes deployment as defined by the user.
"""
body = __create_object_body(kind='Pod', obj_class=kubernetes.client.V1Pod, spec_creator=__dict_to_pod_spec, name=name, namespace=namespace, metadata=metadata, spec=spec, source=source, template=template, saltenv=saltenv)
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.create_namespaced_pod(namespace, body)
return api_response.to_dict() # depends on [control=['try'], data=[]]
except (ApiException, HTTPError) as exc:
if isinstance(exc, ApiException) and exc.status == 404:
return None # depends on [control=['if'], data=[]]
else:
log.exception('Exception when calling CoreV1Api->create_namespaced_pod')
raise CommandExecutionError(exc) # depends on [control=['except'], data=['exc']]
finally:
_cleanup(**cfg) |
def reset(self):
    """
    Clear the active cells.
    """
    # Phase state: bump phases are column vectors, displacements are rows.
    self.bumpPhases = np.empty((2, 0), dtype="float")
    self.phaseDisplacement = np.empty((0, 2), dtype="float")
    # All cell-index collections start out empty.
    for attr in ("cellsForActivePhases", "activeCells",
                 "learningCells", "sensoryAssociatedCells"):
        setattr(self, attr, np.empty(0, dtype="int"))
constant[
Clear the active cells.
]
name[self].bumpPhases assign[=] call[name[np].empty, parameter[tuple[[<ast.Constant object at 0x7da1b0830ac0>, <ast.Constant object at 0x7da1b0832140>]]]]
name[self].phaseDisplacement assign[=] call[name[np].empty, parameter[tuple[[<ast.Constant object at 0x7da1b09000a0>, <ast.Constant object at 0x7da1b09028c0>]]]]
name[self].cellsForActivePhases assign[=] call[name[np].empty, parameter[constant[0]]]
name[self].activeCells assign[=] call[name[np].empty, parameter[constant[0]]]
name[self].learningCells assign[=] call[name[np].empty, parameter[constant[0]]]
name[self].sensoryAssociatedCells assign[=] call[name[np].empty, parameter[constant[0]]] | keyword[def] identifier[reset] ( identifier[self] ):
literal[string]
identifier[self] . identifier[bumpPhases] = identifier[np] . identifier[empty] (( literal[int] , literal[int] ), identifier[dtype] = literal[string] )
identifier[self] . identifier[phaseDisplacement] = identifier[np] . identifier[empty] (( literal[int] , literal[int] ), identifier[dtype] = literal[string] )
identifier[self] . identifier[cellsForActivePhases] = identifier[np] . identifier[empty] ( literal[int] , identifier[dtype] = literal[string] )
identifier[self] . identifier[activeCells] = identifier[np] . identifier[empty] ( literal[int] , identifier[dtype] = literal[string] )
identifier[self] . identifier[learningCells] = identifier[np] . identifier[empty] ( literal[int] , identifier[dtype] = literal[string] )
identifier[self] . identifier[sensoryAssociatedCells] = identifier[np] . identifier[empty] ( literal[int] , identifier[dtype] = literal[string] ) | def reset(self):
"""
Clear the active cells.
"""
self.bumpPhases = np.empty((2, 0), dtype='float')
self.phaseDisplacement = np.empty((0, 2), dtype='float')
self.cellsForActivePhases = np.empty(0, dtype='int')
self.activeCells = np.empty(0, dtype='int')
self.learningCells = np.empty(0, dtype='int')
self.sensoryAssociatedCells = np.empty(0, dtype='int') |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.