code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
|---|---|---|---|
def create_ical(request, slug):
    """Render an event as a downloadable iCalendar (.ics) attachment.

    Looks up the :class:`Event` by *slug* (404 if missing) and serializes
    it with python-card-me.
    """
    event = get_object_or_404(Event, slug=slug)
    # Event dates are plain dates; promote them to midnight datetimes
    # until the model switches to storing datetimes natively.
    start = datetime.datetime.combine(event.start_date, datetime.time())
    if event.end_date:
        end = datetime.datetime.combine(event.end_date, datetime.time())
    else:
        # No explicit end: treat the event as ending when it starts.
        end = start
    cal = card_me.iCalendar()
    cal.add('method').value = 'PUBLISH'
    vevent = cal.add('vevent')
    for prop, prop_value in (('dtstart', start),
                             ('dtend', end),
                             ('dtstamp', datetime.datetime.now()),
                             ('summary', event.name)):
        vevent.add(prop).value = prop_value
    response = HttpResponse(cal.serialize(), content_type='text/calendar')
    response['Filename'] = 'filename.ics'
    response['Content-Disposition'] = 'attachment; filename=filename.ics'
    return response
|
def function[create_ical, parameter[request, slug]]:
constant[ Creates an ical .ics file for an event using python-card-me. ]
variable[event] assign[=] call[name[get_object_or_404], parameter[name[Event]]]
variable[start] assign[=] name[event].start_date
variable[start] assign[=] call[name[datetime].datetime, parameter[name[start].year, name[start].month, name[start].day]]
if name[event].end_date begin[:]
variable[end] assign[=] name[event].end_date
variable[end] assign[=] call[name[datetime].datetime, parameter[name[end].year, name[end].month, name[end].day]]
variable[cal] assign[=] call[name[card_me].iCalendar, parameter[]]
call[name[cal].add, parameter[constant[method]]].value assign[=] constant[PUBLISH]
variable[vevent] assign[=] call[name[cal].add, parameter[constant[vevent]]]
call[name[vevent].add, parameter[constant[dtstart]]].value assign[=] name[start]
call[name[vevent].add, parameter[constant[dtend]]].value assign[=] name[end]
call[name[vevent].add, parameter[constant[dtstamp]]].value assign[=] call[name[datetime].datetime.now, parameter[]]
call[name[vevent].add, parameter[constant[summary]]].value assign[=] name[event].name
variable[response] assign[=] call[name[HttpResponse], parameter[call[name[cal].serialize, parameter[]]]]
call[name[response]][constant[Filename]] assign[=] constant[filename.ics]
call[name[response]][constant[Content-Disposition]] assign[=] constant[attachment; filename=filename.ics]
return[name[response]]
|
keyword[def] identifier[create_ical] ( identifier[request] , identifier[slug] ):
literal[string]
identifier[event] = identifier[get_object_or_404] ( identifier[Event] , identifier[slug] = identifier[slug] )
identifier[start] = identifier[event] . identifier[start_date]
identifier[start] = identifier[datetime] . identifier[datetime] ( identifier[start] . identifier[year] , identifier[start] . identifier[month] , identifier[start] . identifier[day] )
keyword[if] identifier[event] . identifier[end_date] :
identifier[end] = identifier[event] . identifier[end_date]
identifier[end] = identifier[datetime] . identifier[datetime] ( identifier[end] . identifier[year] , identifier[end] . identifier[month] , identifier[end] . identifier[day] )
keyword[else] :
identifier[end] = identifier[start]
identifier[cal] = identifier[card_me] . identifier[iCalendar] ()
identifier[cal] . identifier[add] ( literal[string] ). identifier[value] = literal[string]
identifier[vevent] = identifier[cal] . identifier[add] ( literal[string] )
identifier[vevent] . identifier[add] ( literal[string] ). identifier[value] = identifier[start]
identifier[vevent] . identifier[add] ( literal[string] ). identifier[value] = identifier[end]
identifier[vevent] . identifier[add] ( literal[string] ). identifier[value] = identifier[datetime] . identifier[datetime] . identifier[now] ()
identifier[vevent] . identifier[add] ( literal[string] ). identifier[value] = identifier[event] . identifier[name]
identifier[response] = identifier[HttpResponse] ( identifier[cal] . identifier[serialize] (), identifier[content_type] = literal[string] )
identifier[response] [ literal[string] ]= literal[string]
identifier[response] [ literal[string] ]= literal[string]
keyword[return] identifier[response]
|
def create_ical(request, slug):
""" Creates an ical .ics file for an event using python-card-me. """
event = get_object_or_404(Event, slug=slug)
# convert dates to datetimes.
# when we change code to datetimes, we won't have to do this.
start = event.start_date
start = datetime.datetime(start.year, start.month, start.day)
if event.end_date:
end = event.end_date
end = datetime.datetime(end.year, end.month, end.day) # depends on [control=['if'], data=[]]
else:
end = start
cal = card_me.iCalendar()
cal.add('method').value = 'PUBLISH'
vevent = cal.add('vevent')
vevent.add('dtstart').value = start
vevent.add('dtend').value = end
vevent.add('dtstamp').value = datetime.datetime.now()
vevent.add('summary').value = event.name
response = HttpResponse(cal.serialize(), content_type='text/calendar')
response['Filename'] = 'filename.ics'
response['Content-Disposition'] = 'attachment; filename=filename.ics'
return response
|
def editable(parsed, context, token):
    """
    Add the required HTML to the parsed content for in-line editing,
    such as the icon and edit form if the object is deemed to be
    editable - either it has an ``editable`` method which returns
    ``True``, or the logged in user has change permissions for the
    model.
    """
    def parse_field(field):
        # Resolve a dotted template argument such as "page.content" into
        # an (object, attribute-name) pair: the first segment is looked up
        # in the template context, intermediate segments are traversed via
        # getattr, and the final segment is kept as the attribute name.
        field = field.split(".")
        obj = context.get(field.pop(0), None)
        attr = field.pop()
        while field:
            obj = getattr(obj, field.pop(0))
            if callable(obj):
                # Allows {% editable page.get_content_model.content %}
                obj = obj()
        return obj, attr
    # Every argument after the tag name is a field reference.
    fields = [parse_field(f) for f in token.split_contents()[1:]]
    if fields:
        # Keep only well-formed pairs whose object is *the same instance*
        # as the first field's object - one edit form edits one instance.
        fields = [f for f in fields if len(f) == 2 and f[0] is fields[0][0]]
    if not parsed.strip():
        # Nothing was rendered inside the tag; fall back to the raw
        # attribute values so there is something to display and edit.
        try:
            parsed = "".join([str(getattr(*field)) for field in fields])
        except AttributeError:
            pass
    if settings.INLINE_EDITING_ENABLED and fields and "request" in context:
        obj = fields[0][0]
        if isinstance(obj, Model) and is_editable(obj, context["request"]):
            # Wrap the rendered content in the editable form template,
            # exposing the form and the original markup to it.
            field_names = ",".join([f[1] for f in fields])
            context["editable_form"] = get_edit_form(obj, field_names)
            context["original"] = parsed
            t = get_template("includes/editable_form.html")
            return t.render(context)
    return parsed
|
def function[editable, parameter[parsed, context, token]]:
constant[
Add the required HTML to the parsed content for in-line editing,
such as the icon and edit form if the object is deemed to be
editable - either it has an ``editable`` method which returns
``True``, or the logged in user has change permissions for the
model.
]
def function[parse_field, parameter[field]]:
variable[field] assign[=] call[name[field].split, parameter[constant[.]]]
variable[obj] assign[=] call[name[context].get, parameter[call[name[field].pop, parameter[constant[0]]], constant[None]]]
variable[attr] assign[=] call[name[field].pop, parameter[]]
while name[field] begin[:]
variable[obj] assign[=] call[name[getattr], parameter[name[obj], call[name[field].pop, parameter[constant[0]]]]]
if call[name[callable], parameter[name[obj]]] begin[:]
variable[obj] assign[=] call[name[obj], parameter[]]
return[tuple[[<ast.Name object at 0x7da207f01720>, <ast.Name object at 0x7da207f02fb0>]]]
variable[fields] assign[=] <ast.ListComp object at 0x7da207f01120>
if name[fields] begin[:]
variable[fields] assign[=] <ast.ListComp object at 0x7da207f02230>
if <ast.UnaryOp object at 0x7da1b15798a0> begin[:]
<ast.Try object at 0x7da1b157a860>
if <ast.BoolOp object at 0x7da1b1578430> begin[:]
variable[obj] assign[=] call[call[name[fields]][constant[0]]][constant[0]]
if <ast.BoolOp object at 0x7da1b1579240> begin[:]
variable[field_names] assign[=] call[constant[,].join, parameter[<ast.ListComp object at 0x7da1b157bd90>]]
call[name[context]][constant[editable_form]] assign[=] call[name[get_edit_form], parameter[name[obj], name[field_names]]]
call[name[context]][constant[original]] assign[=] name[parsed]
variable[t] assign[=] call[name[get_template], parameter[constant[includes/editable_form.html]]]
return[call[name[t].render, parameter[name[context]]]]
return[name[parsed]]
|
keyword[def] identifier[editable] ( identifier[parsed] , identifier[context] , identifier[token] ):
literal[string]
keyword[def] identifier[parse_field] ( identifier[field] ):
identifier[field] = identifier[field] . identifier[split] ( literal[string] )
identifier[obj] = identifier[context] . identifier[get] ( identifier[field] . identifier[pop] ( literal[int] ), keyword[None] )
identifier[attr] = identifier[field] . identifier[pop] ()
keyword[while] identifier[field] :
identifier[obj] = identifier[getattr] ( identifier[obj] , identifier[field] . identifier[pop] ( literal[int] ))
keyword[if] identifier[callable] ( identifier[obj] ):
identifier[obj] = identifier[obj] ()
keyword[return] identifier[obj] , identifier[attr]
identifier[fields] =[ identifier[parse_field] ( identifier[f] ) keyword[for] identifier[f] keyword[in] identifier[token] . identifier[split_contents] ()[ literal[int] :]]
keyword[if] identifier[fields] :
identifier[fields] =[ identifier[f] keyword[for] identifier[f] keyword[in] identifier[fields] keyword[if] identifier[len] ( identifier[f] )== literal[int] keyword[and] identifier[f] [ literal[int] ] keyword[is] identifier[fields] [ literal[int] ][ literal[int] ]]
keyword[if] keyword[not] identifier[parsed] . identifier[strip] ():
keyword[try] :
identifier[parsed] = literal[string] . identifier[join] ([ identifier[str] ( identifier[getattr] (* identifier[field] )) keyword[for] identifier[field] keyword[in] identifier[fields] ])
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[if] identifier[settings] . identifier[INLINE_EDITING_ENABLED] keyword[and] identifier[fields] keyword[and] literal[string] keyword[in] identifier[context] :
identifier[obj] = identifier[fields] [ literal[int] ][ literal[int] ]
keyword[if] identifier[isinstance] ( identifier[obj] , identifier[Model] ) keyword[and] identifier[is_editable] ( identifier[obj] , identifier[context] [ literal[string] ]):
identifier[field_names] = literal[string] . identifier[join] ([ identifier[f] [ literal[int] ] keyword[for] identifier[f] keyword[in] identifier[fields] ])
identifier[context] [ literal[string] ]= identifier[get_edit_form] ( identifier[obj] , identifier[field_names] )
identifier[context] [ literal[string] ]= identifier[parsed]
identifier[t] = identifier[get_template] ( literal[string] )
keyword[return] identifier[t] . identifier[render] ( identifier[context] )
keyword[return] identifier[parsed]
|
def editable(parsed, context, token):
"""
Add the required HTML to the parsed content for in-line editing,
such as the icon and edit form if the object is deemed to be
editable - either it has an ``editable`` method which returns
``True``, or the logged in user has change permissions for the
model.
"""
def parse_field(field):
field = field.split('.')
obj = context.get(field.pop(0), None)
attr = field.pop()
while field:
obj = getattr(obj, field.pop(0))
if callable(obj):
# Allows {% editable page.get_content_model.content %}
obj = obj() # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return (obj, attr)
fields = [parse_field(f) for f in token.split_contents()[1:]]
if fields:
fields = [f for f in fields if len(f) == 2 and f[0] is fields[0][0]] # depends on [control=['if'], data=[]]
if not parsed.strip():
try:
parsed = ''.join([str(getattr(*field)) for field in fields]) # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if settings.INLINE_EDITING_ENABLED and fields and ('request' in context):
obj = fields[0][0]
if isinstance(obj, Model) and is_editable(obj, context['request']):
field_names = ','.join([f[1] for f in fields])
context['editable_form'] = get_edit_form(obj, field_names)
context['original'] = parsed
t = get_template('includes/editable_form.html')
return t.render(context) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return parsed
|
def chdir(self, directory, browsing_history=False,
          refresh_explorer=True, refresh_console=True):
    """Set directory as working directory"""
    if directory:
        directory = osp.abspath(to_text_string(directory))
    # Working directory history management
    if browsing_history:
        # Back/forward navigation: reuse the entry at the current index.
        directory = self.history[self.histindex]
    elif directory in self.history:
        # Already-visited directory: just move the index onto it.
        self.histindex = self.history.index(directory)
    else:
        if self.histindex is None:
            self.history = []
        else:
            # Drop any "forward" entries beyond the current position
            # before appending, like a browser history does.
            self.history = self.history[:self.histindex+1]
        self.history.append(directory)
        self.histindex = len(self.history)-1
    # Changing working directory
    try:
        os.chdir(directory)
        if refresh_explorer:
            self.set_explorer_cwd.emit(directory)
        if refresh_console:
            self.set_current_console_wd.emit(directory)
        self.refresh_findinfiles.emit()
    except OSError:
        # Directory not accessible: remove the entry at the current index.
        # NOTE(review): if the directory was already in history (no new
        # entry appended), this pops a pre-existing entry - confirm intended.
        self.history.pop(self.histindex)
    self.refresh_plugin()
|
def function[chdir, parameter[self, directory, browsing_history, refresh_explorer, refresh_console]]:
constant[Set directory as working directory]
if name[directory] begin[:]
variable[directory] assign[=] call[name[osp].abspath, parameter[call[name[to_text_string], parameter[name[directory]]]]]
if name[browsing_history] begin[:]
variable[directory] assign[=] call[name[self].history][name[self].histindex]
<ast.Try object at 0x7da20e962680>
call[name[self].refresh_plugin, parameter[]]
|
keyword[def] identifier[chdir] ( identifier[self] , identifier[directory] , identifier[browsing_history] = keyword[False] ,
identifier[refresh_explorer] = keyword[True] , identifier[refresh_console] = keyword[True] ):
literal[string]
keyword[if] identifier[directory] :
identifier[directory] = identifier[osp] . identifier[abspath] ( identifier[to_text_string] ( identifier[directory] ))
keyword[if] identifier[browsing_history] :
identifier[directory] = identifier[self] . identifier[history] [ identifier[self] . identifier[histindex] ]
keyword[elif] identifier[directory] keyword[in] identifier[self] . identifier[history] :
identifier[self] . identifier[histindex] = identifier[self] . identifier[history] . identifier[index] ( identifier[directory] )
keyword[else] :
keyword[if] identifier[self] . identifier[histindex] keyword[is] keyword[None] :
identifier[self] . identifier[history] =[]
keyword[else] :
identifier[self] . identifier[history] = identifier[self] . identifier[history] [: identifier[self] . identifier[histindex] + literal[int] ]
identifier[self] . identifier[history] . identifier[append] ( identifier[directory] )
identifier[self] . identifier[histindex] = identifier[len] ( identifier[self] . identifier[history] )- literal[int]
keyword[try] :
identifier[os] . identifier[chdir] ( identifier[directory] )
keyword[if] identifier[refresh_explorer] :
identifier[self] . identifier[set_explorer_cwd] . identifier[emit] ( identifier[directory] )
keyword[if] identifier[refresh_console] :
identifier[self] . identifier[set_current_console_wd] . identifier[emit] ( identifier[directory] )
identifier[self] . identifier[refresh_findinfiles] . identifier[emit] ()
keyword[except] identifier[OSError] :
identifier[self] . identifier[history] . identifier[pop] ( identifier[self] . identifier[histindex] )
identifier[self] . identifier[refresh_plugin] ()
|
def chdir(self, directory, browsing_history=False, refresh_explorer=True, refresh_console=True):
"""Set directory as working directory"""
if directory:
directory = osp.abspath(to_text_string(directory)) # depends on [control=['if'], data=[]] # Working directory history management
if browsing_history:
directory = self.history[self.histindex] # depends on [control=['if'], data=[]]
elif directory in self.history:
self.histindex = self.history.index(directory) # depends on [control=['if'], data=['directory']]
else:
if self.histindex is None:
self.history = [] # depends on [control=['if'], data=[]]
else:
self.history = self.history[:self.histindex + 1]
self.history.append(directory)
self.histindex = len(self.history) - 1 # Changing working directory
try:
os.chdir(directory)
if refresh_explorer:
self.set_explorer_cwd.emit(directory) # depends on [control=['if'], data=[]]
if refresh_console:
self.set_current_console_wd.emit(directory) # depends on [control=['if'], data=[]]
self.refresh_findinfiles.emit() # depends on [control=['try'], data=[]]
except OSError:
self.history.pop(self.histindex) # depends on [control=['except'], data=[]]
self.refresh_plugin()
|
def refresh_state(self, id_or_uri, configuration, timeout=-1):
    """
    Refreshes a drive enclosure.
    Args:
        id_or_uri: Can be either the resource ID or the resource URI.
        configuration: Configuration
        timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
            in OneView; it just stops waiting for its completion.
    Returns:
        dict: Drive Enclosure
    """
    # Build the refresh endpoint for this resource, then issue the update.
    target_uri = '{0}{1}'.format(self._client.build_uri(id_or_uri),
                                 self.REFRESH_STATE_PATH)
    return self._client.update(resource=configuration, uri=target_uri,
                               timeout=timeout)
|
def function[refresh_state, parameter[self, id_or_uri, configuration, timeout]]:
constant[
Refreshes a drive enclosure.
Args:
id_or_uri: Can be either the resource ID or the resource URI.
configuration: Configuration
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Drive Enclosure
]
variable[uri] assign[=] binary_operation[call[name[self]._client.build_uri, parameter[name[id_or_uri]]] + name[self].REFRESH_STATE_PATH]
return[call[name[self]._client.update, parameter[]]]
|
keyword[def] identifier[refresh_state] ( identifier[self] , identifier[id_or_uri] , identifier[configuration] , identifier[timeout] =- literal[int] ):
literal[string]
identifier[uri] = identifier[self] . identifier[_client] . identifier[build_uri] ( identifier[id_or_uri] )+ identifier[self] . identifier[REFRESH_STATE_PATH]
keyword[return] identifier[self] . identifier[_client] . identifier[update] ( identifier[resource] = identifier[configuration] , identifier[uri] = identifier[uri] , identifier[timeout] = identifier[timeout] )
|
def refresh_state(self, id_or_uri, configuration, timeout=-1):
"""
Refreshes a drive enclosure.
Args:
id_or_uri: Can be either the resource ID or the resource URI.
configuration: Configuration
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Drive Enclosure
"""
uri = self._client.build_uri(id_or_uri) + self.REFRESH_STATE_PATH
return self._client.update(resource=configuration, uri=uri, timeout=timeout)
|
def _parse_value(value):
'''Internal helper for parsing configuration values into python values'''
if isinstance(value, bool):
return 'true' if value else 'false'
elif isinstance(value, six.string_types):
# parse compacted notation to dict
listparser = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''')
value = value.strip()
if value.startswith('[') and value.endswith(']'):
return listparser.split(value[1:-1])[1::2]
elif value.startswith('(') and value.endswith(')'):
rval = {}
for pair in listparser.split(value[1:-1])[1::2]:
pair = pair.split('=')
if '"' in pair[1]:
pair[1] = pair[1].replace('"', '')
if pair[1].isdigit():
rval[pair[0]] = int(pair[1])
elif pair[1] == 'true':
rval[pair[0]] = True
elif pair[1] == 'false':
rval[pair[0]] = False
else:
rval[pair[0]] = pair[1]
return rval
else:
if '"' in value:
value = value.replace('"', '')
if value.isdigit():
return int(value)
elif value == 'true':
return True
elif value == 'false':
return False
else:
return value
else:
return value
|
def function[_parse_value, parameter[value]]:
constant[Internal helper for parsing configuration values into python values]
if call[name[isinstance], parameter[name[value], name[bool]]] begin[:]
return[<ast.IfExp object at 0x7da1b1f821d0>]
|
keyword[def] identifier[_parse_value] ( identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[bool] ):
keyword[return] literal[string] keyword[if] identifier[value] keyword[else] literal[string]
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[six] . identifier[string_types] ):
identifier[listparser] = identifier[re] . identifier[compile] ( literal[string] )
identifier[value] = identifier[value] . identifier[strip] ()
keyword[if] identifier[value] . identifier[startswith] ( literal[string] ) keyword[and] identifier[value] . identifier[endswith] ( literal[string] ):
keyword[return] identifier[listparser] . identifier[split] ( identifier[value] [ literal[int] :- literal[int] ])[ literal[int] :: literal[int] ]
keyword[elif] identifier[value] . identifier[startswith] ( literal[string] ) keyword[and] identifier[value] . identifier[endswith] ( literal[string] ):
identifier[rval] ={}
keyword[for] identifier[pair] keyword[in] identifier[listparser] . identifier[split] ( identifier[value] [ literal[int] :- literal[int] ])[ literal[int] :: literal[int] ]:
identifier[pair] = identifier[pair] . identifier[split] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[pair] [ literal[int] ]:
identifier[pair] [ literal[int] ]= identifier[pair] [ literal[int] ]. identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[pair] [ literal[int] ]. identifier[isdigit] ():
identifier[rval] [ identifier[pair] [ literal[int] ]]= identifier[int] ( identifier[pair] [ literal[int] ])
keyword[elif] identifier[pair] [ literal[int] ]== literal[string] :
identifier[rval] [ identifier[pair] [ literal[int] ]]= keyword[True]
keyword[elif] identifier[pair] [ literal[int] ]== literal[string] :
identifier[rval] [ identifier[pair] [ literal[int] ]]= keyword[False]
keyword[else] :
identifier[rval] [ identifier[pair] [ literal[int] ]]= identifier[pair] [ literal[int] ]
keyword[return] identifier[rval]
keyword[else] :
keyword[if] literal[string] keyword[in] identifier[value] :
identifier[value] = identifier[value] . identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[value] . identifier[isdigit] ():
keyword[return] identifier[int] ( identifier[value] )
keyword[elif] identifier[value] == literal[string] :
keyword[return] keyword[True]
keyword[elif] identifier[value] == literal[string] :
keyword[return] keyword[False]
keyword[else] :
keyword[return] identifier[value]
keyword[else] :
keyword[return] identifier[value]
|
def _parse_value(value):
"""Internal helper for parsing configuration values into python values"""
if isinstance(value, bool):
return 'true' if value else 'false' # depends on [control=['if'], data=[]]
elif isinstance(value, six.string_types):
# parse compacted notation to dict
listparser = re.compile('((?:[^,"\']|"[^"]*"|\'[^\']*\')+)')
value = value.strip()
if value.startswith('[') and value.endswith(']'):
return listparser.split(value[1:-1])[1::2] # depends on [control=['if'], data=[]]
elif value.startswith('(') and value.endswith(')'):
rval = {}
for pair in listparser.split(value[1:-1])[1::2]:
pair = pair.split('=')
if '"' in pair[1]:
pair[1] = pair[1].replace('"', '') # depends on [control=['if'], data=[]]
if pair[1].isdigit():
rval[pair[0]] = int(pair[1]) # depends on [control=['if'], data=[]]
elif pair[1] == 'true':
rval[pair[0]] = True # depends on [control=['if'], data=[]]
elif pair[1] == 'false':
rval[pair[0]] = False # depends on [control=['if'], data=[]]
else:
rval[pair[0]] = pair[1] # depends on [control=['for'], data=['pair']]
return rval # depends on [control=['if'], data=[]]
else:
if '"' in value:
value = value.replace('"', '') # depends on [control=['if'], data=['value']]
if value.isdigit():
return int(value) # depends on [control=['if'], data=[]]
elif value == 'true':
return True # depends on [control=['if'], data=[]]
elif value == 'false':
return False # depends on [control=['if'], data=[]]
else:
return value # depends on [control=['if'], data=[]]
else:
return value
|
def UnPlug(self, force=False):
    """Unplug controller from Virtual USB Bus and free up ID"""
    # Pick the forced or regular unplug entry point and invoke it.
    unplug = _xinput.UnPlugForce if force else _xinput.UnPlug
    unplug(c_uint(self.id))
    # Busy-wait until the bus reports our ID as available again;
    # ID 0 is special-cased and never waited on.
    while self.id not in self.available_ids():
        if self.id == 0:
            break
|
def function[UnPlug, parameter[self, force]]:
constant[Unplug controller from Virtual USB Bus and free up ID]
if name[force] begin[:]
call[name[_xinput].UnPlugForce, parameter[call[name[c_uint], parameter[name[self].id]]]]
while compare[name[self].id <ast.NotIn object at 0x7da2590d7190> call[name[self].available_ids, parameter[]]] begin[:]
if compare[name[self].id equal[==] constant[0]] begin[:]
break
|
keyword[def] identifier[UnPlug] ( identifier[self] , identifier[force] = keyword[False] ):
literal[string]
keyword[if] identifier[force] :
identifier[_xinput] . identifier[UnPlugForce] ( identifier[c_uint] ( identifier[self] . identifier[id] ))
keyword[else] :
identifier[_xinput] . identifier[UnPlug] ( identifier[c_uint] ( identifier[self] . identifier[id] ))
keyword[while] identifier[self] . identifier[id] keyword[not] keyword[in] identifier[self] . identifier[available_ids] ():
keyword[if] identifier[self] . identifier[id] == literal[int] :
keyword[break]
|
def UnPlug(self, force=False):
"""Unplug controller from Virtual USB Bus and free up ID"""
if force:
_xinput.UnPlugForce(c_uint(self.id)) # depends on [control=['if'], data=[]]
else:
_xinput.UnPlug(c_uint(self.id))
while self.id not in self.available_ids():
if self.id == 0:
break # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
|
def vcf2cytosure(store, institute_id, case_name, individual_id):
    """Return the vcf2cytosure CGH file info for an individual.

    Args:
        store: adapter used to look up the institute/case.
        institute_id: ID of the institute owning the case.
        case_name: display name of the case.
        individual_id: ID of the individual within the case.

    Returns:
        tuple: ``(display_name, vcf2cytosure)`` of the matching individual.

    Raises:
        ValueError: if the case has no individual with ``individual_id``.
    """
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    for individual in case_obj['individuals']:
        if individual['individual_id'] == individual_id:
            # Stop at the first match instead of scanning the rest.
            break
    else:
        # Previously a missing individual fell through to an unbound local
        # (NameError); raise an explicit, descriptive error instead.
        raise ValueError(
            "individual {} not found in case {}".format(individual_id, case_name))
    return (individual['display_name'], individual['vcf2cytosure'])
|
def function[vcf2cytosure, parameter[store, institute_id, case_name, individual_id]]:
constant[vcf2cytosure CGH file for inidividual.]
<ast.Tuple object at 0x7da2046213f0> assign[=] call[name[institute_and_case], parameter[name[store], name[institute_id], name[case_name]]]
for taget[name[individual]] in starred[call[name[case_obj]][constant[individuals]]] begin[:]
if compare[call[name[individual]][constant[individual_id]] equal[==] name[individual_id]] begin[:]
variable[individual_obj] assign[=] name[individual]
return[tuple[[<ast.Subscript object at 0x7da1b2344f10>, <ast.Subscript object at 0x7da1b2347640>]]]
|
keyword[def] identifier[vcf2cytosure] ( identifier[store] , identifier[institute_id] , identifier[case_name] , identifier[individual_id] ):
literal[string]
identifier[institute_obj] , identifier[case_obj] = identifier[institute_and_case] ( identifier[store] , identifier[institute_id] , identifier[case_name] )
keyword[for] identifier[individual] keyword[in] identifier[case_obj] [ literal[string] ]:
keyword[if] identifier[individual] [ literal[string] ]== identifier[individual_id] :
identifier[individual_obj] = identifier[individual]
keyword[return] ( identifier[individual_obj] [ literal[string] ], identifier[individual_obj] [ literal[string] ])
|
def vcf2cytosure(store, institute_id, case_name, individual_id):
"""vcf2cytosure CGH file for inidividual."""
(institute_obj, case_obj) = institute_and_case(store, institute_id, case_name)
for individual in case_obj['individuals']:
if individual['individual_id'] == individual_id:
individual_obj = individual # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['individual']]
return (individual_obj['display_name'], individual_obj['vcf2cytosure'])
|
def set_visible_func(self, visible_func):
    """Install *visible_func* as the predicate deciding item visibility.

    :param visible_func: A callable that returns a boolean result to
                         decide if an item should be visible, for
                         example::

                             def is_visible(item):
                                 return True
    """
    # Register the internal wrapper with the user callback as its data
    # argument, remember the callback, and re-run filtering so the new
    # predicate takes effect immediately.
    model_filter = self.model_filter
    model_filter.set_visible_func(self._internal_visible_func, visible_func)
    self._visible_func = visible_func
    model_filter.refilter()
|
def function[set_visible_func, parameter[self, visible_func]]:
constant[Set the function to decide visibility of an item
:param visible_func: A callable that returns a boolean result to
decide if an item should be visible, for
example::
def is_visible(item):
return True
]
call[name[self].model_filter.set_visible_func, parameter[name[self]._internal_visible_func, name[visible_func]]]
name[self]._visible_func assign[=] name[visible_func]
call[name[self].model_filter.refilter, parameter[]]
|
keyword[def] identifier[set_visible_func] ( identifier[self] , identifier[visible_func] ):
literal[string]
identifier[self] . identifier[model_filter] . identifier[set_visible_func] (
identifier[self] . identifier[_internal_visible_func] ,
identifier[visible_func] ,
)
identifier[self] . identifier[_visible_func] = identifier[visible_func]
identifier[self] . identifier[model_filter] . identifier[refilter] ()
|
def set_visible_func(self, visible_func):
"""Set the function to decide visibility of an item
:param visible_func: A callable that returns a boolean result to
decide if an item should be visible, for
example::
def is_visible(item):
return True
"""
self.model_filter.set_visible_func(self._internal_visible_func, visible_func)
self._visible_func = visible_func
self.model_filter.refilter()
|
def create_entry(self, title='', image=1, url='', username='', password='',
                 comment='', y=2999, mon=12, d=28, h=23, min_=59, s=59):
    """Create a new entry inside this group.

    Compare to StdEntry for information about the arguments.
    One of the following arguments is needed:
    - title
    - url
    - username
    - password
    - comment
    """
    # The database owns entry bookkeeping; delegate creation to it with
    # this group passed along as the parent.
    return self.db.create_entry(self, title, image, url, username,
                                password, comment, y, mon, d, h, min_, s)
|
def function[create_entry, parameter[self, title, image, url, username, password, comment, y, mon, d, h, min_, s]]:
constant[This method creates an entry in this group.
Compare to StdEntry for information about the arguments.
One of the following arguments is needed:
- title
- url
- username
- password
- comment
]
return[call[name[self].db.create_entry, parameter[name[self], name[title], name[image], name[url], name[username], name[password], name[comment], name[y], name[mon], name[d], name[h], name[min_], name[s]]]]
|
keyword[def] identifier[create_entry] ( identifier[self] , identifier[title] = literal[string] , identifier[image] = literal[int] , identifier[url] = literal[string] , identifier[username] = literal[string] , identifier[password] = literal[string] ,
identifier[comment] = literal[string] , identifier[y] = literal[int] , identifier[mon] = literal[int] , identifier[d] = literal[int] , identifier[h] = literal[int] , identifier[min_] = literal[int] , identifier[s] = literal[int] ):
literal[string]
keyword[return] identifier[self] . identifier[db] . identifier[create_entry] ( identifier[self] , identifier[title] , identifier[image] , identifier[url] , identifier[username] ,
identifier[password] , identifier[comment] , identifier[y] , identifier[mon] , identifier[d] , identifier[h] , identifier[min_] , identifier[s] )
|
def create_entry(self, title='', image=1, url='', username='', password='', comment='', y=2999, mon=12, d=28, h=23, min_=59, s=59):
"""This method creates an entry in this group.
Compare to StdEntry for information about the arguments.
One of the following arguments is needed:
- title
- url
- username
- password
- comment
"""
return self.db.create_entry(self, title, image, url, username, password, comment, y, mon, d, h, min_, s)
|
def infer(self, number_of_processes=1, *args, **kwargs):
    """Run inference on all stored inference objects and collect results.

    :param number_of_processes: If set to more than 1, the inference routines will be paralellised
        using ``multiprocessing`` module
    :param args: arguments to pass to :meth:`Inference.infer`
    :param kwargs: keyword arguments to pass to :meth:`Inference.infer`
    :return: collection of results sorted by distance at minimum
    """
    if number_of_processes == 1:
        # Serial path: run each inference in-process.
        results = [inference.infer(*args, **kwargs)
                   for inference in self._inference_objects]
    else:
        # Parallel path: gather raw results from worker processes, then
        # let each inference object rebuild its full result from them.
        inference_objects = self._inference_objects
        raw_results = raw_results_in_parallel(inference_objects,
                                              number_of_processes,
                                              *args, **kwargs)
        results = [inference._result_from_raw_result(raw_result)
                   for inference, raw_result in zip(inference_objects,
                                                    raw_results)]
    ordered = sorted(results, key=lambda result: result.distance_at_minimum)
    return InferenceResultsCollection(ordered)
|
def function[infer, parameter[self, number_of_processes]]:
constant[
:param number_of_processes: If set to more than 1, the inference routines will be paralellised
using ``multiprocessing`` module
:param args: arguments to pass to :meth:`Inference.infer`
:param kwargs: keyword arguments to pass to :meth:`Inference.infer`
:return:
]
if compare[name[number_of_processes] equal[==] constant[1]] begin[:]
variable[results] assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da20c7944c0>, name[self]._inference_objects]]
variable[results] assign[=] call[name[sorted], parameter[name[results]]]
return[call[name[InferenceResultsCollection], parameter[name[results]]]]
|
keyword[def] identifier[infer] ( identifier[self] , identifier[number_of_processes] = literal[int] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[number_of_processes] == literal[int] :
identifier[results] = identifier[map] ( keyword[lambda] identifier[x] : identifier[x] . identifier[infer] (* identifier[args] ,** identifier[kwargs] ), identifier[self] . identifier[_inference_objects] )
keyword[else] :
identifier[inference_objects] = identifier[self] . identifier[_inference_objects]
identifier[results] = identifier[raw_results_in_parallel] ( identifier[self] . identifier[_inference_objects] , identifier[number_of_processes] ,* identifier[args] ,
** identifier[kwargs] )
identifier[results] =[ identifier[inference] . identifier[_result_from_raw_result] ( identifier[raw_result] )
keyword[for] identifier[inference] , identifier[raw_result] keyword[in] identifier[zip] ( identifier[inference_objects] , identifier[results] )]
identifier[results] = identifier[sorted] ( identifier[results] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] . identifier[distance_at_minimum] )
keyword[return] identifier[InferenceResultsCollection] ( identifier[results] )
|
def infer(self, number_of_processes=1, *args, **kwargs):
"""
:param number_of_processes: If set to more than 1, the inference routines will be paralellised
using ``multiprocessing`` module
:param args: arguments to pass to :meth:`Inference.infer`
:param kwargs: keyword arguments to pass to :meth:`Inference.infer`
:return:
"""
if number_of_processes == 1:
results = map(lambda x: x.infer(*args, **kwargs), self._inference_objects) # depends on [control=['if'], data=[]]
else:
inference_objects = self._inference_objects
results = raw_results_in_parallel(self._inference_objects, number_of_processes, *args, **kwargs)
results = [inference._result_from_raw_result(raw_result) for (inference, raw_result) in zip(inference_objects, results)]
results = sorted(results, key=lambda x: x.distance_at_minimum)
return InferenceResultsCollection(results)
|
def setup(self, builder):
    """Performs this component's simulation setup.

    Registers this state's excess-mortality rate and its population
    attributable fraction with the values system, hooks the excess
    mortality into the overall mortality rate, and grabs a population
    view restricted to living simulants currently in this state.

    Parameters
    ----------
    builder : `engine.Builder`
        Interface to several simulation tools.
    """
    super().setup(builder)
    self.clock = builder.time.clock()

    def zero_paf(index):
        # Default PAF source: no attributable fraction (all zeros).
        return [pd.Series(0, index=index)]

    self.excess_mortality_rate = builder.value.register_rate_producer(
        f'{self.state_id}.excess_mortality_rate',
        source=self.risk_deleted_excess_mortality_rate)
    self.excess_mortality_rate_paf = builder.value.register_value_producer(
        f'{self.state_id}.excess_mortality_rate.population_attributable_fraction',
        source=zero_paf,
        preferred_combiner=list_combiner,
        preferred_post_processor=joint_value_post_processor)
    builder.value.register_value_modifier('mortality_rate', self.add_in_excess_mortality)
    self.population_view = builder.population.get_view(
        [self._model],
        query=f"alive == 'alive' and {self._model} == '{self.state_id}'")
|
def function[setup, parameter[self, builder]]:
constant[Performs this component's simulation setup.
Parameters
----------
builder : `engine.Builder`
Interface to several simulation tools.
]
call[call[name[super], parameter[]].setup, parameter[name[builder]]]
name[self].clock assign[=] call[name[builder].time.clock, parameter[]]
name[self].excess_mortality_rate assign[=] call[name[builder].value.register_rate_producer, parameter[<ast.JoinedStr object at 0x7da18bc72290>]]
name[self].excess_mortality_rate_paf assign[=] call[name[builder].value.register_value_producer, parameter[<ast.JoinedStr object at 0x7da18bc731c0>]]
call[name[builder].value.register_value_modifier, parameter[constant[mortality_rate], name[self].add_in_excess_mortality]]
name[self].population_view assign[=] call[name[builder].population.get_view, parameter[list[[<ast.Attribute object at 0x7da18bc72140>]]]]
|
keyword[def] identifier[setup] ( identifier[self] , identifier[builder] ):
literal[string]
identifier[super] (). identifier[setup] ( identifier[builder] )
identifier[self] . identifier[clock] = identifier[builder] . identifier[time] . identifier[clock] ()
identifier[self] . identifier[excess_mortality_rate] = identifier[builder] . identifier[value] . identifier[register_rate_producer] (
literal[string] ,
identifier[source] = identifier[self] . identifier[risk_deleted_excess_mortality_rate]
)
identifier[self] . identifier[excess_mortality_rate_paf] = identifier[builder] . identifier[value] . identifier[register_value_producer] (
literal[string] ,
identifier[source] = keyword[lambda] identifier[index] :[ identifier[pd] . identifier[Series] ( literal[int] , identifier[index] = identifier[index] )],
identifier[preferred_combiner] = identifier[list_combiner] ,
identifier[preferred_post_processor] = identifier[joint_value_post_processor]
)
identifier[builder] . identifier[value] . identifier[register_value_modifier] ( literal[string] , identifier[self] . identifier[add_in_excess_mortality] )
identifier[self] . identifier[population_view] = identifier[builder] . identifier[population] . identifier[get_view] (
[ identifier[self] . identifier[_model] ], identifier[query] = literal[string] )
|
def setup(self, builder):
"""Performs this component's simulation setup.
Parameters
----------
builder : `engine.Builder`
Interface to several simulation tools.
"""
super().setup(builder)
self.clock = builder.time.clock()
self.excess_mortality_rate = builder.value.register_rate_producer(f'{self.state_id}.excess_mortality_rate', source=self.risk_deleted_excess_mortality_rate)
self.excess_mortality_rate_paf = builder.value.register_value_producer(f'{self.state_id}.excess_mortality_rate.population_attributable_fraction', source=lambda index: [pd.Series(0, index=index)], preferred_combiner=list_combiner, preferred_post_processor=joint_value_post_processor)
builder.value.register_value_modifier('mortality_rate', self.add_in_excess_mortality)
self.population_view = builder.population.get_view([self._model], query=f"alive == 'alive' and {self._model} == '{self.state_id}'")
|
def read(self):
    """Read a Response, do some validation, and return it."""
    verbose = FLAGS.sc2_verbose_protocol
    if verbose:
        self._log(" Reading response ".center(60, "-"))
        started_at = time.time()
    response = self._read()
    if verbose:
        self._log(" %0.1f msec\n" % (1000 * (time.time() - started_at)))
        self._log_packet(response)
    if not response.HasField("status"):
        raise ProtocolError("Got an incomplete response without a status.")
    previous = self._status
    self._status = Status(response.status)  # pytype: disable=not-callable
    if response.error:
        # Surface RPC-level errors loudly: log them and raise with full
        # before/after status context.
        message = ("Error in RPC response (likely a bug). "
                   "Prev status: %s, new status: %s, error:\n%s"
                   % (previous, self._status, "\n".join(response.error)))
        logging.error(message)
        raise ProtocolError(message)
    return response
|
def function[read, parameter[self]]:
constant[Read a Response, do some validation, and return it.]
if name[FLAGS].sc2_verbose_protocol begin[:]
call[name[self]._log, parameter[call[constant[ Reading response ].center, parameter[constant[60], constant[-]]]]]
variable[start] assign[=] call[name[time].time, parameter[]]
variable[response] assign[=] call[name[self]._read, parameter[]]
if name[FLAGS].sc2_verbose_protocol begin[:]
call[name[self]._log, parameter[binary_operation[constant[ %0.1f msec
] <ast.Mod object at 0x7da2590d6920> binary_operation[constant[1000] * binary_operation[call[name[time].time, parameter[]] - name[start]]]]]]
call[name[self]._log_packet, parameter[name[response]]]
if <ast.UnaryOp object at 0x7da18ede51e0> begin[:]
<ast.Raise object at 0x7da18ede5db0>
variable[prev_status] assign[=] name[self]._status
name[self]._status assign[=] call[name[Status], parameter[name[response].status]]
if name[response].error begin[:]
variable[err_str] assign[=] binary_operation[constant[Error in RPC response (likely a bug). Prev status: %s, new status: %s, error:
%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18ede4880>, <ast.Attribute object at 0x7da18ede6020>, <ast.Call object at 0x7da18ede5930>]]]
call[name[logging].error, parameter[name[err_str]]]
<ast.Raise object at 0x7da18ede58d0>
return[name[response]]
|
keyword[def] identifier[read] ( identifier[self] ):
literal[string]
keyword[if] identifier[FLAGS] . identifier[sc2_verbose_protocol] :
identifier[self] . identifier[_log] ( literal[string] . identifier[center] ( literal[int] , literal[string] ))
identifier[start] = identifier[time] . identifier[time] ()
identifier[response] = identifier[self] . identifier[_read] ()
keyword[if] identifier[FLAGS] . identifier[sc2_verbose_protocol] :
identifier[self] . identifier[_log] ( literal[string] %( literal[int] *( identifier[time] . identifier[time] ()- identifier[start] )))
identifier[self] . identifier[_log_packet] ( identifier[response] )
keyword[if] keyword[not] identifier[response] . identifier[HasField] ( literal[string] ):
keyword[raise] identifier[ProtocolError] ( literal[string] )
identifier[prev_status] = identifier[self] . identifier[_status]
identifier[self] . identifier[_status] = identifier[Status] ( identifier[response] . identifier[status] )
keyword[if] identifier[response] . identifier[error] :
identifier[err_str] =( literal[string]
literal[string] %(
identifier[prev_status] , identifier[self] . identifier[_status] , literal[string] . identifier[join] ( identifier[response] . identifier[error] )))
identifier[logging] . identifier[error] ( identifier[err_str] )
keyword[raise] identifier[ProtocolError] ( identifier[err_str] )
keyword[return] identifier[response]
|
def read(self):
"""Read a Response, do some validation, and return it."""
if FLAGS.sc2_verbose_protocol:
self._log(' Reading response '.center(60, '-'))
start = time.time() # depends on [control=['if'], data=[]]
response = self._read()
if FLAGS.sc2_verbose_protocol:
self._log(' %0.1f msec\n' % (1000 * (time.time() - start)))
self._log_packet(response) # depends on [control=['if'], data=[]]
if not response.HasField('status'):
raise ProtocolError('Got an incomplete response without a status.') # depends on [control=['if'], data=[]]
prev_status = self._status
self._status = Status(response.status) # pytype: disable=not-callable
if response.error:
err_str = 'Error in RPC response (likely a bug). Prev status: %s, new status: %s, error:\n%s' % (prev_status, self._status, '\n'.join(response.error))
logging.error(err_str)
raise ProtocolError(err_str) # depends on [control=['if'], data=[]]
return response
|
def make_sloppy_codec(encoding):
    """
    Take a codec name, and return a 'sloppy' version of that codec that can
    encode and decode the unassigned bytes in that encoding.

    Unassigned bytes keep their Latin-1 interpretation, and byte 0x1A (the
    "Substitute" control code) maps to the Unicode replacement character
    U+FFFD.  The returned `codecs.CodecInfo` is named
    ``'sloppy-' + encoding``.
    """
    # Every possible byte value, 0x00..0xFF.
    every_byte = bytes(range(256))
    # Seed the table with the Latin-1 reading of each byte.
    table = list(every_byte.decode('latin-1'))
    # Decode the same bytes with the requested encoding; unassigned bytes
    # come back as the replacement character.
    if PY26:
        target_chars = every_byte.decode(encoding, 'replace')
    else:
        target_chars = every_byte.decode(encoding, errors='replace')
    # Wherever the target encoding assigns a character, prefer it; the
    # Latin-1 fallback survives only for the unassigned bytes.
    for index, target_char in enumerate(target_chars):
        if target_char != REPLACEMENT_CHAR:
            table[index] = target_char
    # ftfy-specific tweak: let byte 1A ("Substitute") encode U+FFFD.
    table[0x1a] = REPLACEMENT_CHAR
    # Build the lookup structures used by the charmap codec machinery.
    decoding_table = ''.join(table)
    encoding_table = codecs.charmap_build(decoding_table)

    # Boilerplate mirroring the stdlib single-byte codecs (compare
    # `encodings.cp1252`), wired to the sloppy tables above.
    class Codec(codecs.Codec):
        def encode(self, input, errors='strict'):
            return codecs.charmap_encode(input, errors, encoding_table)

        def decode(self, input, errors='strict'):
            return codecs.charmap_decode(input, errors, decoding_table)

    class IncrementalEncoder(codecs.IncrementalEncoder):
        def encode(self, input, final=False):
            return codecs.charmap_encode(input, self.errors, encoding_table)[0]

    class IncrementalDecoder(codecs.IncrementalDecoder):
        def decode(self, input, final=False):
            return codecs.charmap_decode(input, self.errors, decoding_table)[0]

    class StreamWriter(Codec, codecs.StreamWriter):
        pass

    class StreamReader(Codec, codecs.StreamReader):
        pass

    return codecs.CodecInfo(
        name='sloppy-' + encoding,
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
|
def function[make_sloppy_codec, parameter[encoding]]:
constant[
Take a codec name, and return a 'sloppy' version of that codec that can
encode and decode the unassigned bytes in that encoding.
Single-byte encodings in the standard library are defined using some
boilerplate classes surrounding the functions that do the actual work,
`codecs.charmap_decode` and `charmap_encode`. This function, given an
encoding name, *defines* those boilerplate classes.
]
variable[all_bytes] assign[=] call[name[bytes], parameter[call[name[range], parameter[constant[256]]]]]
variable[sloppy_chars] assign[=] call[name[list], parameter[call[name[all_bytes].decode, parameter[constant[latin-1]]]]]
if name[PY26] begin[:]
variable[decoded_chars] assign[=] call[name[all_bytes].decode, parameter[name[encoding], constant[replace]]]
for taget[tuple[[<ast.Name object at 0x7da207f02470>, <ast.Name object at 0x7da207f02e00>]]] in starred[call[name[enumerate], parameter[name[decoded_chars]]]] begin[:]
if compare[name[char] not_equal[!=] name[REPLACEMENT_CHAR]] begin[:]
call[name[sloppy_chars]][name[i]] assign[=] name[char]
call[name[sloppy_chars]][constant[26]] assign[=] name[REPLACEMENT_CHAR]
variable[decoding_table] assign[=] call[constant[].join, parameter[name[sloppy_chars]]]
variable[encoding_table] assign[=] call[name[codecs].charmap_build, parameter[name[decoding_table]]]
class class[Codec, parameter[]] begin[:]
def function[encode, parameter[self, input, errors]]:
return[call[name[codecs].charmap_encode, parameter[name[input], name[errors], name[encoding_table]]]]
def function[decode, parameter[self, input, errors]]:
return[call[name[codecs].charmap_decode, parameter[name[input], name[errors], name[decoding_table]]]]
class class[IncrementalEncoder, parameter[]] begin[:]
def function[encode, parameter[self, input, final]]:
return[call[call[name[codecs].charmap_encode, parameter[name[input], name[self].errors, name[encoding_table]]]][constant[0]]]
class class[IncrementalDecoder, parameter[]] begin[:]
def function[decode, parameter[self, input, final]]:
return[call[call[name[codecs].charmap_decode, parameter[name[input], name[self].errors, name[decoding_table]]]][constant[0]]]
class class[StreamWriter, parameter[]] begin[:]
pass
class class[StreamReader, parameter[]] begin[:]
pass
return[call[name[codecs].CodecInfo, parameter[]]]
|
keyword[def] identifier[make_sloppy_codec] ( identifier[encoding] ):
literal[string]
identifier[all_bytes] = identifier[bytes] ( identifier[range] ( literal[int] ))
identifier[sloppy_chars] = identifier[list] ( identifier[all_bytes] . identifier[decode] ( literal[string] ))
keyword[if] identifier[PY26] :
identifier[decoded_chars] = identifier[all_bytes] . identifier[decode] ( identifier[encoding] , literal[string] )
keyword[else] :
identifier[decoded_chars] = identifier[all_bytes] . identifier[decode] ( identifier[encoding] , identifier[errors] = literal[string] )
keyword[for] identifier[i] , identifier[char] keyword[in] identifier[enumerate] ( identifier[decoded_chars] ):
keyword[if] identifier[char] != identifier[REPLACEMENT_CHAR] :
identifier[sloppy_chars] [ identifier[i] ]= identifier[char]
identifier[sloppy_chars] [ literal[int] ]= identifier[REPLACEMENT_CHAR]
identifier[decoding_table] = literal[string] . identifier[join] ( identifier[sloppy_chars] )
identifier[encoding_table] = identifier[codecs] . identifier[charmap_build] ( identifier[decoding_table] )
keyword[class] identifier[Codec] ( identifier[codecs] . identifier[Codec] ):
keyword[def] identifier[encode] ( identifier[self] , identifier[input] , identifier[errors] = literal[string] ):
keyword[return] identifier[codecs] . identifier[charmap_encode] ( identifier[input] , identifier[errors] , identifier[encoding_table] )
keyword[def] identifier[decode] ( identifier[self] , identifier[input] , identifier[errors] = literal[string] ):
keyword[return] identifier[codecs] . identifier[charmap_decode] ( identifier[input] , identifier[errors] , identifier[decoding_table] )
keyword[class] identifier[IncrementalEncoder] ( identifier[codecs] . identifier[IncrementalEncoder] ):
keyword[def] identifier[encode] ( identifier[self] , identifier[input] , identifier[final] = keyword[False] ):
keyword[return] identifier[codecs] . identifier[charmap_encode] ( identifier[input] , identifier[self] . identifier[errors] , identifier[encoding_table] )[ literal[int] ]
keyword[class] identifier[IncrementalDecoder] ( identifier[codecs] . identifier[IncrementalDecoder] ):
keyword[def] identifier[decode] ( identifier[self] , identifier[input] , identifier[final] = keyword[False] ):
keyword[return] identifier[codecs] . identifier[charmap_decode] ( identifier[input] , identifier[self] . identifier[errors] , identifier[decoding_table] )[ literal[int] ]
keyword[class] identifier[StreamWriter] ( identifier[Codec] , identifier[codecs] . identifier[StreamWriter] ):
keyword[pass]
keyword[class] identifier[StreamReader] ( identifier[Codec] , identifier[codecs] . identifier[StreamReader] ):
keyword[pass]
keyword[return] identifier[codecs] . identifier[CodecInfo] (
identifier[name] = literal[string] + identifier[encoding] ,
identifier[encode] = identifier[Codec] (). identifier[encode] ,
identifier[decode] = identifier[Codec] (). identifier[decode] ,
identifier[incrementalencoder] = identifier[IncrementalEncoder] ,
identifier[incrementaldecoder] = identifier[IncrementalDecoder] ,
identifier[streamreader] = identifier[StreamReader] ,
identifier[streamwriter] = identifier[StreamWriter] ,
)
|
def make_sloppy_codec(encoding):
"""
Take a codec name, and return a 'sloppy' version of that codec that can
encode and decode the unassigned bytes in that encoding.
Single-byte encodings in the standard library are defined using some
boilerplate classes surrounding the functions that do the actual work,
`codecs.charmap_decode` and `charmap_encode`. This function, given an
encoding name, *defines* those boilerplate classes.
"""
# Make a bytestring of all 256 possible bytes.
all_bytes = bytes(range(256))
# Get a list of what they would decode to in Latin-1.
sloppy_chars = list(all_bytes.decode('latin-1'))
# Get a list of what they decode to in the given encoding. Use the
# replacement character for unassigned bytes.
if PY26:
decoded_chars = all_bytes.decode(encoding, 'replace') # depends on [control=['if'], data=[]]
else:
decoded_chars = all_bytes.decode(encoding, errors='replace')
# Update the sloppy_chars list. Each byte that was successfully decoded
# gets its decoded value in the list. The unassigned bytes are left as
# they are, which gives their decoding in Latin-1.
for (i, char) in enumerate(decoded_chars):
if char != REPLACEMENT_CHAR:
sloppy_chars[i] = char # depends on [control=['if'], data=['char']] # depends on [control=['for'], data=[]]
# For ftfy's own purposes, we're going to allow byte 1A, the "Substitute"
# control code, to encode the Unicode replacement character U+FFFD.
sloppy_chars[26] = REPLACEMENT_CHAR
# Create the data structures that tell the charmap methods how to encode
# and decode in this sloppy encoding.
decoding_table = ''.join(sloppy_chars)
encoding_table = codecs.charmap_build(decoding_table)
# Now produce all the class boilerplate. Look at the Python source for
# `encodings.cp1252` for comparison; this is almost exactly the same,
# except I made it follow pep8.
class Codec(codecs.Codec):
def encode(self, input, errors='strict'):
return codecs.charmap_encode(input, errors, encoding_table)
def decode(self, input, errors='strict'):
return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
pass
class StreamReader(Codec, codecs.StreamReader):
pass
return codecs.CodecInfo(name='sloppy-' + encoding, encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter)
|
def server_bind(self):
    """Override server_bind to store the server name."""
    # Let the base class do the actual bind, then record the resolved
    # host name and bound port for later use (mirrors http.server).
    socketserver.TCPServer.server_bind(self)
    bound_host, bound_port = self.socket.getsockname()[:2]
    self.server_name = socket.getfqdn(bound_host)
    self.server_port = bound_port
|
def function[server_bind, parameter[self]]:
constant[Override server_bind to store the server name.]
call[name[socketserver].TCPServer.server_bind, parameter[name[self]]]
<ast.Tuple object at 0x7da18dc04c10> assign[=] call[call[name[self].socket.getsockname, parameter[]]][<ast.Slice object at 0x7da18dc05360>]
name[self].server_name assign[=] call[name[socket].getfqdn, parameter[name[host]]]
name[self].server_port assign[=] name[port]
|
keyword[def] identifier[server_bind] ( identifier[self] ):
literal[string]
identifier[socketserver] . identifier[TCPServer] . identifier[server_bind] ( identifier[self] )
identifier[host] , identifier[port] = identifier[self] . identifier[socket] . identifier[getsockname] ()[: literal[int] ]
identifier[self] . identifier[server_name] = identifier[socket] . identifier[getfqdn] ( identifier[host] )
identifier[self] . identifier[server_port] = identifier[port]
|
def server_bind(self):
"""Override server_bind to store the server name."""
socketserver.TCPServer.server_bind(self)
(host, port) = self.socket.getsockname()[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
|
async def oauth2_request(
    self,
    url: str,
    access_token: str = None,
    post_args: Dict[str, Any] = None,
    **args: Any
) -> Any:
    """Fetch the given URL using an OAuth2 access token.

    For POST requests, supply ``post_args``.  Extra keyword arguments
    are sent as query-string parameters.  NOTE(review): as in the
    original, the query-string keyword arguments are only appended when
    an ``access_token`` is given.

    Example usage:

    ..testcode::

        class MainHandler(tornado.web.RequestHandler,
                          tornado.auth.FacebookGraphMixin):
            @tornado.web.authenticated
            async def get(self):
                new_entry = await self.oauth2_request(
                    "https://graph.facebook.com/me/feed",
                    post_args={"message": "I am posting from my Tornado application!"},
                    access_token=self.current_user["access_token"])

                if not new_entry:
                    # Call failed; perhaps missing permission?
                    await self.authorize_redirect()
                    return
                self.finish("Posted a message!")

    .. testoutput::
       :hide:

    .. versionadded:: 4.3

    .. versionchanged::: 6.0

       The ``callback`` argument was removed. Use the returned awaitable
       object instead.
    """
    query_params = {}
    if access_token:
        query_params["access_token"] = access_token
        query_params.update(args)
    if query_params:
        url += "?" + urllib.parse.urlencode(query_params)
    http_client = self.get_auth_http_client()
    if post_args is None:
        response = await http_client.fetch(url)
    else:
        response = await http_client.fetch(
            url, method="POST", body=urllib.parse.urlencode(post_args)
        )
    return escape.json_decode(response.body)
|
<ast.AsyncFunctionDef object at 0x7da1b1f2eec0>
|
keyword[async] keyword[def] identifier[oauth2_request] (
identifier[self] ,
identifier[url] : identifier[str] ,
identifier[access_token] : identifier[str] = keyword[None] ,
identifier[post_args] : identifier[Dict] [ identifier[str] , identifier[Any] ]= keyword[None] ,
** identifier[args] : identifier[Any]
)-> identifier[Any] :
literal[string]
identifier[all_args] ={}
keyword[if] identifier[access_token] :
identifier[all_args] [ literal[string] ]= identifier[access_token]
identifier[all_args] . identifier[update] ( identifier[args] )
keyword[if] identifier[all_args] :
identifier[url] += literal[string] + identifier[urllib] . identifier[parse] . identifier[urlencode] ( identifier[all_args] )
identifier[http] = identifier[self] . identifier[get_auth_http_client] ()
keyword[if] identifier[post_args] keyword[is] keyword[not] keyword[None] :
identifier[response] = keyword[await] identifier[http] . identifier[fetch] (
identifier[url] , identifier[method] = literal[string] , identifier[body] = identifier[urllib] . identifier[parse] . identifier[urlencode] ( identifier[post_args] )
)
keyword[else] :
identifier[response] = keyword[await] identifier[http] . identifier[fetch] ( identifier[url] )
keyword[return] identifier[escape] . identifier[json_decode] ( identifier[response] . identifier[body] )
|
async def oauth2_request(self, url: str, access_token: str=None, post_args: Dict[str, Any]=None, **args: Any) -> Any:
"""Fetches the given URL auth an OAuth2 access token.
If the request is a POST, ``post_args`` should be provided. Query
string arguments should be given as keyword arguments.
Example usage:
..testcode::
class MainHandler(tornado.web.RequestHandler,
tornado.auth.FacebookGraphMixin):
@tornado.web.authenticated
async def get(self):
new_entry = await self.oauth2_request(
"https://graph.facebook.com/me/feed",
post_args={"message": "I am posting from my Tornado application!"},
access_token=self.current_user["access_token"])
if not new_entry:
# Call failed; perhaps missing permission?
await self.authorize_redirect()
return
self.finish("Posted a message!")
.. testoutput::
:hide:
.. versionadded:: 4.3
.. versionchanged::: 6.0
The ``callback`` argument was removed. Use the returned awaitable object instead.
"""
all_args = {}
if access_token:
all_args['access_token'] = access_token
all_args.update(args) # depends on [control=['if'], data=[]]
if all_args:
url += '?' + urllib.parse.urlencode(all_args) # depends on [control=['if'], data=[]]
http = self.get_auth_http_client()
if post_args is not None:
response = await http.fetch(url, method='POST', body=urllib.parse.urlencode(post_args)) # depends on [control=['if'], data=['post_args']]
else:
response = await http.fetch(url)
return escape.json_decode(response.body)
|
def qAx(mt, x, q):
    """APV at age ``x`` on table ``mt`` when amounts grow geometrically
    at annual rate ``q``.

    Implemented by discounting at the adjusted rate
    j = (i - q) / (1 + q) and delegating to :func:`Ax`.
    NOTE(review): the original docstring said "annuity-due", but the
    computation delegates to ``Ax``; confirm the intended contract.
    """
    growth = float(q)
    adjusted_rate = (mt.i - growth) / (1 + growth)
    shifted_table = Actuarial(nt=mt.nt, i=adjusted_rate)
    return Ax(shifted_table, x)
|
def function[qAx, parameter[mt, x, q]]:
constant[ This function evaluates the APV of a geometrically increasing annual annuity-due ]
variable[q] assign[=] call[name[float], parameter[name[q]]]
variable[j] assign[=] binary_operation[binary_operation[name[mt].i - name[q]] / binary_operation[constant[1] + name[q]]]
variable[mtj] assign[=] call[name[Actuarial], parameter[]]
return[call[name[Ax], parameter[name[mtj], name[x]]]]
|
keyword[def] identifier[qAx] ( identifier[mt] , identifier[x] , identifier[q] ):
literal[string]
identifier[q] = identifier[float] ( identifier[q] )
identifier[j] =( identifier[mt] . identifier[i] - identifier[q] )/( literal[int] + identifier[q] )
identifier[mtj] = identifier[Actuarial] ( identifier[nt] = identifier[mt] . identifier[nt] , identifier[i] = identifier[j] )
keyword[return] identifier[Ax] ( identifier[mtj] , identifier[x] )
|
def qAx(mt, x, q):
""" This function evaluates the APV of a geometrically increasing annual annuity-due """
q = float(q)
j = (mt.i - q) / (1 + q)
mtj = Actuarial(nt=mt.nt, i=j)
return Ax(mtj, x)
|
def to_utf8(s):
    """Encode a text string as UTF-8 under Python 2; on Python 3 the
    argument is returned unchanged.

    List/tuple/set arguments are converted element-wise (always
    producing a list).

    >>> to_utf8('a')
    'a'
    """
    if not six.PY2:
        return s
    if isinstance(s, str):
        return s
    if isinstance(s, unicode):  # noqa: F821 -- only reachable on Python 2
        return s.encode('utf-8')
    if isinstance(s, (list, tuple, set)):
        return [to_utf8(element) for element in s]
    return s
|
def function[to_utf8, parameter[s]]:
constant[Convert a string to utf8. If the argument is an iterable
(list/tuple/set), then each element of it would be converted instead.
>>> to_utf8('a')
'a'
>>> to_utf8(u'a')
'a'
>>> to_utf8([u'a', u'b', u'你'])
['a', 'b', '\xe4\xbd\xa0']
]
if name[six].PY2 begin[:]
if call[name[isinstance], parameter[name[s], name[str]]] begin[:]
return[name[s]]
|
keyword[def] identifier[to_utf8] ( identifier[s] ):
literal[string]
keyword[if] identifier[six] . identifier[PY2] :
keyword[if] identifier[isinstance] ( identifier[s] , identifier[str] ):
keyword[return] identifier[s]
keyword[elif] identifier[isinstance] ( identifier[s] , identifier[unicode] ):
keyword[return] identifier[s] . identifier[encode] ( literal[string] )
keyword[elif] identifier[isinstance] ( identifier[s] ,( identifier[list] , identifier[tuple] , identifier[set] )):
keyword[return] [ identifier[to_utf8] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[s] ]
keyword[else] :
keyword[return] identifier[s]
keyword[else] :
keyword[return] identifier[s]
|
def to_utf8(s):
"""Convert a string to utf8. If the argument is an iterable
(list/tuple/set), then each element of it would be converted instead.
>>> to_utf8('a')
'a'
>>> to_utf8(u'a')
'a'
>>> to_utf8([u'a', u'b', u'你'])
['a', 'b', '\\xe4\\xbd\\xa0']
"""
if six.PY2:
if isinstance(s, str):
return s # depends on [control=['if'], data=[]]
elif isinstance(s, unicode):
return s.encode('utf-8') # depends on [control=['if'], data=[]]
elif isinstance(s, (list, tuple, set)):
return [to_utf8(v) for v in s] # depends on [control=['if'], data=[]]
else:
return s # depends on [control=['if'], data=[]]
else:
return s
|
def zone_present(domain, type, profile):
    '''
    Ensures a record is present.

    :param domain: Zone name, i.e. the domain name
    :type domain: ``str``

    :param type: Zone type (master / slave), defaults to master
    :type type: ``str``

    :param profile: The profile key
    :type profile: ``str``
    '''
    zones = __salt__['libcloud_dns.list_zones'](profile)
    if not type:
        type = 'master'
    # A zone counts as present when any existing zone matches the domain.
    if any(zone['domain'] == domain for zone in zones):
        return state_result(True, 'Zone already exists', domain)
    created = __salt__['libcloud_dns.create_zone'](domain, profile, type)
    return state_result(True, 'Created new zone', domain, created)
|
def function[zone_present, parameter[domain, type, profile]]:
constant[
Ensures a record is present.
:param domain: Zone name, i.e. the domain name
:type domain: ``str``
:param type: Zone type (master / slave), defaults to master
:type type: ``str``
:param profile: The profile key
:type profile: ``str``
]
variable[zones] assign[=] call[call[name[__salt__]][constant[libcloud_dns.list_zones]], parameter[name[profile]]]
if <ast.UnaryOp object at 0x7da1b2020370> begin[:]
variable[type] assign[=] constant[master]
variable[matching_zone] assign[=] <ast.ListComp object at 0x7da1b208eb90>
if name[matching_zone] begin[:]
return[call[name[state_result], parameter[constant[True], constant[Zone already exists], name[domain]]]]
|
keyword[def] identifier[zone_present] ( identifier[domain] , identifier[type] , identifier[profile] ):
literal[string]
identifier[zones] = identifier[__salt__] [ literal[string] ]( identifier[profile] )
keyword[if] keyword[not] identifier[type] :
identifier[type] = literal[string]
identifier[matching_zone] =[ identifier[z] keyword[for] identifier[z] keyword[in] identifier[zones] keyword[if] identifier[z] [ literal[string] ]== identifier[domain] ]
keyword[if] identifier[matching_zone] :
keyword[return] identifier[state_result] ( keyword[True] , literal[string] , identifier[domain] )
keyword[else] :
identifier[result] = identifier[__salt__] [ literal[string] ]( identifier[domain] , identifier[profile] , identifier[type] )
keyword[return] identifier[state_result] ( keyword[True] , literal[string] , identifier[domain] , identifier[result] )
|
def zone_present(domain, type, profile):
"""
Ensures a record is present.
:param domain: Zone name, i.e. the domain name
:type domain: ``str``
:param type: Zone type (master / slave), defaults to master
:type type: ``str``
:param profile: The profile key
:type profile: ``str``
"""
zones = __salt__['libcloud_dns.list_zones'](profile)
if not type:
type = 'master' # depends on [control=['if'], data=[]]
matching_zone = [z for z in zones if z['domain'] == domain]
if matching_zone:
return state_result(True, 'Zone already exists', domain) # depends on [control=['if'], data=[]]
else:
result = __salt__['libcloud_dns.create_zone'](domain, profile, type)
return state_result(True, 'Created new zone', domain, result)
|
def _insert_error(self, path, node):
""" Adds an error or sub-tree to :attr:tree.
:param path: Path to the error.
:type path: Tuple of strings and integers.
:param node: An error message or a sub-tree.
:type node: String or dictionary.
"""
field = path[0]
if len(path) == 1:
if field in self.tree:
subtree = self.tree[field].pop()
self.tree[field] += [node, subtree]
else:
self.tree[field] = [node, {}]
elif len(path) >= 1:
if field not in self.tree:
self.tree[field] = [{}]
subtree = self.tree[field][-1]
if subtree:
new = self.__class__(tree=copy(subtree))
else:
new = self.__class__()
new._insert_error(path[1:], node)
subtree.update(new.tree)
|
def function[_insert_error, parameter[self, path, node]]:
constant[ Adds an error or sub-tree to :attr:tree.
:param path: Path to the error.
:type path: Tuple of strings and integers.
:param node: An error message or a sub-tree.
:type node: String or dictionary.
]
variable[field] assign[=] call[name[path]][constant[0]]
if compare[call[name[len], parameter[name[path]]] equal[==] constant[1]] begin[:]
if compare[name[field] in name[self].tree] begin[:]
variable[subtree] assign[=] call[call[name[self].tree][name[field]].pop, parameter[]]
<ast.AugAssign object at 0x7da1b1eb9cf0>
|
keyword[def] identifier[_insert_error] ( identifier[self] , identifier[path] , identifier[node] ):
literal[string]
identifier[field] = identifier[path] [ literal[int] ]
keyword[if] identifier[len] ( identifier[path] )== literal[int] :
keyword[if] identifier[field] keyword[in] identifier[self] . identifier[tree] :
identifier[subtree] = identifier[self] . identifier[tree] [ identifier[field] ]. identifier[pop] ()
identifier[self] . identifier[tree] [ identifier[field] ]+=[ identifier[node] , identifier[subtree] ]
keyword[else] :
identifier[self] . identifier[tree] [ identifier[field] ]=[ identifier[node] ,{}]
keyword[elif] identifier[len] ( identifier[path] )>= literal[int] :
keyword[if] identifier[field] keyword[not] keyword[in] identifier[self] . identifier[tree] :
identifier[self] . identifier[tree] [ identifier[field] ]=[{}]
identifier[subtree] = identifier[self] . identifier[tree] [ identifier[field] ][- literal[int] ]
keyword[if] identifier[subtree] :
identifier[new] = identifier[self] . identifier[__class__] ( identifier[tree] = identifier[copy] ( identifier[subtree] ))
keyword[else] :
identifier[new] = identifier[self] . identifier[__class__] ()
identifier[new] . identifier[_insert_error] ( identifier[path] [ literal[int] :], identifier[node] )
identifier[subtree] . identifier[update] ( identifier[new] . identifier[tree] )
|
def _insert_error(self, path, node):
""" Adds an error or sub-tree to :attr:tree.
:param path: Path to the error.
:type path: Tuple of strings and integers.
:param node: An error message or a sub-tree.
:type node: String or dictionary.
"""
field = path[0]
if len(path) == 1:
if field in self.tree:
subtree = self.tree[field].pop()
self.tree[field] += [node, subtree] # depends on [control=['if'], data=['field']]
else:
self.tree[field] = [node, {}] # depends on [control=['if'], data=[]]
elif len(path) >= 1:
if field not in self.tree:
self.tree[field] = [{}] # depends on [control=['if'], data=['field']]
subtree = self.tree[field][-1]
if subtree:
new = self.__class__(tree=copy(subtree)) # depends on [control=['if'], data=[]]
else:
new = self.__class__()
new._insert_error(path[1:], node)
subtree.update(new.tree) # depends on [control=['if'], data=[]]
|
def _process_settings(**kwargs):
    """
    Apply user supplied settings.
    """
    # When triggered by a setting_changed signal, bail out unless the
    # change concerns our own QUERYCOUNT setting.
    changed = kwargs.get('setting')
    if changed is not None and changed != 'QUERYCOUNT':
        return
    # Honour the legacy, old-style QUERYCOUNT_THRESHOLDS setting first.
    legacy_thresholds = getattr(settings, 'QUERYCOUNT_THRESHOLDS', False)
    if legacy_thresholds:
        QC_SETTINGS['THRESHOLDS'] = legacy_thresholds
    # Nothing more to do without the new-style QUERYCOUNT dict.
    if not getattr(settings, 'QUERYCOUNT', False):
        return
    # DISPLAY_DUPLICATES is special-cased: a non-None value is coerced to int.
    if 'DISPLAY_DUPLICATES' in settings.QUERYCOUNT:
        dup = settings.QUERYCOUNT['DISPLAY_DUPLICATES']
        QC_SETTINGS['DISPLAY_DUPLICATES'] = dup if dup is None else int(dup)
    # Copy over every other recognised override verbatim.
    for name in ('THRESHOLDS',
                 'IGNORE_REQUEST_PATTERNS',
                 'IGNORE_SQL_PATTERNS',
                 'IGNORE_PATTERNS',
                 'RESPONSE_HEADER'):
        if name in settings.QUERYCOUNT:
            QC_SETTINGS[name] = settings.QUERYCOUNT[name]
|
def function[_process_settings, parameter[]]:
constant[
Apply user supplied settings.
]
variable[setting_name] assign[=] call[name[kwargs].get, parameter[constant[setting], constant[None]]]
if <ast.BoolOp object at 0x7da20c992020> begin[:]
return[None]
if call[name[getattr], parameter[name[settings], constant[QUERYCOUNT_THRESHOLDS], constant[False]]] begin[:]
call[name[QC_SETTINGS]][constant[THRESHOLDS]] assign[=] name[settings].QUERYCOUNT_THRESHOLDS
if <ast.UnaryOp object at 0x7da20c990b80> begin[:]
return[None]
if compare[constant[DISPLAY_DUPLICATES] in name[settings].QUERYCOUNT] begin[:]
variable[duplicate_settings] assign[=] call[name[settings].QUERYCOUNT][constant[DISPLAY_DUPLICATES]]
if compare[name[duplicate_settings] is_not constant[None]] begin[:]
variable[duplicate_settings] assign[=] call[name[int], parameter[name[duplicate_settings]]]
call[name[QC_SETTINGS]][constant[DISPLAY_DUPLICATES]] assign[=] name[duplicate_settings]
for taget[name[key]] in starred[list[[<ast.Constant object at 0x7da20c9916f0>, <ast.Constant object at 0x7da20c9919f0>, <ast.Constant object at 0x7da20c993250>, <ast.Constant object at 0x7da20c9918d0>, <ast.Constant object at 0x7da20c992800>]]] begin[:]
if compare[name[key] in name[settings].QUERYCOUNT] begin[:]
call[name[QC_SETTINGS]][name[key]] assign[=] call[name[settings].QUERYCOUNT][name[key]]
|
keyword[def] identifier[_process_settings] (** identifier[kwargs] ):
literal[string]
identifier[setting_name] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[setting_name] keyword[is] keyword[not] keyword[None] keyword[and] identifier[setting_name] != literal[string] :
keyword[return]
keyword[if] identifier[getattr] ( identifier[settings] , literal[string] , keyword[False] ):
identifier[QC_SETTINGS] [ literal[string] ]= identifier[settings] . identifier[QUERYCOUNT_THRESHOLDS]
keyword[if] keyword[not] identifier[getattr] ( identifier[settings] , literal[string] , keyword[False] ):
keyword[return]
keyword[if] literal[string] keyword[in] identifier[settings] . identifier[QUERYCOUNT] :
identifier[duplicate_settings] = identifier[settings] . identifier[QUERYCOUNT] [ literal[string] ]
keyword[if] identifier[duplicate_settings] keyword[is] keyword[not] keyword[None] :
identifier[duplicate_settings] = identifier[int] ( identifier[duplicate_settings] )
identifier[QC_SETTINGS] [ literal[string] ]= identifier[duplicate_settings]
keyword[for] identifier[key] keyword[in] [ literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ,
literal[string] ]:
keyword[if] identifier[key] keyword[in] identifier[settings] . identifier[QUERYCOUNT] :
identifier[QC_SETTINGS] [ identifier[key] ]= identifier[settings] . identifier[QUERYCOUNT] [ identifier[key] ]
|
def _process_settings(**kwargs):
"""
Apply user supplied settings.
"""
# If we are in this method due to a signal, only reload for our settings
setting_name = kwargs.get('setting', None)
if setting_name is not None and setting_name != 'QUERYCOUNT':
return # depends on [control=['if'], data=[]]
# Support the old-style settings
if getattr(settings, 'QUERYCOUNT_THRESHOLDS', False):
QC_SETTINGS['THRESHOLDS'] = settings.QUERYCOUNT_THRESHOLDS # depends on [control=['if'], data=[]]
# Apply new-style settings
if not getattr(settings, 'QUERYCOUNT', False):
return # depends on [control=['if'], data=[]]
# Duplicate display is a special case, configure it specifically
if 'DISPLAY_DUPLICATES' in settings.QUERYCOUNT:
duplicate_settings = settings.QUERYCOUNT['DISPLAY_DUPLICATES']
if duplicate_settings is not None:
duplicate_settings = int(duplicate_settings) # depends on [control=['if'], data=['duplicate_settings']]
QC_SETTINGS['DISPLAY_DUPLICATES'] = duplicate_settings # depends on [control=['if'], data=[]]
# Apply the rest of the setting overrides
for key in ['THRESHOLDS', 'IGNORE_REQUEST_PATTERNS', 'IGNORE_SQL_PATTERNS', 'IGNORE_PATTERNS', 'RESPONSE_HEADER']:
if key in settings.QUERYCOUNT:
QC_SETTINGS[key] = settings.QUERYCOUNT[key] # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=['key']]
|
def zip_source_model(ssmLT, archive_zip='', log=logging.info):
    """
    Zip the source model files starting from the ssmLT.xml file.

    :param ssmLT: path to a source model logic tree file
    :param archive_zip: optional path of the output archive
        (defaults to ssmLT.zip next to the input file)
    :param log: logging function used when zipping the files
    :returns: the path of the generated archive
    """
    basedir = os.path.dirname(ssmLT)
    if os.path.basename(ssmLT) != 'ssmLT.xml':
        # The logic tree must be named ssmLT.xml inside the archive, so
        # make a copy with the canonical name next to the original.
        orig = ssmLT
        ssmLT = os.path.join(basedir, 'ssmLT.xml')
        # Use context managers so both handles are closed promptly; the
        # original left the source file open (resource leak).
        with open(orig, 'rb') as src, open(ssmLT, 'wb') as dst:
            dst.write(src.read())
    archive_zip = archive_zip or os.path.join(basedir, 'ssmLT.zip')
    if os.path.exists(archive_zip):
        sys.exit('%s exists already' % archive_zip)
    # Mock an OqParam-like object: get_checksum32 only needs .inputs
    oq = mock.Mock(inputs={'source_model_logic_tree': ssmLT})
    checksum = readinput.get_checksum32(oq)
    # basedir is the directory of ssmLT in both branches above
    checkfile = os.path.join(basedir, 'CHECKSUM.txt')
    with open(checkfile, 'w') as f:
        f.write(str(checksum))
    files = logictree.collect_info(ssmLT).smpaths + [
        os.path.abspath(ssmLT), os.path.abspath(checkfile)]
    general.zipfiles(files, archive_zip, log=log, cleanup=True)
    return archive_zip
|
def function[zip_source_model, parameter[ssmLT, archive_zip, log]]:
constant[
Zip the source model files starting from the smmLT.xml file
]
variable[basedir] assign[=] call[name[os].path.dirname, parameter[name[ssmLT]]]
if compare[call[name[os].path.basename, parameter[name[ssmLT]]] not_equal[!=] constant[ssmLT.xml]] begin[:]
variable[orig] assign[=] name[ssmLT]
variable[ssmLT] assign[=] call[name[os].path.join, parameter[name[basedir], constant[ssmLT.xml]]]
with call[name[open], parameter[name[ssmLT], constant[wb]]] begin[:]
call[name[f].write, parameter[call[call[name[open], parameter[name[orig], constant[rb]]].read, parameter[]]]]
variable[archive_zip] assign[=] <ast.BoolOp object at 0x7da207f9bc10>
if call[name[os].path.exists, parameter[name[archive_zip]]] begin[:]
call[name[sys].exit, parameter[binary_operation[constant[%s exists already] <ast.Mod object at 0x7da2590d6920> name[archive_zip]]]]
variable[oq] assign[=] call[name[mock].Mock, parameter[]]
variable[checksum] assign[=] call[name[readinput].get_checksum32, parameter[name[oq]]]
variable[checkfile] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[ssmLT]]], constant[CHECKSUM.txt]]]
with call[name[open], parameter[name[checkfile], constant[w]]] begin[:]
call[name[f].write, parameter[call[name[str], parameter[name[checksum]]]]]
variable[files] assign[=] binary_operation[call[name[logictree].collect_info, parameter[name[ssmLT]]].smpaths + list[[<ast.Call object at 0x7da20e9543a0>, <ast.Call object at 0x7da20e9565f0>]]]
call[name[general].zipfiles, parameter[name[files], name[archive_zip]]]
return[name[archive_zip]]
|
keyword[def] identifier[zip_source_model] ( identifier[ssmLT] , identifier[archive_zip] = literal[string] , identifier[log] = identifier[logging] . identifier[info] ):
literal[string]
identifier[basedir] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[ssmLT] )
keyword[if] identifier[os] . identifier[path] . identifier[basename] ( identifier[ssmLT] )!= literal[string] :
identifier[orig] = identifier[ssmLT]
identifier[ssmLT] = identifier[os] . identifier[path] . identifier[join] ( identifier[basedir] , literal[string] )
keyword[with] identifier[open] ( identifier[ssmLT] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[open] ( identifier[orig] , literal[string] ). identifier[read] ())
identifier[archive_zip] = identifier[archive_zip] keyword[or] identifier[os] . identifier[path] . identifier[join] ( identifier[basedir] , literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[archive_zip] ):
identifier[sys] . identifier[exit] ( literal[string] % identifier[archive_zip] )
identifier[oq] = identifier[mock] . identifier[Mock] ( identifier[inputs] ={ literal[string] : identifier[ssmLT] })
identifier[checksum] = identifier[readinput] . identifier[get_checksum32] ( identifier[oq] )
identifier[checkfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[ssmLT] ), literal[string] )
keyword[with] identifier[open] ( identifier[checkfile] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[str] ( identifier[checksum] ))
identifier[files] = identifier[logictree] . identifier[collect_info] ( identifier[ssmLT] ). identifier[smpaths] +[
identifier[os] . identifier[path] . identifier[abspath] ( identifier[ssmLT] ), identifier[os] . identifier[path] . identifier[abspath] ( identifier[checkfile] )]
identifier[general] . identifier[zipfiles] ( identifier[files] , identifier[archive_zip] , identifier[log] = identifier[log] , identifier[cleanup] = keyword[True] )
keyword[return] identifier[archive_zip]
|
def zip_source_model(ssmLT, archive_zip='', log=logging.info):
"""
Zip the source model files starting from the smmLT.xml file
"""
basedir = os.path.dirname(ssmLT)
if os.path.basename(ssmLT) != 'ssmLT.xml':
orig = ssmLT
ssmLT = os.path.join(basedir, 'ssmLT.xml')
with open(ssmLT, 'wb') as f:
f.write(open(orig, 'rb').read()) # depends on [control=['with'], data=['open', 'f']] # depends on [control=['if'], data=[]]
archive_zip = archive_zip or os.path.join(basedir, 'ssmLT.zip')
if os.path.exists(archive_zip):
sys.exit('%s exists already' % archive_zip) # depends on [control=['if'], data=[]]
oq = mock.Mock(inputs={'source_model_logic_tree': ssmLT})
checksum = readinput.get_checksum32(oq)
checkfile = os.path.join(os.path.dirname(ssmLT), 'CHECKSUM.txt')
with open(checkfile, 'w') as f:
f.write(str(checksum)) # depends on [control=['with'], data=['f']]
files = logictree.collect_info(ssmLT).smpaths + [os.path.abspath(ssmLT), os.path.abspath(checkfile)]
general.zipfiles(files, archive_zip, log=log, cleanup=True)
return archive_zip
|
def validate(self, fixerrors=True):
    """
    Validates that the geometry is correctly formatted according to the geometry type.

    Parameters:

    - **fixerrors** (optional): Attempts to fix minor errors without raising exceptions (defaults to True)

    Returns:

    - True if the geometry is valid.

    Raises:

    - An Exception if not valid.
    """
    data = self._data
    # A null geometry is trivially valid, nothing else to check.
    if not data:
        return True
    if "type" not in data or "coordinates" not in data:
        raise Exception("A geometry dictionary or instance must have the type and coordinates entries")
    valid_types = ("Point", "MultiPoint", "LineString", "MultiLineString",
                   "Polygon", "MultiPolygon")
    # Normalise the geometry type when it is not one of the canonical names.
    if self.type not in valid_types:
        if not fixerrors:
            raise Exception('Invalid geometry type. Must be one of: "Point","MultiPoint","LineString","MultiLineString","Polygon","MultiPolygon"')
        base = self.type.lower().replace("multi", "")
        fixed = {"point": "Point",
                 "linestring": "LineString",
                 "polygon": "Polygon"}.get(base)
        if fixed is None:
            raise Exception('Invalid geometry type. Must be one of: "Point","MultiPoint","LineString","MultiLineString","Polygon","MultiPolygon"')
        if self.type.lower().startswith("multi"):
            fixed = "Multi" + fixed
        self.type = fixed
    # Validate the coordinate container type.
    coords = data["coordinates"]
    if not isinstance(coords, (list, tuple)):
        raise Exception("Coordinates must be a list or tuple type")
    # Validate the coordinate structure per geometry type.
    geotype = self.type
    if geotype == "Point":
        if len(coords) != 2:
            raise Exception("Point must be one coordinate pair")
    elif geotype in ("MultiPoint", "LineString"):
        if len(coords) <= 1:
            raise Exception("MultiPoint and LineString must have more than one coordinates")
    elif geotype == "MultiLineString":
        for line in coords:
            if len(line) <= 1:
                raise Exception("All LineStrings in a MultiLineString must have more than one coordinate")
    elif geotype == "Polygon":
        for ring in coords:
            if len(ring) < 3:
                raise Exception("The exterior and all holes in a Polygon must have at least 3 coordinates")
    elif geotype == "MultiPolygon":
        for polygon in coords:
            for ring in polygon:
                if len(ring) < 3:
                    raise Exception("The exterior and all holes in all Polygons of a MultiPolygon must have at least 3 coordinates")
    # validation successful
    return True
|
def function[validate, parameter[self, fixerrors]]:
constant[
Validates that the geometry is correctly formatted according to the geometry type.
Parameters:
- **fixerrors** (optional): Attempts to fix minor errors without raising exceptions (defaults to True)
Returns:
- True if the geometry is valid.
Raises:
- An Exception if not valid.
]
if <ast.UnaryOp object at 0x7da20c6e7970> begin[:]
return[constant[True]]
if <ast.UnaryOp object at 0x7da20c6e54e0> begin[:]
if name[fixerrors] begin[:]
variable[coretype] assign[=] call[call[name[self].type.lower, parameter[]].replace, parameter[constant[multi], constant[]]]
if compare[name[coretype] equal[==] constant[point]] begin[:]
variable[newtype] assign[=] constant[Point]
if call[call[name[self].type.lower, parameter[]].startswith, parameter[constant[multi]]] begin[:]
variable[newtype] assign[=] binary_operation[constant[Multi] + name[newtype]]
name[self].type assign[=] name[newtype]
variable[coords] assign[=] call[name[self]._data][constant[coordinates]]
if <ast.UnaryOp object at 0x7da2041d9c00> begin[:]
<ast.Raise object at 0x7da2041da7a0>
if compare[name[self].type equal[==] constant[Point]] begin[:]
if <ast.UnaryOp object at 0x7da2041dab90> begin[:]
<ast.Raise object at 0x7da2041d9030>
return[constant[True]]
|
keyword[def] identifier[validate] ( identifier[self] , identifier[fixerrors] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_data] :
keyword[return] keyword[True]
keyword[elif] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_data] keyword[or] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_data] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] keyword[not] identifier[self] . identifier[type] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ):
keyword[if] identifier[fixerrors] :
identifier[coretype] = identifier[self] . identifier[type] . identifier[lower] (). identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[coretype] == literal[string] :
identifier[newtype] = literal[string]
keyword[elif] identifier[coretype] == literal[string] :
identifier[newtype] = literal[string]
keyword[elif] identifier[coretype] == literal[string] :
identifier[newtype] = literal[string]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[self] . identifier[type] . identifier[lower] (). identifier[startswith] ( literal[string] ):
identifier[newtype] = literal[string] + identifier[newtype]
identifier[self] . identifier[type] = identifier[newtype]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] )
identifier[coords] = identifier[self] . identifier[_data] [ literal[string] ]
keyword[if] keyword[not] identifier[isinstance] ( identifier[coords] ,( identifier[list] , identifier[tuple] )): keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[self] . identifier[type] == literal[string] :
keyword[if] keyword[not] identifier[len] ( identifier[coords] )== literal[int] : keyword[raise] identifier[Exception] ( literal[string] )
keyword[elif] identifier[self] . identifier[type] keyword[in] ( literal[string] , literal[string] ):
keyword[if] keyword[not] identifier[len] ( identifier[coords] )> literal[int] : keyword[raise] identifier[Exception] ( literal[string] )
keyword[elif] identifier[self] . identifier[type] == literal[string] :
keyword[for] identifier[line] keyword[in] identifier[coords] :
keyword[if] keyword[not] identifier[len] ( identifier[line] )> literal[int] : keyword[raise] identifier[Exception] ( literal[string] )
keyword[elif] identifier[self] . identifier[type] == literal[string] :
keyword[for] identifier[exterior_or_holes] keyword[in] identifier[coords] :
keyword[if] keyword[not] identifier[len] ( identifier[exterior_or_holes] )>= literal[int] : keyword[raise] identifier[Exception] ( literal[string] )
keyword[elif] identifier[self] . identifier[type] == literal[string] :
keyword[for] identifier[eachmulti] keyword[in] identifier[coords] :
keyword[for] identifier[exterior_or_holes] keyword[in] identifier[eachmulti] :
keyword[if] keyword[not] identifier[len] ( identifier[exterior_or_holes] )>= literal[int] : keyword[raise] identifier[Exception] ( literal[string] )
keyword[return] keyword[True]
|
def validate(self, fixerrors=True):
"""
Validates that the geometry is correctly formatted according to the geometry type.
Parameters:
- **fixerrors** (optional): Attempts to fix minor errors without raising exceptions (defaults to True)
Returns:
- True if the geometry is valid.
Raises:
- An Exception if not valid.
"""
# validate nullgeometry or has type and coordinates keys
if not self._data:
# null geometry, no further checking needed
return True # depends on [control=['if'], data=[]]
elif 'type' not in self._data or 'coordinates' not in self._data:
raise Exception('A geometry dictionary or instance must have the type and coordinates entries') # depends on [control=['if'], data=[]]
# first validate geometry type
if not self.type in ('Point', 'MultiPoint', 'LineString', 'MultiLineString', 'Polygon', 'MultiPolygon'):
if fixerrors:
coretype = self.type.lower().replace('multi', '')
if coretype == 'point':
newtype = 'Point' # depends on [control=['if'], data=[]]
elif coretype == 'linestring':
newtype = 'LineString' # depends on [control=['if'], data=[]]
elif coretype == 'polygon':
newtype = 'Polygon' # depends on [control=['if'], data=[]]
else:
raise Exception('Invalid geometry type. Must be one of: "Point","MultiPoint","LineString","MultiLineString","Polygon","MultiPolygon"')
if self.type.lower().startswith('multi'):
newtype = 'Multi' + newtype # depends on [control=['if'], data=[]]
self.type = newtype # depends on [control=['if'], data=[]]
else:
raise Exception('Invalid geometry type. Must be one of: "Point","MultiPoint","LineString","MultiLineString","Polygon","MultiPolygon"') # depends on [control=['if'], data=[]]
# then validate coordinate data type
coords = self._data['coordinates']
if not isinstance(coords, (list, tuple)):
raise Exception('Coordinates must be a list or tuple type') # depends on [control=['if'], data=[]]
# then validate coordinate structures
if self.type == 'Point':
if not len(coords) == 2:
raise Exception('Point must be one coordinate pair') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif self.type in ('MultiPoint', 'LineString'):
if not len(coords) > 1:
raise Exception('MultiPoint and LineString must have more than one coordinates') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif self.type == 'MultiLineString':
for line in coords:
if not len(line) > 1:
raise Exception('All LineStrings in a MultiLineString must have more than one coordinate') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['if'], data=[]]
elif self.type == 'Polygon':
for exterior_or_holes in coords:
if not len(exterior_or_holes) >= 3:
raise Exception('The exterior and all holes in a Polygon must have at least 3 coordinates') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['exterior_or_holes']] # depends on [control=['if'], data=[]]
elif self.type == 'MultiPolygon':
for eachmulti in coords:
for exterior_or_holes in eachmulti:
if not len(exterior_or_holes) >= 3:
raise Exception('The exterior and all holes in all Polygons of a MultiPolygon must have at least 3 coordinates') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['exterior_or_holes']] # depends on [control=['for'], data=['eachmulti']] # depends on [control=['if'], data=[]]
# validation successful
return True
|
def evaluate(module_name: str, fname: str, verbose: bool):
    """
    Import the file *fname* as a module named *module_name*.

    Any syntax or runtime error raised while executing the file
    propagates to the caller.

    :param module_name: resulting name of module
    :param fname: name to load
    :param verbose: when True, announce the file being tested
    """
    if verbose:
        print("Testing {}".format(fname))
    module_spec = importlib.util.spec_from_file_location(module_name, fname)
    module = importlib.util.module_from_spec(module_spec)
    module_spec.loader.exec_module(module)
|
def function[evaluate, parameter[module_name, fname, verbose]]:
constant[
Load fname as a module. Will raise an exception if there is an error
:param module_name: resulting name of module
:param fname: name to load
]
if name[verbose] begin[:]
call[name[print], parameter[call[constant[Testing {}].format, parameter[name[fname]]]]]
variable[spec] assign[=] call[name[importlib].util.spec_from_file_location, parameter[name[module_name], name[fname]]]
variable[mod] assign[=] call[name[importlib].util.module_from_spec, parameter[name[spec]]]
call[name[spec].loader.exec_module, parameter[name[mod]]]
|
keyword[def] identifier[evaluate] ( identifier[module_name] : identifier[str] , identifier[fname] : identifier[str] , identifier[verbose] : identifier[bool] ):
literal[string]
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] . identifier[format] ( identifier[fname] ))
identifier[spec] = identifier[importlib] . identifier[util] . identifier[spec_from_file_location] ( identifier[module_name] , identifier[fname] )
identifier[mod] = identifier[importlib] . identifier[util] . identifier[module_from_spec] ( identifier[spec] )
identifier[spec] . identifier[loader] . identifier[exec_module] ( identifier[mod] )
|
def evaluate(module_name: str, fname: str, verbose: bool):
"""
Load fname as a module. Will raise an exception if there is an error
:param module_name: resulting name of module
:param fname: name to load
"""
if verbose:
print('Testing {}'.format(fname)) # depends on [control=['if'], data=[]]
spec = importlib.util.spec_from_file_location(module_name, fname)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
|
def create(self, friendly_name, event_callback_url=values.unset,
           events_filter=values.unset, multi_task_enabled=values.unset,
           template=values.unset, prioritize_queue_order=values.unset):
    """
    Create a new WorkspaceInstance

    :param unicode friendly_name: Human readable description of this workspace
    :param unicode event_callback_url: If provided, the Workspace will publish events to this URL.
    :param unicode events_filter: Use this parameter to receive webhooks on EventCallbackUrl for specific events on a workspace.
    :param bool multi_task_enabled: Multi tasking allows workers to handle multiple tasks simultaneously.
    :param unicode template: One of the available template names.
    :param WorkspaceInstance.QueueOrder prioritize_queue_order: Use this parameter to configure whether to prioritize LIFO or FIFO when workers are receiving Tasks from combination of LIFO and FIFO TaskQueues.

    :returns: Newly created WorkspaceInstance
    :rtype: twilio.rest.taskrouter.v1.workspace.WorkspaceInstance
    """
    # Map the keyword arguments onto the REST API's form-field names;
    # values.of drops any parameter that was left unset.
    form_fields = {
        'FriendlyName': friendly_name,
        'EventCallbackUrl': event_callback_url,
        'EventsFilter': events_filter,
        'MultiTaskEnabled': multi_task_enabled,
        'Template': template,
        'PrioritizeQueueOrder': prioritize_queue_order,
    }
    payload = self._version.create('POST', self._uri, data=values.of(form_fields))
    return WorkspaceInstance(self._version, payload)
|
def function[create, parameter[self, friendly_name, event_callback_url, events_filter, multi_task_enabled, template, prioritize_queue_order]]:
constant[
Create a new WorkspaceInstance
:param unicode friendly_name: Human readable description of this workspace
:param unicode event_callback_url: If provided, the Workspace will publish events to this URL.
:param unicode events_filter: Use this parameter to receive webhooks on EventCallbackUrl for specific events on a workspace.
:param bool multi_task_enabled: Multi tasking allows workers to handle multiple tasks simultaneously.
:param unicode template: One of the available template names.
:param WorkspaceInstance.QueueOrder prioritize_queue_order: Use this parameter to configure whether to prioritize LIFO or FIFO when workers are receiving Tasks from combination of LIFO and FIFO TaskQueues.
:returns: Newly created WorkspaceInstance
:rtype: twilio.rest.taskrouter.v1.workspace.WorkspaceInstance
]
variable[data] assign[=] call[name[values].of, parameter[dictionary[[<ast.Constant object at 0x7da1b1ead600>, <ast.Constant object at 0x7da1b1eaf430>, <ast.Constant object at 0x7da1b1eadc30>, <ast.Constant object at 0x7da1b1eacf10>, <ast.Constant object at 0x7da1b1eaf550>, <ast.Constant object at 0x7da1b1eaece0>], [<ast.Name object at 0x7da1b1eaed10>, <ast.Name object at 0x7da1b1eaea70>, <ast.Name object at 0x7da2054a5720>, <ast.Name object at 0x7da2054a7760>, <ast.Name object at 0x7da2054a4850>, <ast.Name object at 0x7da2054a5120>]]]]
variable[payload] assign[=] call[name[self]._version.create, parameter[constant[POST], name[self]._uri]]
return[call[name[WorkspaceInstance], parameter[name[self]._version, name[payload]]]]
|
keyword[def] identifier[create] ( identifier[self] , identifier[friendly_name] , identifier[event_callback_url] = identifier[values] . identifier[unset] ,
identifier[events_filter] = identifier[values] . identifier[unset] , identifier[multi_task_enabled] = identifier[values] . identifier[unset] ,
identifier[template] = identifier[values] . identifier[unset] , identifier[prioritize_queue_order] = identifier[values] . identifier[unset] ):
literal[string]
identifier[data] = identifier[values] . identifier[of] ({
literal[string] : identifier[friendly_name] ,
literal[string] : identifier[event_callback_url] ,
literal[string] : identifier[events_filter] ,
literal[string] : identifier[multi_task_enabled] ,
literal[string] : identifier[template] ,
literal[string] : identifier[prioritize_queue_order] ,
})
identifier[payload] = identifier[self] . identifier[_version] . identifier[create] (
literal[string] ,
identifier[self] . identifier[_uri] ,
identifier[data] = identifier[data] ,
)
keyword[return] identifier[WorkspaceInstance] ( identifier[self] . identifier[_version] , identifier[payload] ,)
|
def create(self, friendly_name, event_callback_url=values.unset, events_filter=values.unset, multi_task_enabled=values.unset, template=values.unset, prioritize_queue_order=values.unset):
"""
Create a new WorkspaceInstance
:param unicode friendly_name: Human readable description of this workspace
:param unicode event_callback_url: If provided, the Workspace will publish events to this URL.
:param unicode events_filter: Use this parameter to receive webhooks on EventCallbackUrl for specific events on a workspace.
:param bool multi_task_enabled: Multi tasking allows workers to handle multiple tasks simultaneously.
:param unicode template: One of the available template names.
:param WorkspaceInstance.QueueOrder prioritize_queue_order: Use this parameter to configure whether to prioritize LIFO or FIFO when workers are receiving Tasks from combination of LIFO and FIFO TaskQueues.
:returns: Newly created WorkspaceInstance
:rtype: twilio.rest.taskrouter.v1.workspace.WorkspaceInstance
"""
data = values.of({'FriendlyName': friendly_name, 'EventCallbackUrl': event_callback_url, 'EventsFilter': events_filter, 'MultiTaskEnabled': multi_task_enabled, 'Template': template, 'PrioritizeQueueOrder': prioritize_queue_order})
payload = self._version.create('POST', self._uri, data=data)
return WorkspaceInstance(self._version, payload)
|
def container_size(self):
    """Return the accumulated bit size of every member of the `Sequence`
    as a ``(number of bytes, remaining number of bits)`` tuple.
    """
    total_bits = 0
    for index, member in enumerate(self):
        if is_container(member):
            # Nested containers report (bytes, bits); fold both into bits.
            nested_bytes, nested_bits = member.container_size()
            total_bits += nested_bytes * 8 + nested_bits
        elif is_field(member):
            total_bits += member.bit_size
        else:
            # Anything that is neither a container nor a field is invalid.
            raise MemberTypeError(self, member, index)
    return divmod(total_bits, 8)
|
def function[container_size, parameter[self]]:
constant[ Returns the accumulated bit size of all fields in the `Sequence` as
a tuple in the form of ``(number of bytes, remaining number of bits)``.
]
variable[length] assign[=] constant[0]
for taget[tuple[[<ast.Name object at 0x7da2054a5210>, <ast.Name object at 0x7da2054a5030>]]] in starred[call[name[enumerate], parameter[name[self]]]] begin[:]
if call[name[is_container], parameter[name[item]]] begin[:]
<ast.Tuple object at 0x7da2054a6290> assign[=] call[name[item].container_size, parameter[]]
<ast.AugAssign object at 0x7da2054a6320>
return[call[name[divmod], parameter[name[length], constant[8]]]]
|
keyword[def] identifier[container_size] ( identifier[self] ):
literal[string]
identifier[length] = literal[int]
keyword[for] identifier[name] , identifier[item] keyword[in] identifier[enumerate] ( identifier[self] ):
keyword[if] identifier[is_container] ( identifier[item] ):
identifier[byte_length] , identifier[bit_length] = identifier[item] . identifier[container_size] ()
identifier[length] += identifier[bit_length] + identifier[byte_length] * literal[int]
keyword[elif] identifier[is_field] ( identifier[item] ):
identifier[length] += identifier[item] . identifier[bit_size]
keyword[else] :
keyword[raise] identifier[MemberTypeError] ( identifier[self] , identifier[item] , identifier[name] )
keyword[return] identifier[divmod] ( identifier[length] , literal[int] )
|
def container_size(self):
""" Returns the accumulated bit size of all fields in the `Sequence` as
a tuple in the form of ``(number of bytes, remaining number of bits)``.
"""
length = 0
for (name, item) in enumerate(self):
# Container
if is_container(item):
(byte_length, bit_length) = item.container_size()
length += bit_length + byte_length * 8 # depends on [control=['if'], data=[]]
# Field
elif is_field(item):
length += item.bit_size # depends on [control=['if'], data=[]]
else:
raise MemberTypeError(self, item, name) # depends on [control=['for'], data=[]]
return divmod(length, 8)
|
def do_table(self, arg):
    """Render the independent vs. dependent variable values of the active
    unit test and analysis group as a table, redirecting the output to a
    file when the argument requests it.
    """
    # Only the redirect target and append flag are needed here.
    _, target_file, do_append = self._redirect_split(arg)
    analysis = self.tests[self.active]
    options = self.curargs
    self._make_fits()
    table = analysis.table(options["independent"], options["dependents"],
                           options["threshold"], options["headings"],
                           options["functions"])
    if table is not None:
        self._redirect_output(table, target_file, do_append, msg.info)
|
def function[do_table, parameter[self, arg]]:
constant[Prints the set of values for the independent vs. dependent variables in the
active unit test and analysis group as a table.
]
<ast.Tuple object at 0x7da20c7941f0> assign[=] call[name[self]._redirect_split, parameter[name[arg]]]
variable[a] assign[=] call[name[self].tests][name[self].active]
variable[args] assign[=] name[self].curargs
call[name[self]._make_fits, parameter[]]
variable[result] assign[=] call[name[a].table, parameter[call[name[args]][constant[independent]], call[name[args]][constant[dependents]], call[name[args]][constant[threshold]], call[name[args]][constant[headings]], call[name[args]][constant[functions]]]]
if compare[name[result] is_not constant[None]] begin[:]
call[name[self]._redirect_output, parameter[name[result], name[filename], name[append], name[msg].info]]
|
keyword[def] identifier[do_table] ( identifier[self] , identifier[arg] ):
literal[string]
identifier[usable] , identifier[filename] , identifier[append] = identifier[self] . identifier[_redirect_split] ( identifier[arg] )
identifier[a] = identifier[self] . identifier[tests] [ identifier[self] . identifier[active] ]
identifier[args] = identifier[self] . identifier[curargs]
identifier[self] . identifier[_make_fits] ()
identifier[result] = identifier[a] . identifier[table] ( identifier[args] [ literal[string] ], identifier[args] [ literal[string] ], identifier[args] [ literal[string] ],
identifier[args] [ literal[string] ], identifier[args] [ literal[string] ])
keyword[if] identifier[result] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_redirect_output] ( identifier[result] , identifier[filename] , identifier[append] , identifier[msg] . identifier[info] )
|
def do_table(self, arg):
"""Prints the set of values for the independent vs. dependent variables in the
active unit test and analysis group as a table.
"""
(usable, filename, append) = self._redirect_split(arg)
a = self.tests[self.active]
args = self.curargs
self._make_fits()
result = a.table(args['independent'], args['dependents'], args['threshold'], args['headings'], args['functions'])
if result is not None:
self._redirect_output(result, filename, append, msg.info) # depends on [control=['if'], data=['result']]
|
def get_nodes(self, request):
    """
    Build the navigation nodes for all published entries: one archive
    node per year, month and day (created on first sight), with each
    entry attached beneath its day node.
    """
    menu_nodes = []
    seen_keys = []
    node_attrs = {'hidden': HIDE_ENTRY_MENU}
    for entry in Entry.published.all():
        year = entry.creation_date.strftime('%Y')
        month = entry.creation_date.strftime('%m')
        month_text = format(entry.creation_date, 'b').capitalize()
        day = entry.creation_date.strftime('%d')
        year_key = 'year-%s' % year
        month_key = 'month-%s-%s' % (year, month)
        day_key = 'day-%s-%s-%s' % (year, month, day)
        if year_key not in seen_keys:
            url = reverse('zinnia:entry_archive_year', args=[year])
            menu_nodes.append(
                NavigationNode(year, url, year_key, attr=node_attrs))
            seen_keys.append(year_key)
        if month_key not in seen_keys:
            url = reverse('zinnia:entry_archive_month', args=[year, month])
            menu_nodes.append(
                NavigationNode(month_text, url, month_key, year_key,
                               attr=node_attrs))
            seen_keys.append(month_key)
        if day_key not in seen_keys:
            url = reverse('zinnia:entry_archive_day',
                          args=[year, month, day])
            menu_nodes.append(
                NavigationNode(day, url, day_key, month_key,
                               attr=node_attrs))
            seen_keys.append(day_key)
        # The entry itself hangs under its day archive node.
        menu_nodes.append(
            NavigationNode(entry.title, entry.get_absolute_url(),
                           entry.pk, day_key))
    return menu_nodes
|
def function[get_nodes, parameter[self, request]]:
constant[
Return menu's node for entries
]
variable[nodes] assign[=] list[[]]
variable[archives] assign[=] list[[]]
variable[attributes] assign[=] dictionary[[<ast.Constant object at 0x7da1b0c219f0>], [<ast.Name object at 0x7da1b0c20b20>]]
for taget[name[entry]] in starred[call[name[Entry].published.all, parameter[]]] begin[:]
variable[year] assign[=] call[name[entry].creation_date.strftime, parameter[constant[%Y]]]
variable[month] assign[=] call[name[entry].creation_date.strftime, parameter[constant[%m]]]
variable[month_text] assign[=] call[call[name[format], parameter[name[entry].creation_date, constant[b]]].capitalize, parameter[]]
variable[day] assign[=] call[name[entry].creation_date.strftime, parameter[constant[%d]]]
variable[key_archive_year] assign[=] binary_operation[constant[year-%s] <ast.Mod object at 0x7da2590d6920> name[year]]
variable[key_archive_month] assign[=] binary_operation[constant[month-%s-%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0c204f0>, <ast.Name object at 0x7da1b0c20df0>]]]
variable[key_archive_day] assign[=] binary_operation[constant[day-%s-%s-%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0c20e50>, <ast.Name object at 0x7da1b0c212d0>, <ast.Name object at 0x7da1b0c21300>]]]
if compare[name[key_archive_year] <ast.NotIn object at 0x7da2590d7190> name[archives]] begin[:]
call[name[nodes].append, parameter[call[name[NavigationNode], parameter[name[year], call[name[reverse], parameter[constant[zinnia:entry_archive_year]]], name[key_archive_year]]]]]
call[name[archives].append, parameter[name[key_archive_year]]]
if compare[name[key_archive_month] <ast.NotIn object at 0x7da2590d7190> name[archives]] begin[:]
call[name[nodes].append, parameter[call[name[NavigationNode], parameter[name[month_text], call[name[reverse], parameter[constant[zinnia:entry_archive_month]]], name[key_archive_month], name[key_archive_year]]]]]
call[name[archives].append, parameter[name[key_archive_month]]]
if compare[name[key_archive_day] <ast.NotIn object at 0x7da2590d7190> name[archives]] begin[:]
call[name[nodes].append, parameter[call[name[NavigationNode], parameter[name[day], call[name[reverse], parameter[constant[zinnia:entry_archive_day]]], name[key_archive_day], name[key_archive_month]]]]]
call[name[archives].append, parameter[name[key_archive_day]]]
call[name[nodes].append, parameter[call[name[NavigationNode], parameter[name[entry].title, call[name[entry].get_absolute_url, parameter[]], name[entry].pk, name[key_archive_day]]]]]
return[name[nodes]]
|
keyword[def] identifier[get_nodes] ( identifier[self] , identifier[request] ):
literal[string]
identifier[nodes] =[]
identifier[archives] =[]
identifier[attributes] ={ literal[string] : identifier[HIDE_ENTRY_MENU] }
keyword[for] identifier[entry] keyword[in] identifier[Entry] . identifier[published] . identifier[all] ():
identifier[year] = identifier[entry] . identifier[creation_date] . identifier[strftime] ( literal[string] )
identifier[month] = identifier[entry] . identifier[creation_date] . identifier[strftime] ( literal[string] )
identifier[month_text] = identifier[format] ( identifier[entry] . identifier[creation_date] , literal[string] ). identifier[capitalize] ()
identifier[day] = identifier[entry] . identifier[creation_date] . identifier[strftime] ( literal[string] )
identifier[key_archive_year] = literal[string] % identifier[year]
identifier[key_archive_month] = literal[string] %( identifier[year] , identifier[month] )
identifier[key_archive_day] = literal[string] %( identifier[year] , identifier[month] , identifier[day] )
keyword[if] identifier[key_archive_year] keyword[not] keyword[in] identifier[archives] :
identifier[nodes] . identifier[append] ( identifier[NavigationNode] (
identifier[year] , identifier[reverse] ( literal[string] , identifier[args] =[ identifier[year] ]),
identifier[key_archive_year] , identifier[attr] = identifier[attributes] ))
identifier[archives] . identifier[append] ( identifier[key_archive_year] )
keyword[if] identifier[key_archive_month] keyword[not] keyword[in] identifier[archives] :
identifier[nodes] . identifier[append] ( identifier[NavigationNode] (
identifier[month_text] ,
identifier[reverse] ( literal[string] , identifier[args] =[ identifier[year] , identifier[month] ]),
identifier[key_archive_month] , identifier[key_archive_year] ,
identifier[attr] = identifier[attributes] ))
identifier[archives] . identifier[append] ( identifier[key_archive_month] )
keyword[if] identifier[key_archive_day] keyword[not] keyword[in] identifier[archives] :
identifier[nodes] . identifier[append] ( identifier[NavigationNode] (
identifier[day] , identifier[reverse] ( literal[string] ,
identifier[args] =[ identifier[year] , identifier[month] , identifier[day] ]),
identifier[key_archive_day] , identifier[key_archive_month] ,
identifier[attr] = identifier[attributes] ))
identifier[archives] . identifier[append] ( identifier[key_archive_day] )
identifier[nodes] . identifier[append] ( identifier[NavigationNode] ( identifier[entry] . identifier[title] , identifier[entry] . identifier[get_absolute_url] (),
identifier[entry] . identifier[pk] , identifier[key_archive_day] ))
keyword[return] identifier[nodes]
|
def get_nodes(self, request):
"""
Return menu's node for entries
"""
nodes = []
archives = []
attributes = {'hidden': HIDE_ENTRY_MENU}
for entry in Entry.published.all():
year = entry.creation_date.strftime('%Y')
month = entry.creation_date.strftime('%m')
month_text = format(entry.creation_date, 'b').capitalize()
day = entry.creation_date.strftime('%d')
key_archive_year = 'year-%s' % year
key_archive_month = 'month-%s-%s' % (year, month)
key_archive_day = 'day-%s-%s-%s' % (year, month, day)
if key_archive_year not in archives:
nodes.append(NavigationNode(year, reverse('zinnia:entry_archive_year', args=[year]), key_archive_year, attr=attributes))
archives.append(key_archive_year) # depends on [control=['if'], data=['key_archive_year', 'archives']]
if key_archive_month not in archives:
nodes.append(NavigationNode(month_text, reverse('zinnia:entry_archive_month', args=[year, month]), key_archive_month, key_archive_year, attr=attributes))
archives.append(key_archive_month) # depends on [control=['if'], data=['key_archive_month', 'archives']]
if key_archive_day not in archives:
nodes.append(NavigationNode(day, reverse('zinnia:entry_archive_day', args=[year, month, day]), key_archive_day, key_archive_month, attr=attributes))
archives.append(key_archive_day) # depends on [control=['if'], data=['key_archive_day', 'archives']]
nodes.append(NavigationNode(entry.title, entry.get_absolute_url(), entry.pk, key_archive_day)) # depends on [control=['for'], data=['entry']]
return nodes
|
def typelogged_func(func):
    """Works like typelogged, but is only applicable to functions,
    methods and properties.
    """
    if not pytypes.typelogging_enabled:
        return func
    if hasattr(func, 'do_logging'):
        # Already wrapped for inspection; just switch logging on.
        func.do_logging = True
        return func
    # Not yet wrapped: wrap it now, carrying over any existing typecheck
    # flag (having one at this point actually shouldn't happen).
    typecheck = func.do_typecheck if hasattr(func, 'do_typecheck') else False
    return _typeinspect_func(func, typecheck, True)
|
def function[typelogged_func, parameter[func]]:
constant[Works like typelogged, but is only applicable to functions,
methods and properties.
]
if <ast.UnaryOp object at 0x7da1b0d0d420> begin[:]
return[name[func]]
if call[name[hasattr], parameter[name[func], constant[do_logging]]] begin[:]
name[func].do_logging assign[=] constant[True]
return[name[func]]
|
keyword[def] identifier[typelogged_func] ( identifier[func] ):
literal[string]
keyword[if] keyword[not] identifier[pytypes] . identifier[typelogging_enabled] :
keyword[return] identifier[func]
keyword[if] identifier[hasattr] ( identifier[func] , literal[string] ):
identifier[func] . identifier[do_logging] = keyword[True]
keyword[return] identifier[func]
keyword[elif] identifier[hasattr] ( identifier[func] , literal[string] ):
keyword[return] identifier[_typeinspect_func] ( identifier[func] , identifier[func] . identifier[do_typecheck] , keyword[True] )
keyword[else] :
keyword[return] identifier[_typeinspect_func] ( identifier[func] , keyword[False] , keyword[True] )
|
def typelogged_func(func):
"""Works like typelogged, but is only applicable to functions,
methods and properties.
"""
if not pytypes.typelogging_enabled:
return func # depends on [control=['if'], data=[]]
if hasattr(func, 'do_logging'):
func.do_logging = True
return func # depends on [control=['if'], data=[]]
elif hasattr(func, 'do_typecheck'):
# actually shouldn't happen
return _typeinspect_func(func, func.do_typecheck, True) # depends on [control=['if'], data=[]]
else:
return _typeinspect_func(func, False, True)
|
def iterpink(depth=20):
"""Generate a sequence of samples of pink noise.
pink noise generator
from http://pydoc.net/Python/lmj.sound/0.1.1/lmj.sound.noise/
Based on the Voss-McCartney algorithm, discussion and code examples at
http://www.firstpr.com.au/dsp/pink-noise/
depth: Use this many samples of white noise to calculate the output. A
higher number is slower to run, but renders low frequencies with more
correct power spectra.
Generates a never-ending sequence of floating-point values. Any continuous
set of these samples will tend to have a 1/f power spectrum.
"""
values = numpy.random.randn(depth)
smooth = numpy.random.randn(depth)
source = numpy.random.randn(depth)
sumvals = values.sum()
i = 0
while True:
yield sumvals + smooth[i]
# advance the index by 1. if the index wraps, generate noise to use in
# the calculations, but do not update any of the pink noise values.
i += 1
if i == depth:
i = 0
smooth = numpy.random.randn(depth)
source = numpy.random.randn(depth)
continue
# count trailing zeros in i
c = 0
while not (i >> c) & 1:
c += 1
# replace value c with a new source element
sumvals += source[i] - values[c]
values[c] = source[i]
|
def function[iterpink, parameter[depth]]:
constant[Generate a sequence of samples of pink noise.
pink noise generator
from http://pydoc.net/Python/lmj.sound/0.1.1/lmj.sound.noise/
Based on the Voss-McCartney algorithm, discussion and code examples at
http://www.firstpr.com.au/dsp/pink-noise/
depth: Use this many samples of white noise to calculate the output. A
higher number is slower to run, but renders low frequencies with more
correct power spectra.
Generates a never-ending sequence of floating-point values. Any continuous
set of these samples will tend to have a 1/f power spectrum.
]
variable[values] assign[=] call[name[numpy].random.randn, parameter[name[depth]]]
variable[smooth] assign[=] call[name[numpy].random.randn, parameter[name[depth]]]
variable[source] assign[=] call[name[numpy].random.randn, parameter[name[depth]]]
variable[sumvals] assign[=] call[name[values].sum, parameter[]]
variable[i] assign[=] constant[0]
while constant[True] begin[:]
<ast.Yield object at 0x7da1b1600b20>
<ast.AugAssign object at 0x7da1b16025c0>
if compare[name[i] equal[==] name[depth]] begin[:]
variable[i] assign[=] constant[0]
variable[smooth] assign[=] call[name[numpy].random.randn, parameter[name[depth]]]
variable[source] assign[=] call[name[numpy].random.randn, parameter[name[depth]]]
continue
variable[c] assign[=] constant[0]
while <ast.UnaryOp object at 0x7da1b16008e0> begin[:]
<ast.AugAssign object at 0x7da1b1600850>
<ast.AugAssign object at 0x7da1b16005e0>
call[name[values]][name[c]] assign[=] call[name[source]][name[i]]
|
keyword[def] identifier[iterpink] ( identifier[depth] = literal[int] ):
literal[string]
identifier[values] = identifier[numpy] . identifier[random] . identifier[randn] ( identifier[depth] )
identifier[smooth] = identifier[numpy] . identifier[random] . identifier[randn] ( identifier[depth] )
identifier[source] = identifier[numpy] . identifier[random] . identifier[randn] ( identifier[depth] )
identifier[sumvals] = identifier[values] . identifier[sum] ()
identifier[i] = literal[int]
keyword[while] keyword[True] :
keyword[yield] identifier[sumvals] + identifier[smooth] [ identifier[i] ]
identifier[i] += literal[int]
keyword[if] identifier[i] == identifier[depth] :
identifier[i] = literal[int]
identifier[smooth] = identifier[numpy] . identifier[random] . identifier[randn] ( identifier[depth] )
identifier[source] = identifier[numpy] . identifier[random] . identifier[randn] ( identifier[depth] )
keyword[continue]
identifier[c] = literal[int]
keyword[while] keyword[not] ( identifier[i] >> identifier[c] )& literal[int] :
identifier[c] += literal[int]
identifier[sumvals] += identifier[source] [ identifier[i] ]- identifier[values] [ identifier[c] ]
identifier[values] [ identifier[c] ]= identifier[source] [ identifier[i] ]
|
def iterpink(depth=20):
"""Generate a sequence of samples of pink noise.
pink noise generator
from http://pydoc.net/Python/lmj.sound/0.1.1/lmj.sound.noise/
Based on the Voss-McCartney algorithm, discussion and code examples at
http://www.firstpr.com.au/dsp/pink-noise/
depth: Use this many samples of white noise to calculate the output. A
higher number is slower to run, but renders low frequencies with more
correct power spectra.
Generates a never-ending sequence of floating-point values. Any continuous
set of these samples will tend to have a 1/f power spectrum.
"""
values = numpy.random.randn(depth)
smooth = numpy.random.randn(depth)
source = numpy.random.randn(depth)
sumvals = values.sum()
i = 0
while True:
yield (sumvals + smooth[i])
# advance the index by 1. if the index wraps, generate noise to use in
# the calculations, but do not update any of the pink noise values.
i += 1
if i == depth:
i = 0
smooth = numpy.random.randn(depth)
source = numpy.random.randn(depth)
continue # depends on [control=['if'], data=['i', 'depth']]
# count trailing zeros in i
c = 0
while not i >> c & 1:
c += 1 # depends on [control=['while'], data=[]]
# replace value c with a new source element
sumvals += source[i] - values[c]
values[c] = source[i] # depends on [control=['while'], data=[]]
|
def main():
    """
    Command-line entry point: parses the options, starts the JVM, runs
    the requested clusterer evaluation and always stops the JVM again.
    Use -h to see all options.
    """
    parser = argparse.ArgumentParser(
        description='Performs clustering from the command-line. Calls JVM start/stop automatically.')
    parser.add_argument("-j", metavar="classpath", dest="classpath", help="additional classpath, jars/directories")
    parser.add_argument("-X", metavar="heap", dest="heap", help="max heap size for jvm, e.g., 512m")
    parser.add_argument("-t", metavar="train", dest="train", required=True, help="training set file")
    parser.add_argument("-T", metavar="test", dest="test", help="test set file")
    parser.add_argument("-d", metavar="outmodel", dest="outmodel", help="model output file name")
    parser.add_argument("-l", metavar="inmodel", dest="inmodel", help="model input file name")
    parser.add_argument("-p", metavar="attributes", dest="attributes", help="attribute range")
    parser.add_argument("-x", metavar="num folds", dest="numfolds", help="number of folds")
    parser.add_argument("-s", metavar="seed", dest="seed", help="seed value for randomization")
    parser.add_argument("-c", metavar="class index", dest="classindex", help="1-based class attribute index")
    parser.add_argument("-g", metavar="graph", dest="graph", help="graph output file (if supported)")
    parser.add_argument("clusterer", help="clusterer classname, e.g., weka.clusterers.SimpleKMeans")
    parser.add_argument("option", nargs=argparse.REMAINDER, help="additional clusterer options")
    parsed = parser.parse_args()
    jars = [] if parsed.classpath is None else parsed.classpath.split(os.pathsep)
    # Forward every supplied option to the evaluator as "flag value" pairs.
    params = []
    for flag, value in (("-t", parsed.train), ("-T", parsed.test),
                        ("-d", parsed.outmodel), ("-l", parsed.inmodel),
                        ("-p", parsed.attributes), ("-x", parsed.numfolds),
                        ("-s", parsed.seed), ("-c", parsed.classindex),
                        ("-g", parsed.graph)):
        if value is not None:
            params.extend([flag, value])
    jvm.start(jars, max_heap_size=parsed.heap, packages=True)
    logger.debug("Commandline: " + join_options(sys.argv[1:]))
    try:
        clusterer = Clusterer(classname=parsed.clusterer)
        if parsed.option:
            clusterer.options = parsed.option
        print(ClusterEvaluation.evaluate_clusterer(clusterer, params))
    except Exception as e:
        print(e)
    finally:
        # Always shut the JVM down, even when evaluation failed.
        jvm.stop()
|
def function[main, parameter[]]:
constant[
Runs a clusterer from the command-line. Calls JVM start/stop automatically.
Use -h to see all options.
]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[-j]]]
call[name[parser].add_argument, parameter[constant[-X]]]
call[name[parser].add_argument, parameter[constant[-t]]]
call[name[parser].add_argument, parameter[constant[-T]]]
call[name[parser].add_argument, parameter[constant[-d]]]
call[name[parser].add_argument, parameter[constant[-l]]]
call[name[parser].add_argument, parameter[constant[-p]]]
call[name[parser].add_argument, parameter[constant[-x]]]
call[name[parser].add_argument, parameter[constant[-s]]]
call[name[parser].add_argument, parameter[constant[-c]]]
call[name[parser].add_argument, parameter[constant[-g]]]
call[name[parser].add_argument, parameter[constant[clusterer]]]
call[name[parser].add_argument, parameter[constant[option]]]
variable[parsed] assign[=] call[name[parser].parse_args, parameter[]]
variable[jars] assign[=] list[[]]
if compare[name[parsed].classpath is_not constant[None]] begin[:]
variable[jars] assign[=] call[name[parsed].classpath.split, parameter[name[os].pathsep]]
variable[params] assign[=] list[[]]
if compare[name[parsed].train is_not constant[None]] begin[:]
call[name[params].extend, parameter[list[[<ast.Constant object at 0x7da1b0619000>, <ast.Attribute object at 0x7da1b061b700>]]]]
if compare[name[parsed].test is_not constant[None]] begin[:]
call[name[params].extend, parameter[list[[<ast.Constant object at 0x7da1b061b970>, <ast.Attribute object at 0x7da1b061b9a0>]]]]
if compare[name[parsed].outmodel is_not constant[None]] begin[:]
call[name[params].extend, parameter[list[[<ast.Constant object at 0x7da1b061bc10>, <ast.Attribute object at 0x7da1b061bc40>]]]]
if compare[name[parsed].inmodel is_not constant[None]] begin[:]
call[name[params].extend, parameter[list[[<ast.Constant object at 0x7da1b061beb0>, <ast.Attribute object at 0x7da1b061bee0>]]]]
if compare[name[parsed].attributes is_not constant[None]] begin[:]
call[name[params].extend, parameter[list[[<ast.Constant object at 0x7da1b0619f90>, <ast.Attribute object at 0x7da1b0619f60>]]]]
if compare[name[parsed].numfolds is_not constant[None]] begin[:]
call[name[params].extend, parameter[list[[<ast.Constant object at 0x7da1b06190f0>, <ast.Attribute object at 0x7da1b0619120>]]]]
if compare[name[parsed].seed is_not constant[None]] begin[:]
call[name[params].extend, parameter[list[[<ast.Constant object at 0x7da1b061ab90>, <ast.Attribute object at 0x7da1b061abc0>]]]]
if compare[name[parsed].classindex is_not constant[None]] begin[:]
call[name[params].extend, parameter[list[[<ast.Constant object at 0x7da1b0618400>, <ast.Attribute object at 0x7da1b0618430>]]]]
if compare[name[parsed].graph is_not constant[None]] begin[:]
call[name[params].extend, parameter[list[[<ast.Constant object at 0x7da1b0619480>, <ast.Attribute object at 0x7da1b0619570>]]]]
call[name[jvm].start, parameter[name[jars]]]
call[name[logger].debug, parameter[binary_operation[constant[Commandline: ] + call[name[join_options], parameter[call[name[sys].argv][<ast.Slice object at 0x7da1b061b160>]]]]]]
<ast.Try object at 0x7da1b061b0d0>
|
keyword[def] identifier[main] ():
literal[string]
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] (
identifier[description] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[metavar] = literal[string] , identifier[dest] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[metavar] = literal[string] , identifier[dest] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[metavar] = literal[string] , identifier[dest] = literal[string] , identifier[required] = keyword[True] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[metavar] = literal[string] , identifier[dest] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[metavar] = literal[string] , identifier[dest] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[metavar] = literal[string] , identifier[dest] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[metavar] = literal[string] , identifier[dest] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[metavar] = literal[string] , identifier[dest] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[metavar] = literal[string] , identifier[dest] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[metavar] = literal[string] , identifier[dest] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[metavar] = literal[string] , identifier[dest] = literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[help] = literal[string] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[nargs] = identifier[argparse] . identifier[REMAINDER] , identifier[help] = literal[string] )
identifier[parsed] = identifier[parser] . identifier[parse_args] ()
identifier[jars] =[]
keyword[if] identifier[parsed] . identifier[classpath] keyword[is] keyword[not] keyword[None] :
identifier[jars] = identifier[parsed] . identifier[classpath] . identifier[split] ( identifier[os] . identifier[pathsep] )
identifier[params] =[]
keyword[if] identifier[parsed] . identifier[train] keyword[is] keyword[not] keyword[None] :
identifier[params] . identifier[extend] ([ literal[string] , identifier[parsed] . identifier[train] ])
keyword[if] identifier[parsed] . identifier[test] keyword[is] keyword[not] keyword[None] :
identifier[params] . identifier[extend] ([ literal[string] , identifier[parsed] . identifier[test] ])
keyword[if] identifier[parsed] . identifier[outmodel] keyword[is] keyword[not] keyword[None] :
identifier[params] . identifier[extend] ([ literal[string] , identifier[parsed] . identifier[outmodel] ])
keyword[if] identifier[parsed] . identifier[inmodel] keyword[is] keyword[not] keyword[None] :
identifier[params] . identifier[extend] ([ literal[string] , identifier[parsed] . identifier[inmodel] ])
keyword[if] identifier[parsed] . identifier[attributes] keyword[is] keyword[not] keyword[None] :
identifier[params] . identifier[extend] ([ literal[string] , identifier[parsed] . identifier[attributes] ])
keyword[if] identifier[parsed] . identifier[numfolds] keyword[is] keyword[not] keyword[None] :
identifier[params] . identifier[extend] ([ literal[string] , identifier[parsed] . identifier[numfolds] ])
keyword[if] identifier[parsed] . identifier[seed] keyword[is] keyword[not] keyword[None] :
identifier[params] . identifier[extend] ([ literal[string] , identifier[parsed] . identifier[seed] ])
keyword[if] identifier[parsed] . identifier[classindex] keyword[is] keyword[not] keyword[None] :
identifier[params] . identifier[extend] ([ literal[string] , identifier[parsed] . identifier[classindex] ])
keyword[if] identifier[parsed] . identifier[graph] keyword[is] keyword[not] keyword[None] :
identifier[params] . identifier[extend] ([ literal[string] , identifier[parsed] . identifier[graph] ])
identifier[jvm] . identifier[start] ( identifier[jars] , identifier[max_heap_size] = identifier[parsed] . identifier[heap] , identifier[packages] = keyword[True] )
identifier[logger] . identifier[debug] ( literal[string] + identifier[join_options] ( identifier[sys] . identifier[argv] [ literal[int] :]))
keyword[try] :
identifier[clusterer] = identifier[Clusterer] ( identifier[classname] = identifier[parsed] . identifier[clusterer] )
keyword[if] identifier[len] ( identifier[parsed] . identifier[option] )> literal[int] :
identifier[clusterer] . identifier[options] = identifier[parsed] . identifier[option]
identifier[print] ( identifier[ClusterEvaluation] . identifier[evaluate_clusterer] ( identifier[clusterer] , identifier[params] ))
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[print] ( identifier[e] )
keyword[finally] :
identifier[jvm] . identifier[stop] ()
|
def main():
    """
    Runs a clusterer from the command-line. Calls JVM start/stop automatically.
    Use -h to see all options.

    The switches mirror Weka's clusterer evaluation options (-t train file,
    -T test file, -d/-l model output/input, -p attribute range, -x folds,
    -s seed, -c class index, -g graph output); everything after the
    clusterer classname is passed through as clusterer options.
    """
    parser = argparse.ArgumentParser(description='Performs clustering from the command-line. Calls JVM start/stop automatically.')
    parser.add_argument('-j', metavar='classpath', dest='classpath', help='additional classpath, jars/directories')
    parser.add_argument('-X', metavar='heap', dest='heap', help='max heap size for jvm, e.g., 512m')
    parser.add_argument('-t', metavar='train', dest='train', required=True, help='training set file')
    parser.add_argument('-T', metavar='test', dest='test', help='test set file')
    parser.add_argument('-d', metavar='outmodel', dest='outmodel', help='model output file name')
    parser.add_argument('-l', metavar='inmodel', dest='inmodel', help='model input file name')
    parser.add_argument('-p', metavar='attributes', dest='attributes', help='attribute range')
    parser.add_argument('-x', metavar='num folds', dest='numfolds', help='number of folds')
    parser.add_argument('-s', metavar='seed', dest='seed', help='seed value for randomization')
    parser.add_argument('-c', metavar='class index', dest='classindex', help='1-based class attribute index')
    parser.add_argument('-g', metavar='graph', dest='graph', help='graph output file (if supported)')
    parser.add_argument('clusterer', help='clusterer classname, e.g., weka.clusterers.SimpleKMeans')
    parser.add_argument('option', nargs=argparse.REMAINDER, help='additional clusterer options')
    parsed = parser.parse_args()
    jars = []
    if parsed.classpath is not None:
        # Extra classpath entries are separated with the platform path separator.
        jars = parsed.classpath.split(os.pathsep)
    # Rebuild the Weka-style evaluation parameter list from the parsed values.
    params = []
    if parsed.train is not None:
        params.extend(['-t', parsed.train])
    if parsed.test is not None:
        params.extend(['-T', parsed.test])
    if parsed.outmodel is not None:
        params.extend(['-d', parsed.outmodel])
    if parsed.inmodel is not None:
        params.extend(['-l', parsed.inmodel])
    if parsed.attributes is not None:
        params.extend(['-p', parsed.attributes])
    if parsed.numfolds is not None:
        params.extend(['-x', parsed.numfolds])
    if parsed.seed is not None:
        params.extend(['-s', parsed.seed])
    if parsed.classindex is not None:
        params.extend(['-c', parsed.classindex])
    if parsed.graph is not None:
        params.extend(['-g', parsed.graph])
    # The JVM must be running before any Clusterer object is constructed.
    jvm.start(jars, max_heap_size=parsed.heap, packages=True)
    logger.debug('Commandline: ' + join_options(sys.argv[1:]))
    try:
        clusterer = Clusterer(classname=parsed.clusterer)
        if len(parsed.option) > 0:
            clusterer.options = parsed.option
        print(ClusterEvaluation.evaluate_clusterer(clusterer, params))
    except Exception as e:
        print(e)
    finally:
        # Always shut the JVM down, even when evaluation fails.
        jvm.stop()
|
def create_note(note_store, note, trigger_id, data):
    """
    create a note
    :param note_store Evernote instance
    :param note
    :param trigger_id id of the trigger
    :param data to save or to put in cache
    :type note_store: Evernote Instance
    :type note: Note instance
    :type trigger_id: int
    :type data: dict
    :return boolean (True when the note was created or deliberately skipped,
        False on failure)
    :rtype boolean
    """
    # create the note !
    try:
        created_note = note_store.createNote(note)
        # lazy %-formatting: the string is only built if DEBUG is enabled
        logger.debug('note %s created', created_note.guid)
        return True
    except EDAMSystemException as e:
        # delegate to the shared error handler (e.g. rate limiting)
        return error(trigger_id, data, e)
    except EDAMUserException as e:
        if e.errorCode == EDAMErrorCode.ENML_VALIDATION:
            # Invalid ENML content is treated as "skip this item" rather
            # than a failure of the trigger itself.
            sentence = "Data ignored due to validation error : err {code} {msg}".format(code=e.errorCode,
                                                                                        msg=e.parameter)
            logger.warning(sentence)
            update_result(trigger_id, msg=sentence, status=True)
            return True
        # Any other user error is a real failure: report it explicitly
        # instead of falling through and returning None (the original bug).
        logger.critical(e)
        update_result(trigger_id, msg=str(e), status=False)
        return False
    except Exception as e:
        logger.critical(e)
        # store a string, not the exception object, in the result
        update_result(trigger_id, msg=str(e), status=False)
        return False
|
def function[create_note, parameter[note_store, note, trigger_id, data]]:
constant[
create a note
:param note_store Evernote instance
:param note
:param trigger_id id of the trigger
:param data to save or to put in cache
:type note_store: Evernote Instance
:type note: Note instance
:type trigger_id: int
:type data: dict
:return boolean
:rtype boolean
]
<ast.Try object at 0x7da204565bd0>
|
keyword[def] identifier[create_note] ( identifier[note_store] , identifier[note] , identifier[trigger_id] , identifier[data] ):
literal[string]
keyword[try] :
identifier[created_note] = identifier[note_store] . identifier[createNote] ( identifier[note] )
identifier[sentence] = identifier[str] ( literal[string] )% identifier[created_note] . identifier[guid]
identifier[logger] . identifier[debug] ( identifier[sentence] )
keyword[return] keyword[True]
keyword[except] identifier[EDAMSystemException] keyword[as] identifier[e] :
keyword[return] identifier[error] ( identifier[trigger_id] , identifier[data] , identifier[e] )
keyword[except] identifier[EDAMUserException] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[errorCode] == identifier[EDAMErrorCode] . identifier[ENML_VALIDATION] :
identifier[sentence] = literal[string] . identifier[format] ( identifier[code] = identifier[e] . identifier[errorCode] ,
identifier[msg] = identifier[e] . identifier[parameter] )
identifier[logger] . identifier[warning] ( identifier[sentence] )
identifier[update_result] ( identifier[trigger_id] , identifier[msg] = identifier[sentence] , identifier[status] = keyword[True] )
keyword[return] keyword[True]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[logger] . identifier[critical] ( identifier[e] )
identifier[update_result] ( identifier[trigger_id] , identifier[msg] = identifier[e] , identifier[status] = keyword[False] )
keyword[return] keyword[False]
|
def create_note(note_store, note, trigger_id, data):
"""
create a note
:param note_store Evernote instance
:param note
:param trigger_id id of the trigger
:param data to save or to put in cache
:type note_store: Evernote Instance
:type note: Note instance
:type trigger_id: int
:type data: dict
:return boolean
:rtype boolean
"""
# create the note !
try:
created_note = note_store.createNote(note)
sentence = str('note %s created') % created_note.guid
logger.debug(sentence)
return True # depends on [control=['try'], data=[]]
except EDAMSystemException as e:
return error(trigger_id, data, e) # depends on [control=['except'], data=['e']]
except EDAMUserException as e:
if e.errorCode == EDAMErrorCode.ENML_VALIDATION:
sentence = 'Data ignored due to validation error : err {code} {msg}'.format(code=e.errorCode, msg=e.parameter)
logger.warning(sentence)
update_result(trigger_id, msg=sentence, status=True)
return True # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']]
except Exception as e:
logger.critical(e)
update_result(trigger_id, msg=e, status=False)
return False # depends on [control=['except'], data=['e']]
|
def _write_to_pipette(self, gcode, mount, data_string):
    '''
    Write to an attached pipette's internal memory. The gcode used
    determines which portion of memory is written to.
    NOTE: To enable write-access to the pipette, it's button must be held
    gcode:
        String (str) containing a GCode
        either 'WRITE_INSTRUMENT_ID' or 'WRITE_INSTRUMENT_MODEL'
    mount:
        String (str) with value 'left' or 'right'
    data_string:
        String (str) that is of unknown length
    Raises ValueError on an unknown mount or a non-string data_string.
    '''
    allowed_mounts = {'left': 'L', 'right': 'R'}
    # Validate before overwriting `mount`, so the error message reports the
    # value the caller actually passed (the original reported `None`).
    converted_mount = allowed_mounts.get(mount)
    if not converted_mount:
        raise ValueError('Unexpected mount: {}'.format(mount))
    mount = converted_mount
    if not isinstance(data_string, str):
        raise ValueError(
            'Expected {0}, not {1}'.format(str, type(data_string)))
    # EMI interference from both plunger motors has been found to
    # prevent the I2C lines from communicating between Smoothieware and
    # pipette's onboard EEPROM. To avoid, turn off both plunger motors
    self.disengage_axis('BC')
    self.delay(CURRENT_CHANGE_DELAY)
    # data is read/written as strings of HEX characters
    # to avoid firmware weirdness in how it parses GCode arguments
    byte_string = _byte_array_to_hex_string(
        bytearray(data_string.encode()))
    command = gcode + mount + byte_string
    log.debug("_write_to_pipette: {}".format(command))
    self._send_command(command)
|
def function[_write_to_pipette, parameter[self, gcode, mount, data_string]]:
constant[
Write to an attached pipette's internal memory. The gcode used
determines which portion of memory is written to.
NOTE: To enable write-access to the pipette, it's button must be held
gcode:
String (str) containing a GCode
either 'WRITE_INSTRUMENT_ID' or 'WRITE_INSTRUMENT_MODEL'
mount:
String (str) with value 'left' or 'right'
data_string:
String (str) that is of unkown length
]
variable[allowed_mounts] assign[=] dictionary[[<ast.Constant object at 0x7da1b0926cb0>, <ast.Constant object at 0x7da1b0925570>], [<ast.Constant object at 0x7da1b0925a20>, <ast.Constant object at 0x7da1b0926290>]]
variable[mount] assign[=] call[name[allowed_mounts].get, parameter[name[mount]]]
if <ast.UnaryOp object at 0x7da1b0925510> begin[:]
<ast.Raise object at 0x7da1b0925540>
if <ast.UnaryOp object at 0x7da1b0926860> begin[:]
<ast.Raise object at 0x7da1b09267a0>
call[name[self].disengage_axis, parameter[constant[BC]]]
call[name[self].delay, parameter[name[CURRENT_CHANGE_DELAY]]]
variable[byte_string] assign[=] call[name[_byte_array_to_hex_string], parameter[call[name[bytearray], parameter[call[name[data_string].encode, parameter[]]]]]]
variable[command] assign[=] binary_operation[binary_operation[name[gcode] + name[mount]] + name[byte_string]]
call[name[log].debug, parameter[call[constant[_write_to_pipette: {}].format, parameter[name[command]]]]]
call[name[self]._send_command, parameter[name[command]]]
|
keyword[def] identifier[_write_to_pipette] ( identifier[self] , identifier[gcode] , identifier[mount] , identifier[data_string] ):
literal[string]
identifier[allowed_mounts] ={ literal[string] : literal[string] , literal[string] : literal[string] }
identifier[mount] = identifier[allowed_mounts] . identifier[get] ( identifier[mount] )
keyword[if] keyword[not] identifier[mount] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[mount] ))
keyword[if] keyword[not] identifier[isinstance] ( identifier[data_string] , identifier[str] ):
keyword[raise] identifier[ValueError] (
literal[string] . identifier[format] ( identifier[str] , identifier[type] ( identifier[data_string] )))
identifier[self] . identifier[disengage_axis] ( literal[string] )
identifier[self] . identifier[delay] ( identifier[CURRENT_CHANGE_DELAY] )
identifier[byte_string] = identifier[_byte_array_to_hex_string] (
identifier[bytearray] ( identifier[data_string] . identifier[encode] ()))
identifier[command] = identifier[gcode] + identifier[mount] + identifier[byte_string]
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( identifier[command] ))
identifier[self] . identifier[_send_command] ( identifier[command] )
|
def _write_to_pipette(self, gcode, mount, data_string):
"""
Write to an attached pipette's internal memory. The gcode used
determines which portion of memory is written to.
NOTE: To enable write-access to the pipette, it's button must be held
gcode:
String (str) containing a GCode
either 'WRITE_INSTRUMENT_ID' or 'WRITE_INSTRUMENT_MODEL'
mount:
String (str) with value 'left' or 'right'
data_string:
String (str) that is of unkown length
"""
allowed_mounts = {'left': 'L', 'right': 'R'}
mount = allowed_mounts.get(mount)
if not mount:
raise ValueError('Unexpected mount: {}'.format(mount)) # depends on [control=['if'], data=[]]
if not isinstance(data_string, str):
raise ValueError('Expected {0}, not {1}'.format(str, type(data_string))) # depends on [control=['if'], data=[]]
# EMI interference from both plunger motors has been found to
# prevent the I2C lines from communicating between Smoothieware and
# pipette's onboard EEPROM. To avoid, turn off both plunger motors
self.disengage_axis('BC')
self.delay(CURRENT_CHANGE_DELAY)
# data is read/written as strings of HEX characters
# to avoid firmware weirdness in how it parses GCode arguments
byte_string = _byte_array_to_hex_string(bytearray(data_string.encode()))
command = gcode + mount + byte_string
log.debug('_write_to_pipette: {}'.format(command))
self._send_command(command)
|
def load_path(self, path):
    """
    Load a config file from ``path``, parse it and hand the result to
    :meth:`load_data`.

    :param path: filesystem path of the config file
    :type path: str
    :return: result of :meth:`load_data` on the parsed content, or None
        when ``path`` is not an existing file
    :raises ValueError: if the detected config type is unsupported
    """
    logger.debug("load_path(path=%s)" % path)
    if not os.path.isfile(path):
        # logger.warn is a deprecated alias; use logger.warning
        logger.warning("load_path couldn't find the path: %s" % path)
        return None
    configtype = self.guess_type(path)
    logger.debug("Detected config type: %s" % self._type_to_str(configtype))
    if configtype not in self.TYPES:
        raise ValueError("Unsupported config type")
    logger.debug("Opening config for reading")
    with open(path, 'r') as f:
        data = f.read()
    # Instantiate the matching parser and convert the raw text back
    # into a dictionary.
    datadict = self.TYPES[configtype]().reverse(data)
    return self.load_data(datadict)
|
def function[load_path, parameter[self, path]]:
constant[
:param path:
:type path:
:return:
:rtype:
]
call[name[logger].debug, parameter[binary_operation[constant[load_path(path=%s)] <ast.Mod object at 0x7da2590d6920> name[path]]]]
if call[name[os].path.isfile, parameter[name[path]]] begin[:]
variable[configtype] assign[=] call[name[self].guess_type, parameter[name[path]]]
call[name[logger].debug, parameter[binary_operation[constant[Detected config type: %s] <ast.Mod object at 0x7da2590d6920> call[name[self]._type_to_str, parameter[name[configtype]]]]]]
if compare[name[configtype] in name[self].TYPES] begin[:]
call[name[logger].debug, parameter[constant[Opening config for reading]]]
with call[name[open], parameter[name[path], constant[r]]] begin[:]
variable[data] assign[=] call[name[f].read, parameter[]]
variable[datadict] assign[=] call[call[call[name[self].TYPES][name[configtype]], parameter[]].reverse, parameter[name[data]]]
return[call[name[self].load_data, parameter[name[datadict]]]]
|
keyword[def] identifier[load_path] ( identifier[self] , identifier[path] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] % identifier[path] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[path] ):
identifier[configtype] = identifier[self] . identifier[guess_type] ( identifier[path] )
identifier[logger] . identifier[debug] ( literal[string] % identifier[self] . identifier[_type_to_str] ( identifier[configtype] ))
keyword[if] identifier[configtype] keyword[in] identifier[self] . identifier[TYPES] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[f] :
identifier[data] = identifier[f] . identifier[read] ()
identifier[datadict] = identifier[self] . identifier[TYPES] [ identifier[configtype] ](). identifier[reverse] ( identifier[data] )
keyword[return] identifier[self] . identifier[load_data] ( identifier[datadict] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] :
identifier[logger] . identifier[warn] ( literal[string] % identifier[path] )
|
def load_path(self, path):
"""
:param path:
:type path:
:return:
:rtype:
"""
logger.debug('load_path(path=%s)' % path)
if os.path.isfile(path):
configtype = self.guess_type(path)
logger.debug('Detected config type: %s' % self._type_to_str(configtype))
if configtype in self.TYPES:
logger.debug('Opening config for reading')
with open(path, 'r') as f:
data = f.read() # depends on [control=['with'], data=['f']]
datadict = self.TYPES[configtype]().reverse(data)
return self.load_data(datadict) # depends on [control=['if'], data=['configtype']]
else:
raise ValueError('Unsupported config type') # depends on [control=['if'], data=[]]
else:
logger.warn("load_path couldn't find the path: %s" % path)
|
def sort(self):
    """
    Yield the graph's nodes in topological order, so that every node is
    produced only after its dependencies have been produced.

    Raises CyclicGraphError when nodes remain but a full pass finds no
    leaf node to prune, i.e. the graph contains a cycle.
    """
    while self.nodes:
        progressed = False
        for leaf in self.leaf_nodes():
            progressed = True
            self.prune_node(leaf)
            yield leaf
        if not progressed:
            raise CyclicGraphError("Sorting has found a cyclic graph.")
|
def function[sort, parameter[self]]:
constant[
Return an iterable of nodes, toplogically sorted to correctly import
dependencies before leaf nodes.
]
while name[self].nodes begin[:]
variable[iterated] assign[=] constant[False]
for taget[name[node]] in starred[call[name[self].leaf_nodes, parameter[]]] begin[:]
variable[iterated] assign[=] constant[True]
call[name[self].prune_node, parameter[name[node]]]
<ast.Yield object at 0x7da207f99cc0>
if <ast.UnaryOp object at 0x7da207f99990> begin[:]
<ast.Raise object at 0x7da207f99f00>
|
keyword[def] identifier[sort] ( identifier[self] ):
literal[string]
keyword[while] identifier[self] . identifier[nodes] :
identifier[iterated] = keyword[False]
keyword[for] identifier[node] keyword[in] identifier[self] . identifier[leaf_nodes] ():
identifier[iterated] = keyword[True]
identifier[self] . identifier[prune_node] ( identifier[node] )
keyword[yield] identifier[node]
keyword[if] keyword[not] identifier[iterated] :
keyword[raise] identifier[CyclicGraphError] ( literal[string] )
|
def sort(self):
"""
Return an iterable of nodes, toplogically sorted to correctly import
dependencies before leaf nodes.
"""
while self.nodes:
iterated = False
for node in self.leaf_nodes():
iterated = True
self.prune_node(node)
yield node # depends on [control=['for'], data=['node']]
if not iterated:
raise CyclicGraphError('Sorting has found a cyclic graph.') # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
|
def by_player(self):
    """:class:`bool`: Whether the kill involves other characters.

    True when at least one killer is a player other than this character.
    """
    # Generator expression instead of a list comprehension: avoids
    # materializing the whole list and lets any() short-circuit on the
    # first qualifying killer.
    return any(k.player and self.name != k.name for k in self.killers)
|
def function[by_player, parameter[self]]:
constant[:class:`bool`: Whether the kill involves other characters.]
return[call[name[any], parameter[<ast.ListComp object at 0x7da1b0b6df30>]]]
|
keyword[def] identifier[by_player] ( identifier[self] ):
literal[string]
keyword[return] identifier[any] ([ identifier[k] . identifier[player] keyword[and] identifier[self] . identifier[name] != identifier[k] . identifier[name] keyword[for] identifier[k] keyword[in] identifier[self] . identifier[killers] ])
|
def by_player(self):
""":class:`bool`: Whether the kill involves other characters."""
return any([k.player and self.name != k.name for k in self.killers])
|
def parse(value):
    """Parse the string date.
    This supports the subset of ISO8601 used by xsd:date, but is lenient
    with what is accepted, handling most reasonable syntax.
    Any timezone is parsed but ignored because a) it's meaningless without
    a time and b) B{datetime}.I{date} does not support a tzinfo property.
    @param value: A date string.
    @type value: str
    @return: A date object.
    @rtype: B{datetime}.I{date}
    """
    matched = RE_DATE.match(value)
    if matched is None:
        raise ValueError('date data has invalid format "%s"' % value)
    # Delegate conversion of the regex groups to the shared helper.
    return date_from_match(matched)
|
def function[parse, parameter[value]]:
constant[Parse the string date.
This supports the subset of ISO8601 used by xsd:date, but is lenient
with what is accepted, handling most reasonable syntax.
Any timezone is parsed but ignored because a) it's meaningless without
a time and b) B{datetime}.I{date} does not support a tzinfo property.
@param value: A date string.
@type value: str
@return: A date object.
@rtype: B{datetime}.I{date}
]
variable[match_result] assign[=] call[name[RE_DATE].match, parameter[name[value]]]
if compare[name[match_result] is constant[None]] begin[:]
<ast.Raise object at 0x7da18ede65f0>
variable[value] assign[=] call[name[date_from_match], parameter[name[match_result]]]
return[name[value]]
|
keyword[def] identifier[parse] ( identifier[value] ):
literal[string]
identifier[match_result] = identifier[RE_DATE] . identifier[match] ( identifier[value] )
keyword[if] identifier[match_result] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[value] )
identifier[value] = identifier[date_from_match] ( identifier[match_result] )
keyword[return] identifier[value]
|
def parse(value):
"""Parse the string date.
This supports the subset of ISO8601 used by xsd:date, but is lenient
with what is accepted, handling most reasonable syntax.
Any timezone is parsed but ignored because a) it's meaningless without
a time and b) B{datetime}.I{date} does not support a tzinfo property.
@param value: A date string.
@type value: str
@return: A date object.
@rtype: B{datetime}.I{date}
"""
match_result = RE_DATE.match(value)
if match_result is None:
raise ValueError('date data has invalid format "%s"' % value) # depends on [control=['if'], data=[]]
value = date_from_match(match_result)
return value
|
def write(self, data):
    """Write data to the serial port, 512 bytes at a time."""
    for piece in chunks(data, 512):
        # Block until the port is ready before sending each chunk.
        self.wait_to_write()
        self.comport.write(piece)
    # Push any buffered bytes out once everything has been queued.
    self.comport.flush()
|
def function[write, parameter[self, data]]:
constant[Write data to serial port.]
for taget[name[chunk]] in starred[call[name[chunks], parameter[name[data], constant[512]]]] begin[:]
call[name[self].wait_to_write, parameter[]]
call[name[self].comport.write, parameter[name[chunk]]]
call[name[self].comport.flush, parameter[]]
|
keyword[def] identifier[write] ( identifier[self] , identifier[data] ):
literal[string]
keyword[for] identifier[chunk] keyword[in] identifier[chunks] ( identifier[data] , literal[int] ):
identifier[self] . identifier[wait_to_write] ()
identifier[self] . identifier[comport] . identifier[write] ( identifier[chunk] )
identifier[self] . identifier[comport] . identifier[flush] ()
|
def write(self, data):
"""Write data to serial port."""
for chunk in chunks(data, 512):
self.wait_to_write()
self.comport.write(chunk) # depends on [control=['for'], data=['chunk']]
self.comport.flush()
|
def sort_by_tag(self, tag):
    """Sorts the `AmpalContainer` by a tag on the component objects.
    Parameters
    ----------
    tag : str
        Key of tag used for sorting.
    """
    # Named key function instead of an inline lambda.
    def tag_value(component):
        return component.tags[tag]
    return AmpalContainer(sorted(self, key=tag_value))
|
def function[sort_by_tag, parameter[self, tag]]:
constant[Sorts the `AmpalContainer` by a tag on the component objects.
Parameters
----------
tag : str
Key of tag used for sorting.
]
return[call[name[AmpalContainer], parameter[call[name[sorted], parameter[name[self]]]]]]
|
keyword[def] identifier[sort_by_tag] ( identifier[self] , identifier[tag] ):
literal[string]
keyword[return] identifier[AmpalContainer] ( identifier[sorted] ( identifier[self] , identifier[key] = keyword[lambda] identifier[x] : identifier[x] . identifier[tags] [ identifier[tag] ]))
|
def sort_by_tag(self, tag):
"""Sorts the `AmpalContainer` by a tag on the component objects.
Parameters
----------
tag : str
Key of tag used for sorting.
"""
return AmpalContainer(sorted(self, key=lambda x: x.tags[tag]))
|
def word(self):
    """
    Lazy-loads word value
    :getter: Returns the plain string value of the word
    :type: str
    """
    # Return the cached value when it has already been resolved.
    if self._word is not None:
        return self._word
    matches = self._element.xpath('word/text()')
    if matches:
        self._word = matches[0]
    return self._word
|
def function[word, parameter[self]]:
constant[
Lazy-loads word value
:getter: Returns the plain string value of the word
:type: str
]
if compare[name[self]._word is constant[None]] begin[:]
variable[words] assign[=] call[name[self]._element.xpath, parameter[constant[word/text()]]]
if compare[call[name[len], parameter[name[words]]] greater[>] constant[0]] begin[:]
name[self]._word assign[=] call[name[words]][constant[0]]
return[name[self]._word]
|
keyword[def] identifier[word] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_word] keyword[is] keyword[None] :
identifier[words] = identifier[self] . identifier[_element] . identifier[xpath] ( literal[string] )
keyword[if] identifier[len] ( identifier[words] )> literal[int] :
identifier[self] . identifier[_word] = identifier[words] [ literal[int] ]
keyword[return] identifier[self] . identifier[_word]
|
def word(self):
"""
Lazy-loads word value
:getter: Returns the plain string value of the word
:type: str
"""
if self._word is None:
words = self._element.xpath('word/text()')
if len(words) > 0:
self._word = words[0] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return self._word
|
def js_click(self, selector, by=By.CSS_SELECTOR):
    """ Clicks an element using pure JS. Does not use jQuery.

    Falls back to a regular WebDriver click for Link Text selectors,
    which pure JavaScript cannot target.
    """
    selector, by = self.__recalculate_selector(selector, by)
    if by == By.LINK_TEXT:
        message = (
            "Pure JavaScript doesn't support clicking by Link Text. "
            "You may want to use self.jquery_click() instead, which "
            "allows this with :contains(), assuming jQuery isn't blocked. "
            "For now, self.js_click() will use a regular WebDriver click.")
        logging.debug(message)
        self.click(selector, by=by)
        return
    element = self.wait_for_element_present(
        selector, by=by, timeout=settings.SMALL_TIMEOUT)
    if self.is_element_visible(selector, by=by):
        self.__demo_mode_highlight_if_active(selector, by)
        if not self.demo_mode:
            self.__scroll_to_element(element)
    # The previous version also built an escaped CSS selector here, but
    # the result was never used, so that dead computation was removed.
    self.__js_click(selector, by=by)  # The real "magic" happens here
    self.__demo_mode_pause_if_active()
|
def function[js_click, parameter[self, selector, by]]:
constant[ Clicks an element using pure JS. Does not use jQuery. ]
<ast.Tuple object at 0x7da1b1bc9c90> assign[=] call[name[self].__recalculate_selector, parameter[name[selector], name[by]]]
if compare[name[by] equal[==] name[By].LINK_TEXT] begin[:]
variable[message] assign[=] constant[Pure JavaScript doesn't support clicking by Link Text. You may want to use self.jquery_click() instead, which allows this with :contains(), assuming jQuery isn't blocked. For now, self.js_click() will use a regular WebDriver click.]
call[name[logging].debug, parameter[name[message]]]
call[name[self].click, parameter[name[selector]]]
return[None]
variable[element] assign[=] call[name[self].wait_for_element_present, parameter[name[selector]]]
if call[name[self].is_element_visible, parameter[name[selector]]] begin[:]
call[name[self].__demo_mode_highlight_if_active, parameter[name[selector], name[by]]]
if <ast.UnaryOp object at 0x7da1b1bcb9a0> begin[:]
call[name[self].__scroll_to_element, parameter[name[element]]]
variable[css_selector] assign[=] call[name[self].convert_to_css_selector, parameter[name[selector]]]
variable[css_selector] assign[=] call[name[re].escape, parameter[name[css_selector]]]
variable[css_selector] assign[=] call[name[self].__escape_quotes_if_needed, parameter[name[css_selector]]]
call[name[self].__js_click, parameter[name[selector]]]
call[name[self].__demo_mode_pause_if_active, parameter[]]
|
keyword[def] identifier[js_click] ( identifier[self] , identifier[selector] , identifier[by] = identifier[By] . identifier[CSS_SELECTOR] ):
literal[string]
identifier[selector] , identifier[by] = identifier[self] . identifier[__recalculate_selector] ( identifier[selector] , identifier[by] )
keyword[if] identifier[by] == identifier[By] . identifier[LINK_TEXT] :
identifier[message] =(
literal[string]
literal[string]
literal[string]
literal[string] )
identifier[logging] . identifier[debug] ( identifier[message] )
identifier[self] . identifier[click] ( identifier[selector] , identifier[by] = identifier[by] )
keyword[return]
identifier[element] = identifier[self] . identifier[wait_for_element_present] (
identifier[selector] , identifier[by] = identifier[by] , identifier[timeout] = identifier[settings] . identifier[SMALL_TIMEOUT] )
keyword[if] identifier[self] . identifier[is_element_visible] ( identifier[selector] , identifier[by] = identifier[by] ):
identifier[self] . identifier[__demo_mode_highlight_if_active] ( identifier[selector] , identifier[by] )
keyword[if] keyword[not] identifier[self] . identifier[demo_mode] :
identifier[self] . identifier[__scroll_to_element] ( identifier[element] )
identifier[css_selector] = identifier[self] . identifier[convert_to_css_selector] ( identifier[selector] , identifier[by] = identifier[by] )
identifier[css_selector] = identifier[re] . identifier[escape] ( identifier[css_selector] )
identifier[css_selector] = identifier[self] . identifier[__escape_quotes_if_needed] ( identifier[css_selector] )
identifier[self] . identifier[__js_click] ( identifier[selector] , identifier[by] = identifier[by] )
identifier[self] . identifier[__demo_mode_pause_if_active] ()
|
def js_click(self, selector, by=By.CSS_SELECTOR):
""" Clicks an element using pure JS. Does not use jQuery. """
(selector, by) = self.__recalculate_selector(selector, by)
if by == By.LINK_TEXT:
message = "Pure JavaScript doesn't support clicking by Link Text. You may want to use self.jquery_click() instead, which allows this with :contains(), assuming jQuery isn't blocked. For now, self.js_click() will use a regular WebDriver click."
logging.debug(message)
self.click(selector, by=by)
return # depends on [control=['if'], data=['by']]
element = self.wait_for_element_present(selector, by=by, timeout=settings.SMALL_TIMEOUT)
if self.is_element_visible(selector, by=by):
self.__demo_mode_highlight_if_active(selector, by)
if not self.demo_mode:
self.__scroll_to_element(element) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
css_selector = self.convert_to_css_selector(selector, by=by)
css_selector = re.escape(css_selector)
css_selector = self.__escape_quotes_if_needed(css_selector)
self.__js_click(selector, by=by) # The real "magic" happens here
self.__demo_mode_pause_if_active()
|
def add_book_series(self, title, volume=None):
    """
    :param volume: the volume of the book
    :type volume: string
    :param title: the title of the book
    :type title: string
    """
    # Keep only the fields that were actually provided.
    book_series = {
        key: value
        for key, value in (('title', title), ('volume', volume))
        if value is not None
    }
    self._append_to('book_series', book_series)
|
def function[add_book_series, parameter[self, title, volume]]:
constant[
:param volume: the volume of the book
:type volume: string
:param title: the title of the book
:type title: string
]
variable[book_series] assign[=] dictionary[[], []]
if compare[name[title] is_not constant[None]] begin[:]
call[name[book_series]][constant[title]] assign[=] name[title]
if compare[name[volume] is_not constant[None]] begin[:]
call[name[book_series]][constant[volume]] assign[=] name[volume]
call[name[self]._append_to, parameter[constant[book_series], name[book_series]]]
|
keyword[def] identifier[add_book_series] ( identifier[self] , identifier[title] , identifier[volume] = keyword[None] ):
literal[string]
identifier[book_series] ={}
keyword[if] identifier[title] keyword[is] keyword[not] keyword[None] :
identifier[book_series] [ literal[string] ]= identifier[title]
keyword[if] identifier[volume] keyword[is] keyword[not] keyword[None] :
identifier[book_series] [ literal[string] ]= identifier[volume]
identifier[self] . identifier[_append_to] ( literal[string] , identifier[book_series] )
|
def add_book_series(self, title, volume=None):
"""
:param volume: the volume of the book
:type volume: string
:param title: the title of the book
:type title: string
"""
book_series = {}
if title is not None:
book_series['title'] = title # depends on [control=['if'], data=['title']]
if volume is not None:
book_series['volume'] = volume # depends on [control=['if'], data=['volume']]
self._append_to('book_series', book_series)
|
    def get_token_obj(self, token, expire=_token_default):
        """Return (creating if necessary) the object associated with *token*.

        Parameters
        ----------
        token : string
            The token for the object as returned by `create_token`.
        expire : number or None
            The number of seconds until the object associated with the token
            expires or `None` if it should not expire. If the argument is
            omitted the value returned by `get_default_token_expiration` is
            used. The expiration of an object is lazy. That means the memory
            of the expired object is not freed until the next call of
            `get_token_obj`. An expiration of 0 or less immediately frees
            the memory of the token.
        """
        # `_token_default` is a sentinel so callers can pass None explicitly
        # (meaning "never expire") while omission still picks the default.
        if expire == _token_default:
            expire = self.get_default_token_expiration()
        now = get_time()
        # Absolute expiration timestamp for this token; None == never expires.
        until = now + expire if expire is not None else None
        with self._token_lock:
            # _token_timings is keys sorted by time
            # Find the first entry that has not yet expired; everything
            # before it in _token_timings is stale and can be dropped.
            first_valid = None
            for (pos, k) in enumerate(self._token_timings):
                t = self._token_map[k][0]
                if t is None or t > now:
                    first_valid = pos
                    break
            if first_valid is None:
                # All tracked tokens have expired -- reset both structures.
                self._token_map = {}
                self._token_timings = []
            else:
                # Drop only the expired prefix, keeping the order intact.
                for k in self._token_timings[:first_valid]:
                    del self._token_map[k]
                self._token_timings = self._token_timings[first_valid:]
            if until is None or until > now:
                if token not in self._token_map:
                    self._token_map[token] = (until, {})
                    self._token_timings.append(token)
                else:
                    # Refresh the expiration but keep the existing object.
                    self._token_map[token] = (until, self._token_map[token][1])
                # Re-sort keys by expiration time; never-expiring tokens
                # (timestamp None) deliberately sort after all timed ones.
                self._token_timings.sort(key=lambda k: (
                    1 if self._token_map[k][0] is None else 0,
                    self._token_map[k][0]
                ))
                return self._token_map[token][1]
            else:
                # expire <= 0: free the token's memory immediately and hand
                # back a fresh, untracked dict.
                if token in self._token_map:
                    self._token_timings = [
                        k for k in self._token_timings if k != token
                    ]
                    del self._token_map[token]
                return {}
|
def function[get_token_obj, parameter[self, token, expire]]:
constant[Returns or creates the object associaten with the given token.
Parameters
----------
token : string
The token for the object as returned by `create_token`.
expire : number or None
The number of seconds until the object associated with the token
expires or `None` if it should not expire. If the argument is
omitted the value returned by `get_default_token_expiration` is
used. The expiration of an object is lazy. That means the memory
of the expired object is not freed until the next call of
`get_token_obj`. An expiration of 0 or less immediately frees
the memory of the token.
]
if compare[name[expire] equal[==] name[_token_default]] begin[:]
variable[expire] assign[=] call[name[self].get_default_token_expiration, parameter[]]
variable[now] assign[=] call[name[get_time], parameter[]]
variable[until] assign[=] <ast.IfExp object at 0x7da20c6aa4a0>
with name[self]._token_lock begin[:]
variable[first_valid] assign[=] constant[None]
for taget[tuple[[<ast.Name object at 0x7da20c6a9ae0>, <ast.Name object at 0x7da20c6aa1a0>]]] in starred[call[name[enumerate], parameter[name[self]._token_timings]]] begin[:]
variable[t] assign[=] call[call[name[self]._token_map][name[k]]][constant[0]]
if <ast.BoolOp object at 0x7da20c6a86a0> begin[:]
variable[first_valid] assign[=] name[pos]
break
if compare[name[first_valid] is constant[None]] begin[:]
name[self]._token_map assign[=] dictionary[[], []]
name[self]._token_timings assign[=] list[[]]
if <ast.BoolOp object at 0x7da20c6a8bb0> begin[:]
if compare[name[token] <ast.NotIn object at 0x7da2590d7190> name[self]._token_map] begin[:]
call[name[self]._token_map][name[token]] assign[=] tuple[[<ast.Name object at 0x7da1b23d5240>, <ast.Dict object at 0x7da1b23d5960>]]
call[name[self]._token_timings.append, parameter[name[token]]]
call[name[self]._token_timings.sort, parameter[]]
return[call[call[name[self]._token_map][name[token]]][constant[1]]]
|
keyword[def] identifier[get_token_obj] ( identifier[self] , identifier[token] , identifier[expire] = identifier[_token_default] ):
literal[string]
keyword[if] identifier[expire] == identifier[_token_default] :
identifier[expire] = identifier[self] . identifier[get_default_token_expiration] ()
identifier[now] = identifier[get_time] ()
identifier[until] = identifier[now] + identifier[expire] keyword[if] identifier[expire] keyword[is] keyword[not] keyword[None] keyword[else] keyword[None]
keyword[with] identifier[self] . identifier[_token_lock] :
identifier[first_valid] = keyword[None]
keyword[for] ( identifier[pos] , identifier[k] ) keyword[in] identifier[enumerate] ( identifier[self] . identifier[_token_timings] ):
identifier[t] = identifier[self] . identifier[_token_map] [ identifier[k] ][ literal[int] ]
keyword[if] identifier[t] keyword[is] keyword[None] keyword[or] identifier[t] > identifier[now] :
identifier[first_valid] = identifier[pos]
keyword[break]
keyword[if] identifier[first_valid] keyword[is] keyword[None] :
identifier[self] . identifier[_token_map] ={}
identifier[self] . identifier[_token_timings] =[]
keyword[else] :
keyword[for] identifier[k] keyword[in] identifier[self] . identifier[_token_timings] [: identifier[first_valid] ]:
keyword[del] identifier[self] . identifier[_token_map] [ identifier[k] ]
identifier[self] . identifier[_token_timings] = identifier[self] . identifier[_token_timings] [ identifier[first_valid] :]
keyword[if] identifier[until] keyword[is] keyword[None] keyword[or] identifier[until] > identifier[now] :
keyword[if] identifier[token] keyword[not] keyword[in] identifier[self] . identifier[_token_map] :
identifier[self] . identifier[_token_map] [ identifier[token] ]=( identifier[until] ,{})
identifier[self] . identifier[_token_timings] . identifier[append] ( identifier[token] )
keyword[else] :
identifier[self] . identifier[_token_map] [ identifier[token] ]=( identifier[until] , identifier[self] . identifier[_token_map] [ identifier[token] ][ literal[int] ])
identifier[self] . identifier[_token_timings] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[k] :(
literal[int] keyword[if] identifier[self] . identifier[_token_map] [ identifier[k] ][ literal[int] ] keyword[is] keyword[None] keyword[else] literal[int] ,
identifier[self] . identifier[_token_map] [ identifier[k] ][ literal[int] ]
))
keyword[return] identifier[self] . identifier[_token_map] [ identifier[token] ][ literal[int] ]
keyword[else] :
keyword[if] identifier[token] keyword[in] identifier[self] . identifier[_token_map] :
identifier[self] . identifier[_token_timings] =[
identifier[k] keyword[for] identifier[k] keyword[in] identifier[self] . identifier[_token_timings] keyword[if] identifier[k] != identifier[token]
]
keyword[del] identifier[self] . identifier[_token_map] [ identifier[token] ]
keyword[return] {}
|
def get_token_obj(self, token, expire=_token_default):
"""Returns or creates the object associaten with the given token.
Parameters
----------
token : string
The token for the object as returned by `create_token`.
expire : number or None
The number of seconds until the object associated with the token
expires or `None` if it should not expire. If the argument is
omitted the value returned by `get_default_token_expiration` is
used. The expiration of an object is lazy. That means the memory
of the expired object is not freed until the next call of
`get_token_obj`. An expiration of 0 or less immediately frees
the memory of the token.
"""
if expire == _token_default:
expire = self.get_default_token_expiration() # depends on [control=['if'], data=['expire']]
now = get_time()
until = now + expire if expire is not None else None
with self._token_lock:
# _token_timings is keys sorted by time
first_valid = None
for (pos, k) in enumerate(self._token_timings):
t = self._token_map[k][0]
if t is None or t > now:
first_valid = pos
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if first_valid is None:
self._token_map = {}
self._token_timings = [] # depends on [control=['if'], data=[]]
else:
for k in self._token_timings[:first_valid]:
del self._token_map[k] # depends on [control=['for'], data=['k']]
self._token_timings = self._token_timings[first_valid:]
if until is None or until > now:
if token not in self._token_map:
self._token_map[token] = (until, {})
self._token_timings.append(token) # depends on [control=['if'], data=['token']]
else:
self._token_map[token] = (until, self._token_map[token][1])
self._token_timings.sort(key=lambda k: (1 if self._token_map[k][0] is None else 0, self._token_map[k][0]))
return self._token_map[token][1] # depends on [control=['if'], data=[]]
else:
if token in self._token_map:
self._token_timings = [k for k in self._token_timings if k != token]
del self._token_map[token] # depends on [control=['if'], data=['token']]
return {} # depends on [control=['with'], data=[]]
|
def make_chunk(chunk_type, chunk_data):
    """Build a complete raw PNG chunk from its type and payload.

    The 4-byte big-endian length prefix and the trailing CRC-32 (computed
    over type + payload) are added automatically.

    :arg str chunk_type: PNG chunk type.
    :arg bytes chunk_data: PNG chunk data, **excluding chunk length, type, and CRC**.
    :rtype: bytes
    """
    length_prefix = struct.pack("!I", len(chunk_data))
    # The CRC covers the chunk type bytes and the payload, not the length.
    body = chunk_type.encode("latin-1") + chunk_data
    checksum = struct.pack("!I", binascii.crc32(body) & 0xffffffff)
    return length_prefix + body + checksum
|
def function[make_chunk, parameter[chunk_type, chunk_data]]:
constant[Create a raw chunk by composing chunk type and data. It
calculates chunk length and CRC for you.
:arg str chunk_type: PNG chunk type.
:arg bytes chunk_data: PNG chunk data, **excluding chunk length, type, and CRC**.
:rtype: bytes
]
variable[out] assign[=] call[name[struct].pack, parameter[constant[!I], call[name[len], parameter[name[chunk_data]]]]]
variable[chunk_data] assign[=] binary_operation[call[name[chunk_type].encode, parameter[constant[latin-1]]] + name[chunk_data]]
<ast.AugAssign object at 0x7da1b054b010>
return[name[out]]
|
keyword[def] identifier[make_chunk] ( identifier[chunk_type] , identifier[chunk_data] ):
literal[string]
identifier[out] = identifier[struct] . identifier[pack] ( literal[string] , identifier[len] ( identifier[chunk_data] ))
identifier[chunk_data] = identifier[chunk_type] . identifier[encode] ( literal[string] )+ identifier[chunk_data]
identifier[out] += identifier[chunk_data] + identifier[struct] . identifier[pack] ( literal[string] , identifier[binascii] . identifier[crc32] ( identifier[chunk_data] )& literal[int] )
keyword[return] identifier[out]
|
def make_chunk(chunk_type, chunk_data):
"""Create a raw chunk by composing chunk type and data. It
calculates chunk length and CRC for you.
:arg str chunk_type: PNG chunk type.
:arg bytes chunk_data: PNG chunk data, **excluding chunk length, type, and CRC**.
:rtype: bytes
"""
out = struct.pack('!I', len(chunk_data))
chunk_data = chunk_type.encode('latin-1') + chunk_data
out += chunk_data + struct.pack('!I', binascii.crc32(chunk_data) & 4294967295)
return out
|
def get_file(self, filename):
"""Get file source from cache"""
import linecache
# Hack for frozen importlib bootstrap
if filename == '<frozen importlib._bootstrap>':
filename = os.path.join(
os.path.dirname(linecache.__file__), 'importlib',
'_bootstrap.py'
)
return to_unicode_string(
''.join(linecache.getlines(filename)), filename
)
|
def function[get_file, parameter[self, filename]]:
constant[Get file source from cache]
import module[linecache]
if compare[name[filename] equal[==] constant[<frozen importlib._bootstrap>]] begin[:]
variable[filename] assign[=] call[name[os].path.join, parameter[call[name[os].path.dirname, parameter[name[linecache].__file__]], constant[importlib], constant[_bootstrap.py]]]
return[call[name[to_unicode_string], parameter[call[constant[].join, parameter[call[name[linecache].getlines, parameter[name[filename]]]]], name[filename]]]]
|
keyword[def] identifier[get_file] ( identifier[self] , identifier[filename] ):
literal[string]
keyword[import] identifier[linecache]
keyword[if] identifier[filename] == literal[string] :
identifier[filename] = identifier[os] . identifier[path] . identifier[join] (
identifier[os] . identifier[path] . identifier[dirname] ( identifier[linecache] . identifier[__file__] ), literal[string] ,
literal[string]
)
keyword[return] identifier[to_unicode_string] (
literal[string] . identifier[join] ( identifier[linecache] . identifier[getlines] ( identifier[filename] )), identifier[filename]
)
|
def get_file(self, filename):
"""Get file source from cache"""
import linecache
# Hack for frozen importlib bootstrap
if filename == '<frozen importlib._bootstrap>':
filename = os.path.join(os.path.dirname(linecache.__file__), 'importlib', '_bootstrap.py') # depends on [control=['if'], data=['filename']]
return to_unicode_string(''.join(linecache.getlines(filename)), filename)
|
def _get_cubic_root(self):
"""Get the cubic root."""
# We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2
# where x = sqrt(mu).
# We substitute x, which is sqrt(mu), with x = y + 1.
# It gives y^3 + py = q
# where p = (D^2 h_min^2)/(2*C) and q = -p.
# We use the Vieta's substitution to compute the root.
# There is only one real solution y (which is in [0, 1] ).
# http://mathworld.wolfram.com/VietasSubstitution.html
assert_array = [
tf.Assert(
tf.logical_not(tf.is_nan(self._dist_to_opt_avg)),
[self._dist_to_opt_avg,]),
tf.Assert(
tf.logical_not(tf.is_nan(self._h_min)),
[self._h_min,]),
tf.Assert(
tf.logical_not(tf.is_nan(self._grad_var)),
[self._grad_var,]),
tf.Assert(
tf.logical_not(tf.is_inf(self._dist_to_opt_avg)),
[self._dist_to_opt_avg,]),
tf.Assert(
tf.logical_not(tf.is_inf(self._h_min)),
[self._h_min,]),
tf.Assert(
tf.logical_not(tf.is_inf(self._grad_var)),
[self._grad_var,])
]
with tf.control_dependencies(assert_array):
p = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var
w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0
w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0/3.0)
y = w - p / 3.0 / w
x = y + 1
return x
|
def function[_get_cubic_root, parameter[self]]:
constant[Get the cubic root.]
variable[assert_array] assign[=] list[[<ast.Call object at 0x7da1b2004fa0>, <ast.Call object at 0x7da1b20049a0>, <ast.Call object at 0x7da1b2004e80>, <ast.Call object at 0x7da1b2004be0>, <ast.Call object at 0x7da1b2004820>, <ast.Call object at 0x7da1b1e14340>]]
with call[name[tf].control_dependencies, parameter[name[assert_array]]] begin[:]
variable[p] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[self]._dist_to_opt_avg ** constant[2]] * binary_operation[name[self]._h_min ** constant[2]]] / constant[2]] / name[self]._grad_var]
variable[w3] assign[=] binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b2088070> - name[p]] / constant[2.0]]
variable[w] assign[=] binary_operation[call[name[tf].sign, parameter[name[w3]]] * call[name[tf].pow, parameter[call[name[tf].abs, parameter[name[w3]]], binary_operation[constant[1.0] / constant[3.0]]]]]
variable[y] assign[=] binary_operation[name[w] - binary_operation[binary_operation[name[p] / constant[3.0]] / name[w]]]
variable[x] assign[=] binary_operation[name[y] + constant[1]]
return[name[x]]
|
keyword[def] identifier[_get_cubic_root] ( identifier[self] ):
literal[string]
identifier[assert_array] =[
identifier[tf] . identifier[Assert] (
identifier[tf] . identifier[logical_not] ( identifier[tf] . identifier[is_nan] ( identifier[self] . identifier[_dist_to_opt_avg] )),
[ identifier[self] . identifier[_dist_to_opt_avg] ,]),
identifier[tf] . identifier[Assert] (
identifier[tf] . identifier[logical_not] ( identifier[tf] . identifier[is_nan] ( identifier[self] . identifier[_h_min] )),
[ identifier[self] . identifier[_h_min] ,]),
identifier[tf] . identifier[Assert] (
identifier[tf] . identifier[logical_not] ( identifier[tf] . identifier[is_nan] ( identifier[self] . identifier[_grad_var] )),
[ identifier[self] . identifier[_grad_var] ,]),
identifier[tf] . identifier[Assert] (
identifier[tf] . identifier[logical_not] ( identifier[tf] . identifier[is_inf] ( identifier[self] . identifier[_dist_to_opt_avg] )),
[ identifier[self] . identifier[_dist_to_opt_avg] ,]),
identifier[tf] . identifier[Assert] (
identifier[tf] . identifier[logical_not] ( identifier[tf] . identifier[is_inf] ( identifier[self] . identifier[_h_min] )),
[ identifier[self] . identifier[_h_min] ,]),
identifier[tf] . identifier[Assert] (
identifier[tf] . identifier[logical_not] ( identifier[tf] . identifier[is_inf] ( identifier[self] . identifier[_grad_var] )),
[ identifier[self] . identifier[_grad_var] ,])
]
keyword[with] identifier[tf] . identifier[control_dependencies] ( identifier[assert_array] ):
identifier[p] = identifier[self] . identifier[_dist_to_opt_avg] ** literal[int] * identifier[self] . identifier[_h_min] ** literal[int] / literal[int] / identifier[self] . identifier[_grad_var]
identifier[w3] =(- identifier[tf] . identifier[sqrt] ( identifier[p] ** literal[int] + literal[int] / literal[int] * identifier[p] ** literal[int] )- identifier[p] )/ literal[int]
identifier[w] = identifier[tf] . identifier[sign] ( identifier[w3] )* identifier[tf] . identifier[pow] ( identifier[tf] . identifier[abs] ( identifier[w3] ), literal[int] / literal[int] )
identifier[y] = identifier[w] - identifier[p] / literal[int] / identifier[w]
identifier[x] = identifier[y] + literal[int]
keyword[return] identifier[x]
|
def _get_cubic_root(self):
"""Get the cubic root."""
# We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2
# where x = sqrt(mu).
# We substitute x, which is sqrt(mu), with x = y + 1.
# It gives y^3 + py = q
# where p = (D^2 h_min^2)/(2*C) and q = -p.
# We use the Vieta's substitution to compute the root.
# There is only one real solution y (which is in [0, 1] ).
# http://mathworld.wolfram.com/VietasSubstitution.html
assert_array = [tf.Assert(tf.logical_not(tf.is_nan(self._dist_to_opt_avg)), [self._dist_to_opt_avg]), tf.Assert(tf.logical_not(tf.is_nan(self._h_min)), [self._h_min]), tf.Assert(tf.logical_not(tf.is_nan(self._grad_var)), [self._grad_var]), tf.Assert(tf.logical_not(tf.is_inf(self._dist_to_opt_avg)), [self._dist_to_opt_avg]), tf.Assert(tf.logical_not(tf.is_inf(self._h_min)), [self._h_min]), tf.Assert(tf.logical_not(tf.is_inf(self._grad_var)), [self._grad_var])]
with tf.control_dependencies(assert_array):
p = self._dist_to_opt_avg ** 2 * self._h_min ** 2 / 2 / self._grad_var
w3 = (-tf.sqrt(p ** 2 + 4.0 / 27.0 * p ** 3) - p) / 2.0
w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0 / 3.0)
y = w - p / 3.0 / w
x = y + 1 # depends on [control=['with'], data=[]]
return x
|
    def cases(self, owner=None, collaborator=None, query=None, skip_assigned=False,
              has_causatives=False, reruns=False, finished=False,
              research_requested=False, is_research=False, status=None,
              phenotype_terms=False, pinned=False, cohort=False, name_query=None,
              yield_query=False):
        """Fetches all cases from the backend.
        Args:
            collaborator(str): If collaborator should be considered
            owner(str): Query cases for specified case owner only
            query(dict): If a specific query is used
            skip_assigned(bool)
            has_causatives(bool)
            reruns(bool)
            finished(bool)
            research_requested(bool)
            is_research(bool)
            status(str)
            phenotype_terms(bool): Fetch all cases with phenotype terms
            pinned(bool): Fetch all cases with pinned variants
            name_query(str): Could be hpo term, HPO-group, user, part of display name,
                             part of inds or part of synopsis
            yield_query(bool): If true, only return mongo query dict for use in
                               compound querying.
        Returns:
            Cases ordered by date.
            If yield_query is True, does not pose query to db;
            instead returns corresponding query dict
            that can be reused in compound queries or for testing.
        """
        LOG.debug("Fetch all cases")
        query = query or {}
        # Prioritize when both owner and collaborator params are present
        if collaborator and owner:
            collaborator = None
        if collaborator:
            LOG.debug("Use collaborator {0}".format(collaborator))
            query['collaborators'] = collaborator
        if owner:
            LOG.debug("Use owner {0}".format(owner))
            query['owner'] = owner
        if skip_assigned:
            query['assignees'] = {'$exists': False}
        if has_causatives:
            query['causatives'] = {'$exists': True, '$ne': []}
        if reruns:
            query['rerun_requested'] = True
        # An explicit status filter wins over the convenience `finished` flag.
        if status:
            query['status'] = status
        elif finished:
            query['status'] = {'$in': ['solved', 'archived']}
        if research_requested:
            query['research_requested'] = True
        if is_research:
            query['is_research'] = {'$exists': True, '$eq': True}
        if phenotype_terms:
            query['phenotype_terms'] = {'$exists': True, '$ne': []}
        if pinned:
            query['suspects'] = {'$exists': True, '$ne': []}
        if cohort:
            query['cohorts'] = {'$exists': True, '$ne': []}
        if name_query:
            # Free-text search. A "descriptor:" prefix selects a dedicated
            # search mode below; otherwise the text is matched against user
            # names, case display names and individual display names.
            name_value = name_query.split(':')[-1]  # value after the 'descriptor:' prefix, if any
            # NOTE: a case-insensitive user-name regex match takes precedence
            # over every descriptor prefix handled in the elif chain below.
            users = self.user_collection.find({'name': {'$regex': name_query, '$options': 'i'}})
            # NOTE(review): cursor.count() is deprecated in newer pymongo
            # releases -- confirm the driver version pinned by this project.
            if users.count() > 0:
                query['assignees'] = {'$in': [user['email'] for user in users]}
            elif name_query.startswith('HP:'):
                LOG.debug("HPO case query")
                if name_value:
                    # Full HPO identifier (including the "HP:" prefix).
                    query['phenotype_terms.phenotype_id'] = name_query
                else: # query for cases with no HPO terms
                    query['$or'] = [ {'phenotype_terms' : {'$size' : 0}}, {'phenotype_terms' : {'$exists' : False}} ]
            elif name_query.startswith('PG:'):
                LOG.debug("PG case query")
                if name_value:
                    # Phenotype groups are stored under HP: identifiers.
                    phenotype_group_query = name_query.replace('PG:', 'HP:')
                    query['phenotype_groups.phenotype_id'] = phenotype_group_query
                else: # query for cases with no phenotype groups
                    query['$or'] = [ {'phenotype_groups' : {'$size' : 0}}, {'phenotype_groups' : {'$exists' : False}} ]
            elif name_query.startswith('synopsis:'):
                if name_value:
                    # Mongo full-text search ($text requires a text index).
                    query['$text']={'$search':name_value}
                else: # query for cases with missing synopsis
                    query['synopsis'] = ''
            elif name_query.startswith('cohort:'):
                query['cohorts'] = name_value
            elif name_query.startswith('panel:'):
                # Only match panels that are the case's default panel.
                query['panels'] = {'$elemMatch': {'panel_name': name_value,
                                                  'is_default': True }}
            elif name_query.startswith('status:'):
                status_query = name_query.replace('status:','')
                query['status'] = status_query
            elif name_query.startswith('is_research'):
                query['is_research'] = {'$exists': True, '$eq': True}
            else:
                # Fall back to matching case or individual display names.
                query['$or'] = [
                    {'display_name': {'$regex': name_query}},
                    {'individuals.display_name': {'$regex': name_query}},
                ]
        if yield_query:
            return query
        LOG.info("Get cases with query {0}".format(query))
        # Most recently updated cases first.
        return self.case_collection.find(query).sort('updated_at', -1)
|
def function[cases, parameter[self, owner, collaborator, query, skip_assigned, has_causatives, reruns, finished, research_requested, is_research, status, phenotype_terms, pinned, cohort, name_query, yield_query]]:
constant[Fetches all cases from the backend.
Args:
collaborator(str): If collaborator should be considered
owner(str): Query cases for specified case owner only
query(dict): If a specific query is used
skip_assigned(bool)
has_causatives(bool)
reruns(bool)
finished(bool)
research_requested(bool)
is_research(bool)
status(str)
phenotype_terms(bool): Fetch all cases with phenotype terms
pinned(bool): Fetch all cases with pinned variants
name_query(str): Could be hpo term, HPO-group, user, part of display name,
part of inds or part of synopsis
yield_query(bool): If true, only return mongo query dict for use in
compound querying.
Returns:
Cases ordered by date.
If yield_query is True, does not pose query to db;
instead returns corresponding query dict
that can be reused in compound queries or for testing.
]
call[name[LOG].debug, parameter[constant[Fetch all cases]]]
variable[query] assign[=] <ast.BoolOp object at 0x7da20c7c9cf0>
if <ast.BoolOp object at 0x7da20c7cb310> begin[:]
variable[collaborator] assign[=] constant[None]
if name[collaborator] begin[:]
call[name[LOG].debug, parameter[call[constant[Use collaborator {0}].format, parameter[name[collaborator]]]]]
call[name[query]][constant[collaborators]] assign[=] name[collaborator]
if name[owner] begin[:]
call[name[LOG].debug, parameter[call[constant[Use owner {0}].format, parameter[name[owner]]]]]
call[name[query]][constant[owner]] assign[=] name[owner]
if name[skip_assigned] begin[:]
call[name[query]][constant[assignees]] assign[=] dictionary[[<ast.Constant object at 0x7da20c7c8f70>], [<ast.Constant object at 0x7da20c7cadd0>]]
if name[has_causatives] begin[:]
call[name[query]][constant[causatives]] assign[=] dictionary[[<ast.Constant object at 0x7da20c7ca3e0>, <ast.Constant object at 0x7da20c7c9480>], [<ast.Constant object at 0x7da20c7cb880>, <ast.List object at 0x7da20c7c9f30>]]
if name[reruns] begin[:]
call[name[query]][constant[rerun_requested]] assign[=] constant[True]
if name[status] begin[:]
call[name[query]][constant[status]] assign[=] name[status]
if name[research_requested] begin[:]
call[name[query]][constant[research_requested]] assign[=] constant[True]
if name[is_research] begin[:]
call[name[query]][constant[is_research]] assign[=] dictionary[[<ast.Constant object at 0x7da18fe916f0>, <ast.Constant object at 0x7da18fe90dc0>], [<ast.Constant object at 0x7da18fe90880>, <ast.Constant object at 0x7da18fe90550>]]
if name[phenotype_terms] begin[:]
call[name[query]][constant[phenotype_terms]] assign[=] dictionary[[<ast.Constant object at 0x7da18fe90340>, <ast.Constant object at 0x7da18fe93eb0>], [<ast.Constant object at 0x7da18fe93be0>, <ast.List object at 0x7da18fe91ed0>]]
if name[pinned] begin[:]
call[name[query]][constant[suspects]] assign[=] dictionary[[<ast.Constant object at 0x7da18fe93c10>, <ast.Constant object at 0x7da18fe91030>], [<ast.Constant object at 0x7da18fe92800>, <ast.List object at 0x7da18fe91540>]]
if name[cohort] begin[:]
call[name[query]][constant[cohorts]] assign[=] dictionary[[<ast.Constant object at 0x7da18fe90760>, <ast.Constant object at 0x7da18fe93250>], [<ast.Constant object at 0x7da18fe93e20>, <ast.List object at 0x7da18fe90f40>]]
if name[name_query] begin[:]
variable[name_value] assign[=] call[call[name[name_query].split, parameter[constant[:]]]][<ast.UnaryOp object at 0x7da18fe90e20>]
variable[users] assign[=] call[name[self].user_collection.find, parameter[dictionary[[<ast.Constant object at 0x7da18dc99fc0>], [<ast.Dict object at 0x7da18dc9b6a0>]]]]
if compare[call[name[users].count, parameter[]] greater[>] constant[0]] begin[:]
call[name[query]][constant[assignees]] assign[=] dictionary[[<ast.Constant object at 0x7da18dc9ae90>], [<ast.ListComp object at 0x7da18dc98a90>]]
if name[yield_query] begin[:]
return[name[query]]
call[name[LOG].info, parameter[call[constant[Get cases with query {0}].format, parameter[name[query]]]]]
return[call[call[name[self].case_collection.find, parameter[name[query]]].sort, parameter[constant[updated_at], <ast.UnaryOp object at 0x7da18eb54b20>]]]
|
keyword[def] identifier[cases] ( identifier[self] , identifier[owner] = keyword[None] , identifier[collaborator] = keyword[None] , identifier[query] = keyword[None] , identifier[skip_assigned] = keyword[False] ,
identifier[has_causatives] = keyword[False] , identifier[reruns] = keyword[False] , identifier[finished] = keyword[False] ,
identifier[research_requested] = keyword[False] , identifier[is_research] = keyword[False] , identifier[status] = keyword[None] ,
identifier[phenotype_terms] = keyword[False] , identifier[pinned] = keyword[False] , identifier[cohort] = keyword[False] , identifier[name_query] = keyword[None] ,
identifier[yield_query] = keyword[False] ):
literal[string]
identifier[LOG] . identifier[debug] ( literal[string] )
identifier[query] = identifier[query] keyword[or] {}
keyword[if] identifier[collaborator] keyword[and] identifier[owner] :
identifier[collaborator] = keyword[None]
keyword[if] identifier[collaborator] :
identifier[LOG] . identifier[debug] ( literal[string] . identifier[format] ( identifier[collaborator] ))
identifier[query] [ literal[string] ]= identifier[collaborator]
keyword[if] identifier[owner] :
identifier[LOG] . identifier[debug] ( literal[string] . identifier[format] ( identifier[owner] ))
identifier[query] [ literal[string] ]= identifier[owner]
keyword[if] identifier[skip_assigned] :
identifier[query] [ literal[string] ]={ literal[string] : keyword[False] }
keyword[if] identifier[has_causatives] :
identifier[query] [ literal[string] ]={ literal[string] : keyword[True] , literal[string] :[]}
keyword[if] identifier[reruns] :
identifier[query] [ literal[string] ]= keyword[True]
keyword[if] identifier[status] :
identifier[query] [ literal[string] ]= identifier[status]
keyword[elif] identifier[finished] :
identifier[query] [ literal[string] ]={ literal[string] :[ literal[string] , literal[string] ]}
keyword[if] identifier[research_requested] :
identifier[query] [ literal[string] ]= keyword[True]
keyword[if] identifier[is_research] :
identifier[query] [ literal[string] ]={ literal[string] : keyword[True] , literal[string] : keyword[True] }
keyword[if] identifier[phenotype_terms] :
identifier[query] [ literal[string] ]={ literal[string] : keyword[True] , literal[string] :[]}
keyword[if] identifier[pinned] :
identifier[query] [ literal[string] ]={ literal[string] : keyword[True] , literal[string] :[]}
keyword[if] identifier[cohort] :
identifier[query] [ literal[string] ]={ literal[string] : keyword[True] , literal[string] :[]}
keyword[if] identifier[name_query] :
identifier[name_value] = identifier[name_query] . identifier[split] ( literal[string] )[- literal[int] ]
identifier[users] = identifier[self] . identifier[user_collection] . identifier[find] ({ literal[string] :{ literal[string] : identifier[name_query] , literal[string] : literal[string] }})
keyword[if] identifier[users] . identifier[count] ()> literal[int] :
identifier[query] [ literal[string] ]={ literal[string] :[ identifier[user] [ literal[string] ] keyword[for] identifier[user] keyword[in] identifier[users] ]}
keyword[elif] identifier[name_query] . identifier[startswith] ( literal[string] ):
identifier[LOG] . identifier[debug] ( literal[string] )
keyword[if] identifier[name_value] :
identifier[query] [ literal[string] ]= identifier[name_query]
keyword[else] :
identifier[query] [ literal[string] ]=[{ literal[string] :{ literal[string] : literal[int] }},{ literal[string] :{ literal[string] : keyword[False] }}]
keyword[elif] identifier[name_query] . identifier[startswith] ( literal[string] ):
identifier[LOG] . identifier[debug] ( literal[string] )
keyword[if] identifier[name_value] :
identifier[phenotype_group_query] = identifier[name_query] . identifier[replace] ( literal[string] , literal[string] )
identifier[query] [ literal[string] ]= identifier[phenotype_group_query]
keyword[else] :
identifier[query] [ literal[string] ]=[{ literal[string] :{ literal[string] : literal[int] }},{ literal[string] :{ literal[string] : keyword[False] }}]
keyword[elif] identifier[name_query] . identifier[startswith] ( literal[string] ):
keyword[if] identifier[name_value] :
identifier[query] [ literal[string] ]={ literal[string] : identifier[name_value] }
keyword[else] :
identifier[query] [ literal[string] ]= literal[string]
keyword[elif] identifier[name_query] . identifier[startswith] ( literal[string] ):
identifier[query] [ literal[string] ]= identifier[name_value]
keyword[elif] identifier[name_query] . identifier[startswith] ( literal[string] ):
identifier[query] [ literal[string] ]={ literal[string] :{ literal[string] : identifier[name_value] ,
literal[string] : keyword[True] }}
keyword[elif] identifier[name_query] . identifier[startswith] ( literal[string] ):
identifier[status_query] = identifier[name_query] . identifier[replace] ( literal[string] , literal[string] )
identifier[query] [ literal[string] ]= identifier[status_query]
keyword[elif] identifier[name_query] . identifier[startswith] ( literal[string] ):
identifier[query] [ literal[string] ]={ literal[string] : keyword[True] , literal[string] : keyword[True] }
keyword[else] :
identifier[query] [ literal[string] ]=[
{ literal[string] :{ literal[string] : identifier[name_query] }},
{ literal[string] :{ literal[string] : identifier[name_query] }},
]
keyword[if] identifier[yield_query] :
keyword[return] identifier[query]
identifier[LOG] . identifier[info] ( literal[string] . identifier[format] ( identifier[query] ))
keyword[return] identifier[self] . identifier[case_collection] . identifier[find] ( identifier[query] ). identifier[sort] ( literal[string] ,- literal[int] )
|
def cases(self, owner=None, collaborator=None, query=None, skip_assigned=False, has_causatives=False, reruns=False, finished=False, research_requested=False, is_research=False, status=None, phenotype_terms=False, pinned=False, cohort=False, name_query=None, yield_query=False):
"""Fetches all cases from the backend.
Args:
collaborator(str): If collaborator should be considered
owner(str): Query cases for specified case owner only
query(dict): If a specific query is used
skip_assigned(bool)
has_causatives(bool)
reruns(bool)
finished(bool)
research_requested(bool)
is_research(bool)
status(str)
phenotype_terms(bool): Fetch all cases with phenotype terms
pinned(bool): Fetch all cases with pinned variants
name_query(str): Could be hpo term, HPO-group, user, part of display name,
part of inds or part of synopsis
yield_query(bool): If true, only return mongo query dict for use in
compound querying.
Returns:
Cases ordered by date.
If yield_query is True, does not pose query to db;
instead returns corresponding query dict
that can be reused in compound queries or for testing.
"""
LOG.debug('Fetch all cases')
query = query or {}
# Prioritize when both owner and collaborator params are present
if collaborator and owner:
collaborator = None # depends on [control=['if'], data=[]]
if collaborator:
LOG.debug('Use collaborator {0}'.format(collaborator))
query['collaborators'] = collaborator # depends on [control=['if'], data=[]]
if owner:
LOG.debug('Use owner {0}'.format(owner))
query['owner'] = owner # depends on [control=['if'], data=[]]
if skip_assigned:
query['assignees'] = {'$exists': False} # depends on [control=['if'], data=[]]
if has_causatives:
query['causatives'] = {'$exists': True, '$ne': []} # depends on [control=['if'], data=[]]
if reruns:
query['rerun_requested'] = True # depends on [control=['if'], data=[]]
if status:
query['status'] = status # depends on [control=['if'], data=[]]
elif finished:
query['status'] = {'$in': ['solved', 'archived']} # depends on [control=['if'], data=[]]
if research_requested:
query['research_requested'] = True # depends on [control=['if'], data=[]]
if is_research:
query['is_research'] = {'$exists': True, '$eq': True} # depends on [control=['if'], data=[]]
if phenotype_terms:
query['phenotype_terms'] = {'$exists': True, '$ne': []} # depends on [control=['if'], data=[]]
if pinned:
query['suspects'] = {'$exists': True, '$ne': []} # depends on [control=['if'], data=[]]
if cohort:
query['cohorts'] = {'$exists': True, '$ne': []} # depends on [control=['if'], data=[]]
if name_query:
name_value = name_query.split(':')[-1] # capture ant value provided after query descriptor
users = self.user_collection.find({'name': {'$regex': name_query, '$options': 'i'}})
if users.count() > 0:
query['assignees'] = {'$in': [user['email'] for user in users]} # depends on [control=['if'], data=[]]
elif name_query.startswith('HP:'):
LOG.debug('HPO case query')
if name_value:
query['phenotype_terms.phenotype_id'] = name_query # depends on [control=['if'], data=[]]
else: # query for cases with no HPO terms
query['$or'] = [{'phenotype_terms': {'$size': 0}}, {'phenotype_terms': {'$exists': False}}] # depends on [control=['if'], data=[]]
elif name_query.startswith('PG:'):
LOG.debug('PG case query')
if name_value:
phenotype_group_query = name_query.replace('PG:', 'HP:')
query['phenotype_groups.phenotype_id'] = phenotype_group_query # depends on [control=['if'], data=[]]
else: # query for cases with no phenotype groups
query['$or'] = [{'phenotype_groups': {'$size': 0}}, {'phenotype_groups': {'$exists': False}}] # depends on [control=['if'], data=[]]
elif name_query.startswith('synopsis:'):
if name_value:
query['$text'] = {'$search': name_value} # depends on [control=['if'], data=[]]
else: # query for cases with missing synopsis
query['synopsis'] = '' # depends on [control=['if'], data=[]]
elif name_query.startswith('cohort:'):
query['cohorts'] = name_value # depends on [control=['if'], data=[]]
elif name_query.startswith('panel:'):
query['panels'] = {'$elemMatch': {'panel_name': name_value, 'is_default': True}} # depends on [control=['if'], data=[]]
elif name_query.startswith('status:'):
status_query = name_query.replace('status:', '')
query['status'] = status_query # depends on [control=['if'], data=[]]
elif name_query.startswith('is_research'):
query['is_research'] = {'$exists': True, '$eq': True} # depends on [control=['if'], data=[]]
else:
query['$or'] = [{'display_name': {'$regex': name_query}}, {'individuals.display_name': {'$regex': name_query}}] # depends on [control=['if'], data=[]]
if yield_query:
return query # depends on [control=['if'], data=[]]
LOG.info('Get cases with query {0}'.format(query))
return self.case_collection.find(query).sort('updated_at', -1)
|
def get_multi(
    self, keys, missing=None, deferred=None, transaction=None, eventual=False
):
    """Retrieve entities, along with their attributes.
    :type keys: list of :class:`google.cloud.datastore.key.Key`
    :param keys: The keys to be retrieved from the datastore.
    :type missing: list
    :param missing: (Optional) If a list is passed, the key-only entities
                    returned by the backend as "missing" will be copied
                    into it. The list must be empty.
    :type deferred: list
    :param deferred: (Optional) If a list is passed, the keys returned
                     by the backend as "deferred" will be copied into it.
                     The list must be empty.
    :type transaction:
        :class:`~google.cloud.datastore.transaction.Transaction`
    :param transaction: (Optional) Transaction to use for read consistency.
                        If not passed, uses current transaction, if set.
    :type eventual: bool
    :param eventual: (Optional) Defaults to strongly consistent (False).
                     Setting True will use eventual consistency, but cannot
                     be used inside a transaction or will raise ValueError.
    :rtype: list of :class:`google.cloud.datastore.entity.Entity`
    :returns: The requested entities.
    :raises: :class:`ValueError` if ``missing`` or ``deferred`` is a
             non-empty list.
    :raises: :class:`ValueError` if one or more of ``keys`` has a project
             which does not match our project.
    :raises: :class:`ValueError` if eventual is True and in a transaction.
    """
    if not keys:
        return []

    # The docstring promises an error when a caller passes a non-empty
    # ``missing``/``deferred`` list; enforce that instead of silently
    # discarding the caller's entries via the slice assignments below.
    if missing is not None and len(missing) != 0:
        raise ValueError("missing must be None or an empty list")
    if deferred is not None and len(deferred) != 0:
        raise ValueError("deferred must be None or an empty list")

    # All keys must belong to this client's project.
    ids = set(key.project for key in keys)
    for current_id in ids:
        if current_id != self.project:
            raise ValueError("Keys do not match project")

    if transaction is None:
        transaction = self.current_transaction

    entity_pbs = _extended_lookup(
        datastore_api=self._datastore_api,
        project=self.project,
        key_pbs=[key.to_protobuf() for key in keys],
        eventual=eventual,
        missing=missing,
        deferred=deferred,
        transaction_id=transaction and transaction.id,
    )

    # _extended_lookup fills ``missing``/``deferred`` with protobufs;
    # convert them in place to the public entity/key types.
    if missing is not None:
        missing[:] = [
            helpers.entity_from_protobuf(missed_pb) for missed_pb in missing
        ]

    if deferred is not None:
        deferred[:] = [
            helpers.key_from_protobuf(deferred_pb) for deferred_pb in deferred
        ]

    return [helpers.entity_from_protobuf(entity_pb) for entity_pb in entity_pbs]
|
def function[get_multi, parameter[self, keys, missing, deferred, transaction, eventual]]:
constant[Retrieve entities, along with their attributes.
:type keys: list of :class:`google.cloud.datastore.key.Key`
:param keys: The keys to be retrieved from the datastore.
:type missing: list
:param missing: (Optional) If a list is passed, the key-only entities
returned by the backend as "missing" will be copied
into it. If the list is not empty, an error will occur.
:type deferred: list
:param deferred: (Optional) If a list is passed, the keys returned
by the backend as "deferred" will be copied into it.
If the list is not empty, an error will occur.
:type transaction:
:class:`~google.cloud.datastore.transaction.Transaction`
:param transaction: (Optional) Transaction to use for read consistency.
If not passed, uses current transaction, if set.
:type eventual: bool
:param eventual: (Optional) Defaults to strongly consistent (False).
Setting True will use eventual consistency, but cannot
be used inside a transaction or will raise ValueError.
:rtype: list of :class:`google.cloud.datastore.entity.Entity`
:returns: The requested entities.
:raises: :class:`ValueError` if one or more of ``keys`` has a project
which does not match our project.
:raises: :class:`ValueError` if eventual is True and in a transaction.
]
if <ast.UnaryOp object at 0x7da204344a60> begin[:]
return[list[[]]]
variable[ids] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da204345e10>]]
for taget[name[current_id]] in starred[name[ids]] begin[:]
if compare[name[current_id] not_equal[!=] name[self].project] begin[:]
<ast.Raise object at 0x7da204346620>
if compare[name[transaction] is constant[None]] begin[:]
variable[transaction] assign[=] name[self].current_transaction
variable[entity_pbs] assign[=] call[name[_extended_lookup], parameter[]]
if compare[name[missing] is_not constant[None]] begin[:]
call[name[missing]][<ast.Slice object at 0x7da2043456f0>] assign[=] <ast.ListComp object at 0x7da204347070>
if compare[name[deferred] is_not constant[None]] begin[:]
call[name[deferred]][<ast.Slice object at 0x7da204344460>] assign[=] <ast.ListComp object at 0x7da204346dd0>
return[<ast.ListComp object at 0x7da204347730>]
|
keyword[def] identifier[get_multi] (
identifier[self] , identifier[keys] , identifier[missing] = keyword[None] , identifier[deferred] = keyword[None] , identifier[transaction] = keyword[None] , identifier[eventual] = keyword[False]
):
literal[string]
keyword[if] keyword[not] identifier[keys] :
keyword[return] []
identifier[ids] = identifier[set] ( identifier[key] . identifier[project] keyword[for] identifier[key] keyword[in] identifier[keys] )
keyword[for] identifier[current_id] keyword[in] identifier[ids] :
keyword[if] identifier[current_id] != identifier[self] . identifier[project] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[transaction] keyword[is] keyword[None] :
identifier[transaction] = identifier[self] . identifier[current_transaction]
identifier[entity_pbs] = identifier[_extended_lookup] (
identifier[datastore_api] = identifier[self] . identifier[_datastore_api] ,
identifier[project] = identifier[self] . identifier[project] ,
identifier[key_pbs] =[ identifier[key] . identifier[to_protobuf] () keyword[for] identifier[key] keyword[in] identifier[keys] ],
identifier[eventual] = identifier[eventual] ,
identifier[missing] = identifier[missing] ,
identifier[deferred] = identifier[deferred] ,
identifier[transaction_id] = identifier[transaction] keyword[and] identifier[transaction] . identifier[id] ,
)
keyword[if] identifier[missing] keyword[is] keyword[not] keyword[None] :
identifier[missing] [:]=[
identifier[helpers] . identifier[entity_from_protobuf] ( identifier[missed_pb] ) keyword[for] identifier[missed_pb] keyword[in] identifier[missing]
]
keyword[if] identifier[deferred] keyword[is] keyword[not] keyword[None] :
identifier[deferred] [:]=[
identifier[helpers] . identifier[key_from_protobuf] ( identifier[deferred_pb] ) keyword[for] identifier[deferred_pb] keyword[in] identifier[deferred]
]
keyword[return] [ identifier[helpers] . identifier[entity_from_protobuf] ( identifier[entity_pb] ) keyword[for] identifier[entity_pb] keyword[in] identifier[entity_pbs] ]
|
def get_multi(self, keys, missing=None, deferred=None, transaction=None, eventual=False):
"""Retrieve entities, along with their attributes.
:type keys: list of :class:`google.cloud.datastore.key.Key`
:param keys: The keys to be retrieved from the datastore.
:type missing: list
:param missing: (Optional) If a list is passed, the key-only entities
returned by the backend as "missing" will be copied
into it. If the list is not empty, an error will occur.
:type deferred: list
:param deferred: (Optional) If a list is passed, the keys returned
by the backend as "deferred" will be copied into it.
If the list is not empty, an error will occur.
:type transaction:
:class:`~google.cloud.datastore.transaction.Transaction`
:param transaction: (Optional) Transaction to use for read consistency.
If not passed, uses current transaction, if set.
:type eventual: bool
:param eventual: (Optional) Defaults to strongly consistent (False).
Setting True will use eventual consistency, but cannot
be used inside a transaction or will raise ValueError.
:rtype: list of :class:`google.cloud.datastore.entity.Entity`
:returns: The requested entities.
:raises: :class:`ValueError` if one or more of ``keys`` has a project
which does not match our project.
:raises: :class:`ValueError` if eventual is True and in a transaction.
"""
if not keys:
return [] # depends on [control=['if'], data=[]]
ids = set((key.project for key in keys))
for current_id in ids:
if current_id != self.project:
raise ValueError('Keys do not match project') # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['current_id']]
if transaction is None:
transaction = self.current_transaction # depends on [control=['if'], data=['transaction']]
entity_pbs = _extended_lookup(datastore_api=self._datastore_api, project=self.project, key_pbs=[key.to_protobuf() for key in keys], eventual=eventual, missing=missing, deferred=deferred, transaction_id=transaction and transaction.id)
if missing is not None:
missing[:] = [helpers.entity_from_protobuf(missed_pb) for missed_pb in missing] # depends on [control=['if'], data=['missing']]
if deferred is not None:
deferred[:] = [helpers.key_from_protobuf(deferred_pb) for deferred_pb in deferred] # depends on [control=['if'], data=['deferred']]
return [helpers.entity_from_protobuf(entity_pb) for entity_pb in entity_pbs]
|
def titles(self, key, value):
    """Populate the ``titles`` key."""
    # Build the title record once from the MARC subfields.
    entry = {
        'source': value.get('9'),
        'subtitle': value.get('b'),
        'title': value.get('a'),
    }
    # Non-245 fields: hand the record back to the caller unchanged.
    if not key.startswith('245'):
        return entry
    # 245 fields take precedence: prepend to any existing titles.
    self.setdefault('titles', []).insert(0, entry)
|
def function[titles, parameter[self, key, value]]:
constant[Populate the ``titles`` key.]
if <ast.UnaryOp object at 0x7da18ede4ac0> begin[:]
return[dictionary[[<ast.Constant object at 0x7da18bcc8460>, <ast.Constant object at 0x7da18bcc8e80>, <ast.Constant object at 0x7da18bcc8280>], [<ast.Call object at 0x7da18bccb040>, <ast.Call object at 0x7da18bcc92d0>, <ast.Call object at 0x7da18bcc9780>]]]
call[call[name[self].setdefault, parameter[constant[titles], list[[]]]].insert, parameter[constant[0], dictionary[[<ast.Constant object at 0x7da207f00a30>, <ast.Constant object at 0x7da207f020e0>, <ast.Constant object at 0x7da207f02ec0>], [<ast.Call object at 0x7da207f02f20>, <ast.Call object at 0x7da207f02680>, <ast.Call object at 0x7da207f01ab0>]]]]
|
keyword[def] identifier[titles] ( identifier[self] , identifier[key] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[key] . identifier[startswith] ( literal[string] ):
keyword[return] {
literal[string] : identifier[value] . identifier[get] ( literal[string] ),
literal[string] : identifier[value] . identifier[get] ( literal[string] ),
literal[string] : identifier[value] . identifier[get] ( literal[string] ),
}
identifier[self] . identifier[setdefault] ( literal[string] ,[]). identifier[insert] ( literal[int] ,{
literal[string] : identifier[value] . identifier[get] ( literal[string] ),
literal[string] : identifier[value] . identifier[get] ( literal[string] ),
literal[string] : identifier[value] . identifier[get] ( literal[string] ),
})
|
def titles(self, key, value):
"""Populate the ``titles`` key."""
if not key.startswith('245'):
return {'source': value.get('9'), 'subtitle': value.get('b'), 'title': value.get('a')} # depends on [control=['if'], data=[]]
self.setdefault('titles', []).insert(0, {'source': value.get('9'), 'subtitle': value.get('b'), 'title': value.get('a')})
|
def match_sp_sep(first, second):
    """
    Verify that all the values in 'first' appear in 'second'.
    The values can either be in the form of lists or as space separated
    items.
    :param first:
    :param second:
    :return: True/False
    """
    def _as_sets(val):
        # Lists: each element is a space-separated group kept together.
        # Strings: each token becomes its own singleton set.
        if isinstance(val, list):
            return [set(item.split(" ")) for item in val]
        return [{token} for token in val.split(" ")]

    groups = _as_sets(first)
    candidates = _as_sets(second)
    # every group from 'first' must appear verbatim among 'second'
    return all(group in candidates for group in groups)
|
def function[match_sp_sep, parameter[first, second]]:
constant[
Verify that all the values in 'first' appear in 'second'.
The values can either be in the form of lists or as space separated
items.
:param first:
:param second:
:return: True/False
]
if call[name[isinstance], parameter[name[first], name[list]]] begin[:]
variable[one] assign[=] <ast.ListComp object at 0x7da20c7c91b0>
if call[name[isinstance], parameter[name[second], name[list]]] begin[:]
variable[other] assign[=] <ast.ListComp object at 0x7da20c7c8910>
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da20c7caec0>]] begin[:]
return[constant[False]]
return[constant[True]]
|
keyword[def] identifier[match_sp_sep] ( identifier[first] , identifier[second] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[first] , identifier[list] ):
identifier[one] =[ identifier[set] ( identifier[v] . identifier[split] ( literal[string] )) keyword[for] identifier[v] keyword[in] identifier[first] ]
keyword[else] :
identifier[one] =[{ identifier[v] } keyword[for] identifier[v] keyword[in] identifier[first] . identifier[split] ( literal[string] )]
keyword[if] identifier[isinstance] ( identifier[second] , identifier[list] ):
identifier[other] =[ identifier[set] ( identifier[v] . identifier[split] ( literal[string] )) keyword[for] identifier[v] keyword[in] identifier[second] ]
keyword[else] :
identifier[other] =[{ identifier[v] } keyword[for] identifier[v] keyword[in] identifier[second] . identifier[split] ( literal[string] )]
keyword[if] identifier[any] ( identifier[rt] keyword[not] keyword[in] identifier[other] keyword[for] identifier[rt] keyword[in] identifier[one] ):
keyword[return] keyword[False]
keyword[return] keyword[True]
|
def match_sp_sep(first, second):
"""
Verify that all the values in 'first' appear in 'second'.
The values can either be in the form of lists or as space separated
items.
:param first:
:param second:
:return: True/False
"""
if isinstance(first, list):
one = [set(v.split(' ')) for v in first] # depends on [control=['if'], data=[]]
else:
one = [{v} for v in first.split(' ')]
if isinstance(second, list):
other = [set(v.split(' ')) for v in second] # depends on [control=['if'], data=[]]
else:
other = [{v} for v in second.split(' ')]
# all values in one must appear in other
if any((rt not in other for rt in one)):
return False # depends on [control=['if'], data=[]]
return True
|
def setHighlightColor(self, color):
    """
    Sets the primary color used for highlighting this item.

    The alternate highlight color is kept in sync as a slightly
    darker shade (110%) of the primary color.

    :param color | <QColor>
    """
    highlight = QColor(color)
    self._highlightColor = highlight
    self.setAlternateHighlightColor(highlight.darker(110))
|
def function[setHighlightColor, parameter[self, color]]:
constant[
Sets the primary color used for highlighting this item.
:param color | <QColor>
]
name[self]._highlightColor assign[=] call[name[QColor], parameter[name[color]]]
call[name[self].setAlternateHighlightColor, parameter[call[name[self]._highlightColor.darker, parameter[constant[110]]]]]
|
keyword[def] identifier[setHighlightColor] ( identifier[self] , identifier[color] ):
literal[string]
identifier[self] . identifier[_highlightColor] = identifier[QColor] ( identifier[color] )
identifier[self] . identifier[setAlternateHighlightColor] ( identifier[self] . identifier[_highlightColor] . identifier[darker] ( literal[int] ))
|
def setHighlightColor(self, color):
"""
Sets the primary color used for highlighting this item.
:param color | <QColor>
"""
self._highlightColor = QColor(color)
self.setAlternateHighlightColor(self._highlightColor.darker(110))
|
def resample(self, new_timepoints, extrapolate=False):
    """
    Use linear interpolation to resample trajectory values.
    The new values are interpolated for the provided time points.
    This is generally done before comparing or averaging trajectories.
    :param new_timepoints: the new time points
    :param extrapolate: whether time points outside the current range are
        allowed. If extrapolate=False, out-of-range points raise ValueError.
        NOTE(review): when True, ``np.interp`` clamps to the endpoint
        values rather than extrapolating linearly -- confirm intended.
    :return: a new trajectory.
    :rtype: :class:`~means.simulation.trajectory.Trajectory`
    :raises ValueError: if ``extrapolate`` is False and any new time point
        lies outside the existing time range.
    """
    if not extrapolate:
        # Raise ValueError instead of bare Exception (ValueError is a
        # subclass, so callers catching Exception still work).
        if min(new_timepoints) < min(self.timepoints):
            raise ValueError("Some of the new time points are before all existing time points. If you really want to extrapolate, use `extrapolate=True`")
        if max(new_timepoints) > max(self.timepoints):
            raise ValueError("Some of the new time points are after all existing time points. If you really want to extrapolate, use `extrapolate=True`")

    new_values = np.interp(new_timepoints, self.timepoints, self.values)
    return Trajectory(new_timepoints, new_values, self.description)
|
def function[resample, parameter[self, new_timepoints, extrapolate]]:
constant[
Use linear interpolation to resample trajectory values.
The new values are interpolated for the provided time points.
This is generally before comparing or averaging trajectories.
:param new_timepoints: the new time points
:param extrapolate: whether extrapolation should be performed when some new time points
are out of the current time range. if extrapolate=False, it would raise an exception.
:return: a new trajectory.
:rtype: :class:`~means.simulation.trajectory.Trajectory`
]
if <ast.UnaryOp object at 0x7da207f036d0> begin[:]
if compare[call[name[min], parameter[name[self].timepoints]] greater[>] call[name[min], parameter[name[new_timepoints]]]] begin[:]
<ast.Raise object at 0x7da207f03a60>
if compare[call[name[max], parameter[name[self].timepoints]] less[<] call[name[max], parameter[name[new_timepoints]]]] begin[:]
<ast.Raise object at 0x7da207f01600>
variable[new_values] assign[=] call[name[np].interp, parameter[name[new_timepoints], name[self].timepoints, name[self].values]]
return[call[name[Trajectory], parameter[name[new_timepoints], name[new_values], name[self].description]]]
|
keyword[def] identifier[resample] ( identifier[self] , identifier[new_timepoints] , identifier[extrapolate] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[extrapolate] :
keyword[if] identifier[min] ( identifier[self] . identifier[timepoints] )> identifier[min] ( identifier[new_timepoints] ):
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[max] ( identifier[self] . identifier[timepoints] )< identifier[max] ( identifier[new_timepoints] ):
keyword[raise] identifier[Exception] ( literal[string] )
identifier[new_values] = identifier[np] . identifier[interp] ( identifier[new_timepoints] , identifier[self] . identifier[timepoints] , identifier[self] . identifier[values] )
keyword[return] identifier[Trajectory] ( identifier[new_timepoints] , identifier[new_values] , identifier[self] . identifier[description] )
|
def resample(self, new_timepoints, extrapolate=False):
"""
Use linear interpolation to resample trajectory values.
The new values are interpolated for the provided time points.
This is generally before comparing or averaging trajectories.
:param new_timepoints: the new time points
:param extrapolate: whether extrapolation should be performed when some new time points
are out of the current time range. if extrapolate=False, it would raise an exception.
:return: a new trajectory.
:rtype: :class:`~means.simulation.trajectory.Trajectory`
"""
if not extrapolate:
if min(self.timepoints) > min(new_timepoints):
raise Exception('Some of the new time points are before any time points. If you really want to extrapolate, use `extrapolate=True`') # depends on [control=['if'], data=[]]
if max(self.timepoints) < max(new_timepoints):
raise Exception('Some of the new time points are after any time points. If you really want to extrapolate, use `extrapolate=True`') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
new_values = np.interp(new_timepoints, self.timepoints, self.values)
return Trajectory(new_timepoints, new_values, self.description)
|
def sim_mlipns(src, tar, threshold=0.25, max_mismatches=2):
    """Return the MLIPNS similarity of two strings.
    This is a wrapper for :py:meth:`MLIPNS.sim`.
    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    threshold : float
        A number [0, 1] indicating the maximum similarity score, below which
        the strings are considered 'similar' (0.25 by default)
    max_mismatches : int
        A number indicating the allowable number of mismatches to remove before
        declaring two strings not similar (2 by default)
    Returns
    -------
    float
        MLIPNS similarity
    Examples
    --------
    >>> sim_mlipns('cat', 'hat')
    1.0
    >>> sim_mlipns('Niall', 'Neil')
    0.0
    >>> sim_mlipns('aluminum', 'Catalan')
    0.0
    >>> sim_mlipns('ATCG', 'TAGC')
    0.0
    """
    # Delegate to a fresh MLIPNS measure instance.
    measure = MLIPNS()
    return measure.sim(src, tar, threshold, max_mismatches)
|
def function[sim_mlipns, parameter[src, tar, threshold, max_mismatches]]:
constant[Return the MLIPNS similarity of two strings.
This is a wrapper for :py:meth:`MLIPNS.sim`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
threshold : float
A number [0, 1] indicating the maximum similarity score, below which
the strings are considered 'similar' (0.25 by default)
max_mismatches : int
A number indicating the allowable number of mismatches to remove before
declaring two strings not similar (2 by default)
Returns
-------
float
MLIPNS similarity
Examples
--------
>>> sim_mlipns('cat', 'hat')
1.0
>>> sim_mlipns('Niall', 'Neil')
0.0
>>> sim_mlipns('aluminum', 'Catalan')
0.0
>>> sim_mlipns('ATCG', 'TAGC')
0.0
]
return[call[call[name[MLIPNS], parameter[]].sim, parameter[name[src], name[tar], name[threshold], name[max_mismatches]]]]
|
keyword[def] identifier[sim_mlipns] ( identifier[src] , identifier[tar] , identifier[threshold] = literal[int] , identifier[max_mismatches] = literal[int] ):
literal[string]
keyword[return] identifier[MLIPNS] (). identifier[sim] ( identifier[src] , identifier[tar] , identifier[threshold] , identifier[max_mismatches] )
|
def sim_mlipns(src, tar, threshold=0.25, max_mismatches=2):
"""Return the MLIPNS similarity of two strings.
This is a wrapper for :py:meth:`MLIPNS.sim`.
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
threshold : float
A number [0, 1] indicating the maximum similarity score, below which
the strings are considered 'similar' (0.25 by default)
max_mismatches : int
A number indicating the allowable number of mismatches to remove before
declaring two strings not similar (2 by default)
Returns
-------
float
MLIPNS similarity
Examples
--------
>>> sim_mlipns('cat', 'hat')
1.0
>>> sim_mlipns('Niall', 'Neil')
0.0
>>> sim_mlipns('aluminum', 'Catalan')
0.0
>>> sim_mlipns('ATCG', 'TAGC')
0.0
"""
return MLIPNS().sim(src, tar, threshold, max_mismatches)
|
def get_resource(self, device_id, resource_path):
    """Get a resource.
    :param str device_id: ID of the device (Required)
    :param str path: Path of the resource to get (Required)
    :returns: Device resource
    :rtype Resource
    """
    # First resource whose path matches, or None when absent.
    found = next(
        (res for res in self.list_resources(device_id)
         if res.path == resource_path),
        None,
    )
    if found is None:
        raise CloudApiException("Resource not found")
    return found
|
def function[get_resource, parameter[self, device_id, resource_path]]:
constant[Get a resource.
:param str device_id: ID of the device (Required)
:param str path: Path of the resource to get (Required)
:returns: Device resource
:rtype Resource
]
variable[resources] assign[=] call[name[self].list_resources, parameter[name[device_id]]]
for taget[name[r]] in starred[name[resources]] begin[:]
if compare[name[r].path equal[==] name[resource_path]] begin[:]
return[name[r]]
<ast.Raise object at 0x7da18dc9b2e0>
|
keyword[def] identifier[get_resource] ( identifier[self] , identifier[device_id] , identifier[resource_path] ):
literal[string]
identifier[resources] = identifier[self] . identifier[list_resources] ( identifier[device_id] )
keyword[for] identifier[r] keyword[in] identifier[resources] :
keyword[if] identifier[r] . identifier[path] == identifier[resource_path] :
keyword[return] identifier[r]
keyword[raise] identifier[CloudApiException] ( literal[string] )
|
def get_resource(self, device_id, resource_path):
"""Get a resource.
:param str device_id: ID of the device (Required)
:param str path: Path of the resource to get (Required)
:returns: Device resource
:rtype Resource
"""
resources = self.list_resources(device_id)
for r in resources:
if r.path == resource_path:
return r # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['r']]
raise CloudApiException('Resource not found')
|
def ginga_to_matplotlib_cmap(cm, name=None):
    """Convert Ginga colormap to matplotlib's."""
    from matplotlib.colors import ListedColormap

    # Default to the Ginga colormap's own name.
    if name is None:
        name = cm.name
    color_array = np.asarray(cm.clst)
    return ListedColormap(color_array, name=name, N=len(color_array))
|
def function[ginga_to_matplotlib_cmap, parameter[cm, name]]:
constant[Convert Ginga colormap to matplotlib's.]
if compare[name[name] is constant[None]] begin[:]
variable[name] assign[=] name[cm].name
from relative_module[matplotlib.colors] import module[ListedColormap]
variable[carr] assign[=] call[name[np].asarray, parameter[name[cm].clst]]
variable[mpl_cm] assign[=] call[name[ListedColormap], parameter[name[carr]]]
return[name[mpl_cm]]
|
keyword[def] identifier[ginga_to_matplotlib_cmap] ( identifier[cm] , identifier[name] = keyword[None] ):
literal[string]
keyword[if] identifier[name] keyword[is] keyword[None] :
identifier[name] = identifier[cm] . identifier[name]
keyword[from] identifier[matplotlib] . identifier[colors] keyword[import] identifier[ListedColormap]
identifier[carr] = identifier[np] . identifier[asarray] ( identifier[cm] . identifier[clst] )
identifier[mpl_cm] = identifier[ListedColormap] ( identifier[carr] , identifier[name] = identifier[name] , identifier[N] = identifier[len] ( identifier[carr] ))
keyword[return] identifier[mpl_cm]
|
def ginga_to_matplotlib_cmap(cm, name=None):
"""Convert Ginga colormap to matplotlib's."""
if name is None:
name = cm.name # depends on [control=['if'], data=['name']]
from matplotlib.colors import ListedColormap
carr = np.asarray(cm.clst)
mpl_cm = ListedColormap(carr, name=name, N=len(carr))
return mpl_cm
|
def transform_distance(self, dx, dy):
    """Transforms the distance vector ``(dx, dy)`` by this matrix.
    This is similar to :meth:`transform_point`
    except that the translation components of the transformation
    are ignored.
    The calculation of the returned vector is as follows::
        dx2 = dx1 * xx + dy1 * xy
        dy2 = dx1 * yx + dy1 * yy
    Affine transformations are position invariant,
    so the same vector always transforms to the same vector.
    If ``(x1, y1)`` transforms to ``(x2, y2)``
    then ``(x1 + dx1, y1 + dy1)`` will transform
    to ``(x1 + dx2, y1 + dy2)`` for all values of ``x1`` and ``x2``.
    :param dx: X component of a distance vector.
    :param dy: Y component of a distance vector.
    :type dx: float
    :type dy: float
    :returns: A ``(new_dx, new_dy)`` tuple of floats.
    """
    # cairo transforms the vector in place; pass pointers to each element.
    vec = ffi.new('double[2]', [dx, dy])
    cairo.cairo_matrix_transform_distance(self._pointer, vec + 0, vec + 1)
    return tuple(vec)
|
def function[transform_distance, parameter[self, dx, dy]]:
constant[Transforms the distance vector ``(dx, dy)`` by this matrix.
This is similar to :meth:`transform_point`
except that the translation components of the transformation
are ignored.
The calculation of the returned vector is as follows::
dx2 = dx1 * xx + dy1 * xy
dy2 = dx1 * yx + dy1 * yy
Affine transformations are position invariant,
so the same vector always transforms to the same vector.
If ``(x1, y1)`` transforms to ``(x2, y2)``
then ``(x1 + dx1, y1 + dy1)`` will transform
to ``(x1 + dx2, y1 + dy2)`` for all values of ``x1`` and ``x2``.
:param dx: X component of a distance vector.
:param dy: Y component of a distance vector.
:type dx: float
:type dy: float
:returns: A ``(new_dx, new_dy)`` tuple of floats.
]
variable[xy] assign[=] call[name[ffi].new, parameter[constant[double[2]], list[[<ast.Name object at 0x7da1b100c310>, <ast.Name object at 0x7da1b100c820>]]]]
call[name[cairo].cairo_matrix_transform_distance, parameter[name[self]._pointer, binary_operation[name[xy] + constant[0]], binary_operation[name[xy] + constant[1]]]]
return[call[name[tuple], parameter[name[xy]]]]
|
keyword[def] identifier[transform_distance] ( identifier[self] , identifier[dx] , identifier[dy] ):
literal[string]
identifier[xy] = identifier[ffi] . identifier[new] ( literal[string] ,[ identifier[dx] , identifier[dy] ])
identifier[cairo] . identifier[cairo_matrix_transform_distance] ( identifier[self] . identifier[_pointer] , identifier[xy] + literal[int] , identifier[xy] + literal[int] )
keyword[return] identifier[tuple] ( identifier[xy] )
|
def transform_distance(self, dx, dy):
    """Apply this matrix to the distance vector ``(dx, dy)``.

    Unlike :meth:`transform_point`, the translation components of the
    matrix are ignored; only the linear part is applied::

        dx2 = dx1 * xx + dy1 * xy
        dy2 = dx1 * yx + dy1 * yy

    Because affine transformations are position invariant, the same
    vector always maps to the same vector: if ``(x1, y1)`` maps to
    ``(x2, y2)``, then ``(x1 + dx1, y1 + dy1)`` maps to
    ``(x1 + dx2, y1 + dy2)`` for all ``x1`` and ``x2``.

    :param dx: X component of a distance vector.
    :param dy: Y component of a distance vector.
    :type dx: float
    :type dy: float
    :returns: A ``(new_dx, new_dy)`` tuple of floats.
    """
    # The C API transforms in place, so marshal both components into a
    # two-element double array and hand cairo pointers to each slot.
    vector = ffi.new('double[2]', [dx, dy])
    cairo.cairo_matrix_transform_distance(
        self._pointer, vector + 0, vector + 1)
    return tuple(vector)
|
def menuconfig(kconf):
    """
    Launches the curses-based configuration interface, returning only
    after the user exits the UI.

    kconf:
        Kconfig instance to be configured
    """
    # Module-level state shared with the curses UI helper functions
    global _kconf
    global _conf_filename
    global _conf_changed
    global _minconf_filename
    global _show_all
    _kconf = kconf
    # Load existing configuration and set _conf_changed True if it is outdated
    # (i.e. the loaded file does not match the current symbol values)
    _conf_changed = _load_config()
    # Filename to save configuration to
    _conf_filename = standard_config_filename()
    # Filename to save minimal configuration to
    _minconf_filename = "defconfig"
    # Any visible items in the top menu?
    _show_all = False
    if not _shown_nodes(kconf.top_node):
        # Nothing visible. Start in show-all mode and try again.
        _show_all = True
        if not _shown_nodes(kconf.top_node):
            # Give up. The implementation relies on always having a selected
            # node.
            print("Empty configuration -- nothing to configure.\n"
                  "Check that environment variables are set properly.")
            return
    # Disable warnings. They get mangled in curses mode, and we deal with
    # errors ourselves.
    kconf.disable_warnings()
    # Make curses use the locale settings specified in the environment
    locale.setlocale(locale.LC_ALL, "")
    # Try to fix Unicode issues on systems with bad defaults
    if _CONVERT_C_LC_CTYPE_TO_UTF8:
        _convert_c_lc_ctype_to_utf8()
    # Get rid of the delay between pressing ESC and jumping to the parent menu,
    # unless the user has set ESCDELAY (see ncurses(3)). This makes the UI much
    # smoother to work with.
    #
    # Note: This is strictly pretty iffy, since escape codes for e.g. cursor
    # keys start with ESC, but I've never seen it cause problems in practice
    # (probably because it's unlikely that the escape code for a key would get
    # split up across read()s, at least with a terminal emulator). Please
    # report if you run into issues. Some suitable small default value could be
    # used here instead in that case. Maybe it's silly to not put in the
    # smallest imperceptible delay here already, though I don't like guessing.
    #
    # (From a quick glance at the ncurses source code, ESCDELAY might only be
    # relevant for mouse events there, so maybe escapes are assumed to arrive
    # in one piece already...)
    os.environ.setdefault("ESCDELAY", "0")
    # Enter curses mode. _menuconfig() returns a string to print on exit, after
    # curses has been de-initialized.
    print(curses.wrapper(_menuconfig))
|
def function[menuconfig, parameter[kconf]]:
constant[
Launches the configuration interface, returning after the user exits.
kconf:
Kconfig instance to be configured
]
<ast.Global object at 0x7da18f810ee0>
<ast.Global object at 0x7da18f811f90>
<ast.Global object at 0x7da18f8126e0>
<ast.Global object at 0x7da18f812680>
<ast.Global object at 0x7da18f813dc0>
variable[_kconf] assign[=] name[kconf]
variable[_conf_changed] assign[=] call[name[_load_config], parameter[]]
variable[_conf_filename] assign[=] call[name[standard_config_filename], parameter[]]
variable[_minconf_filename] assign[=] constant[defconfig]
variable[_show_all] assign[=] constant[False]
if <ast.UnaryOp object at 0x7da1b20aa140> begin[:]
variable[_show_all] assign[=] constant[True]
if <ast.UnaryOp object at 0x7da1b20a9e40> begin[:]
call[name[print], parameter[constant[Empty configuration -- nothing to configure.
Check that environment variables are set properly.]]]
return[None]
call[name[kconf].disable_warnings, parameter[]]
call[name[locale].setlocale, parameter[name[locale].LC_ALL, constant[]]]
if name[_CONVERT_C_LC_CTYPE_TO_UTF8] begin[:]
call[name[_convert_c_lc_ctype_to_utf8], parameter[]]
call[name[os].environ.setdefault, parameter[constant[ESCDELAY], constant[0]]]
call[name[print], parameter[call[name[curses].wrapper, parameter[name[_menuconfig]]]]]
|
keyword[def] identifier[menuconfig] ( identifier[kconf] ):
literal[string]
keyword[global] identifier[_kconf]
keyword[global] identifier[_conf_filename]
keyword[global] identifier[_conf_changed]
keyword[global] identifier[_minconf_filename]
keyword[global] identifier[_show_all]
identifier[_kconf] = identifier[kconf]
identifier[_conf_changed] = identifier[_load_config] ()
identifier[_conf_filename] = identifier[standard_config_filename] ()
identifier[_minconf_filename] = literal[string]
identifier[_show_all] = keyword[False]
keyword[if] keyword[not] identifier[_shown_nodes] ( identifier[kconf] . identifier[top_node] ):
identifier[_show_all] = keyword[True]
keyword[if] keyword[not] identifier[_shown_nodes] ( identifier[kconf] . identifier[top_node] ):
identifier[print] ( literal[string]
literal[string] )
keyword[return]
identifier[kconf] . identifier[disable_warnings] ()
identifier[locale] . identifier[setlocale] ( identifier[locale] . identifier[LC_ALL] , literal[string] )
keyword[if] identifier[_CONVERT_C_LC_CTYPE_TO_UTF8] :
identifier[_convert_c_lc_ctype_to_utf8] ()
identifier[os] . identifier[environ] . identifier[setdefault] ( literal[string] , literal[string] )
identifier[print] ( identifier[curses] . identifier[wrapper] ( identifier[_menuconfig] ))
|
def menuconfig(kconf):
"""
Launches the configuration interface, returning after the user exits.
kconf:
Kconfig instance to be configured
"""
global _kconf
global _conf_filename
global _conf_changed
global _minconf_filename
global _show_all
_kconf = kconf
# Load existing configuration and set _conf_changed True if it is outdated
_conf_changed = _load_config()
# Filename to save configuration to
_conf_filename = standard_config_filename()
# Filename to save minimal configuration to
_minconf_filename = 'defconfig'
# Any visible items in the top menu?
_show_all = False
if not _shown_nodes(kconf.top_node):
# Nothing visible. Start in show-all mode and try again.
_show_all = True
if not _shown_nodes(kconf.top_node):
# Give up. The implementation relies on always having a selected
# node.
print('Empty configuration -- nothing to configure.\nCheck that environment variables are set properly.')
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Disable warnings. They get mangled in curses mode, and we deal with
# errors ourselves.
kconf.disable_warnings()
# Make curses use the locale settings specified in the environment
locale.setlocale(locale.LC_ALL, '')
# Try to fix Unicode issues on systems with bad defaults
if _CONVERT_C_LC_CTYPE_TO_UTF8:
_convert_c_lc_ctype_to_utf8() # depends on [control=['if'], data=[]]
# Get rid of the delay between pressing ESC and jumping to the parent menu,
# unless the user has set ESCDELAY (see ncurses(3)). This makes the UI much
# smoother to work with.
#
# Note: This is strictly pretty iffy, since escape codes for e.g. cursor
# keys start with ESC, but I've never seen it cause problems in practice
# (probably because it's unlikely that the escape code for a key would get
# split up across read()s, at least with a terminal emulator). Please
# report if you run into issues. Some suitable small default value could be
# used here instead in that case. Maybe it's silly to not put in the
# smallest imperceptible delay here already, though I don't like guessing.
#
# (From a quick glance at the ncurses source code, ESCDELAY might only be
# relevant for mouse events there, so maybe escapes are assumed to arrive
# in one piece already...)
os.environ.setdefault('ESCDELAY', '0')
# Enter curses mode. _menuconfig() returns a string to print on exit, after
# curses has been de-initialized.
print(curses.wrapper(_menuconfig))
|
def longest_path_weighted_nodes(G, source, target, weights=None):
    """
    The longest path problem is the problem of finding a simple path of maximum
    length in a given graph. While for general graph, this problem is NP-hard,
    but if G is a directed acyclic graph (DAG), longest paths in G can be found
    in linear time with dynamic programming.

    :param G: directed acyclic ``networkx`` graph
    :param source: start node of the path
    :param target: end node of the path
    :param weights: optional dict mapping node -> weight (default weight 1)
    :returns: ``(path, score)`` where ``path`` is the node list from
        ``source`` to ``target`` and ``score`` is its total weighted length

    >>> G = nx.DiGraph([(1, 2), (1, 3), (2, "M"), (3, "M")])
    >>> longest_path_weighted_nodes(G, 1, "M", weights={1: 1, 2: 1, 3: 2, "M": 1})
    ([1, 3, 'M'], 4)
    """
    assert nx.is_directed_acyclic_graph(G)
    # list() so len() and slicing work even when topological_sort returns a
    # generator (NetworkX >= 2.0)
    tree = list(nx.topological_sort(G))
    node_to_index = dict((t, i) for i, t in enumerate(tree))
    nnodes = len(tree)
    weights = [weights.get(x, 1) for x in tree] if weights else [1] * nnodes
    score, fromc = weights[:], [-1] * nnodes
    si = node_to_index[source]
    ti = node_to_index[target]
    # Relax edges in topological order; only nodes between source and target
    # can contribute to a source->target path.
    for a in tree[si: ti]:
        ai = node_to_index[a]
        for b, w in G[a].items():
            bi = node_to_index[b]
            w = w.get('weight', 1)
            d = score[ai] + weights[bi] * w  # Favor heavier edges
            if d <= score[bi]:
                continue
            score[bi] = d  # Update longest distance so far
            fromc[bi] = ai
    # Capture the target's score BEFORE backtracking: the loop below walks ti
    # down to the -1 sentinel, so the original `score[ti]` after the loop
    # returned score[-1] (the last topological node), not the target's score.
    best = score[ti]
    # Backtracking
    path = []
    while ti != -1:
        path.append(ti)
        ti = fromc[ti]
    path = [tree[x] for x in path[::-1]]
    return path, best
|
def function[longest_path_weighted_nodes, parameter[G, source, target, weights]]:
constant[
The longest path problem is the problem of finding a simple path of maximum
length in a given graph. While for general graph, this problem is NP-hard,
but if G is a directed acyclic graph (DAG), longest paths in G can be found
in linear time with dynamic programming.
>>> G = nx.DiGraph([(1, 2), (1, 3), (2, "M"), (3, "M")])
>>> longest_path_weighted_nodes(G, 1, "M", weights={1: 1, 2: 1, 3: 2, "M": 1})
([1, 3, 'M'], 4)
]
assert[call[name[nx].is_directed_acyclic_graph, parameter[name[G]]]]
variable[tree] assign[=] call[name[nx].topological_sort, parameter[name[G]]]
variable[node_to_index] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da20e74b7f0>]]
variable[nnodes] assign[=] call[name[len], parameter[name[tree]]]
variable[weights] assign[=] <ast.IfExp object at 0x7da20e74b850>
<ast.Tuple object at 0x7da20e74b070> assign[=] tuple[[<ast.Subscript object at 0x7da20e74b2e0>, <ast.BinOp object at 0x7da20e7494b0>]]
variable[si] assign[=] call[name[node_to_index]][name[source]]
variable[ti] assign[=] call[name[node_to_index]][name[target]]
for taget[name[a]] in starred[call[name[tree]][<ast.Slice object at 0x7da18f58d570>]] begin[:]
variable[ai] assign[=] call[name[node_to_index]][name[a]]
for taget[tuple[[<ast.Name object at 0x7da18f58e620>, <ast.Name object at 0x7da18f58cfd0>]]] in starred[call[call[name[G]][name[a]].items, parameter[]]] begin[:]
variable[bi] assign[=] call[name[node_to_index]][name[b]]
variable[w] assign[=] call[name[w].get, parameter[constant[weight], constant[1]]]
variable[d] assign[=] binary_operation[call[name[score]][name[ai]] + binary_operation[call[name[weights]][name[bi]] * name[w]]]
if compare[name[d] less_or_equal[<=] call[name[score]][name[bi]]] begin[:]
continue
call[name[score]][name[bi]] assign[=] name[d]
call[name[fromc]][name[bi]] assign[=] name[ai]
variable[path] assign[=] list[[]]
while compare[name[ti] not_equal[!=] <ast.UnaryOp object at 0x7da18f58c5e0>] begin[:]
call[name[path].append, parameter[name[ti]]]
variable[ti] assign[=] call[name[fromc]][name[ti]]
variable[path] assign[=] <ast.ListComp object at 0x7da18f58c0d0>
return[tuple[[<ast.Name object at 0x7da18f58e260>, <ast.Subscript object at 0x7da18f58d870>]]]
|
keyword[def] identifier[longest_path_weighted_nodes] ( identifier[G] , identifier[source] , identifier[target] , identifier[weights] = keyword[None] ):
literal[string]
keyword[assert] identifier[nx] . identifier[is_directed_acyclic_graph] ( identifier[G] )
identifier[tree] = identifier[nx] . identifier[topological_sort] ( identifier[G] )
identifier[node_to_index] = identifier[dict] (( identifier[t] , identifier[i] ) keyword[for] identifier[i] , identifier[t] keyword[in] identifier[enumerate] ( identifier[tree] ))
identifier[nnodes] = identifier[len] ( identifier[tree] )
identifier[weights] =[ identifier[weights] . identifier[get] ( identifier[x] , literal[int] ) keyword[for] identifier[x] keyword[in] identifier[tree] ] keyword[if] identifier[weights] keyword[else] [ literal[int] ]* identifier[nnodes]
identifier[score] , identifier[fromc] = identifier[weights] [:],[- literal[int] ]* identifier[nnodes]
identifier[si] = identifier[node_to_index] [ identifier[source] ]
identifier[ti] = identifier[node_to_index] [ identifier[target] ]
keyword[for] identifier[a] keyword[in] identifier[tree] [ identifier[si] : identifier[ti] ]:
identifier[ai] = identifier[node_to_index] [ identifier[a] ]
keyword[for] identifier[b] , identifier[w] keyword[in] identifier[G] [ identifier[a] ]. identifier[items] ():
identifier[bi] = identifier[node_to_index] [ identifier[b] ]
identifier[w] = identifier[w] . identifier[get] ( literal[string] , literal[int] )
identifier[d] = identifier[score] [ identifier[ai] ]+ identifier[weights] [ identifier[bi] ]* identifier[w]
keyword[if] identifier[d] <= identifier[score] [ identifier[bi] ]:
keyword[continue]
identifier[score] [ identifier[bi] ]= identifier[d]
identifier[fromc] [ identifier[bi] ]= identifier[ai]
identifier[path] =[]
keyword[while] identifier[ti] !=- literal[int] :
identifier[path] . identifier[append] ( identifier[ti] )
identifier[ti] = identifier[fromc] [ identifier[ti] ]
identifier[path] =[ identifier[tree] [ identifier[x] ] keyword[for] identifier[x] keyword[in] identifier[path] [::- literal[int] ]]
keyword[return] identifier[path] , identifier[score] [ identifier[ti] ]
|
def longest_path_weighted_nodes(G, source, target, weights=None):
"""
The longest path problem is the problem of finding a simple path of maximum
length in a given graph. While for general graph, this problem is NP-hard,
but if G is a directed acyclic graph (DAG), longest paths in G can be found
in linear time with dynamic programming.
>>> G = nx.DiGraph([(1, 2), (1, 3), (2, "M"), (3, "M")])
>>> longest_path_weighted_nodes(G, 1, "M", weights={1: 1, 2: 1, 3: 2, "M": 1})
([1, 3, 'M'], 4)
"""
assert nx.is_directed_acyclic_graph(G)
tree = nx.topological_sort(G)
node_to_index = dict(((t, i) for (i, t) in enumerate(tree)))
nnodes = len(tree)
weights = [weights.get(x, 1) for x in tree] if weights else [1] * nnodes
(score, fromc) = (weights[:], [-1] * nnodes)
si = node_to_index[source]
ti = node_to_index[target]
for a in tree[si:ti]:
ai = node_to_index[a]
for (b, w) in G[a].items():
bi = node_to_index[b]
w = w.get('weight', 1)
d = score[ai] + weights[bi] * w # Favor heavier edges
if d <= score[bi]:
continue # depends on [control=['if'], data=[]]
score[bi] = d # Update longest distance so far
fromc[bi] = ai # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['a']]
# Backtracking
path = []
while ti != -1:
path.append(ti)
ti = fromc[ti] # depends on [control=['while'], data=['ti']]
path = [tree[x] for x in path[::-1]]
return (path, score[ti])
|
def get_as_boolean(self, key):
    """
    Converts map element into a boolean or returns false if conversion is not possible.

    :param key: an index of element to get.
    :return: boolean value ot the element or false if conversion is not supported.
    """
    # Fetch the raw element, then delegate the coercion to the converter.
    raw = self.get(key)
    return BooleanConverter.to_boolean(raw)
|
def function[get_as_boolean, parameter[self, key]]:
constant[
Converts map element into a boolean or returns false if conversion is not possible.
:param key: an index of element to get.
:return: boolean value ot the element or false if conversion is not supported.
]
variable[value] assign[=] call[name[self].get, parameter[name[key]]]
return[call[name[BooleanConverter].to_boolean, parameter[name[value]]]]
|
keyword[def] identifier[get_as_boolean] ( identifier[self] , identifier[key] ):
literal[string]
identifier[value] = identifier[self] . identifier[get] ( identifier[key] )
keyword[return] identifier[BooleanConverter] . identifier[to_boolean] ( identifier[value] )
|
def get_as_boolean(self, key):
"""
Converts map element into a boolean or returns false if conversion is not possible.
:param key: an index of element to get.
:return: boolean value ot the element or false if conversion is not supported.
"""
value = self.get(key)
return BooleanConverter.to_boolean(value)
|
def inkscape_export(input_file, output_file, export_flag="-A", dpi=90, inkscape_binpath=None):
    """Call Inkscape to export ``input_file`` to ``output_file``.

    The export format is selected by the Inkscape CLI flag given in
    ``export_flag``.

    Parameters
    ----------
    input_file: str
        Path to the input file
    output_file: str
        Path to the output file
    export_flag: str
        Inkscape CLI flag to indicate the type of the output file
    dpi: int
        Resolution passed to Inkscape's ``--export-dpi`` option
    inkscape_binpath: str or None
        Optional path to the Inkscape binary

    Returns
    -------
    return_value
        Command call return value

    Raises
    ------
    IOError
        If ``input_file`` does not exist.
    """
    if not os.path.exists(input_file):
        log.error('File {} not found.'.format(input_file))
        raise IOError((0, 'File not found.', input_file))
    # Flags of the form "--export-x=value" need no separator; bare flags
    # like "-A" need a space before the output path.
    if '=' not in export_flag:
        export_flag += ' '
    arg_strings = [
        '--without-gui',
        '--export-text-to-path',
        '{}"{}"'.format(export_flag, output_file),
        '--export-dpi={}'.format(dpi),
        '"{}"'.format(input_file),
    ]
    return call_inkscape(arg_strings, inkscape_binpath=inkscape_binpath)
|
def function[inkscape_export, parameter[input_file, output_file, export_flag, dpi, inkscape_binpath]]:
constant[ Call Inkscape to export the input_file to output_file using the
specific export argument flag for the output file type.
Parameters
----------
input_file: str
Path to the input file
output_file: str
Path to the output file
export_flag: str
Inkscape CLI flag to indicate the type of the output file
Returns
-------
return_value
Command call return value
]
if <ast.UnaryOp object at 0x7da2054a5a80> begin[:]
call[name[log].error, parameter[call[constant[File {} not found.].format, parameter[name[input_file]]]]]
<ast.Raise object at 0x7da2054a6c20>
if compare[constant[=] <ast.NotIn object at 0x7da2590d7190> name[export_flag]] begin[:]
<ast.AugAssign object at 0x7da2054a70a0>
variable[arg_strings] assign[=] list[[]]
<ast.AugAssign object at 0x7da2054a7490>
<ast.AugAssign object at 0x7da2054a64a0>
<ast.AugAssign object at 0x7da2054a4a60>
<ast.AugAssign object at 0x7da2054a54e0>
<ast.AugAssign object at 0x7da2054a5660>
return[call[name[call_inkscape], parameter[name[arg_strings]]]]
|
keyword[def] identifier[inkscape_export] ( identifier[input_file] , identifier[output_file] , identifier[export_flag] = literal[string] , identifier[dpi] = literal[int] , identifier[inkscape_binpath] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[input_file] ):
identifier[log] . identifier[error] ( literal[string] . identifier[format] ( identifier[input_file] ))
keyword[raise] identifier[IOError] (( literal[int] , literal[string] , identifier[input_file] ))
keyword[if] literal[string] keyword[not] keyword[in] identifier[export_flag] :
identifier[export_flag] += literal[string]
identifier[arg_strings] =[]
identifier[arg_strings] +=[ literal[string] ]
identifier[arg_strings] +=[ literal[string] ]
identifier[arg_strings] +=[ literal[string] . identifier[format] ( identifier[export_flag] , identifier[output_file] )]
identifier[arg_strings] +=[ literal[string] . identifier[format] ( identifier[dpi] )]
identifier[arg_strings] +=[ literal[string] . identifier[format] ( identifier[input_file] )]
keyword[return] identifier[call_inkscape] ( identifier[arg_strings] , identifier[inkscape_binpath] = identifier[inkscape_binpath] )
|
def inkscape_export(input_file, output_file, export_flag='-A', dpi=90, inkscape_binpath=None):
""" Call Inkscape to export the input_file to output_file using the
specific export argument flag for the output file type.
Parameters
----------
input_file: str
Path to the input file
output_file: str
Path to the output file
export_flag: str
Inkscape CLI flag to indicate the type of the output file
Returns
-------
return_value
Command call return value
"""
if not os.path.exists(input_file):
log.error('File {} not found.'.format(input_file))
raise IOError((0, 'File not found.', input_file)) # depends on [control=['if'], data=[]]
if '=' not in export_flag:
export_flag += ' ' # depends on [control=['if'], data=['export_flag']]
arg_strings = []
arg_strings += ['--without-gui']
arg_strings += ['--export-text-to-path']
arg_strings += ['{}"{}"'.format(export_flag, output_file)]
arg_strings += ['--export-dpi={}'.format(dpi)]
arg_strings += ['"{}"'.format(input_file)]
return call_inkscape(arg_strings, inkscape_binpath=inkscape_binpath)
|
def _add_to_tbz(tfile, filename, data_str):
    '''
    Adds string data to an open tarfile as a member named ``filename``.

    The string is UTF-8 encoded and wrapped in an in-memory buffer so no
    temporary file is needed (https://stackoverflow.com/a/52724508).
    '''
    payload = data_str.encode('utf-8')
    member = tarfile.TarInfo(name=filename)
    member.size = len(payload)
    tfile.addfile(tarinfo=member, fileobj=io.BytesIO(payload))
|
def function[_add_to_tbz, parameter[tfile, filename, data_str]]:
constant[
Adds string data to a tarfile
]
variable[encoded_data] assign[=] call[name[data_str].encode, parameter[constant[utf-8]]]
variable[ti] assign[=] call[name[tarfile].TarInfo, parameter[]]
name[ti].size assign[=] call[name[len], parameter[name[encoded_data]]]
call[name[tfile].addfile, parameter[]]
|
keyword[def] identifier[_add_to_tbz] ( identifier[tfile] , identifier[filename] , identifier[data_str] ):
literal[string]
identifier[encoded_data] = identifier[data_str] . identifier[encode] ( literal[string] )
identifier[ti] = identifier[tarfile] . identifier[TarInfo] ( identifier[name] = identifier[filename] )
identifier[ti] . identifier[size] = identifier[len] ( identifier[encoded_data] )
identifier[tfile] . identifier[addfile] ( identifier[tarinfo] = identifier[ti] , identifier[fileobj] = identifier[io] . identifier[BytesIO] ( identifier[encoded_data] ))
|
def _add_to_tbz(tfile, filename, data_str):
"""
Adds string data to a tarfile
"""
# Create a bytesio object for adding to a tarfile
# https://stackoverflow.com/a/52724508
encoded_data = data_str.encode('utf-8')
ti = tarfile.TarInfo(name=filename)
ti.size = len(encoded_data)
tfile.addfile(tarinfo=ti, fileobj=io.BytesIO(encoded_data))
|
def _ConvertParamType(self, paramType):
    """
    Convert vmodl.reflect.DynamicTypeManager.ParamTypeInfo to pyVmomi param
    definition

    Returns a ``(name, type, version, flags, privId)`` tuple, or ``None``
    when no param type info is given.
    """
    if not paramType:
        return None
    # Annotations are translated into pyVmomi flag bits.
    flags = self._ConvertAnnotations(paramType.annotation)
    return (paramType.name, paramType.type, paramType.version,
            flags, paramType.privId)
|
def function[_ConvertParamType, parameter[self, paramType]]:
constant[
Convert vmodl.reflect.DynamicTypeManager.ParamTypeInfo to pyVmomi param
definition
]
if name[paramType] begin[:]
variable[name] assign[=] name[paramType].name
variable[version] assign[=] name[paramType].version
variable[aType] assign[=] name[paramType].type
variable[flags] assign[=] call[name[self]._ConvertAnnotations, parameter[name[paramType].annotation]]
variable[privId] assign[=] name[paramType].privId
variable[param] assign[=] tuple[[<ast.Name object at 0x7da20c990ca0>, <ast.Name object at 0x7da20c990880>, <ast.Name object at 0x7da20c9935b0>, <ast.Name object at 0x7da20c992cb0>, <ast.Name object at 0x7da20c993d60>]]
return[name[param]]
|
keyword[def] identifier[_ConvertParamType] ( identifier[self] , identifier[paramType] ):
literal[string]
keyword[if] identifier[paramType] :
identifier[name] = identifier[paramType] . identifier[name]
identifier[version] = identifier[paramType] . identifier[version]
identifier[aType] = identifier[paramType] . identifier[type]
identifier[flags] = identifier[self] . identifier[_ConvertAnnotations] ( identifier[paramType] . identifier[annotation] )
identifier[privId] = identifier[paramType] . identifier[privId]
identifier[param] =( identifier[name] , identifier[aType] , identifier[version] , identifier[flags] , identifier[privId] )
keyword[else] :
identifier[param] = keyword[None]
keyword[return] identifier[param]
|
def _ConvertParamType(self, paramType):
"""
Convert vmodl.reflect.DynamicTypeManager.ParamTypeInfo to pyVmomi param
definition
"""
if paramType:
name = paramType.name
version = paramType.version
aType = paramType.type
flags = self._ConvertAnnotations(paramType.annotation)
privId = paramType.privId
param = (name, aType, version, flags, privId) # depends on [control=['if'], data=[]]
else:
param = None
return param
|
def _augment_info(info):
    """Fill out the template information.

    Mutates ``info`` in place, deriving presentation fields from the
    user-supplied ones, and returns the same dict:

    - ``description_header``: an ``=`` underline matching the description
    - ``component_name``: capitalized plugin name
    - ``year``: current year
    - ``license_longtext``: placeholder (empty)
    - ``keyword_list``: keywords quoted and comma-joined, e.g. ``'a', 'b'``
    """
    info['description_header'] = "=" * len(info['description'])
    info['component_name'] = info['plugin_name'].capitalize()
    info['year'] = time.localtime().tm_year
    info['license_longtext'] = ''
    # Quote each whitespace-separated keyword and join with ", ".
    # (Replaces a manual concatenate-then-strip-trailing-comma loop that
    # also printed debug output to stdout.)
    info['keyword_list'] = u", ".join(
        u"'" + str(keyword) + u"'"
        for keyword in info['keywords'].split(" "))
    return info
|
def function[_augment_info, parameter[info]]:
constant[Fill out the template information]
call[name[info]][constant[description_header]] assign[=] binary_operation[constant[=] * call[name[len], parameter[call[name[info]][constant[description]]]]]
call[name[info]][constant[component_name]] assign[=] call[call[name[info]][constant[plugin_name]].capitalize, parameter[]]
call[name[info]][constant[year]] assign[=] call[name[time].localtime, parameter[]].tm_year
call[name[info]][constant[license_longtext]] assign[=] constant[]
call[name[info]][constant[keyword_list]] assign[=] constant[]
for taget[name[keyword]] in starred[call[call[name[info]][constant[keywords]].split, parameter[constant[ ]]]] begin[:]
call[name[print], parameter[name[keyword]]]
<ast.AugAssign object at 0x7da1b0ef6170>
call[name[print], parameter[call[name[info]][constant[keyword_list]]]]
if compare[call[name[len], parameter[call[name[info]][constant[keyword_list]]]] greater[>] constant[0]] begin[:]
call[name[info]][constant[keyword_list]] assign[=] call[call[name[info]][constant[keyword_list]]][<ast.Slice object at 0x7da1b0ef4850>]
return[name[info]]
|
keyword[def] identifier[_augment_info] ( identifier[info] ):
literal[string]
identifier[info] [ literal[string] ]= literal[string] * identifier[len] ( identifier[info] [ literal[string] ])
identifier[info] [ literal[string] ]= identifier[info] [ literal[string] ]. identifier[capitalize] ()
identifier[info] [ literal[string] ]= identifier[time] . identifier[localtime] (). identifier[tm_year]
identifier[info] [ literal[string] ]= literal[string]
identifier[info] [ literal[string] ]= literal[string]
keyword[for] identifier[keyword] keyword[in] identifier[info] [ literal[string] ]. identifier[split] ( literal[string] ):
identifier[print] ( identifier[keyword] )
identifier[info] [ literal[string] ]+= literal[string] + identifier[str] ( identifier[keyword] )+ literal[string]
identifier[print] ( identifier[info] [ literal[string] ])
keyword[if] identifier[len] ( identifier[info] [ literal[string] ])> literal[int] :
identifier[info] [ literal[string] ]= identifier[info] [ literal[string] ][:- literal[int] ]
keyword[return] identifier[info]
|
def _augment_info(info):
"""Fill out the template information"""
info['description_header'] = '=' * len(info['description'])
info['component_name'] = info['plugin_name'].capitalize()
info['year'] = time.localtime().tm_year
info['license_longtext'] = ''
info['keyword_list'] = u''
for keyword in info['keywords'].split(' '):
print(keyword)
info['keyword_list'] += u"'" + str(keyword) + u"', " # depends on [control=['for'], data=['keyword']]
print(info['keyword_list'])
if len(info['keyword_list']) > 0:
# strip last comma
info['keyword_list'] = info['keyword_list'][:-2] # depends on [control=['if'], data=[]]
return info
|
def get_filestore_instance(img_dir=None, data_dir=None):
    """Return an instance of FileStore.

    Instances are memoized per ``(img_dir, data_dir)`` pair in the
    module-level ``_filestore_instances`` cache.
    """
    global _filestore_instances
    cache_key = "%s:%s" % (img_dir, data_dir)
    if cache_key not in _filestore_instances:
        _filestore_instances[cache_key] = FileStore(
            img_dir=img_dir, data_dir=data_dir
        )
    return _filestore_instances[cache_key]
|
def function[get_filestore_instance, parameter[img_dir, data_dir]]:
constant[Return an instance of FileStore.]
<ast.Global object at 0x7da20c991900>
variable[key] assign[=] binary_operation[constant[%s:%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da20c992110>, <ast.Name object at 0x7da20c993430>]]]
<ast.Try object at 0x7da20c990490>
return[name[instance]]
|
keyword[def] identifier[get_filestore_instance] ( identifier[img_dir] = keyword[None] , identifier[data_dir] = keyword[None] ):
literal[string]
keyword[global] identifier[_filestore_instances]
identifier[key] = literal[string] %( identifier[img_dir] , identifier[data_dir] )
keyword[try] :
identifier[instance] = identifier[_filestore_instances] [ identifier[key] ]
keyword[except] identifier[KeyError] :
identifier[instance] = identifier[FileStore] (
identifier[img_dir] = identifier[img_dir] , identifier[data_dir] = identifier[data_dir]
)
identifier[_filestore_instances] [ identifier[key] ]= identifier[instance]
keyword[return] identifier[instance]
|
def get_filestore_instance(img_dir=None, data_dir=None):
"""Return an instance of FileStore."""
global _filestore_instances
key = '%s:%s' % (img_dir, data_dir)
try:
instance = _filestore_instances[key] # depends on [control=['try'], data=[]]
except KeyError:
instance = FileStore(img_dir=img_dir, data_dir=data_dir)
_filestore_instances[key] = instance # depends on [control=['except'], data=[]]
return instance
|
def extend(self, base, key, value=None):
    """
    Adds a new definition to this enumerated type, extending the given
    base type. This will create a new key for the type and register
    it as a new viable option from the system, however, it will also
    register its base information so you can use enum.base to retrieve
    the root type.

    :param base | <variant> | value for this enumeration
           key | <str> | new key for the value
           value | <variant> | if None is supplied, it will be auto-assigned

    :usage |>>> from projex.enum import enum
           |>>> Types = enum('Integer', 'Boolean')
           |>>> Types.Integer
           |1
           |>>> Types.Boolean
           |2
           |>>> Types.extend(Types.Integer, 'BigInteger')
           |>>> Types.BigInteger
           |4
           |>>> Types.base(Types.BigInteger)
           |1
    """
    # Register the key first (auto-assigning a value if needed), then
    # remember which base value it derives from.
    assigned = self.add(key, value)
    self._bases[assigned] = base
|
def function[extend, parameter[self, base, key, value]]:
constant[
Adds a new definition to this enumerated type, extending the given
base type. This will create a new key for the type and register
it as a new viable option from the system, however, it will also
register its base information so you can use enum.base to retrieve
the root type.
:param base | <variant> | value for this enumeration
key | <str> | new key for the value
value | <variant> | if None is supplied, it will be auto-assigned
:usage |>>> from projex.enum import enum
|>>> Types = enum('Integer', 'Boolean')
|>>> Types.Integer
|1
|>>> Types.Boolean
|2
|>>> Types.extend(Types.Integer, 'BigInteger')
|>>> Types.BigInteger
|4
|>>> Types.base(Types.BigInteger)
|1
]
variable[new_val] assign[=] call[name[self].add, parameter[name[key], name[value]]]
call[name[self]._bases][name[new_val]] assign[=] name[base]
|
keyword[def] identifier[extend] ( identifier[self] , identifier[base] , identifier[key] , identifier[value] = keyword[None] ):
literal[string]
identifier[new_val] = identifier[self] . identifier[add] ( identifier[key] , identifier[value] )
identifier[self] . identifier[_bases] [ identifier[new_val] ]= identifier[base]
|
def extend(self, base, key, value=None):
"""
Adds a new definition to this enumerated type, extending the given
base type. This will create a new key for the type and register
it as a new viable option from the system, however, it will also
register its base information so you can use enum.base to retrieve
the root type.
:param base | <variant> | value for this enumeration
key | <str> | new key for the value
value | <variant> | if None is supplied, it will be auto-assigned
:usage |>>> from projex.enum import enum
|>>> Types = enum('Integer', 'Boolean')
|>>> Types.Integer
|1
|>>> Types.Boolean
|2
|>>> Types.extend(Types.Integer, 'BigInteger')
|>>> Types.BigInteger
|4
|>>> Types.base(Types.BigInteger)
|1
"""
new_val = self.add(key, value)
self._bases[new_val] = base
|
def wikidata_get(identifier):
    """Fetch an entity's JSON record from Wikidata.

    Retrieves the ``Special:EntityData`` document for the given entity or
    property identifier (e.g. ``'P248'`` or ``'Q42'``) and returns the
    decoded JSON payload.

    Example URL: https://www.wikidata.org/wiki/Special:EntityData/P248.json

    :param identifier: Wikidata entity/property id such as ``'P248'``.
    :returns: the parsed JSON document (a dict).
    :raises requests.Timeout: if the server does not respond in time.
    :raises requests.HTTPError: if the server answers with an error status.
    """
    url = 'https://www.wikidata.org/wiki/Special:EntityData/{}.json'.format(identifier)
    #logging.info(url)
    # requests has no default timeout, so without one this call could hang
    # forever on a stalled connection.
    response = requests.get(url, timeout=30)
    # Fail loudly on HTTP errors instead of attempting to JSON-decode an
    # error page.
    response.raise_for_status()
    return json.loads(response.content)
|
def function[wikidata_get, parameter[identifier]]:
constant[
https://www.wikidata.org/wiki/Special:EntityData/P248.json
]
variable[url] assign[=] call[constant[https://www.wikidata.org/wiki/Special:EntityData/{}.json].format, parameter[name[identifier]]]
return[call[name[json].loads, parameter[call[name[requests].get, parameter[name[url]]].content]]]
|
keyword[def] identifier[wikidata_get] ( identifier[identifier] ):
literal[string]
identifier[url] = literal[string] . identifier[format] ( identifier[identifier] )
keyword[return] identifier[json] . identifier[loads] ( identifier[requests] . identifier[get] ( identifier[url] ). identifier[content] )
|
def wikidata_get(identifier):
"""
https://www.wikidata.org/wiki/Special:EntityData/P248.json
"""
url = 'https://www.wikidata.org/wiki/Special:EntityData/{}.json'.format(identifier)
#logging.info(url)
return json.loads(requests.get(url).content)
|
def get_rank():
    """
    Gets distributed rank or returns zero if distributed is not initialized.
    """
    dist = torch.distributed
    if not (dist.is_available() and dist.is_initialized()):
        # Single-process / non-distributed run: behave as rank 0.
        return 0
    return dist.get_rank()
|
def function[get_rank, parameter[]]:
constant[
Gets distributed rank or returns zero if distributed is not initialized.
]
if <ast.BoolOp object at 0x7da2054a7010> begin[:]
variable[rank] assign[=] call[name[torch].distributed.get_rank, parameter[]]
return[name[rank]]
|
keyword[def] identifier[get_rank] ():
literal[string]
keyword[if] identifier[torch] . identifier[distributed] . identifier[is_available] () keyword[and] identifier[torch] . identifier[distributed] . identifier[is_initialized] ():
identifier[rank] = identifier[torch] . identifier[distributed] . identifier[get_rank] ()
keyword[else] :
identifier[rank] = literal[int]
keyword[return] identifier[rank]
|
def get_rank():
"""
Gets distributed rank or returns zero if distributed is not initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
rank = torch.distributed.get_rank() # depends on [control=['if'], data=[]]
else:
rank = 0
return rank
|
def create_multidim_plot(parameters, samples, labels=None,
                         mins=None, maxs=None, expected_parameters=None,
                         expected_parameters_color='r',
                         plot_marginal=True, plot_scatter=True,
                         marginal_percentiles=None, contour_percentiles=None,
                         marginal_title=True, marginal_linestyle='-',
                         zvals=None, show_colorbar=True, cbar_label=None,
                         vmin=None, vmax=None, scatter_cmap='plasma',
                         plot_density=False, plot_contours=True,
                         density_cmap='viridis',
                         contour_color=None, hist_color='black',
                         line_color=None, fill_color='gray',
                         use_kombine=False, fig=None, axis_dict=None):
    """Generate a figure with several plots and histograms.
    Parameters
    ----------
    parameters: list
        Names of the variables to be plotted.
    samples : FieldArray
        A field array of the samples to plot.
    labels: dict, optional
        A dictionary mapping parameters to labels. If none provided, will just
        use the parameter strings as the labels.
    mins : {None, dict}, optional
        Minimum value for the axis of each variable in `parameters`.
        If None, it will use the minimum of the corresponding variable in
        `samples`.
    maxs : {None, dict}, optional
        Maximum value for the axis of each variable in `parameters`.
        If None, it will use the maximum of the corresponding variable in
        `samples`.
    expected_parameters : {None, dict}, optional
        Expected values of `parameters`, as a dictionary mapping parameter
        names -> values. A cross will be plotted at the location of the
        expected parameters on axes that plot any of the expected parameters.
    expected_parameters_color : {'r', string}, optional
        What color to make the expected parameters cross.
    plot_marginal : {True, bool}
        Plot the marginalized distribution on the diagonals. If False, the
        diagonal axes will be turned off.
    plot_scatter : {True, bool}
        Plot each sample point as a scatter plot.
    marginal_percentiles : {None, array}
        What percentiles to draw lines at on the 1D histograms.
        If None, will draw lines at `[5, 50, 95]` (i.e., the bounds on the
        upper 90th percentile and the median).
    marginal_title : bool, optional
        Add a title over the 1D marginal plots that gives an estimated value
        +/- uncertainty. The estimated value is the pecentile halfway between
        the max/min of ``maginal_percentiles``, while the uncertainty is given
        by the max/min of the ``marginal_percentiles. If no
        ``marginal_percentiles`` are specified, the median +/- 95/5 percentiles
        will be quoted.
    marginal_linestyle : str, optional
        What line style to use for the marginal histograms.
    contour_percentiles : {None, array}
        What percentile contours to draw on the scatter plots. If None,
        will plot the 50th and 90th percentiles.
    zvals : {None, array}
        An array to use for coloring the scatter plots. If None, scatter points
        will be the same color.
    show_colorbar : {True, bool}
        Show the colorbar of zvalues used for the scatter points. A ValueError
        will be raised if zvals is None and this is True.
    cbar_label : {None, str}
        Specify a label to add to the colorbar.
    vmin: {None, float}, optional
        Minimum value for the colorbar. If None, will use the minimum of zvals.
    vmax: {None, float}, optional
        Maximum value for the colorbar. If None, will use the maxmimum of
        zvals.
    scatter_cmap : {'plasma', string}
        The color map to use for the scatter points. Default is 'plasma'.
    plot_density : {False, bool}
        Plot the density of points as a color map.
    plot_contours : {True, bool}
        Draw contours showing the 50th and 90th percentile confidence regions.
    density_cmap : {'viridis', string}
        The color map to use for the density plot.
    contour_color : {None, string}
        The color to use for the contour lines. Defaults to white for
        density plots, navy for scatter plots without zvals, and black
        otherwise.
    use_kombine : {False, bool}
        Use kombine's KDE to calculate density. Otherwise, will use
        `scipy.stats.gaussian_kde.` Default is False.
    Returns
    -------
    fig : pyplot.figure
        The figure that was created.
    axis_dict : dict
        A dictionary mapping the parameter combinations to the axis and their
        location in the subplots grid; i.e., the key, values are:
        `{('param1', 'param2'): (pyplot.axes, row index, column index)}`
    """
    if labels is None:
        labels = {p: p for p in parameters}
    else:
        # copy the dict: labels may be rewritten below when common offsets
        # are removed, and we must not mutate the caller's dict
        labels = {p: lbl for p, lbl in labels.items()}
    if expected_parameters is not None:
        # copy for the same reason: offsets are subtracted from the expected
        # values below, and that must not leak back to the caller
        expected_parameters = dict(expected_parameters)
    # set up the figure with a grid of axes
    # if only plotting 2 parameters, make the marginal plots smaller
    nparams = len(parameters)
    if nparams == 2:
        width_ratios = [3, 1]
        height_ratios = [1, 3]
    else:
        width_ratios = height_ratios = None
    # only plot scatter if more than one parameter
    plot_scatter = plot_scatter and nparams > 1
    # Sort zvals to get higher values on top in scatter plots
    if plot_scatter:
        if zvals is not None:
            sort_indices = zvals.argsort()
            zvals = zvals[sort_indices]
            samples = samples[sort_indices]
            if contour_color is None:
                contour_color = 'k'
        elif show_colorbar:
            raise ValueError("must provide z values to create a colorbar")
        else:
            # just make all scatter points same color
            zvals = 'gray'
            if plot_contours and contour_color is None:
                contour_color = 'navy'
    # convert samples to a dictionary to avoid re-computing derived parameters
    # every time they are needed
    samples = dict([[p, samples[p]] for p in parameters])
    # values for axis bounds
    if mins is None:
        mins = {p: samples[p].min() for p in parameters}
    else:
        # copy the dict
        mins = {p: val for p, val in mins.items()}
    if maxs is None:
        maxs = {p: samples[p].max() for p in parameters}
    else:
        # copy the dict
        maxs = {p: val for p, val in maxs.items()}
    # remove common offsets
    for pi, param in enumerate(parameters):
        values, offset = remove_common_offset(samples[param])
        if offset != 0:
            # we'll add the offset removed to the label
            labels[param] = '{} - {:d}'.format(labels[param], offset)
            samples[param] = values
            mins[param] = mins[param] - float(offset)
            maxs[param] = maxs[param] - float(offset)
            # also remove from expected parameters, if they were provided
            if expected_parameters is not None:
                try:
                    expected_parameters[param] -= offset
                except KeyError:
                    pass
    # create the axis grid
    if fig is None and axis_dict is None:
        fig, axis_dict = create_axes_grid(
            parameters, labels=labels,
            width_ratios=width_ratios, height_ratios=height_ratios,
            no_diagonals=not plot_marginal)
    # Diagonals...
    if plot_marginal:
        for pi, param in enumerate(parameters):
            ax, _, _ = axis_dict[param, param]
            # if only plotting 2 parameters and on the second parameter,
            # rotate the marginal plot
            rotated = nparams == 2 and pi == nparams-1
            # see if there are expected values
            if expected_parameters is not None:
                try:
                    expected_value = expected_parameters[param]
                except KeyError:
                    expected_value = None
            else:
                expected_value = None
            create_marginalized_hist(
                ax, samples[param], label=labels[param],
                color=hist_color, fillcolor=fill_color,
                linestyle=marginal_linestyle, linecolor=line_color,
                title=marginal_title, expected_value=expected_value,
                expected_color=expected_parameters_color,
                rotated=rotated, plot_min=mins[param], plot_max=maxs[param],
                percentiles=marginal_percentiles)
    # Off-diagonals...
    for px, py in axis_dict:
        if px == py:
            continue
        ax, _, _ = axis_dict[px, py]
        if plot_scatter:
            if plot_density:
                alpha = 0.3
            else:
                alpha = 1.
            plt = ax.scatter(x=samples[px], y=samples[py], c=zvals, s=5,
                             edgecolors='none', vmin=vmin, vmax=vmax,
                             cmap=scatter_cmap, alpha=alpha, zorder=2)
        if plot_contours or plot_density:
            # Exclude out-of-bound regions
            # this is a bit kludgy; should probably figure out a better
            # solution to eventually allow for more than just m_p m_s
            if (px == 'm_p' and py == 'm_s') or (py == 'm_p' and px == 'm_s'):
                exclude_region = 'm_s > m_p'
            else:
                exclude_region = None
            create_density_plot(
                px, py, samples, plot_density=plot_density,
                plot_contours=plot_contours, cmap=density_cmap,
                percentiles=contour_percentiles,
                contour_color=contour_color, xmin=mins[px], xmax=maxs[px],
                ymin=mins[py], ymax=maxs[py],
                exclude_region=exclude_region, ax=ax,
                use_kombine=use_kombine)
        if expected_parameters is not None:
            try:
                ax.axvline(expected_parameters[px], lw=1.5,
                           color=expected_parameters_color, zorder=5)
            except KeyError:
                pass
            try:
                ax.axhline(expected_parameters[py], lw=1.5,
                           color=expected_parameters_color, zorder=5)
            except KeyError:
                pass
        ax.set_xlim(mins[px], maxs[px])
        ax.set_ylim(mins[py], maxs[py])
    # adjust tick number for large number of plots
    if len(parameters) > 3:
        for px, py in axis_dict:
            ax, _, _ = axis_dict[px, py]
            ax.set_xticks(reduce_ticks(ax, 'x', maxticks=3))
            ax.set_yticks(reduce_ticks(ax, 'y', maxticks=3))
    if plot_scatter and show_colorbar:
        # compute font size based on fig size
        scale_fac = get_scale_fac(fig)
        fig.subplots_adjust(right=0.85, wspace=0.03)
        cbar_ax = fig.add_axes([0.9, 0.1, 0.03, 0.8])
        cb = fig.colorbar(plt, cax=cbar_ax)
        if cbar_label is not None:
            cb.set_label(cbar_label, fontsize=12*scale_fac)
        cb.ax.tick_params(labelsize=8*scale_fac)
    return fig, axis_dict
|
def function[create_multidim_plot, parameter[parameters, samples, labels, mins, maxs, expected_parameters, expected_parameters_color, plot_marginal, plot_scatter, marginal_percentiles, contour_percentiles, marginal_title, marginal_linestyle, zvals, show_colorbar, cbar_label, vmin, vmax, scatter_cmap, plot_density, plot_contours, density_cmap, contour_color, hist_color, line_color, fill_color, use_kombine, fig, axis_dict]]:
constant[Generate a figure with several plots and histograms.
Parameters
----------
parameters: list
Names of the variables to be plotted.
samples : FieldArray
A field array of the samples to plot.
labels: dict, optional
A dictionary mapping parameters to labels. If none provided, will just
use the parameter strings as the labels.
mins : {None, dict}, optional
Minimum value for the axis of each variable in `parameters`.
If None, it will use the minimum of the corresponding variable in
`samples`.
maxs : {None, dict}, optional
Maximum value for the axis of each variable in `parameters`.
If None, it will use the maximum of the corresponding variable in
`samples`.
expected_parameters : {None, dict}, optional
Expected values of `parameters`, as a dictionary mapping parameter
names -> values. A cross will be plotted at the location of the
expected parameters on axes that plot any of the expected parameters.
expected_parameters_color : {'r', string}, optional
What color to make the expected parameters cross.
plot_marginal : {True, bool}
Plot the marginalized distribution on the diagonals. If False, the
diagonal axes will be turned off.
plot_scatter : {True, bool}
Plot each sample point as a scatter plot.
marginal_percentiles : {None, array}
What percentiles to draw lines at on the 1D histograms.
If None, will draw lines at `[5, 50, 95]` (i.e., the bounds on the
upper 90th percentile and the median).
marginal_title : bool, optional
Add a title over the 1D marginal plots that gives an estimated value
+/- uncertainty. The estimated value is the pecentile halfway between
the max/min of ``maginal_percentiles``, while the uncertainty is given
by the max/min of the ``marginal_percentiles. If no
``marginal_percentiles`` are specified, the median +/- 95/5 percentiles
will be quoted.
marginal_linestyle : str, optional
What line style to use for the marginal histograms.
contour_percentiles : {None, array}
What percentile contours to draw on the scatter plots. If None,
will plot the 50th and 90th percentiles.
zvals : {None, array}
An array to use for coloring the scatter plots. If None, scatter points
will be the same color.
show_colorbar : {True, bool}
Show the colorbar of zvalues used for the scatter points. A ValueError
will be raised if zvals is None and this is True.
cbar_label : {None, str}
Specify a label to add to the colorbar.
vmin: {None, float}, optional
Minimum value for the colorbar. If None, will use the minimum of zvals.
vmax: {None, float}, optional
Maximum value for the colorbar. If None, will use the maxmimum of
zvals.
scatter_cmap : {'plasma', string}
The color map to use for the scatter points. Default is 'plasma'.
plot_density : {False, bool}
Plot the density of points as a color map.
plot_contours : {True, bool}
Draw contours showing the 50th and 90th percentile confidence regions.
density_cmap : {'viridis', string}
The color map to use for the density plot.
contour_color : {None, string}
The color to use for the contour lines. Defaults to white for
density plots, navy for scatter plots without zvals, and black
otherwise.
use_kombine : {False, bool}
Use kombine's KDE to calculate density. Otherwise, will use
`scipy.stats.gaussian_kde.` Default is False.
Returns
-------
fig : pyplot.figure
The figure that was created.
axis_dict : dict
A dictionary mapping the parameter combinations to the axis and their
location in the subplots grid; i.e., the key, values are:
`{('param1', 'param2'): (pyplot.axes, row index, column index)}`
]
if compare[name[labels] is constant[None]] begin[:]
variable[labels] assign[=] <ast.DictComp object at 0x7da20e74ae30>
variable[nparams] assign[=] call[name[len], parameter[name[parameters]]]
if compare[name[nparams] equal[==] constant[2]] begin[:]
variable[width_ratios] assign[=] list[[<ast.Constant object at 0x7da20e74bf40>, <ast.Constant object at 0x7da20e74ae60>]]
variable[height_ratios] assign[=] list[[<ast.Constant object at 0x7da20e748730>, <ast.Constant object at 0x7da20e74bfd0>]]
variable[plot_scatter] assign[=] <ast.BoolOp object at 0x7da20e74b220>
if name[plot_scatter] begin[:]
if compare[name[zvals] is_not constant[None]] begin[:]
variable[sort_indices] assign[=] call[name[zvals].argsort, parameter[]]
variable[zvals] assign[=] call[name[zvals]][name[sort_indices]]
variable[samples] assign[=] call[name[samples]][name[sort_indices]]
if compare[name[contour_color] is constant[None]] begin[:]
variable[contour_color] assign[=] constant[k]
variable[samples] assign[=] call[name[dict], parameter[<ast.ListComp object at 0x7da1b1e71840>]]
if compare[name[mins] is constant[None]] begin[:]
variable[mins] assign[=] <ast.DictComp object at 0x7da18c4cfee0>
if compare[name[maxs] is constant[None]] begin[:]
variable[maxs] assign[=] <ast.DictComp object at 0x7da18c4cee60>
for taget[tuple[[<ast.Name object at 0x7da18c4cdd50>, <ast.Name object at 0x7da18c4cde40>]]] in starred[call[name[enumerate], parameter[name[parameters]]]] begin[:]
<ast.Tuple object at 0x7da18c4cf970> assign[=] call[name[remove_common_offset], parameter[call[name[samples]][name[param]]]]
if compare[name[offset] not_equal[!=] constant[0]] begin[:]
call[name[labels]][name[param]] assign[=] call[constant[{} - {:d}].format, parameter[call[name[labels]][name[param]], name[offset]]]
call[name[samples]][name[param]] assign[=] name[values]
call[name[mins]][name[param]] assign[=] binary_operation[call[name[mins]][name[param]] - call[name[float], parameter[name[offset]]]]
call[name[maxs]][name[param]] assign[=] binary_operation[call[name[maxs]][name[param]] - call[name[float], parameter[name[offset]]]]
if compare[name[expected_parameters] is_not constant[None]] begin[:]
<ast.Try object at 0x7da18c4cfa90>
if <ast.BoolOp object at 0x7da18c4cdde0> begin[:]
<ast.Tuple object at 0x7da18c4cc730> assign[=] call[name[create_axes_grid], parameter[name[parameters]]]
if name[plot_marginal] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18c4ceb90>, <ast.Name object at 0x7da18c4cc4c0>]]] in starred[call[name[enumerate], parameter[name[parameters]]]] begin[:]
<ast.Tuple object at 0x7da18c4ccac0> assign[=] call[name[axis_dict]][tuple[[<ast.Name object at 0x7da18c4cd150>, <ast.Name object at 0x7da18c4cd720>]]]
variable[rotated] assign[=] <ast.BoolOp object at 0x7da18c4cca30>
if compare[name[expected_parameters] is_not constant[None]] begin[:]
<ast.Try object at 0x7da18c4cce50>
call[name[create_marginalized_hist], parameter[name[ax], call[name[samples]][name[param]]]]
for taget[tuple[[<ast.Name object at 0x7da18c4cdae0>, <ast.Name object at 0x7da18c4cf8e0>]]] in starred[name[axis_dict]] begin[:]
if compare[name[px] equal[==] name[py]] begin[:]
continue
<ast.Tuple object at 0x7da18c4cc7f0> assign[=] call[name[axis_dict]][tuple[[<ast.Name object at 0x7da18c4cd210>, <ast.Name object at 0x7da18c4cdff0>]]]
if name[plot_scatter] begin[:]
if name[plot_density] begin[:]
variable[alpha] assign[=] constant[0.3]
variable[plt] assign[=] call[name[ax].scatter, parameter[]]
if <ast.BoolOp object at 0x7da18f00fa60> begin[:]
if <ast.BoolOp object at 0x7da18f00ceb0> begin[:]
variable[exclude_region] assign[=] constant[m_s > m_p]
call[name[create_density_plot], parameter[name[px], name[py], name[samples]]]
if compare[name[expected_parameters] is_not constant[None]] begin[:]
<ast.Try object at 0x7da18f00c580>
<ast.Try object at 0x7da18f00da80>
call[name[ax].set_xlim, parameter[call[name[mins]][name[px]], call[name[maxs]][name[px]]]]
call[name[ax].set_ylim, parameter[call[name[mins]][name[py]], call[name[maxs]][name[py]]]]
if compare[call[name[len], parameter[name[parameters]]] greater[>] constant[3]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18f00d660>, <ast.Name object at 0x7da18f00d300>]]] in starred[name[axis_dict]] begin[:]
<ast.Tuple object at 0x7da18f00c250> assign[=] call[name[axis_dict]][tuple[[<ast.Name object at 0x7da18f00de70>, <ast.Name object at 0x7da18f00c1c0>]]]
call[name[ax].set_xticks, parameter[call[name[reduce_ticks], parameter[name[ax], constant[x]]]]]
call[name[ax].set_yticks, parameter[call[name[reduce_ticks], parameter[name[ax], constant[y]]]]]
if <ast.BoolOp object at 0x7da18f00fd00> begin[:]
variable[scale_fac] assign[=] call[name[get_scale_fac], parameter[name[fig]]]
call[name[fig].subplots_adjust, parameter[]]
variable[cbar_ax] assign[=] call[name[fig].add_axes, parameter[list[[<ast.Constant object at 0x7da18f00e920>, <ast.Constant object at 0x7da18f00c9d0>, <ast.Constant object at 0x7da18f00e770>, <ast.Constant object at 0x7da18f00f190>]]]]
variable[cb] assign[=] call[name[fig].colorbar, parameter[name[plt]]]
if compare[name[cbar_label] is_not constant[None]] begin[:]
call[name[cb].set_label, parameter[name[cbar_label]]]
call[name[cb].ax.tick_params, parameter[]]
return[tuple[[<ast.Name object at 0x7da18f00c0d0>, <ast.Name object at 0x7da18f00da50>]]]
|
keyword[def] identifier[create_multidim_plot] ( identifier[parameters] , identifier[samples] , identifier[labels] = keyword[None] ,
identifier[mins] = keyword[None] , identifier[maxs] = keyword[None] , identifier[expected_parameters] = keyword[None] ,
identifier[expected_parameters_color] = literal[string] ,
identifier[plot_marginal] = keyword[True] , identifier[plot_scatter] = keyword[True] ,
identifier[marginal_percentiles] = keyword[None] , identifier[contour_percentiles] = keyword[None] ,
identifier[marginal_title] = keyword[True] , identifier[marginal_linestyle] = literal[string] ,
identifier[zvals] = keyword[None] , identifier[show_colorbar] = keyword[True] , identifier[cbar_label] = keyword[None] ,
identifier[vmin] = keyword[None] , identifier[vmax] = keyword[None] , identifier[scatter_cmap] = literal[string] ,
identifier[plot_density] = keyword[False] , identifier[plot_contours] = keyword[True] ,
identifier[density_cmap] = literal[string] ,
identifier[contour_color] = keyword[None] , identifier[hist_color] = literal[string] ,
identifier[line_color] = keyword[None] , identifier[fill_color] = literal[string] ,
identifier[use_kombine] = keyword[False] , identifier[fig] = keyword[None] , identifier[axis_dict] = keyword[None] ):
literal[string]
keyword[if] identifier[labels] keyword[is] keyword[None] :
identifier[labels] ={ identifier[p] : identifier[p] keyword[for] identifier[p] keyword[in] identifier[parameters] }
identifier[nparams] = identifier[len] ( identifier[parameters] )
keyword[if] identifier[nparams] == literal[int] :
identifier[width_ratios] =[ literal[int] , literal[int] ]
identifier[height_ratios] =[ literal[int] , literal[int] ]
keyword[else] :
identifier[width_ratios] = identifier[height_ratios] = keyword[None]
identifier[plot_scatter] = identifier[plot_scatter] keyword[and] identifier[nparams] > literal[int]
keyword[if] identifier[plot_scatter] :
keyword[if] identifier[zvals] keyword[is] keyword[not] keyword[None] :
identifier[sort_indices] = identifier[zvals] . identifier[argsort] ()
identifier[zvals] = identifier[zvals] [ identifier[sort_indices] ]
identifier[samples] = identifier[samples] [ identifier[sort_indices] ]
keyword[if] identifier[contour_color] keyword[is] keyword[None] :
identifier[contour_color] = literal[string]
keyword[elif] identifier[show_colorbar] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[else] :
identifier[zvals] = literal[string]
keyword[if] identifier[plot_contours] keyword[and] identifier[contour_color] keyword[is] keyword[None] :
identifier[contour_color] = literal[string]
identifier[samples] = identifier[dict] ([[ identifier[p] , identifier[samples] [ identifier[p] ]] keyword[for] identifier[p] keyword[in] identifier[parameters] ])
keyword[if] identifier[mins] keyword[is] keyword[None] :
identifier[mins] ={ identifier[p] : identifier[samples] [ identifier[p] ]. identifier[min] () keyword[for] identifier[p] keyword[in] identifier[parameters] }
keyword[else] :
identifier[mins] ={ identifier[p] : identifier[val] keyword[for] identifier[p] , identifier[val] keyword[in] identifier[mins] . identifier[items] ()}
keyword[if] identifier[maxs] keyword[is] keyword[None] :
identifier[maxs] ={ identifier[p] : identifier[samples] [ identifier[p] ]. identifier[max] () keyword[for] identifier[p] keyword[in] identifier[parameters] }
keyword[else] :
identifier[maxs] ={ identifier[p] : identifier[val] keyword[for] identifier[p] , identifier[val] keyword[in] identifier[maxs] . identifier[items] ()}
keyword[for] identifier[pi] , identifier[param] keyword[in] identifier[enumerate] ( identifier[parameters] ):
identifier[values] , identifier[offset] = identifier[remove_common_offset] ( identifier[samples] [ identifier[param] ])
keyword[if] identifier[offset] != literal[int] :
identifier[labels] [ identifier[param] ]= literal[string] . identifier[format] ( identifier[labels] [ identifier[param] ], identifier[offset] )
identifier[samples] [ identifier[param] ]= identifier[values]
identifier[mins] [ identifier[param] ]= identifier[mins] [ identifier[param] ]- identifier[float] ( identifier[offset] )
identifier[maxs] [ identifier[param] ]= identifier[maxs] [ identifier[param] ]- identifier[float] ( identifier[offset] )
keyword[if] identifier[expected_parameters] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[expected_parameters] [ identifier[param] ]-= identifier[offset]
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[if] identifier[fig] keyword[is] keyword[None] keyword[and] identifier[axis_dict] keyword[is] keyword[None] :
identifier[fig] , identifier[axis_dict] = identifier[create_axes_grid] (
identifier[parameters] , identifier[labels] = identifier[labels] ,
identifier[width_ratios] = identifier[width_ratios] , identifier[height_ratios] = identifier[height_ratios] ,
identifier[no_diagonals] = keyword[not] identifier[plot_marginal] )
keyword[if] identifier[plot_marginal] :
keyword[for] identifier[pi] , identifier[param] keyword[in] identifier[enumerate] ( identifier[parameters] ):
identifier[ax] , identifier[_] , identifier[_] = identifier[axis_dict] [ identifier[param] , identifier[param] ]
identifier[rotated] = identifier[nparams] == literal[int] keyword[and] identifier[pi] == identifier[nparams] - literal[int]
keyword[if] identifier[expected_parameters] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[expected_value] = identifier[expected_parameters] [ identifier[param] ]
keyword[except] identifier[KeyError] :
identifier[expected_value] = keyword[None]
keyword[else] :
identifier[expected_value] = keyword[None]
identifier[create_marginalized_hist] (
identifier[ax] , identifier[samples] [ identifier[param] ], identifier[label] = identifier[labels] [ identifier[param] ],
identifier[color] = identifier[hist_color] , identifier[fillcolor] = identifier[fill_color] ,
identifier[linestyle] = identifier[marginal_linestyle] , identifier[linecolor] = identifier[line_color] ,
identifier[title] = identifier[marginal_title] , identifier[expected_value] = identifier[expected_value] ,
identifier[expected_color] = identifier[expected_parameters_color] ,
identifier[rotated] = identifier[rotated] , identifier[plot_min] = identifier[mins] [ identifier[param] ], identifier[plot_max] = identifier[maxs] [ identifier[param] ],
identifier[percentiles] = identifier[marginal_percentiles] )
keyword[for] identifier[px] , identifier[py] keyword[in] identifier[axis_dict] :
keyword[if] identifier[px] == identifier[py] :
keyword[continue]
identifier[ax] , identifier[_] , identifier[_] = identifier[axis_dict] [ identifier[px] , identifier[py] ]
keyword[if] identifier[plot_scatter] :
keyword[if] identifier[plot_density] :
identifier[alpha] = literal[int]
keyword[else] :
identifier[alpha] = literal[int]
identifier[plt] = identifier[ax] . identifier[scatter] ( identifier[x] = identifier[samples] [ identifier[px] ], identifier[y] = identifier[samples] [ identifier[py] ], identifier[c] = identifier[zvals] , identifier[s] = literal[int] ,
identifier[edgecolors] = literal[string] , identifier[vmin] = identifier[vmin] , identifier[vmax] = identifier[vmax] ,
identifier[cmap] = identifier[scatter_cmap] , identifier[alpha] = identifier[alpha] , identifier[zorder] = literal[int] )
keyword[if] identifier[plot_contours] keyword[or] identifier[plot_density] :
keyword[if] ( identifier[px] == literal[string] keyword[and] identifier[py] == literal[string] ) keyword[or] ( identifier[py] == literal[string] keyword[and] identifier[px] == literal[string] ):
identifier[exclude_region] = literal[string]
keyword[else] :
identifier[exclude_region] = keyword[None]
identifier[create_density_plot] (
identifier[px] , identifier[py] , identifier[samples] , identifier[plot_density] = identifier[plot_density] ,
identifier[plot_contours] = identifier[plot_contours] , identifier[cmap] = identifier[density_cmap] ,
identifier[percentiles] = identifier[contour_percentiles] ,
identifier[contour_color] = identifier[contour_color] , identifier[xmin] = identifier[mins] [ identifier[px] ], identifier[xmax] = identifier[maxs] [ identifier[px] ],
identifier[ymin] = identifier[mins] [ identifier[py] ], identifier[ymax] = identifier[maxs] [ identifier[py] ],
identifier[exclude_region] = identifier[exclude_region] , identifier[ax] = identifier[ax] ,
identifier[use_kombine] = identifier[use_kombine] )
keyword[if] identifier[expected_parameters] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[ax] . identifier[axvline] ( identifier[expected_parameters] [ identifier[px] ], identifier[lw] = literal[int] ,
identifier[color] = identifier[expected_parameters_color] , identifier[zorder] = literal[int] )
keyword[except] identifier[KeyError] :
keyword[pass]
keyword[try] :
identifier[ax] . identifier[axhline] ( identifier[expected_parameters] [ identifier[py] ], identifier[lw] = literal[int] ,
identifier[color] = identifier[expected_parameters_color] , identifier[zorder] = literal[int] )
keyword[except] identifier[KeyError] :
keyword[pass]
identifier[ax] . identifier[set_xlim] ( identifier[mins] [ identifier[px] ], identifier[maxs] [ identifier[px] ])
identifier[ax] . identifier[set_ylim] ( identifier[mins] [ identifier[py] ], identifier[maxs] [ identifier[py] ])
keyword[if] identifier[len] ( identifier[parameters] )> literal[int] :
keyword[for] identifier[px] , identifier[py] keyword[in] identifier[axis_dict] :
identifier[ax] , identifier[_] , identifier[_] = identifier[axis_dict] [ identifier[px] , identifier[py] ]
identifier[ax] . identifier[set_xticks] ( identifier[reduce_ticks] ( identifier[ax] , literal[string] , identifier[maxticks] = literal[int] ))
identifier[ax] . identifier[set_yticks] ( identifier[reduce_ticks] ( identifier[ax] , literal[string] , identifier[maxticks] = literal[int] ))
keyword[if] identifier[plot_scatter] keyword[and] identifier[show_colorbar] :
identifier[scale_fac] = identifier[get_scale_fac] ( identifier[fig] )
identifier[fig] . identifier[subplots_adjust] ( identifier[right] = literal[int] , identifier[wspace] = literal[int] )
identifier[cbar_ax] = identifier[fig] . identifier[add_axes] ([ literal[int] , literal[int] , literal[int] , literal[int] ])
identifier[cb] = identifier[fig] . identifier[colorbar] ( identifier[plt] , identifier[cax] = identifier[cbar_ax] )
keyword[if] identifier[cbar_label] keyword[is] keyword[not] keyword[None] :
identifier[cb] . identifier[set_label] ( identifier[cbar_label] , identifier[fontsize] = literal[int] * identifier[scale_fac] )
identifier[cb] . identifier[ax] . identifier[tick_params] ( identifier[labelsize] = literal[int] * identifier[scale_fac] )
keyword[return] identifier[fig] , identifier[axis_dict]
|
def create_multidim_plot(parameters, samples, labels=None, mins=None, maxs=None, expected_parameters=None, expected_parameters_color='r', plot_marginal=True, plot_scatter=True, marginal_percentiles=None, contour_percentiles=None, marginal_title=True, marginal_linestyle='-', zvals=None, show_colorbar=True, cbar_label=None, vmin=None, vmax=None, scatter_cmap='plasma', plot_density=False, plot_contours=True, density_cmap='viridis', contour_color=None, hist_color='black', line_color=None, fill_color='gray', use_kombine=False, fig=None, axis_dict=None):
"""Generate a figure with several plots and histograms.
Parameters
----------
parameters: list
Names of the variables to be plotted.
samples : FieldArray
A field array of the samples to plot.
labels: dict, optional
A dictionary mapping parameters to labels. If none provided, will just
use the parameter strings as the labels.
mins : {None, dict}, optional
Minimum value for the axis of each variable in `parameters`.
If None, it will use the minimum of the corresponding variable in
`samples`.
maxs : {None, dict}, optional
Maximum value for the axis of each variable in `parameters`.
If None, it will use the maximum of the corresponding variable in
`samples`.
expected_parameters : {None, dict}, optional
Expected values of `parameters`, as a dictionary mapping parameter
names -> values. A cross will be plotted at the location of the
expected parameters on axes that plot any of the expected parameters.
expected_parameters_color : {'r', string}, optional
What color to make the expected parameters cross.
plot_marginal : {True, bool}
Plot the marginalized distribution on the diagonals. If False, the
diagonal axes will be turned off.
plot_scatter : {True, bool}
Plot each sample point as a scatter plot.
marginal_percentiles : {None, array}
What percentiles to draw lines at on the 1D histograms.
If None, will draw lines at `[5, 50, 95]` (i.e., the bounds on the
upper 90th percentile and the median).
marginal_title : bool, optional
Add a title over the 1D marginal plots that gives an estimated value
+/- uncertainty. The estimated value is the pecentile halfway between
the max/min of ``maginal_percentiles``, while the uncertainty is given
by the max/min of the ``marginal_percentiles. If no
``marginal_percentiles`` are specified, the median +/- 95/5 percentiles
will be quoted.
marginal_linestyle : str, optional
What line style to use for the marginal histograms.
contour_percentiles : {None, array}
What percentile contours to draw on the scatter plots. If None,
will plot the 50th and 90th percentiles.
zvals : {None, array}
An array to use for coloring the scatter plots. If None, scatter points
will be the same color.
show_colorbar : {True, bool}
Show the colorbar of zvalues used for the scatter points. A ValueError
will be raised if zvals is None and this is True.
cbar_label : {None, str}
Specify a label to add to the colorbar.
vmin: {None, float}, optional
Minimum value for the colorbar. If None, will use the minimum of zvals.
vmax: {None, float}, optional
Maximum value for the colorbar. If None, will use the maxmimum of
zvals.
scatter_cmap : {'plasma', string}
The color map to use for the scatter points. Default is 'plasma'.
plot_density : {False, bool}
Plot the density of points as a color map.
plot_contours : {True, bool}
Draw contours showing the 50th and 90th percentile confidence regions.
density_cmap : {'viridis', string}
The color map to use for the density plot.
contour_color : {None, string}
The color to use for the contour lines. Defaults to white for
density plots, navy for scatter plots without zvals, and black
otherwise.
use_kombine : {False, bool}
Use kombine's KDE to calculate density. Otherwise, will use
`scipy.stats.gaussian_kde.` Default is False.
Returns
-------
fig : pyplot.figure
The figure that was created.
axis_dict : dict
A dictionary mapping the parameter combinations to the axis and their
location in the subplots grid; i.e., the key, values are:
`{('param1', 'param2'): (pyplot.axes, row index, column index)}`
"""
if labels is None:
labels = {p: p for p in parameters} # depends on [control=['if'], data=['labels']]
# set up the figure with a grid of axes
# if only plotting 2 parameters, make the marginal plots smaller
nparams = len(parameters)
if nparams == 2:
width_ratios = [3, 1]
height_ratios = [1, 3] # depends on [control=['if'], data=[]]
else:
width_ratios = height_ratios = None
# only plot scatter if more than one parameter
plot_scatter = plot_scatter and nparams > 1
# Sort zvals to get higher values on top in scatter plots
if plot_scatter:
if zvals is not None:
sort_indices = zvals.argsort()
zvals = zvals[sort_indices]
samples = samples[sort_indices]
if contour_color is None:
contour_color = 'k' # depends on [control=['if'], data=['contour_color']] # depends on [control=['if'], data=['zvals']]
elif show_colorbar:
raise ValueError('must provide z values to create a colorbar') # depends on [control=['if'], data=[]]
else:
# just make all scatter points same color
zvals = 'gray'
if plot_contours and contour_color is None:
contour_color = 'navy' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# convert samples to a dictionary to avoid re-computing derived parameters
# every time they are needed
samples = dict([[p, samples[p]] for p in parameters])
# values for axis bounds
if mins is None:
mins = {p: samples[p].min() for p in parameters} # depends on [control=['if'], data=['mins']]
else:
# copy the dict
mins = {p: val for (p, val) in mins.items()}
if maxs is None:
maxs = {p: samples[p].max() for p in parameters} # depends on [control=['if'], data=['maxs']]
else:
# copy the dict
maxs = {p: val for (p, val) in maxs.items()}
# remove common offsets
for (pi, param) in enumerate(parameters):
(values, offset) = remove_common_offset(samples[param])
if offset != 0:
# we'll add the offset removed to the label
labels[param] = '{} - {:d}'.format(labels[param], offset)
samples[param] = values
mins[param] = mins[param] - float(offset)
maxs[param] = maxs[param] - float(offset) # depends on [control=['if'], data=['offset']]
# also remove from expected parameters, if they were provided
if expected_parameters is not None:
try:
expected_parameters[param] -= offset # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['expected_parameters']] # depends on [control=['for'], data=[]]
# create the axis grid
if fig is None and axis_dict is None:
(fig, axis_dict) = create_axes_grid(parameters, labels=labels, width_ratios=width_ratios, height_ratios=height_ratios, no_diagonals=not plot_marginal) # depends on [control=['if'], data=[]]
# Diagonals...
if plot_marginal:
for (pi, param) in enumerate(parameters):
(ax, _, _) = axis_dict[param, param]
# if only plotting 2 parameters and on the second parameter,
# rotate the marginal plot
rotated = nparams == 2 and pi == nparams - 1
# see if there are expected values
if expected_parameters is not None:
try:
expected_value = expected_parameters[param] # depends on [control=['try'], data=[]]
except KeyError:
expected_value = None # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['expected_parameters']]
else:
expected_value = None
create_marginalized_hist(ax, samples[param], label=labels[param], color=hist_color, fillcolor=fill_color, linestyle=marginal_linestyle, linecolor=line_color, title=marginal_title, expected_value=expected_value, expected_color=expected_parameters_color, rotated=rotated, plot_min=mins[param], plot_max=maxs[param], percentiles=marginal_percentiles) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
# Off-diagonals...
for (px, py) in axis_dict:
if px == py:
continue # depends on [control=['if'], data=[]]
(ax, _, _) = axis_dict[px, py]
if plot_scatter:
if plot_density:
alpha = 0.3 # depends on [control=['if'], data=[]]
else:
alpha = 1.0
plt = ax.scatter(x=samples[px], y=samples[py], c=zvals, s=5, edgecolors='none', vmin=vmin, vmax=vmax, cmap=scatter_cmap, alpha=alpha, zorder=2) # depends on [control=['if'], data=[]]
if plot_contours or plot_density:
# Exclude out-of-bound regions
# this is a bit kludgy; should probably figure out a better
# solution to eventually allow for more than just m_p m_s
if px == 'm_p' and py == 'm_s' or (py == 'm_p' and px == 'm_s'):
exclude_region = 'm_s > m_p' # depends on [control=['if'], data=[]]
else:
exclude_region = None
create_density_plot(px, py, samples, plot_density=plot_density, plot_contours=plot_contours, cmap=density_cmap, percentiles=contour_percentiles, contour_color=contour_color, xmin=mins[px], xmax=maxs[px], ymin=mins[py], ymax=maxs[py], exclude_region=exclude_region, ax=ax, use_kombine=use_kombine) # depends on [control=['if'], data=[]]
if expected_parameters is not None:
try:
ax.axvline(expected_parameters[px], lw=1.5, color=expected_parameters_color, zorder=5) # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
try:
ax.axhline(expected_parameters[py], lw=1.5, color=expected_parameters_color, zorder=5) # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['expected_parameters']]
ax.set_xlim(mins[px], maxs[px])
ax.set_ylim(mins[py], maxs[py]) # depends on [control=['for'], data=[]]
# adjust tick number for large number of plots
if len(parameters) > 3:
for (px, py) in axis_dict:
(ax, _, _) = axis_dict[px, py]
ax.set_xticks(reduce_ticks(ax, 'x', maxticks=3))
ax.set_yticks(reduce_ticks(ax, 'y', maxticks=3)) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
if plot_scatter and show_colorbar:
# compute font size based on fig size
scale_fac = get_scale_fac(fig)
fig.subplots_adjust(right=0.85, wspace=0.03)
cbar_ax = fig.add_axes([0.9, 0.1, 0.03, 0.8])
cb = fig.colorbar(plt, cax=cbar_ax)
if cbar_label is not None:
cb.set_label(cbar_label, fontsize=12 * scale_fac) # depends on [control=['if'], data=['cbar_label']]
cb.ax.tick_params(labelsize=8 * scale_fac) # depends on [control=['if'], data=[]]
return (fig, axis_dict)
|
def get_total_degree_day_too_low_warning(
    model_type,
    balance_point,
    degree_day_type,
    avg_degree_days,
    period_days,
    minimum_total,
):
    """Check that the summed degree days clear the configured minimum.

    Parameters
    ----------
    model_type : :any:`str`
        Model type (e.g., ``'cdd_hdd'``).
    balance_point : :any:`float`
        The balance point in question.
    degree_day_type : :any:`str`
        The type of degree days (``'cdd'`` or ``'hdd'``).
    avg_degree_days : :any:`pandas.Series`
        A series of degree day values.
    period_days : :any:`pandas.Series`
        A series containing day counts.
    minimum_total : :any:`float`
        Minimum allowable total sum of degree day values.

    Returns
    -------
    warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
        Empty list when the total is acceptable, otherwise a list holding
        a single warning.
    """
    total_degree_days = (avg_degree_days * period_days).sum()
    # Happy path first. Written as `not (total < minimum)` rather than
    # `total >= minimum` so that a NaN total behaves exactly like the
    # original strict `<` comparison (NaN produces no warning).
    if not total_degree_days < minimum_total:
        return []
    qualified_name = (
        "eemeter.caltrack_daily.{model_type}.total_{degree_day_type}_too_low"
        .format(model_type=model_type, degree_day_type=degree_day_type)
    )
    description = (
        "Total {degree_day_type} below accepted minimum."
        " Candidate fit not attempted."
    ).format(degree_day_type=degree_day_type.upper())
    data = {
        "total_{degree_day_type}".format(
            degree_day_type=degree_day_type
        ): total_degree_days,
        "total_{degree_day_type}_minimum".format(
            degree_day_type=degree_day_type
        ): minimum_total,
        "{degree_day_type}_balance_point".format(
            degree_day_type=degree_day_type
        ): balance_point,
    }
    return [
        EEMeterWarning(
            qualified_name=qualified_name,
            description=description,
            data=data,
        )
    ]
|
def function[get_total_degree_day_too_low_warning, parameter[model_type, balance_point, degree_day_type, avg_degree_days, period_days, minimum_total]]:
constant[ Return an empty list or a single warning wrapped in a list regarding
the total summed degree day values.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
balance_point : :any:`float`
The balance point in question.
degree_day_type : :any:`str`
The type of degree days (``'cdd'`` or ``'hdd'``).
avg_degree_days : :any:`pandas.Series`
A series of degree day values.
period_days : :any:`pandas.Series`
A series of containing day counts.
minimum_total : :any:`float`
Minimum allowable total sum of degree day values.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning.
]
variable[warnings] assign[=] list[[]]
variable[total_degree_days] assign[=] call[binary_operation[name[avg_degree_days] * name[period_days]].sum, parameter[]]
if compare[name[total_degree_days] less[<] name[minimum_total]] begin[:]
call[name[warnings].append, parameter[call[name[EEMeterWarning], parameter[]]]]
return[name[warnings]]
|
keyword[def] identifier[get_total_degree_day_too_low_warning] (
identifier[model_type] ,
identifier[balance_point] ,
identifier[degree_day_type] ,
identifier[avg_degree_days] ,
identifier[period_days] ,
identifier[minimum_total] ,
):
literal[string]
identifier[warnings] =[]
identifier[total_degree_days] =( identifier[avg_degree_days] * identifier[period_days] ). identifier[sum] ()
keyword[if] identifier[total_degree_days] < identifier[minimum_total] :
identifier[warnings] . identifier[append] (
identifier[EEMeterWarning] (
identifier[qualified_name] =(
literal[string] . identifier[format] (
identifier[model_type] = identifier[model_type] , identifier[degree_day_type] = identifier[degree_day_type]
)
),
identifier[description] =(
literal[string]
literal[string] . identifier[format] (
identifier[degree_day_type] = identifier[degree_day_type] . identifier[upper] ()
)
),
identifier[data] ={
literal[string] . identifier[format] (
identifier[degree_day_type] = identifier[degree_day_type]
): identifier[total_degree_days] ,
literal[string] . identifier[format] (
identifier[degree_day_type] = identifier[degree_day_type]
): identifier[minimum_total] ,
literal[string] . identifier[format] (
identifier[degree_day_type] = identifier[degree_day_type]
): identifier[balance_point] ,
},
)
)
keyword[return] identifier[warnings]
|
def get_total_degree_day_too_low_warning(model_type, balance_point, degree_day_type, avg_degree_days, period_days, minimum_total):
""" Return an empty list or a single warning wrapped in a list regarding
the total summed degree day values.
Parameters
----------
model_type : :any:`str`
Model type (e.g., ``'cdd_hdd'``).
balance_point : :any:`float`
The balance point in question.
degree_day_type : :any:`str`
The type of degree days (``'cdd'`` or ``'hdd'``).
avg_degree_days : :any:`pandas.Series`
A series of degree day values.
period_days : :any:`pandas.Series`
A series of containing day counts.
minimum_total : :any:`float`
Minimum allowable total sum of degree day values.
Returns
-------
warnings : :any:`list` of :any:`eemeter.EEMeterWarning`
Empty list or list of single warning.
"""
warnings = []
total_degree_days = (avg_degree_days * period_days).sum()
if total_degree_days < minimum_total:
warnings.append(EEMeterWarning(qualified_name='eemeter.caltrack_daily.{model_type}.total_{degree_day_type}_too_low'.format(model_type=model_type, degree_day_type=degree_day_type), description='Total {degree_day_type} below accepted minimum. Candidate fit not attempted.'.format(degree_day_type=degree_day_type.upper()), data={'total_{degree_day_type}'.format(degree_day_type=degree_day_type): total_degree_days, 'total_{degree_day_type}_minimum'.format(degree_day_type=degree_day_type): minimum_total, '{degree_day_type}_balance_point'.format(degree_day_type=degree_day_type): balance_point})) # depends on [control=['if'], data=['total_degree_days', 'minimum_total']]
return warnings
|
def get_track_info(track_id):
    """Fetch a track's metadata from Soundcloud, given its track_id."""
    logger.info('Retrieving more info on the track')
    response = requests.get(
        url["trackinfo"].format(track_id),
        params={'client_id': CLIENT_ID},
        stream=True,
    )
    track = response.json()
    logger.debug(track)
    return track
|
def function[get_track_info, parameter[track_id]]:
constant[
Fetches track info from Soundcloud, given a track_id
]
call[name[logger].info, parameter[constant[Retrieving more info on the track]]]
variable[info_url] assign[=] call[call[name[url]][constant[trackinfo]].format, parameter[name[track_id]]]
variable[r] assign[=] call[name[requests].get, parameter[name[info_url]]]
variable[item] assign[=] call[name[r].json, parameter[]]
call[name[logger].debug, parameter[name[item]]]
return[name[item]]
|
keyword[def] identifier[get_track_info] ( identifier[track_id] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] )
identifier[info_url] = identifier[url] [ literal[string] ]. identifier[format] ( identifier[track_id] )
identifier[r] = identifier[requests] . identifier[get] ( identifier[info_url] , identifier[params] ={ literal[string] : identifier[CLIENT_ID] }, identifier[stream] = keyword[True] )
identifier[item] = identifier[r] . identifier[json] ()
identifier[logger] . identifier[debug] ( identifier[item] )
keyword[return] identifier[item]
|
def get_track_info(track_id):
"""
Fetches track info from Soundcloud, given a track_id
"""
logger.info('Retrieving more info on the track')
info_url = url['trackinfo'].format(track_id)
r = requests.get(info_url, params={'client_id': CLIENT_ID}, stream=True)
item = r.json()
logger.debug(item)
return item
|
def _start_priority_containers(self, groups, group_orders, tool_d):
    """ Select containers based on priorities to start """
    vent_cfg = Template(self.path_dirs.cfg_file)
    start_order = vent_cfg.option('groups', 'start_order')
    if start_order[0]:
        priority_groups = start_order[1].split(',')
    else:
        priority_groups = []
    remaining_groups = sorted(set(groups))
    s_conts = []
    f_conts = []
    # groups named in vent.cfg start first, in the configured order
    for group in priority_groups:
        if group in remaining_groups:
            # handled here; don't start it again in the second pass
            remaining_groups.remove(group)
        for cont_t in sorted(group_orders.get(group, [])):
            if cont_t[1] not in s_conts:
                s_conts, f_conts = self._start_container(cont_t[1],
                                                         tool_d,
                                                         s_conts,
                                                         f_conts)
    # any groups not mentioned in vent.cfg start afterwards
    for group in remaining_groups:
        for cont_t in sorted(group_orders.get(group, [])):
            if cont_t[1] not in s_conts:
                s_conts, f_conts = self._start_container(cont_t[1],
                                                         tool_d,
                                                         s_conts,
                                                         f_conts)
    return (s_conts, f_conts)
|
def function[_start_priority_containers, parameter[self, groups, group_orders, tool_d]]:
constant[ Select containers based on priorities to start ]
variable[vent_cfg] assign[=] call[name[Template], parameter[name[self].path_dirs.cfg_file]]
variable[cfg_groups] assign[=] call[name[vent_cfg].option, parameter[constant[groups], constant[start_order]]]
if call[name[cfg_groups]][constant[0]] begin[:]
variable[cfg_groups] assign[=] call[call[name[cfg_groups]][constant[1]].split, parameter[constant[,]]]
variable[all_groups] assign[=] call[name[sorted], parameter[call[name[set], parameter[name[groups]]]]]
variable[s_conts] assign[=] list[[]]
variable[f_conts] assign[=] list[[]]
for taget[name[group]] in starred[name[cfg_groups]] begin[:]
if compare[name[group] in name[all_groups]] begin[:]
call[name[all_groups].remove, parameter[name[group]]]
if compare[name[group] in name[group_orders]] begin[:]
for taget[name[cont_t]] in starred[call[name[sorted], parameter[call[name[group_orders]][name[group]]]]] begin[:]
if compare[call[name[cont_t]][constant[1]] <ast.NotIn object at 0x7da2590d7190> name[s_conts]] begin[:]
<ast.Tuple object at 0x7da1b0b843a0> assign[=] call[name[self]._start_container, parameter[call[name[cont_t]][constant[1]], name[tool_d], name[s_conts], name[f_conts]]]
for taget[name[group]] in starred[name[all_groups]] begin[:]
if compare[name[group] in name[group_orders]] begin[:]
for taget[name[cont_t]] in starred[call[name[sorted], parameter[call[name[group_orders]][name[group]]]]] begin[:]
if compare[call[name[cont_t]][constant[1]] <ast.NotIn object at 0x7da2590d7190> name[s_conts]] begin[:]
<ast.Tuple object at 0x7da1b0b86ad0> assign[=] call[name[self]._start_container, parameter[call[name[cont_t]][constant[1]], name[tool_d], name[s_conts], name[f_conts]]]
return[tuple[[<ast.Name object at 0x7da1b0b84490>, <ast.Name object at 0x7da1b0b84310>]]]
|
keyword[def] identifier[_start_priority_containers] ( identifier[self] , identifier[groups] , identifier[group_orders] , identifier[tool_d] ):
literal[string]
identifier[vent_cfg] = identifier[Template] ( identifier[self] . identifier[path_dirs] . identifier[cfg_file] )
identifier[cfg_groups] = identifier[vent_cfg] . identifier[option] ( literal[string] , literal[string] )
keyword[if] identifier[cfg_groups] [ literal[int] ]:
identifier[cfg_groups] = identifier[cfg_groups] [ literal[int] ]. identifier[split] ( literal[string] )
keyword[else] :
identifier[cfg_groups] =[]
identifier[all_groups] = identifier[sorted] ( identifier[set] ( identifier[groups] ))
identifier[s_conts] =[]
identifier[f_conts] =[]
keyword[for] identifier[group] keyword[in] identifier[cfg_groups] :
keyword[if] identifier[group] keyword[in] identifier[all_groups] :
identifier[all_groups] . identifier[remove] ( identifier[group] )
keyword[if] identifier[group] keyword[in] identifier[group_orders] :
keyword[for] identifier[cont_t] keyword[in] identifier[sorted] ( identifier[group_orders] [ identifier[group] ]):
keyword[if] identifier[cont_t] [ literal[int] ] keyword[not] keyword[in] identifier[s_conts] :
identifier[s_conts] , identifier[f_conts] = identifier[self] . identifier[_start_container] ( identifier[cont_t] [ literal[int] ],
identifier[tool_d] ,
identifier[s_conts] ,
identifier[f_conts] )
keyword[for] identifier[group] keyword[in] identifier[all_groups] :
keyword[if] identifier[group] keyword[in] identifier[group_orders] :
keyword[for] identifier[cont_t] keyword[in] identifier[sorted] ( identifier[group_orders] [ identifier[group] ]):
keyword[if] identifier[cont_t] [ literal[int] ] keyword[not] keyword[in] identifier[s_conts] :
identifier[s_conts] , identifier[f_conts] = identifier[self] . identifier[_start_container] ( identifier[cont_t] [ literal[int] ],
identifier[tool_d] ,
identifier[s_conts] ,
identifier[f_conts] )
keyword[return] ( identifier[s_conts] , identifier[f_conts] )
|
def _start_priority_containers(self, groups, group_orders, tool_d):
""" Select containers based on priorities to start """
vent_cfg = Template(self.path_dirs.cfg_file)
cfg_groups = vent_cfg.option('groups', 'start_order')
if cfg_groups[0]:
cfg_groups = cfg_groups[1].split(',') # depends on [control=['if'], data=[]]
else:
cfg_groups = []
all_groups = sorted(set(groups))
s_conts = []
f_conts = []
# start tools in order of group defined in vent.cfg
for group in cfg_groups:
# remove from all_groups because already checked out
if group in all_groups:
all_groups.remove(group) # depends on [control=['if'], data=['group', 'all_groups']]
if group in group_orders:
for cont_t in sorted(group_orders[group]):
if cont_t[1] not in s_conts:
(s_conts, f_conts) = self._start_container(cont_t[1], tool_d, s_conts, f_conts) # depends on [control=['if'], data=['s_conts']] # depends on [control=['for'], data=['cont_t']] # depends on [control=['if'], data=['group', 'group_orders']] # depends on [control=['for'], data=['group']]
# start tools that haven't been specified in the vent.cfg, if any
for group in all_groups:
if group in group_orders:
for cont_t in sorted(group_orders[group]):
if cont_t[1] not in s_conts:
(s_conts, f_conts) = self._start_container(cont_t[1], tool_d, s_conts, f_conts) # depends on [control=['if'], data=['s_conts']] # depends on [control=['for'], data=['cont_t']] # depends on [control=['if'], data=['group', 'group_orders']] # depends on [control=['for'], data=['group']]
return (s_conts, f_conts)
|
def chromsizes(self, uid):
    """
    Fetch the chromosome sizes for the dataset identified by *uid*
    from the server; raises ServerError on a non-200 response.
    """
    endpoint = "http://{host}:{port}/api/v1/chrom-sizes/?id={uid}".format(
        host=self.host, port=self.port, uid=uid
    )
    response = requests.get(endpoint)
    if response.status_code == 200:
        return response.content
    raise ServerError("Error fetching chromsizes:", response.content)
|
def function[chromsizes, parameter[self, uid]]:
constant[
Return the chromosome sizes from the given filename
]
variable[url] assign[=] call[constant[http://{host}:{port}/api/v1/chrom-sizes/?id={uid}].format, parameter[]]
variable[req] assign[=] call[name[requests].get, parameter[name[url]]]
if compare[name[req].status_code not_equal[!=] constant[200]] begin[:]
<ast.Raise object at 0x7da18eb55510>
return[name[req].content]
|
keyword[def] identifier[chromsizes] ( identifier[self] , identifier[uid] ):
literal[string]
identifier[url] = literal[string] . identifier[format] (
identifier[host] = identifier[self] . identifier[host] , identifier[port] = identifier[self] . identifier[port] , identifier[uid] = identifier[uid]
)
identifier[req] = identifier[requests] . identifier[get] ( identifier[url] )
keyword[if] identifier[req] . identifier[status_code] != literal[int] :
keyword[raise] identifier[ServerError] ( literal[string] , identifier[req] . identifier[content] )
keyword[return] identifier[req] . identifier[content]
|
def chromsizes(self, uid):
"""
Return the chromosome sizes from the given filename
"""
url = 'http://{host}:{port}/api/v1/chrom-sizes/?id={uid}'.format(host=self.host, port=self.port, uid=uid)
req = requests.get(url)
if req.status_code != 200:
raise ServerError('Error fetching chromsizes:', req.content) # depends on [control=['if'], data=[]]
return req.content
|
def build_application(conf):
    """Do some setup and return the wsgi app.

    ``conf`` supports both attribute reads (``conf.app``) and item
    assignment (``conf['app']``), i.e. some dict-like config wrapper.
    This normalizes ``conf.adapter_options`` to a plain dict, resolves
    ``conf.app`` to a concrete WSGI app, registers a ``GET /_simpl``
    route on the underlying Bottle instance, optionally prints the
    route table, and returns ``conf.app``.
    """
    # Normalize adapter_options: a list of dicts is merged into one dict
    # (later dicts win on key collisions), None becomes {}, anything else
    # is shallow-copied so later mutation doesn't leak back to the caller.
    if isinstance(conf.adapter_options, list):
        conf['adapter_options'] = {key: val for _dict in conf.adapter_options
                                   for key, val in _dict.items()}
    elif conf.adapter_options is None:
        conf['adapter_options'] = {}
    else:
        conf['adapter_options'] = copy.copy(conf.adapter_options)
    # get wsgi app the same way bottle does if it receives a string.
    conf['app'] = conf.app or bottle.default_app()
    if isinstance(conf.app, six.string_types):
        conf['app'] = bottle.load_app(conf.app)
    def _find_bottle_app(_app):
        """Lookup the underlying Bottle() instance."""
        # Unwrap middleware layers through their `.app` attribute until
        # a bottle.Bottle is reached.
        while hasattr(_app, 'app'):
            if isinstance(_app, bottle.Bottle):
                break
            _app = _app.app
        assert isinstance(_app, bottle.Bottle), 'Could not find Bottle app.'
        return _app
    bottle_app = _find_bottle_app(conf.app)
    # Version endpoint registered directly on the Bottle instance.
    bottle_app.route(
        path='/_simpl', method='GET', callback=_version_callback)
    def _show_routes():
        """Conditionally print the app's routes."""
        if conf.app and not conf.quiet:
            if conf.reloader and os.getenv('BOTTLE_CHILD'):
                # BOTTLE_CHILD marks the reloader's child process — the
                # one that actually serves requests.
                LOG.info("Running bottle server with reloader.")
            elif not conf.reloader:
                pass
            else:
                # Reloader requested but this is the parent process:
                # skip printing so routes aren't shown twice.
                return
            routes = fmt_routes(bottle_app)
            if routes:
                print('\n{}'.format(routes), end='\n\n')
    _show_routes()
    return conf.app
|
def function[build_application, parameter[conf]]:
constant[Do some setup and return the wsgi app.]
if call[name[isinstance], parameter[name[conf].adapter_options, name[list]]] begin[:]
call[name[conf]][constant[adapter_options]] assign[=] <ast.DictComp object at 0x7da1b09e8fa0>
call[name[conf]][constant[app]] assign[=] <ast.BoolOp object at 0x7da1b0806170>
if call[name[isinstance], parameter[name[conf].app, name[six].string_types]] begin[:]
call[name[conf]][constant[app]] assign[=] call[name[bottle].load_app, parameter[name[conf].app]]
def function[_find_bottle_app, parameter[_app]]:
constant[Lookup the underlying Bottle() instance.]
while call[name[hasattr], parameter[name[_app], constant[app]]] begin[:]
if call[name[isinstance], parameter[name[_app], name[bottle].Bottle]] begin[:]
break
variable[_app] assign[=] name[_app].app
assert[call[name[isinstance], parameter[name[_app], name[bottle].Bottle]]]
return[name[_app]]
variable[bottle_app] assign[=] call[name[_find_bottle_app], parameter[name[conf].app]]
call[name[bottle_app].route, parameter[]]
def function[_show_routes, parameter[]]:
constant[Conditionally print the app's routes.]
if <ast.BoolOp object at 0x7da1b0804a60> begin[:]
if <ast.BoolOp object at 0x7da1b0805120> begin[:]
call[name[LOG].info, parameter[constant[Running bottle server with reloader.]]]
variable[routes] assign[=] call[name[fmt_routes], parameter[name[bottle_app]]]
if name[routes] begin[:]
call[name[print], parameter[call[constant[
{}].format, parameter[name[routes]]]]]
call[name[_show_routes], parameter[]]
return[name[conf].app]
|
keyword[def] identifier[build_application] ( identifier[conf] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[conf] . identifier[adapter_options] , identifier[list] ):
identifier[conf] [ literal[string] ]={ identifier[key] : identifier[val] keyword[for] identifier[_dict] keyword[in] identifier[conf] . identifier[adapter_options]
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[_dict] . identifier[items] ()}
keyword[elif] identifier[conf] . identifier[adapter_options] keyword[is] keyword[None] :
identifier[conf] [ literal[string] ]={}
keyword[else] :
identifier[conf] [ literal[string] ]= identifier[copy] . identifier[copy] ( identifier[conf] . identifier[adapter_options] )
identifier[conf] [ literal[string] ]= identifier[conf] . identifier[app] keyword[or] identifier[bottle] . identifier[default_app] ()
keyword[if] identifier[isinstance] ( identifier[conf] . identifier[app] , identifier[six] . identifier[string_types] ):
identifier[conf] [ literal[string] ]= identifier[bottle] . identifier[load_app] ( identifier[conf] . identifier[app] )
keyword[def] identifier[_find_bottle_app] ( identifier[_app] ):
literal[string]
keyword[while] identifier[hasattr] ( identifier[_app] , literal[string] ):
keyword[if] identifier[isinstance] ( identifier[_app] , identifier[bottle] . identifier[Bottle] ):
keyword[break]
identifier[_app] = identifier[_app] . identifier[app]
keyword[assert] identifier[isinstance] ( identifier[_app] , identifier[bottle] . identifier[Bottle] ), literal[string]
keyword[return] identifier[_app]
identifier[bottle_app] = identifier[_find_bottle_app] ( identifier[conf] . identifier[app] )
identifier[bottle_app] . identifier[route] (
identifier[path] = literal[string] , identifier[method] = literal[string] , identifier[callback] = identifier[_version_callback] )
keyword[def] identifier[_show_routes] ():
literal[string]
keyword[if] identifier[conf] . identifier[app] keyword[and] keyword[not] identifier[conf] . identifier[quiet] :
keyword[if] identifier[conf] . identifier[reloader] keyword[and] identifier[os] . identifier[getenv] ( literal[string] ):
identifier[LOG] . identifier[info] ( literal[string] )
keyword[elif] keyword[not] identifier[conf] . identifier[reloader] :
keyword[pass]
keyword[else] :
keyword[return]
identifier[routes] = identifier[fmt_routes] ( identifier[bottle_app] )
keyword[if] identifier[routes] :
identifier[print] ( literal[string] . identifier[format] ( identifier[routes] ), identifier[end] = literal[string] )
identifier[_show_routes] ()
keyword[return] identifier[conf] . identifier[app]
|
def build_application(conf):
"""Do some setup and return the wsgi app."""
if isinstance(conf.adapter_options, list):
conf['adapter_options'] = {key: val for _dict in conf.adapter_options for (key, val) in _dict.items()} # depends on [control=['if'], data=[]]
elif conf.adapter_options is None:
conf['adapter_options'] = {} # depends on [control=['if'], data=[]]
else:
conf['adapter_options'] = copy.copy(conf.adapter_options)
# get wsgi app the same way bottle does if it receives a string.
conf['app'] = conf.app or bottle.default_app()
if isinstance(conf.app, six.string_types):
conf['app'] = bottle.load_app(conf.app) # depends on [control=['if'], data=[]]
def _find_bottle_app(_app):
"""Lookup the underlying Bottle() instance."""
while hasattr(_app, 'app'):
if isinstance(_app, bottle.Bottle):
break # depends on [control=['if'], data=[]]
_app = _app.app # depends on [control=['while'], data=[]]
assert isinstance(_app, bottle.Bottle), 'Could not find Bottle app.'
return _app
bottle_app = _find_bottle_app(conf.app)
bottle_app.route(path='/_simpl', method='GET', callback=_version_callback)
def _show_routes():
"""Conditionally print the app's routes."""
if conf.app and (not conf.quiet):
if conf.reloader and os.getenv('BOTTLE_CHILD'):
LOG.info('Running bottle server with reloader.') # depends on [control=['if'], data=[]]
elif not conf.reloader:
pass # depends on [control=['if'], data=[]]
else:
return
routes = fmt_routes(bottle_app)
if routes:
print('\n{}'.format(routes), end='\n\n') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
_show_routes()
return conf.app
|
def get_access_bits(self, c1, c2, c3):
    """Compute the three access-bit bytes of a sector trailer.

    ``c1``, ``c2``, ``c3`` are 4-item tuples holding the access-condition
    bit of each block in the sector.  Returns the tuple
    ``(byte_6, byte_7, byte_8)`` ready to be written into the trailer.
    """
    def pack(high, low):
        # Build one byte: bits 7..4 come from high[3..0], bits 3..0 from
        # low[3..0]; every entry is reduced to a single bit first.
        value = 0
        for bit in (high[3], high[2], high[1], high[0],
                    low[3], low[2], low[1], low[0]):
            value = (value << 1) | (bit & 1)
        return value

    # Bitwise complements feed the redundant (inverted) nibbles.
    not_c1 = tuple(~bit for bit in c1)
    not_c2 = tuple(~bit for bit in c2)
    not_c3 = tuple(~bit for bit in c3)

    byte_6 = pack(not_c2, not_c1)
    byte_7 = pack(c1, not_c3)
    byte_8 = pack(c3, c2)
    return byte_6, byte_7, byte_8
|
def function[get_access_bits, parameter[self, c1, c2, c3]]:
constant[
Calculates the access bits for a sector trailer based on their access conditions
c1, c2, c3, c4 are 4 items tuples containing the values for each block
returns the 3 bytes for the sector trailer
]
variable[byte_6] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da18fe914b0> <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[7]] + binary_operation[binary_operation[<ast.UnaryOp object at 0x7da18fe937f0> <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[6]]] + binary_operation[binary_operation[<ast.UnaryOp object at 0x7da18fe91300> <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[5]]] + binary_operation[binary_operation[<ast.UnaryOp object at 0x7da18fe903a0> <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[4]]] + binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b09e9690> <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[3]]] + binary_operation[binary_operation[<ast.UnaryOp object at 0x7da1b09ea920> <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[2]]] + binary_operation[binary_operation[<ast.UnaryOp object at 0x7da2101f50c0> <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[1]]] + binary_operation[<ast.UnaryOp object at 0x7da18dc06e60> <ast.BitAnd object at 0x7da2590d6b60> constant[1]]]
variable[byte_7] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[call[name[c1]][constant[3]] <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[7]] + binary_operation[binary_operation[call[name[c1]][constant[2]] <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[6]]] + binary_operation[binary_operation[call[name[c1]][constant[1]] <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[5]]] + binary_operation[binary_operation[call[name[c1]][constant[0]] <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[4]]] + binary_operation[binary_operation[<ast.UnaryOp object at 0x7da18dc04f10> <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[3]]] + binary_operation[binary_operation[<ast.UnaryOp object at 0x7da18dc04ee0> <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[2]]] + binary_operation[binary_operation[<ast.UnaryOp object at 0x7da18dc07ac0> <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[1]]] + binary_operation[<ast.UnaryOp object at 0x7da1b0abba00> <ast.BitAnd object at 0x7da2590d6b60> constant[1]]]
variable[byte_8] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[call[name[c3]][constant[3]] <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[7]] + binary_operation[binary_operation[call[name[c3]][constant[2]] <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[6]]] + binary_operation[binary_operation[call[name[c3]][constant[1]] <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[5]]] + binary_operation[binary_operation[call[name[c3]][constant[0]] <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[4]]] + binary_operation[binary_operation[call[name[c2]][constant[3]] <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[3]]] + binary_operation[binary_operation[call[name[c2]][constant[2]] <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[2]]] + binary_operation[binary_operation[call[name[c2]][constant[1]] <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[1]]] + binary_operation[call[name[c2]][constant[0]] <ast.BitAnd object at 0x7da2590d6b60> constant[1]]]
return[tuple[[<ast.Name object at 0x7da18eb57b20>, <ast.Name object at 0x7da18eb572b0>, <ast.Name object at 0x7da18eb54040>]]]
|
keyword[def] identifier[get_access_bits] ( identifier[self] , identifier[c1] , identifier[c2] , identifier[c3] ):
literal[string]
identifier[byte_6] =((~ identifier[c2] [ literal[int] ]& literal[int] )<< literal[int] )+((~ identifier[c2] [ literal[int] ]& literal[int] )<< literal[int] )+((~ identifier[c2] [ literal[int] ]& literal[int] )<< literal[int] )+((~ identifier[c2] [ literal[int] ]& literal[int] )<< literal[int] )+((~ identifier[c1] [ literal[int] ]& literal[int] )<< literal[int] )+((~ identifier[c1] [ literal[int] ]& literal[int] )<< literal[int] )+((~ identifier[c1] [ literal[int] ]& literal[int] )<< literal[int] )+(~ identifier[c1] [ literal[int] ]& literal[int] )
identifier[byte_7] =(( identifier[c1] [ literal[int] ]& literal[int] )<< literal[int] )+(( identifier[c1] [ literal[int] ]& literal[int] )<< literal[int] )+(( identifier[c1] [ literal[int] ]& literal[int] )<< literal[int] )+(( identifier[c1] [ literal[int] ]& literal[int] )<< literal[int] )+((~ identifier[c3] [ literal[int] ]& literal[int] )<< literal[int] )+((~ identifier[c3] [ literal[int] ]& literal[int] )<< literal[int] )+((~ identifier[c3] [ literal[int] ]& literal[int] )<< literal[int] )+(~ identifier[c3] [ literal[int] ]& literal[int] )
identifier[byte_8] =(( identifier[c3] [ literal[int] ]& literal[int] )<< literal[int] )+(( identifier[c3] [ literal[int] ]& literal[int] )<< literal[int] )+(( identifier[c3] [ literal[int] ]& literal[int] )<< literal[int] )+(( identifier[c3] [ literal[int] ]& literal[int] )<< literal[int] )+(( identifier[c2] [ literal[int] ]& literal[int] )<< literal[int] )+(( identifier[c2] [ literal[int] ]& literal[int] )<< literal[int] )+(( identifier[c2] [ literal[int] ]& literal[int] )<< literal[int] )+( identifier[c2] [ literal[int] ]& literal[int] )
keyword[return] identifier[byte_6] , identifier[byte_7] , identifier[byte_8]
|
def get_access_bits(self, c1, c2, c3):
"""
Calculates the access bits for a sector trailer based on their access conditions
c1, c2, c3, c4 are 4 items tuples containing the values for each block
returns the 3 bytes for the sector trailer
"""
byte_6 = ((~c2[3] & 1) << 7) + ((~c2[2] & 1) << 6) + ((~c2[1] & 1) << 5) + ((~c2[0] & 1) << 4) + ((~c1[3] & 1) << 3) + ((~c1[2] & 1) << 2) + ((~c1[1] & 1) << 1) + (~c1[0] & 1)
byte_7 = ((c1[3] & 1) << 7) + ((c1[2] & 1) << 6) + ((c1[1] & 1) << 5) + ((c1[0] & 1) << 4) + ((~c3[3] & 1) << 3) + ((~c3[2] & 1) << 2) + ((~c3[1] & 1) << 1) + (~c3[0] & 1)
byte_8 = ((c3[3] & 1) << 7) + ((c3[2] & 1) << 6) + ((c3[1] & 1) << 5) + ((c3[0] & 1) << 4) + ((c2[3] & 1) << 3) + ((c2[2] & 1) << 2) + ((c2[1] & 1) << 1) + (c2[0] & 1)
return (byte_6, byte_7, byte_8)
|
def match(self, s):
    """Test *s* against the compiled pattern.

    On a match the string is remembered in the internal list and ``True``
    is returned; otherwise ``False`` is returned and nothing is stored.
    """
    if self.re.match(s) is None:
        return False
    self.list.append(s)
    return True
|
def function[match, parameter[self, s]]:
constant[ Matching the pattern to the input string, returns True/False and
saves the matched string in the internal list
]
if call[name[self].re.match, parameter[name[s]]] begin[:]
call[name[self].list.append, parameter[name[s]]]
return[constant[True]]
|
keyword[def] identifier[match] ( identifier[self] , identifier[s] ):
literal[string]
keyword[if] identifier[self] . identifier[re] . identifier[match] ( identifier[s] ):
identifier[self] . identifier[list] . identifier[append] ( identifier[s] )
keyword[return] keyword[True]
keyword[else] : keyword[return] keyword[False]
|
def match(self, s):
""" Matching the pattern to the input string, returns True/False and
saves the matched string in the internal list
"""
if self.re.match(s):
self.list.append(s)
return True # depends on [control=['if'], data=[]]
else:
return False
|
def lat_lon_grid_spacing(longitude, latitude, **kwargs):
    r"""Calculate the distance between grid points of a lat/lon grid.

    Used when the grid spacing is defined by delta lat/lon rather than
    delta x/y.

    Parameters
    ----------
    longitude : array_like
        array of longitudes defining the grid
    latitude : array_like
        array of latitudes defining the grid
    kwargs
        Other keyword arguments to pass to :class:`~pyproj.Geod`

    Returns
    -------
    dx, dy: 2D arrays of distances between grid points in the x and y direction

    Notes
    -----
    Accepts 1D or 2D arrays for latitude and longitude; assumes [Y, X]
    ordering for 2D arrays.

    .. deprecated:: 0.8.0
        Function has been replaced with the signed delta distance calculation
        `lat_lon_grid_deltas` and will be removed from MetPy in 0.11.0.

    """
    # The signed replacement does the heavy lifting; this wrapper only
    # strips the sign to preserve the historical (unsigned) behaviour.
    signed_dx, signed_dy = lat_lon_grid_deltas(longitude, latitude, **kwargs)
    return np.abs(signed_dx), np.abs(signed_dy)
|
def function[lat_lon_grid_spacing, parameter[longitude, latitude]]:
constant[Calculate the distance between grid points that are in a latitude/longitude format.
Calculate the distance between grid points when the grid spacing is defined by
delta lat/lon rather than delta x/y
Parameters
----------
longitude : array_like
array of longitudes defining the grid
latitude : array_like
array of latitudes defining the grid
kwargs
Other keyword arguments to pass to :class:`~pyproj.Geod`
Returns
-------
dx, dy: 2D arrays of distances between grid points in the x and y direction
Notes
-----
Accepts, 1D or 2D arrays for latitude and longitude
Assumes [Y, X] for 2D arrays
.. deprecated:: 0.8.0
Function has been replaced with the signed delta distance calculation
`lat_lon_grid_deltas` and will be removed from MetPy in 0.11.0.
]
<ast.Tuple object at 0x7da1b1d34250> assign[=] call[name[lat_lon_grid_deltas], parameter[name[longitude], name[latitude]]]
return[tuple[[<ast.Call object at 0x7da1b1d37340>, <ast.Call object at 0x7da1b1d35a80>]]]
|
keyword[def] identifier[lat_lon_grid_spacing] ( identifier[longitude] , identifier[latitude] ,** identifier[kwargs] ):
literal[string]
identifier[dx] , identifier[dy] = identifier[lat_lon_grid_deltas] ( identifier[longitude] , identifier[latitude] ,** identifier[kwargs] )
keyword[return] identifier[np] . identifier[abs] ( identifier[dx] ), identifier[np] . identifier[abs] ( identifier[dy] )
|
def lat_lon_grid_spacing(longitude, latitude, **kwargs):
"""Calculate the distance between grid points that are in a latitude/longitude format.
Calculate the distance between grid points when the grid spacing is defined by
delta lat/lon rather than delta x/y
Parameters
----------
longitude : array_like
array of longitudes defining the grid
latitude : array_like
array of latitudes defining the grid
kwargs
Other keyword arguments to pass to :class:`~pyproj.Geod`
Returns
-------
dx, dy: 2D arrays of distances between grid points in the x and y direction
Notes
-----
Accepts, 1D or 2D arrays for latitude and longitude
Assumes [Y, X] for 2D arrays
.. deprecated:: 0.8.0
Function has been replaced with the signed delta distance calculation
`lat_lon_grid_deltas` and will be removed from MetPy in 0.11.0.
"""
# Use the absolute value of the signed function replacing this
(dx, dy) = lat_lon_grid_deltas(longitude, latitude, **kwargs)
return (np.abs(dx), np.abs(dy))
|
def load_plugins(self, directory):
    """
    Loads plugins from the specified directory.

    `directory` is the full path to a directory containing python modules
    which each contain a subclass of the Plugin class.

    There is no criteria for a valid plugin at this level - any python
    module found in the directory will be loaded. Only modules that
    implement a subclass of the Plugin class above will be collected.

    The directory will be traversed recursively.
    """
    for filename in os.listdir(directory):
        # Full path of the entry under inspection.
        filepath = os.path.join(directory, filename)
        modname, ext = os.path.splitext(filename)
        # Import every plain .py file; importing it for its side effects
        # is what registers any Plugin subclasses it defines.
        if os.path.isfile(filepath) and ext == '.py':
            file, path, descr = imp.find_module(modname, [directory])
            if file:
                try:
                    imp.load_module(modname, file, path, descr)
                finally:
                    # imp.find_module() returns an open file object and
                    # the caller is responsible for closing it; the
                    # previous code leaked the handle on every plugin.
                    file.close()
        # Recurse into subdirectories.
        if os.path.isdir(filepath):
            self.load_plugins(filepath)
|
def function[load_plugins, parameter[self, directory]]:
constant[
Loads plugins from the specified directory.
`directory` is the full path to a directory containing python modules
which each contain a subclass of the Plugin class.
There is no criteria for a valid plugin at this level - any python
module found in the directory will be loaded. Only modules that
implement a subclass of the Plugin class above will be collected.
The directory will be traversed recursively.
]
for taget[name[filename]] in starred[call[name[os].listdir, parameter[name[directory]]]] begin[:]
variable[filepath] assign[=] call[name[os].path.join, parameter[name[directory], name[filename]]]
<ast.Tuple object at 0x7da1b0373eb0> assign[=] call[name[os].path.splitext, parameter[name[filename]]]
if <ast.BoolOp object at 0x7da1b0373af0> begin[:]
<ast.Tuple object at 0x7da1b0372b30> assign[=] call[name[imp].find_module, parameter[name[modname], list[[<ast.Name object at 0x7da1b0212230>]]]]
if name[file] begin[:]
variable[mod] assign[=] call[name[imp].load_module, parameter[name[modname], name[file], name[path], name[descr]]]
if call[name[os].path.isdir, parameter[name[filepath]]] begin[:]
call[name[self].load_plugins, parameter[name[filepath]]]
|
keyword[def] identifier[load_plugins] ( identifier[self] , identifier[directory] ):
literal[string]
keyword[for] identifier[filename] keyword[in] identifier[os] . identifier[listdir] ( identifier[directory] ):
identifier[filepath] = identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[filename] )
identifier[modname] , identifier[ext] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[filename] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[filepath] ) keyword[and] identifier[ext] == literal[string] :
identifier[file] , identifier[path] , identifier[descr] = identifier[imp] . identifier[find_module] ( identifier[modname] ,[ identifier[directory] ])
keyword[if] identifier[file] :
identifier[mod] = identifier[imp] . identifier[load_module] ( identifier[modname] , identifier[file] , identifier[path] , identifier[descr] )
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[filepath] ):
identifier[self] . identifier[load_plugins] ( identifier[filepath] )
|
def load_plugins(self, directory):
"""
Loads plugins from the specified directory.
`directory` is the full path to a directory containing python modules
which each contain a subclass of the Plugin class.
There is no criteria for a valid plugin at this level - any python
module found in the directory will be loaded. Only modules that
implement a subclass of the Plugin class above will be collected.
The directory will be traversed recursively.
"""
# walk directory
for filename in os.listdir(directory):
# path to file
filepath = os.path.join(directory, filename)
# if it's a file, load it
(modname, ext) = os.path.splitext(filename)
if os.path.isfile(filepath) and ext == '.py':
(file, path, descr) = imp.find_module(modname, [directory])
if file:
mod = imp.load_module(modname, file, path, descr) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# if it's a directory, recurse into it
if os.path.isdir(filepath):
self.load_plugins(filepath) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['filename']]
|
def get_data(self):
    """Return the stored geometry as an ``(x, y, w, h)`` tuple."""
    return tuple(getattr(self, name) for name in ('x', 'y', 'w', 'h'))
|
def function[get_data, parameter[self]]:
constant[Returns the x, y, w, h, data as a tuple.]
return[tuple[[<ast.Attribute object at 0x7da1b2592a10>, <ast.Attribute object at 0x7da1b2590f10>, <ast.Attribute object at 0x7da1b2592cb0>, <ast.Attribute object at 0x7da1b2590340>]]]
|
keyword[def] identifier[get_data] ( identifier[self] ):
literal[string]
keyword[return] identifier[self] . identifier[x] , identifier[self] . identifier[y] , identifier[self] . identifier[w] , identifier[self] . identifier[h]
|
def get_data(self):
"""Returns the x, y, w, h, data as a tuple."""
return (self.x, self.y, self.w, self.h)
|
def _change_soi(self, body):
    """Place the propagator in the Sphere of Influence of *body*.

    Updates the list of attracting bodies, the integration step, the
    active body name and the reference frame, depending on whether
    *body* is the central body or one of the alternate SOI bodies.
    """
    if body == self.central:
        target = self.central
        step = self.central_step
        frame = self.central.name
    else:
        target = body
        step = self.alt_step
        frame = self.SOI[body.name].frame

    self.bodies = [target]
    self.step = step
    self.active = target.name
    self.frame = frame
|
def function[_change_soi, parameter[self, body]]:
constant[Modify the inner parameters of the Kepler propagator in order to place
the spacecraft in the right Sphere of Influence
]
if compare[name[body] equal[==] name[self].central] begin[:]
name[self].bodies assign[=] list[[<ast.Attribute object at 0x7da1b0cf7dc0>]]
name[self].step assign[=] name[self].central_step
name[self].active assign[=] name[self].central.name
name[self].frame assign[=] name[self].central.name
|
keyword[def] identifier[_change_soi] ( identifier[self] , identifier[body] ):
literal[string]
keyword[if] identifier[body] == identifier[self] . identifier[central] :
identifier[self] . identifier[bodies] =[ identifier[self] . identifier[central] ]
identifier[self] . identifier[step] = identifier[self] . identifier[central_step]
identifier[self] . identifier[active] = identifier[self] . identifier[central] . identifier[name]
identifier[self] . identifier[frame] = identifier[self] . identifier[central] . identifier[name]
keyword[else] :
identifier[soi] = identifier[self] . identifier[SOI] [ identifier[body] . identifier[name] ]
identifier[self] . identifier[bodies] =[ identifier[body] ]
identifier[self] . identifier[step] = identifier[self] . identifier[alt_step]
identifier[self] . identifier[active] = identifier[body] . identifier[name]
identifier[self] . identifier[frame] = identifier[soi] . identifier[frame]
|
def _change_soi(self, body):
"""Modify the inner parameters of the Kepler propagator in order to place
the spacecraft in the right Sphere of Influence
"""
if body == self.central:
self.bodies = [self.central]
self.step = self.central_step
self.active = self.central.name
self.frame = self.central.name # depends on [control=['if'], data=[]]
else:
soi = self.SOI[body.name]
self.bodies = [body]
self.step = self.alt_step
self.active = body.name
self.frame = soi.frame
|
def outline(dataset, generate_faces=False):
    """Produce an outline of the full extent of the input dataset.

    Parameters
    ----------
    generate_faces : bool, optional
        When ``True``, also generate solid faces for the box.
        This is off by default.
    """
    outline_filter = vtk.vtkOutlineFilter()
    outline_filter.SetInputDataObject(dataset)
    outline_filter.SetGenerateFaces(generate_faces)
    outline_filter.Update()
    return wrap(outline_filter.GetOutputDataObject(0))
|
def function[outline, parameter[dataset, generate_faces]]:
constant[Produces an outline of the full extent for the input dataset.
Parameters
----------
generate_faces : bool, optional
Generate solid faces for the box. This is off by default
]
variable[alg] assign[=] call[name[vtk].vtkOutlineFilter, parameter[]]
call[name[alg].SetInputDataObject, parameter[name[dataset]]]
call[name[alg].SetGenerateFaces, parameter[name[generate_faces]]]
call[name[alg].Update, parameter[]]
return[call[name[wrap], parameter[call[name[alg].GetOutputDataObject, parameter[constant[0]]]]]]
|
keyword[def] identifier[outline] ( identifier[dataset] , identifier[generate_faces] = keyword[False] ):
literal[string]
identifier[alg] = identifier[vtk] . identifier[vtkOutlineFilter] ()
identifier[alg] . identifier[SetInputDataObject] ( identifier[dataset] )
identifier[alg] . identifier[SetGenerateFaces] ( identifier[generate_faces] )
identifier[alg] . identifier[Update] ()
keyword[return] identifier[wrap] ( identifier[alg] . identifier[GetOutputDataObject] ( literal[int] ))
|
def outline(dataset, generate_faces=False):
"""Produces an outline of the full extent for the input dataset.
Parameters
----------
generate_faces : bool, optional
Generate solid faces for the box. This is off by default
"""
alg = vtk.vtkOutlineFilter()
alg.SetInputDataObject(dataset)
alg.SetGenerateFaces(generate_faces)
alg.Update()
return wrap(alg.GetOutputDataObject(0))
|
def save(self):
    """Convert to JSON.

    Returns
    -------
    `dict`
        JSON data: the parent representation extended with the
        ``end_chars`` and ``default_end`` fields.
    """
    data = super().save()
    data.update(end_chars=self.end_chars, default_end=self.default_end)
    return data
|
def function[save, parameter[self]]:
constant[Convert to JSON.
Returns
-------
`dict`
JSON data.
]
variable[data] assign[=] call[call[name[super], parameter[]].save, parameter[]]
call[name[data]][constant[end_chars]] assign[=] name[self].end_chars
call[name[data]][constant[default_end]] assign[=] name[self].default_end
return[name[data]]
|
keyword[def] identifier[save] ( identifier[self] ):
literal[string]
identifier[data] = identifier[super] (). identifier[save] ()
identifier[data] [ literal[string] ]= identifier[self] . identifier[end_chars]
identifier[data] [ literal[string] ]= identifier[self] . identifier[default_end]
keyword[return] identifier[data]
|
def save(self):
"""Convert to JSON.
Returns
-------
`dict`
JSON data.
"""
data = super().save()
data['end_chars'] = self.end_chars
data['default_end'] = self.default_end
return data
|
def _check_pcre_minions(self, expr, greedy):  # pylint: disable=unused-argument
    '''
    Return the minions found by looking via regular expressions
    '''
    pattern = re.compile(expr)
    # Keep every known PKI minion whose id matches at the start
    # (re.match anchors at the beginning of the string).
    matched = [minion for minion in self._pki_minions()
               if pattern.match(minion)]
    return {'minions': matched, 'missing': []}
|
def function[_check_pcre_minions, parameter[self, expr, greedy]]:
constant[
Return the minions found by looking via regular expressions
]
variable[reg] assign[=] call[name[re].compile, parameter[name[expr]]]
return[dictionary[[<ast.Constant object at 0x7da1b2197730>, <ast.Constant object at 0x7da1b2197970>], [<ast.ListComp object at 0x7da1b2197940>, <ast.List object at 0x7da1b21972b0>]]]
|
keyword[def] identifier[_check_pcre_minions] ( identifier[self] , identifier[expr] , identifier[greedy] ):
literal[string]
identifier[reg] = identifier[re] . identifier[compile] ( identifier[expr] )
keyword[return] { literal[string] :[ identifier[m] keyword[for] identifier[m] keyword[in] identifier[self] . identifier[_pki_minions] () keyword[if] identifier[reg] . identifier[match] ( identifier[m] )],
literal[string] :[]}
|
def _check_pcre_minions(self, expr, greedy): # pylint: disable=unused-argument
'\n Return the minions found by looking via regular expressions\n '
reg = re.compile(expr)
return {'minions': [m for m in self._pki_minions() if reg.match(m)], 'missing': []}
|
def wait_for_job(self, job, interval=5, timeout=60):
    """
    Waits until the job indicated by job_resource is done or has failed

    Parameters
    ----------
    job : Union[dict, str]
        ``dict`` representing a BigQuery job resource, or a ``str``
        representing the BigQuery job id
    interval : float, optional
        Polling interval in seconds, default = 5
    timeout : float, optional
        Timeout in seconds, default = 60

    Returns
    -------
    dict
        Final state of the job resouce, as described here:
        https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.jobs.html#get

    Raises
    ------
    Union[JobExecutingException, BigQueryTimeoutException]
        On http/auth failures or timeout
    """
    # A plain job id may arrive as str/bytes/int; otherwise pull the id
    # out of the job resource dict.
    if isinstance(job, (six.binary_type, six.text_type, int)):
        job_id = str(job)
    else:
        job_id = str(job['jobReference']['jobId'])

    job_resource = None
    complete = False
    start_time = time()
    elapsed_time = 0

    # Poll the jobs endpoint until the job reports DONE or the time
    # budget is exhausted.
    while not complete and elapsed_time <= timeout:
        sleep(interval)
        request = self.bigquery.jobs().get(projectId=self.project_id,
                                           jobId=job_id)
        job_resource = request.execute(num_retries=self.num_retries)
        self._raise_executing_exception_if_error(job_resource)
        complete = job_resource.get('status').get('state') == u'DONE'
        elapsed_time = time() - start_time

    if not complete:
        logger.error('BigQuery job %s timeout' % job_id)
        raise BigQueryTimeoutException()

    return job_resource
|
def function[wait_for_job, parameter[self, job, interval, timeout]]:
constant[
Waits until the job indicated by job_resource is done or has failed
Parameters
----------
job : Union[dict, str]
``dict`` representing a BigQuery job resource, or a ``str``
representing the BigQuery job id
interval : float, optional
Polling interval in seconds, default = 5
timeout : float, optional
Timeout in seconds, default = 60
Returns
-------
dict
Final state of the job resouce, as described here:
https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.jobs.html#get
Raises
------
Union[JobExecutingException, BigQueryTimeoutException]
On http/auth failures or timeout
]
variable[complete] assign[=] constant[False]
variable[job_id] assign[=] call[name[str], parameter[<ast.IfExp object at 0x7da20c6aaa40>]]
variable[job_resource] assign[=] constant[None]
variable[start_time] assign[=] call[name[time], parameter[]]
variable[elapsed_time] assign[=] constant[0]
while <ast.UnaryOp object at 0x7da20c6a9120> begin[:]
call[name[sleep], parameter[name[interval]]]
variable[request] assign[=] call[call[name[self].bigquery.jobs, parameter[]].get, parameter[]]
variable[job_resource] assign[=] call[name[request].execute, parameter[]]
call[name[self]._raise_executing_exception_if_error, parameter[name[job_resource]]]
variable[complete] assign[=] compare[call[call[name[job_resource].get, parameter[constant[status]]].get, parameter[constant[state]]] equal[==] constant[DONE]]
variable[elapsed_time] assign[=] binary_operation[call[name[time], parameter[]] - name[start_time]]
if <ast.UnaryOp object at 0x7da20c6a9f60> begin[:]
call[name[logger].error, parameter[binary_operation[constant[BigQuery job %s timeout] <ast.Mod object at 0x7da2590d6920> name[job_id]]]]
<ast.Raise object at 0x7da20c6ab760>
return[name[job_resource]]
|
keyword[def] identifier[wait_for_job] ( identifier[self] , identifier[job] , identifier[interval] = literal[int] , identifier[timeout] = literal[int] ):
literal[string]
identifier[complete] = keyword[False]
identifier[job_id] = identifier[str] ( identifier[job] keyword[if] identifier[isinstance] ( identifier[job] ,
( identifier[six] . identifier[binary_type] , identifier[six] . identifier[text_type] , identifier[int] ))
keyword[else] identifier[job] [ literal[string] ][ literal[string] ])
identifier[job_resource] = keyword[None]
identifier[start_time] = identifier[time] ()
identifier[elapsed_time] = literal[int]
keyword[while] keyword[not] ( identifier[complete] keyword[or] identifier[elapsed_time] > identifier[timeout] ):
identifier[sleep] ( identifier[interval] )
identifier[request] = identifier[self] . identifier[bigquery] . identifier[jobs] (). identifier[get] ( identifier[projectId] = identifier[self] . identifier[project_id] ,
identifier[jobId] = identifier[job_id] )
identifier[job_resource] = identifier[request] . identifier[execute] ( identifier[num_retries] = identifier[self] . identifier[num_retries] )
identifier[self] . identifier[_raise_executing_exception_if_error] ( identifier[job_resource] )
identifier[complete] = identifier[job_resource] . identifier[get] ( literal[string] ). identifier[get] ( literal[string] )== literal[string]
identifier[elapsed_time] = identifier[time] ()- identifier[start_time]
keyword[if] keyword[not] identifier[complete] :
identifier[logger] . identifier[error] ( literal[string] % identifier[job_id] )
keyword[raise] identifier[BigQueryTimeoutException] ()
keyword[return] identifier[job_resource]
|
def wait_for_job(self, job, interval=5, timeout=60):
"""
Waits until the job indicated by job_resource is done or has failed
Parameters
----------
job : Union[dict, str]
``dict`` representing a BigQuery job resource, or a ``str``
representing the BigQuery job id
interval : float, optional
Polling interval in seconds, default = 5
timeout : float, optional
Timeout in seconds, default = 60
Returns
-------
dict
Final state of the job resouce, as described here:
https://developers.google.com/resources/api-libraries/documentation/bigquery/v2/python/latest/bigquery_v2.jobs.html#get
Raises
------
Union[JobExecutingException, BigQueryTimeoutException]
On http/auth failures or timeout
"""
complete = False
job_id = str(job if isinstance(job, (six.binary_type, six.text_type, int)) else job['jobReference']['jobId'])
job_resource = None
start_time = time()
elapsed_time = 0
while not (complete or elapsed_time > timeout):
sleep(interval)
request = self.bigquery.jobs().get(projectId=self.project_id, jobId=job_id)
job_resource = request.execute(num_retries=self.num_retries)
self._raise_executing_exception_if_error(job_resource)
complete = job_resource.get('status').get('state') == u'DONE'
elapsed_time = time() - start_time # depends on [control=['while'], data=[]]
# raise exceptions if timeout
if not complete:
logger.error('BigQuery job %s timeout' % job_id)
raise BigQueryTimeoutException() # depends on [control=['if'], data=[]]
return job_resource
|
def __create_proj_mat(self, size):
    """Create a very sparse random projection matrix.

    Entries are drawn from ``{-sqrt(s/k), 0, +sqrt(s/k)}`` with
    probabilities ``{1/(2s), 1 - 1/s, 1/(2s)}`` where ``s = 1/density``.

    [1] D. Achlioptas. Database-friendly random projections: Johnson-Lindenstrauss with binary coins.
    [2] P. Li, et al. Very sparse random projections.

    http://scikit-learn.org/stable/modules/random_projection.html#sparse-random-projection
    """
    # Sparsity parameter from [2]; larger s means a sparser matrix.
    s = 1 / self.density
    magnitude = np.sqrt(s / self.k)
    probabilities = [1 / (2 * s), 1 - 1 / s, 1 / (2 * s)]
    return np.random.choice([-magnitude, 0, magnitude],
                            size=size,
                            p=probabilities)
|
def function[__create_proj_mat, parameter[self, size]]:
constant[Create a random projection matrix
[1] D. Achlioptas. Database-friendly random projections: Johnson-Lindenstrauss with binary coins.
[2] P. Li, et al. Very sparse random projections.
http://scikit-learn.org/stable/modules/random_projection.html#sparse-random-projection
]
variable[s] assign[=] binary_operation[constant[1] / name[self].density]
return[call[name[np].random.choice, parameter[list[[<ast.UnaryOp object at 0x7da1b06244c0>, <ast.Constant object at 0x7da1b0627c40>, <ast.Call object at 0x7da1b06279a0>]]]]]
|
keyword[def] identifier[__create_proj_mat] ( identifier[self] , identifier[size] ):
literal[string]
identifier[s] = literal[int] / identifier[self] . identifier[density]
keyword[return] identifier[np] . identifier[random] . identifier[choice] ([- identifier[np] . identifier[sqrt] ( identifier[s] / identifier[self] . identifier[k] ), literal[int] , identifier[np] . identifier[sqrt] ( identifier[s] / identifier[self] . identifier[k] )],
identifier[size] = identifier[size] ,
identifier[p] =[ literal[int] /( literal[int] * identifier[s] ), literal[int] - literal[int] / identifier[s] , literal[int] /( literal[int] * identifier[s] )])
|
def __create_proj_mat(self, size):
"""Create a random projection matrix
[1] D. Achlioptas. Database-friendly random projections: Johnson-Lindenstrauss with binary coins.
[2] P. Li, et al. Very sparse random projections.
http://scikit-learn.org/stable/modules/random_projection.html#sparse-random-projection
"""
# [1]
# return np.random.choice([-np.sqrt(3), 0, np.sqrt(3)], size=size, p=[1 / 6, 2 / 3, 1 / 6])
# [2]
s = 1 / self.density
return np.random.choice([-np.sqrt(s / self.k), 0, np.sqrt(s / self.k)], size=size, p=[1 / (2 * s), 1 - 1 / s, 1 / (2 * s)])
|
def _recv_flow_ok(self, method_frame):
    '''
    Receive a flow control ack from the broker.
    '''
    # The single bit in the frame indicates whether the channel is active.
    self.channel._active = method_frame.args.read_bit()
    callback = self._flow_control_cb
    if callback is not None:
        callback()
|
def function[_recv_flow_ok, parameter[self, method_frame]]:
constant[
Receive a flow control ack from the broker.
]
name[self].channel._active assign[=] call[name[method_frame].args.read_bit, parameter[]]
if compare[name[self]._flow_control_cb is_not constant[None]] begin[:]
call[name[self]._flow_control_cb, parameter[]]
|
keyword[def] identifier[_recv_flow_ok] ( identifier[self] , identifier[method_frame] ):
literal[string]
identifier[self] . identifier[channel] . identifier[_active] = identifier[method_frame] . identifier[args] . identifier[read_bit] ()
keyword[if] identifier[self] . identifier[_flow_control_cb] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_flow_control_cb] ()
|
def _recv_flow_ok(self, method_frame):
"""
Receive a flow control ack from the broker.
"""
self.channel._active = method_frame.args.read_bit()
if self._flow_control_cb is not None:
self._flow_control_cb() # depends on [control=['if'], data=[]]
|
def process_blast(
    blast_dir,
    org_lengths,
    fraglengths=None,
    mode="ANIb",
    identity=0.3,
    coverage=0.7,
    logger=None,
):
    """Returns a tuple of ANIb results for .blast_tab files in the output dir.
    - blast_dir - path to the directory containing .blast_tab files
    - org_lengths - the base count for each input sequence
    - fraglengths - dictionary of query sequence fragment lengths, only
    needed for BLASTALL output
    - mode - parsing BLASTN+ or BLASTALL output?
    - logger - a logger for messages
    Returns the following pandas dataframes in an ANIResults object;
    query sequences are rows, subject sequences are columns:
    - alignment_lengths - non-symmetrical: total length of alignment
    - percentage_identity - non-symmetrical: ANIb (Goris) percentage identity
    - alignment_coverage - non-symmetrical: coverage of query
    - similarity_errors - non-symmetrical: count of similarity errors
    May throw a ZeroDivisionError if one or more BLAST runs failed, or a
    very distant sequence was included in the analysis.
    """
    # Process directory to identify input files
    blastfiles = pyani_files.get_input_files(blast_dir, ".blast_tab")
    # Hold data in ANIResults object
    results = ANIResults(list(org_lengths.keys()), mode)
    # Fill diagonal NA values for alignment_length with org_lengths
    for org, length in org_lengths.items():
        results.alignment_lengths[org][org] = length
    # Process .blast_tab files assuming that the filename format holds:
    # org1_vs_org2.blast_tab:
    for blastfile in blastfiles:
        qname, sname = os.path.splitext(os.path.split(blastfile)[-1])[0].split("_vs_")
        # We may have BLAST files from other analyses in the same directory.
        # If this occurs, we raise a warning, and skip the file.
        # Dict membership is O(1); no need to materialise the key list.
        if qname not in org_lengths:
            if logger:
                logger.warning(
                    "Query name %s not in input sequence list, skipping %s"
                    % (qname, blastfile)
                )
            continue
        if sname not in org_lengths:
            if logger:
                logger.warning(
                    "Subject name %s not in input sequence list, skipping %s"
                    % (sname, blastfile)
                )
            continue
        resultvals = parse_blast_tab(blastfile, fraglengths, identity, coverage, mode)
        # Coverage of the query genome by the aligned regions.
        query_cover = float(resultvals[0]) / org_lengths[qname]
        # Populate dataframes: when assigning data, we need to note that
        # we have asymmetrical data from BLAST output, so only the
        # upper triangle is populated
        results.add_tot_length(qname, sname, resultvals[0], sym=False)
        results.add_sim_errors(qname, sname, resultvals[1], sym=False)
        results.add_pid(qname, sname, 0.01 * resultvals[2], sym=False)
        results.add_coverage(qname, sname, query_cover)
    return results
|
def function[process_blast, parameter[blast_dir, org_lengths, fraglengths, mode, identity, coverage, logger]]:
constant[Returns a tuple of ANIb results for .blast_tab files in the output dir.
- blast_dir - path to the directory containing .blast_tab files
- org_lengths - the base count for each input sequence
- fraglengths - dictionary of query sequence fragment lengths, only
needed for BLASTALL output
- mode - parsing BLASTN+ or BLASTALL output?
- logger - a logger for messages
Returns the following pandas dataframes in an ANIResults object;
query sequences are rows, subject sequences are columns:
- alignment_lengths - non-symmetrical: total length of alignment
- percentage_identity - non-symmetrical: ANIb (Goris) percentage identity
- alignment_coverage - non-symmetrical: coverage of query
- similarity_errors - non-symmetrical: count of similarity errors
May throw a ZeroDivisionError if one or more BLAST runs failed, or a
very distant sequence was included in the analysis.
]
variable[blastfiles] assign[=] call[name[pyani_files].get_input_files, parameter[name[blast_dir], constant[.blast_tab]]]
variable[results] assign[=] call[name[ANIResults], parameter[call[name[list], parameter[call[name[org_lengths].keys, parameter[]]]], name[mode]]]
for taget[tuple[[<ast.Name object at 0x7da1b0da3880>, <ast.Name object at 0x7da1b0da3850>]]] in starred[call[name[list], parameter[call[name[org_lengths].items, parameter[]]]]] begin[:]
call[call[name[results].alignment_lengths][name[org]]][name[org]] assign[=] name[length]
for taget[name[blastfile]] in starred[name[blastfiles]] begin[:]
<ast.Tuple object at 0x7da1b0da34f0> assign[=] call[call[call[name[os].path.splitext, parameter[call[call[name[os].path.split, parameter[name[blastfile]]]][<ast.UnaryOp object at 0x7da1b0da31f0>]]]][constant[0]].split, parameter[constant[_vs_]]]
if compare[name[qname] <ast.NotIn object at 0x7da2590d7190> call[name[list], parameter[call[name[org_lengths].keys, parameter[]]]]] begin[:]
if name[logger] begin[:]
call[name[logger].warning, parameter[binary_operation[binary_operation[constant[Query name %s not in input ] <ast.Mod object at 0x7da2590d6920> name[qname]] + binary_operation[constant[sequence list, skipping %s] <ast.Mod object at 0x7da2590d6920> name[blastfile]]]]]
continue
if compare[name[sname] <ast.NotIn object at 0x7da2590d7190> call[name[list], parameter[call[name[org_lengths].keys, parameter[]]]]] begin[:]
if name[logger] begin[:]
call[name[logger].warning, parameter[binary_operation[binary_operation[constant[Subject name %s not in input ] <ast.Mod object at 0x7da2590d6920> name[sname]] + binary_operation[constant[sequence list, skipping %s] <ast.Mod object at 0x7da2590d6920> name[blastfile]]]]]
continue
variable[resultvals] assign[=] call[name[parse_blast_tab], parameter[name[blastfile], name[fraglengths], name[identity], name[coverage], name[mode]]]
variable[query_cover] assign[=] binary_operation[call[name[float], parameter[call[name[resultvals]][constant[0]]]] / call[name[org_lengths]][name[qname]]]
call[name[results].add_tot_length, parameter[name[qname], name[sname], call[name[resultvals]][constant[0]]]]
call[name[results].add_sim_errors, parameter[name[qname], name[sname], call[name[resultvals]][constant[1]]]]
call[name[results].add_pid, parameter[name[qname], name[sname], binary_operation[constant[0.01] * call[name[resultvals]][constant[2]]]]]
call[name[results].add_coverage, parameter[name[qname], name[sname], name[query_cover]]]
return[name[results]]
|
keyword[def] identifier[process_blast] (
identifier[blast_dir] ,
identifier[org_lengths] ,
identifier[fraglengths] = keyword[None] ,
identifier[mode] = literal[string] ,
identifier[identity] = literal[int] ,
identifier[coverage] = literal[int] ,
identifier[logger] = keyword[None] ,
):
literal[string]
identifier[blastfiles] = identifier[pyani_files] . identifier[get_input_files] ( identifier[blast_dir] , literal[string] )
identifier[results] = identifier[ANIResults] ( identifier[list] ( identifier[org_lengths] . identifier[keys] ()), identifier[mode] )
keyword[for] identifier[org] , identifier[length] keyword[in] identifier[list] ( identifier[org_lengths] . identifier[items] ()):
identifier[results] . identifier[alignment_lengths] [ identifier[org] ][ identifier[org] ]= identifier[length]
keyword[for] identifier[blastfile] keyword[in] identifier[blastfiles] :
identifier[qname] , identifier[sname] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[os] . identifier[path] . identifier[split] ( identifier[blastfile] )[- literal[int] ])[ literal[int] ]. identifier[split] ( literal[string] )
keyword[if] identifier[qname] keyword[not] keyword[in] identifier[list] ( identifier[org_lengths] . identifier[keys] ()):
keyword[if] identifier[logger] :
identifier[logger] . identifier[warning] (
literal[string] % identifier[qname]
+ literal[string] % identifier[blastfile]
)
keyword[continue]
keyword[if] identifier[sname] keyword[not] keyword[in] identifier[list] ( identifier[org_lengths] . identifier[keys] ()):
keyword[if] identifier[logger] :
identifier[logger] . identifier[warning] (
literal[string] % identifier[sname]
+ literal[string] % identifier[blastfile]
)
keyword[continue]
identifier[resultvals] = identifier[parse_blast_tab] ( identifier[blastfile] , identifier[fraglengths] , identifier[identity] , identifier[coverage] , identifier[mode] )
identifier[query_cover] = identifier[float] ( identifier[resultvals] [ literal[int] ])/ identifier[org_lengths] [ identifier[qname] ]
identifier[results] . identifier[add_tot_length] ( identifier[qname] , identifier[sname] , identifier[resultvals] [ literal[int] ], identifier[sym] = keyword[False] )
identifier[results] . identifier[add_sim_errors] ( identifier[qname] , identifier[sname] , identifier[resultvals] [ literal[int] ], identifier[sym] = keyword[False] )
identifier[results] . identifier[add_pid] ( identifier[qname] , identifier[sname] , literal[int] * identifier[resultvals] [ literal[int] ], identifier[sym] = keyword[False] )
identifier[results] . identifier[add_coverage] ( identifier[qname] , identifier[sname] , identifier[query_cover] )
keyword[return] identifier[results]
|
def process_blast(blast_dir, org_lengths, fraglengths=None, mode='ANIb', identity=0.3, coverage=0.7, logger=None):
"""Returns a tuple of ANIb results for .blast_tab files in the output dir.
- blast_dir - path to the directory containing .blast_tab files
- org_lengths - the base count for each input sequence
- fraglengths - dictionary of query sequence fragment lengths, only
needed for BLASTALL output
- mode - parsing BLASTN+ or BLASTALL output?
- logger - a logger for messages
Returns the following pandas dataframes in an ANIResults object;
query sequences are rows, subject sequences are columns:
- alignment_lengths - non-symmetrical: total length of alignment
- percentage_identity - non-symmetrical: ANIb (Goris) percentage identity
- alignment_coverage - non-symmetrical: coverage of query
- similarity_errors - non-symmetrical: count of similarity errors
May throw a ZeroDivisionError if one or more BLAST runs failed, or a
very distant sequence was included in the analysis.
"""
# Process directory to identify input files
blastfiles = pyani_files.get_input_files(blast_dir, '.blast_tab')
# Hold data in ANIResults object
results = ANIResults(list(org_lengths.keys()), mode)
# Fill diagonal NA values for alignment_length with org_lengths
for (org, length) in list(org_lengths.items()):
results.alignment_lengths[org][org] = length # depends on [control=['for'], data=[]]
# Process .blast_tab files assuming that the filename format holds:
# org1_vs_org2.blast_tab:
for blastfile in blastfiles:
(qname, sname) = os.path.splitext(os.path.split(blastfile)[-1])[0].split('_vs_')
# We may have BLAST files from other analyses in the same directory
# If this occurs, we raise a warning, and skip the file
if qname not in list(org_lengths.keys()):
if logger:
logger.warning('Query name %s not in input ' % qname + 'sequence list, skipping %s' % blastfile) # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=['qname']]
if sname not in list(org_lengths.keys()):
if logger:
logger.warning('Subject name %s not in input ' % sname + 'sequence list, skipping %s' % blastfile) # depends on [control=['if'], data=[]]
continue # depends on [control=['if'], data=['sname']]
resultvals = parse_blast_tab(blastfile, fraglengths, identity, coverage, mode)
query_cover = float(resultvals[0]) / org_lengths[qname]
# Populate dataframes: when assigning data, we need to note that
# we have asymmetrical data from BLAST output, so only the
# upper triangle is populated
results.add_tot_length(qname, sname, resultvals[0], sym=False)
results.add_sim_errors(qname, sname, resultvals[1], sym=False)
results.add_pid(qname, sname, 0.01 * resultvals[2], sym=False)
results.add_coverage(qname, sname, query_cover) # depends on [control=['for'], data=['blastfile']]
return results
|
async def replace_keys_start(wallet_handle: int,
                             did: str,
                             identity_json: str) -> str:
    """
    Generated new keys (signing and encryption keys) for an existing
    DID (owned by the caller of the library).
    :param wallet_handle: wallet handler (created by open_wallet).
    :param did: signing DID
    :param identity_json: Identity information as json. Example:
        {
            "seed": string, (optional) Seed that allows deterministic key creation (if not set random one will be created).
                                       Can be UTF-8, base64 or hex string.
            "crypto_type": string, (optional; if not set then ed25519 curve is used;
                      currently only 'ed25519' value is supported for this field)
        }
    :return: verkey
    """
    logger = logging.getLogger(__name__)
    logger.debug("replace_keys_start: >>> wallet_handle: %r, did: %r, identity_json: %r",
                 wallet_handle,
                 did,
                 identity_json)

    # The native callback is created lazily, once per process, and cached
    # on the function object itself.
    if not hasattr(replace_keys_start, "cb"):
        logger.debug("replace_keys_start: Creating callback")
        replace_keys_start.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))

    # Marshal Python values into C types expected by libindy.
    verkey = await do_call('indy_replace_keys_start',
                           c_int32(wallet_handle),
                           c_char_p(did.encode('utf-8')),
                           c_char_p(identity_json.encode('utf-8')),
                           replace_keys_start.cb)

    res = verkey.decode()
    logger.debug("replace_keys_start: <<< res: %r", res)
    return res
|
<ast.AsyncFunctionDef object at 0x7da207f03280>
|
keyword[async] keyword[def] identifier[replace_keys_start] ( identifier[wallet_handle] : identifier[int] ,
identifier[did] : identifier[str] ,
identifier[identity_json] : identifier[str] )-> identifier[str] :
literal[string]
identifier[logger] = identifier[logging] . identifier[getLogger] ( identifier[__name__] )
identifier[logger] . identifier[debug] ( literal[string] ,
identifier[wallet_handle] ,
identifier[did] ,
identifier[identity_json] )
keyword[if] keyword[not] identifier[hasattr] ( identifier[replace_keys_start] , literal[string] ):
identifier[logger] . identifier[debug] ( literal[string] )
identifier[replace_keys_start] . identifier[cb] = identifier[create_cb] ( identifier[CFUNCTYPE] ( keyword[None] , identifier[c_int32] , identifier[c_int32] , identifier[c_char_p] ))
identifier[c_wallet_handle] = identifier[c_int32] ( identifier[wallet_handle] )
identifier[c_did] = identifier[c_char_p] ( identifier[did] . identifier[encode] ( literal[string] ))
identifier[c_identity_json] = identifier[c_char_p] ( identifier[identity_json] . identifier[encode] ( literal[string] ))
identifier[verkey] = keyword[await] identifier[do_call] ( literal[string] ,
identifier[c_wallet_handle] ,
identifier[c_did] ,
identifier[c_identity_json] ,
identifier[replace_keys_start] . identifier[cb] )
identifier[res] = identifier[verkey] . identifier[decode] ()
identifier[logger] . identifier[debug] ( literal[string] , identifier[res] )
keyword[return] identifier[res]
|
async def replace_keys_start(wallet_handle: int, did: str, identity_json: str) -> str:
"""
Generated new keys (signing and encryption keys) for an existing
DID (owned by the caller of the library).
:param wallet_handle: wallet handler (created by open_wallet).
:param did: signing DID
:param identity_json: Identity information as json. Example:
{
"seed": string, (optional) Seed that allows deterministic key creation (if not set random one will be created).
Can be UTF-8, base64 or hex string.
"crypto_type": string, (optional; if not set then ed25519 curve is used;
currently only 'ed25519' value is supported for this field)
}
:return: verkey
"""
logger = logging.getLogger(__name__)
logger.debug('replace_keys_start: >>> wallet_handle: %r, did: %r, identity_json: %r', wallet_handle, did, identity_json)
if not hasattr(replace_keys_start, 'cb'):
logger.debug('replace_keys_start: Creating callback')
replace_keys_start.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p)) # depends on [control=['if'], data=[]]
c_wallet_handle = c_int32(wallet_handle)
c_did = c_char_p(did.encode('utf-8'))
c_identity_json = c_char_p(identity_json.encode('utf-8'))
verkey = await do_call('indy_replace_keys_start', c_wallet_handle, c_did, c_identity_json, replace_keys_start.cb)
res = verkey.decode()
logger.debug('replace_keys_start: <<< res: %r', res)
return res
|
def _should_sign_response_header(header_name):
    """
    :type header_name: str
    :rtype: bool
    """
    # The server-signature header itself is never part of the signed payload.
    if header_name == _HEADER_SERVER_SIGNATURE:
        return False
    # Sign only bunq-prefixed headers.
    return re.match(_PATTERN_HEADER_PREFIX_BUNQ, header_name) is not None
|
def function[_should_sign_response_header, parameter[header_name]]:
constant[
:type header_name: str
:rtype: bool
]
if compare[name[header_name] equal[==] name[_HEADER_SERVER_SIGNATURE]] begin[:]
return[constant[False]]
if call[name[re].match, parameter[name[_PATTERN_HEADER_PREFIX_BUNQ], name[header_name]]] begin[:]
return[constant[True]]
return[constant[False]]
|
keyword[def] identifier[_should_sign_response_header] ( identifier[header_name] ):
literal[string]
keyword[if] identifier[header_name] == identifier[_HEADER_SERVER_SIGNATURE] :
keyword[return] keyword[False]
keyword[if] identifier[re] . identifier[match] ( identifier[_PATTERN_HEADER_PREFIX_BUNQ] , identifier[header_name] ):
keyword[return] keyword[True]
keyword[return] keyword[False]
|
def _should_sign_response_header(header_name):
"""
:type header_name: str
:rtype: bool
"""
if header_name == _HEADER_SERVER_SIGNATURE:
return False # depends on [control=['if'], data=[]]
if re.match(_PATTERN_HEADER_PREFIX_BUNQ, header_name):
return True # depends on [control=['if'], data=[]]
return False
|
def check_pool(self, name):
    '''
    Check to see if a pool exists
    '''
    # Pool names come back as full paths (e.g. "/Common/web_pool");
    # compare only the final path component.
    pool_paths = self.bigIP.LocalLB.Pool.get_list()
    return any(path.split('/')[-1] == name for path in pool_paths)
|
def function[check_pool, parameter[self, name]]:
constant[
Check to see if a pool exists
]
variable[pools] assign[=] name[self].bigIP.LocalLB.Pool
for taget[name[pool]] in starred[call[name[pools].get_list, parameter[]]] begin[:]
if compare[call[call[name[pool].split, parameter[constant[/]]]][<ast.UnaryOp object at 0x7da1b21097b0>] equal[==] name[name]] begin[:]
return[constant[True]]
return[constant[False]]
|
keyword[def] identifier[check_pool] ( identifier[self] , identifier[name] ):
literal[string]
identifier[pools] = identifier[self] . identifier[bigIP] . identifier[LocalLB] . identifier[Pool]
keyword[for] identifier[pool] keyword[in] identifier[pools] . identifier[get_list] ():
keyword[if] identifier[pool] . identifier[split] ( literal[string] )[- literal[int] ]== identifier[name] :
keyword[return] keyword[True]
keyword[return] keyword[False]
|
def check_pool(self, name):
"""
Check to see if a pool exists
"""
pools = self.bigIP.LocalLB.Pool
for pool in pools.get_list():
if pool.split('/')[-1] == name:
return True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pool']]
return False
|
def indent(text, indent_string=" "):
    """Indent each line of text with the given indent string."""
    # str() lets non-string inputs (numbers, objects) be indented too.
    prefixed = ("%s%s" % (indent_string, line) for line in str(text).splitlines())
    return os.linesep.join(prefixed)
|
def function[indent, parameter[text, indent_string]]:
constant[Indent each line of text with the given indent string.]
variable[lines] assign[=] call[call[name[str], parameter[name[text]]].splitlines, parameter[]]
return[call[name[os].linesep.join, parameter[<ast.GeneratorExp object at 0x7da18f722230>]]]
|
keyword[def] identifier[indent] ( identifier[text] , identifier[indent_string] = literal[string] ):
literal[string]
identifier[lines] = identifier[str] ( identifier[text] ). identifier[splitlines] ()
keyword[return] identifier[os] . identifier[linesep] . identifier[join] ( literal[string] %( identifier[indent_string] , identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[lines] )
|
def indent(text, indent_string=' '):
"""Indent each line of text with the given indent string."""
lines = str(text).splitlines()
return os.linesep.join(('%s%s' % (indent_string, x) for x in lines))
|
def build(self):
    """Build single DNA strand along z-axis, starting with P on x-axis.

    Backbone atoms are placed on a helix using per-atom cylindrical offsets
    from the ``_backbone_properties`` table; each base is then rigidly
    transformed onto its backbone attachment atoms. Populates
    ``self._monomers`` and relabels monomers/atoms; returns None.
    """
    # Helical twist per nucleotide (radians).
    ang_per_res = (2 * numpy.pi) / self.nucleotides_per_turn
    # Per-atom (radius, azimuth-offset, z-offset) cylindrical coordinates.
    atom_offset_coords = _backbone_properties[self.helix_type]['atoms']
    # Left-handed helices rotate in the opposite azimuthal direction.
    if self.handedness == 'l':
        handedness = -1
    else:
        handedness = 1
    base_atom_labels = _backbone_properties[self.helix_type]['labels']
    monomers = []
    mol_code_format = _backbone_properties[self.helix_type]['mol_code_format']
    for i, b in enumerate(self.base_sequence):
        nucleotide = Nucleotide(
            mol_code=mol_code_format.format(b), ampal_parent=self)
        atoms_dict = OrderedDict()
        if (i == (len(self.base_sequence) - 1)) and not self.phos_3_prime:
            # Do not include phosphate on last nucleotide: skip the first
            # three backbone atoms (presumably the phosphate group — the
            # table slice assumes that ordering; TODO confirm).
            atom_labels = base_atom_labels[3:] + [_bases[b]['labels'][2]]
            atom_offsets = {k: v for k, v in zip(
                atom_labels, atom_offset_coords[3:])}
        else:
            atom_labels = base_atom_labels + [_bases[b]['labels'][2]]
            atom_offsets = {k: v for k, v in zip(
                atom_labels, atom_offset_coords)}
        # Place each backbone atom on the helix from its cylindrical offsets.
        for atom_label in atom_labels:
            r, zeta, z_shift = atom_offsets[atom_label]
            rot_ang = ((i * ang_per_res) + zeta) * handedness
            z = (self.rise_per_nucleotide * i) + z_shift
            coords = cylindrical_to_cartesian(
                radius=r, azimuth=rot_ang, z=z, radians=True)
            atom = Atom(
                coordinates=coords, element=atom_label[0],
                ampal_parent=nucleotide, res_label=atom_label)
            atoms_dict[atom_label] = atom
        # Rigid-body fit of the base template onto the placed backbone atoms.
        base_ref = _bases[b]['ref_atom']
        rot_adj = _bases[b]['rot_adj']
        base_dict = OrderedDict(
            zip(_bases[b]['labels'], _bases[b]['atoms']))
        # Transformation that maps the template's ref_atom->C1' pair onto the
        # corresponding backbone atoms.
        translation, angle, axis, point = find_transformations(
            base_dict[base_ref], base_dict["C1'"],
            atoms_dict[base_ref]._vector, atoms_dict["C1'"]._vector)
        q1 = Quaternion.angle_and_axis(angle, axis)
        # Align N9 C1' (apply the rotation+translation to every template atom).
        for k, v in base_dict.items():
            base_dict[k] = q1.rotate_vector(v, point) + translation
        # Rotate about the ref_atom->C1' axis to align O4' (glycosidic
        # orientation), correcting by the per-base rot_adj offset.
        axis = numpy.array(base_dict["C1'"]) - base_dict[base_ref]
        angle = dihedral(base_dict["O4'"], base_dict[base_ref],
                         base_dict["C1'"], atoms_dict["O4'"]) - rot_adj
        q2 = Quaternion.angle_and_axis(angle, axis)
        # Add the remaining base atoms; backbone atoms already in atoms_dict
        # take precedence over template copies.
        for k, v in list(base_dict.items()):
            if k not in atoms_dict:
                atom = Atom(q2.rotate_vector(v, base_dict[base_ref]),
                            element=k[0], ampal_parent=nucleotide,
                            res_label=k)
                atoms_dict[k] = atom
        nucleotide.atoms = atoms_dict
        monomers.append(nucleotide)
    self._monomers = monomers
    self.relabel_monomers()
    self.relabel_atoms()
    return
|
def function[build, parameter[self]]:
constant[Build single DNA strand along z-axis, starting with P on x-axis]
variable[ang_per_res] assign[=] binary_operation[binary_operation[constant[2] * name[numpy].pi] / name[self].nucleotides_per_turn]
variable[atom_offset_coords] assign[=] call[call[name[_backbone_properties]][name[self].helix_type]][constant[atoms]]
if compare[name[self].handedness equal[==] constant[l]] begin[:]
variable[handedness] assign[=] <ast.UnaryOp object at 0x7da1b264ada0>
variable[base_atom_labels] assign[=] call[call[name[_backbone_properties]][name[self].helix_type]][constant[labels]]
variable[monomers] assign[=] list[[]]
variable[mol_code_format] assign[=] call[call[name[_backbone_properties]][name[self].helix_type]][constant[mol_code_format]]
for taget[tuple[[<ast.Name object at 0x7da1b260bd00>, <ast.Name object at 0x7da1b2609ae0>]]] in starred[call[name[enumerate], parameter[name[self].base_sequence]]] begin[:]
variable[nucleotide] assign[=] call[name[Nucleotide], parameter[]]
variable[atoms_dict] assign[=] call[name[OrderedDict], parameter[]]
if <ast.BoolOp object at 0x7da1b2609510> begin[:]
variable[atom_labels] assign[=] binary_operation[call[name[base_atom_labels]][<ast.Slice object at 0x7da1b260a290>] + list[[<ast.Subscript object at 0x7da1b2609090>]]]
variable[atom_offsets] assign[=] <ast.DictComp object at 0x7da1b260b1f0>
for taget[name[atom_label]] in starred[name[atom_labels]] begin[:]
<ast.Tuple object at 0x7da1b2649210> assign[=] call[name[atom_offsets]][name[atom_label]]
variable[rot_ang] assign[=] binary_operation[binary_operation[binary_operation[name[i] * name[ang_per_res]] + name[zeta]] * name[handedness]]
variable[z] assign[=] binary_operation[binary_operation[name[self].rise_per_nucleotide * name[i]] + name[z_shift]]
variable[coords] assign[=] call[name[cylindrical_to_cartesian], parameter[]]
variable[atom] assign[=] call[name[Atom], parameter[]]
call[name[atoms_dict]][name[atom_label]] assign[=] name[atom]
variable[base_ref] assign[=] call[call[name[_bases]][name[b]]][constant[ref_atom]]
variable[rot_adj] assign[=] call[call[name[_bases]][name[b]]][constant[rot_adj]]
variable[base_dict] assign[=] call[name[OrderedDict], parameter[call[name[zip], parameter[call[call[name[_bases]][name[b]]][constant[labels]], call[call[name[_bases]][name[b]]][constant[atoms]]]]]]
<ast.Tuple object at 0x7da1b264a5f0> assign[=] call[name[find_transformations], parameter[call[name[base_dict]][name[base_ref]], call[name[base_dict]][constant[C1']], call[name[atoms_dict]][name[base_ref]]._vector, call[name[atoms_dict]][constant[C1']]._vector]]
variable[q1] assign[=] call[name[Quaternion].angle_and_axis, parameter[name[angle], name[axis]]]
for taget[tuple[[<ast.Name object at 0x7da1b2649bd0>, <ast.Name object at 0x7da1b264a4d0>]]] in starred[call[name[base_dict].items, parameter[]]] begin[:]
call[name[base_dict]][name[k]] assign[=] binary_operation[call[name[q1].rotate_vector, parameter[name[v], name[point]]] + name[translation]]
variable[axis] assign[=] binary_operation[call[name[numpy].array, parameter[call[name[base_dict]][constant[C1']]]] - call[name[base_dict]][name[base_ref]]]
variable[angle] assign[=] binary_operation[call[name[dihedral], parameter[call[name[base_dict]][constant[O4']], call[name[base_dict]][name[base_ref]], call[name[base_dict]][constant[C1']], call[name[atoms_dict]][constant[O4']]]] - name[rot_adj]]
variable[q2] assign[=] call[name[Quaternion].angle_and_axis, parameter[name[angle], name[axis]]]
for taget[tuple[[<ast.Name object at 0x7da1b26251b0>, <ast.Name object at 0x7da1b2626320>]]] in starred[call[name[list], parameter[call[name[base_dict].items, parameter[]]]]] begin[:]
if compare[name[k] <ast.NotIn object at 0x7da2590d7190> name[atoms_dict]] begin[:]
variable[atom] assign[=] call[name[Atom], parameter[call[name[q2].rotate_vector, parameter[name[v], call[name[base_dict]][name[base_ref]]]]]]
call[name[atoms_dict]][name[k]] assign[=] name[atom]
name[nucleotide].atoms assign[=] name[atoms_dict]
call[name[monomers].append, parameter[name[nucleotide]]]
name[self]._monomers assign[=] name[monomers]
call[name[self].relabel_monomers, parameter[]]
call[name[self].relabel_atoms, parameter[]]
return[None]
|
keyword[def] identifier[build] ( identifier[self] ):
literal[string]
identifier[ang_per_res] =( literal[int] * identifier[numpy] . identifier[pi] )/ identifier[self] . identifier[nucleotides_per_turn]
identifier[atom_offset_coords] = identifier[_backbone_properties] [ identifier[self] . identifier[helix_type] ][ literal[string] ]
keyword[if] identifier[self] . identifier[handedness] == literal[string] :
identifier[handedness] =- literal[int]
keyword[else] :
identifier[handedness] = literal[int]
identifier[base_atom_labels] = identifier[_backbone_properties] [ identifier[self] . identifier[helix_type] ][ literal[string] ]
identifier[monomers] =[]
identifier[mol_code_format] = identifier[_backbone_properties] [ identifier[self] . identifier[helix_type] ][ literal[string] ]
keyword[for] identifier[i] , identifier[b] keyword[in] identifier[enumerate] ( identifier[self] . identifier[base_sequence] ):
identifier[nucleotide] = identifier[Nucleotide] (
identifier[mol_code] = identifier[mol_code_format] . identifier[format] ( identifier[b] ), identifier[ampal_parent] = identifier[self] )
identifier[atoms_dict] = identifier[OrderedDict] ()
keyword[if] ( identifier[i] ==( identifier[len] ( identifier[self] . identifier[base_sequence] )- literal[int] )) keyword[and] keyword[not] identifier[self] . identifier[phos_3_prime] :
identifier[atom_labels] = identifier[base_atom_labels] [ literal[int] :]+[ identifier[_bases] [ identifier[b] ][ literal[string] ][ literal[int] ]]
identifier[atom_offsets] ={ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[zip] (
identifier[atom_labels] , identifier[atom_offset_coords] [ literal[int] :])}
keyword[else] :
identifier[atom_labels] = identifier[base_atom_labels] +[ identifier[_bases] [ identifier[b] ][ literal[string] ][ literal[int] ]]
identifier[atom_offsets] ={ identifier[k] : identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[zip] (
identifier[atom_labels] , identifier[atom_offset_coords] )}
keyword[for] identifier[atom_label] keyword[in] identifier[atom_labels] :
identifier[r] , identifier[zeta] , identifier[z_shift] = identifier[atom_offsets] [ identifier[atom_label] ]
identifier[rot_ang] =(( identifier[i] * identifier[ang_per_res] )+ identifier[zeta] )* identifier[handedness]
identifier[z] =( identifier[self] . identifier[rise_per_nucleotide] * identifier[i] )+ identifier[z_shift]
identifier[coords] = identifier[cylindrical_to_cartesian] (
identifier[radius] = identifier[r] , identifier[azimuth] = identifier[rot_ang] , identifier[z] = identifier[z] , identifier[radians] = keyword[True] )
identifier[atom] = identifier[Atom] (
identifier[coordinates] = identifier[coords] , identifier[element] = identifier[atom_label] [ literal[int] ],
identifier[ampal_parent] = identifier[nucleotide] , identifier[res_label] = identifier[atom_label] )
identifier[atoms_dict] [ identifier[atom_label] ]= identifier[atom]
identifier[base_ref] = identifier[_bases] [ identifier[b] ][ literal[string] ]
identifier[rot_adj] = identifier[_bases] [ identifier[b] ][ literal[string] ]
identifier[base_dict] = identifier[OrderedDict] (
identifier[zip] ( identifier[_bases] [ identifier[b] ][ literal[string] ], identifier[_bases] [ identifier[b] ][ literal[string] ]))
identifier[translation] , identifier[angle] , identifier[axis] , identifier[point] = identifier[find_transformations] (
identifier[base_dict] [ identifier[base_ref] ], identifier[base_dict] [ literal[string] ],
identifier[atoms_dict] [ identifier[base_ref] ]. identifier[_vector] , identifier[atoms_dict] [ literal[string] ]. identifier[_vector] )
identifier[q1] = identifier[Quaternion] . identifier[angle_and_axis] ( identifier[angle] , identifier[axis] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[base_dict] . identifier[items] ():
identifier[base_dict] [ identifier[k] ]= identifier[q1] . identifier[rotate_vector] ( identifier[v] , identifier[point] )+ identifier[translation]
identifier[axis] = identifier[numpy] . identifier[array] ( identifier[base_dict] [ literal[string] ])- identifier[base_dict] [ identifier[base_ref] ]
identifier[angle] = identifier[dihedral] ( identifier[base_dict] [ literal[string] ], identifier[base_dict] [ identifier[base_ref] ],
identifier[base_dict] [ literal[string] ], identifier[atoms_dict] [ literal[string] ])- identifier[rot_adj]
identifier[q2] = identifier[Quaternion] . identifier[angle_and_axis] ( identifier[angle] , identifier[axis] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[list] ( identifier[base_dict] . identifier[items] ()):
keyword[if] identifier[k] keyword[not] keyword[in] identifier[atoms_dict] :
identifier[atom] = identifier[Atom] ( identifier[q2] . identifier[rotate_vector] ( identifier[v] , identifier[base_dict] [ identifier[base_ref] ]),
identifier[element] = identifier[k] [ literal[int] ], identifier[ampal_parent] = identifier[nucleotide] ,
identifier[res_label] = identifier[k] )
identifier[atoms_dict] [ identifier[k] ]= identifier[atom]
identifier[nucleotide] . identifier[atoms] = identifier[atoms_dict]
identifier[monomers] . identifier[append] ( identifier[nucleotide] )
identifier[self] . identifier[_monomers] = identifier[monomers]
identifier[self] . identifier[relabel_monomers] ()
identifier[self] . identifier[relabel_atoms] ()
keyword[return]
|
def build(self):
    """Build single DNA strand along z-axis, starting with P on x-axis.

    For each base in ``self.base_sequence``, the sugar-phosphate backbone
    atoms are placed on a helix via cylindrical coordinates, then the base
    atoms are rigid-body transformed onto the backbone (aligned through the
    base reference atom and the C1'/O4' sugar atoms).  The resulting
    ``Nucleotide`` objects are stored in ``self._monomers``.
    """
    # Helix twist per residue, in radians.
    ang_per_res = 2 * numpy.pi / self.nucleotides_per_turn
    atom_offset_coords = _backbone_properties[self.helix_type]['atoms']
    # A left-handed helix ('l') winds in the opposite rotational direction.
    if self.handedness == 'l':
        handedness = -1 # depends on [control=['if'], data=[]]
    else:
        handedness = 1
    base_atom_labels = _backbone_properties[self.helix_type]['labels']
    monomers = []
    mol_code_format = _backbone_properties[self.helix_type]['mol_code_format']
    for (i, b) in enumerate(self.base_sequence):
        nucleotide = Nucleotide(mol_code=mol_code_format.format(b), ampal_parent=self)
        atoms_dict = OrderedDict()
        if i == len(self.base_sequence) - 1 and (not self.phos_3_prime):
            # Do not include phosphate on last nucleotide
            atom_labels = base_atom_labels[3:] + [_bases[b]['labels'][2]]
            atom_offsets = {k: v for (k, v) in zip(atom_labels, atom_offset_coords[3:])} # depends on [control=['if'], data=[]]
        else:
            atom_labels = base_atom_labels + [_bases[b]['labels'][2]]
            atom_offsets = {k: v for (k, v) in zip(atom_labels, atom_offset_coords)}
        # Place each backbone atom on the helix from its (radius, azimuth
        # offset, z offset) triple.
        for atom_label in atom_labels:
            (r, zeta, z_shift) = atom_offsets[atom_label]
            rot_ang = (i * ang_per_res + zeta) * handedness
            z = self.rise_per_nucleotide * i + z_shift
            coords = cylindrical_to_cartesian(radius=r, azimuth=rot_ang, z=z, radians=True)
            atom = Atom(coordinates=coords, element=atom_label[0], ampal_parent=nucleotide, res_label=atom_label)
            atoms_dict[atom_label] = atom # depends on [control=['for'], data=['atom_label']]
        base_ref = _bases[b]['ref_atom']
        rot_adj = _bases[b]['rot_adj']
        base_dict = OrderedDict(zip(_bases[b]['labels'], _bases[b]['atoms']))
        (translation, angle, axis, point) = find_transformations(base_dict[base_ref], base_dict["C1'"], atoms_dict[base_ref]._vector, atoms_dict["C1'"]._vector)
        q1 = Quaternion.angle_and_axis(angle, axis)
        # Align N9 C1'
        for (k, v) in base_dict.items():
            base_dict[k] = q1.rotate_vector(v, point) + translation # depends on [control=['for'], data=[]]
        # Rotate to align O4'
        axis = numpy.array(base_dict["C1'"]) - base_dict[base_ref]
        angle = dihedral(base_dict["O4'"], base_dict[base_ref], base_dict["C1'"], atoms_dict["O4'"]) - rot_adj
        q2 = Quaternion.angle_and_axis(angle, axis)
        # Add the transformed base atoms that are not already part of the
        # backbone atom set.
        for (k, v) in list(base_dict.items()):
            if k not in atoms_dict:
                atom = Atom(q2.rotate_vector(v, base_dict[base_ref]), element=k[0], ampal_parent=nucleotide, res_label=k)
                atoms_dict[k] = atom # depends on [control=['if'], data=['k', 'atoms_dict']] # depends on [control=['for'], data=[]]
        nucleotide.atoms = atoms_dict
        monomers.append(nucleotide) # depends on [control=['for'], data=[]]
    self._monomers = monomers
    self.relabel_monomers()
    self.relabel_atoms()
    return
|
def _geom_series_uint32(r, n):
"""Unsigned integer calculation of sum of geometric series:
1 + r + r^2 + r^3 + ... r^(n-1)
summed to n terms.
Calculated modulo 2**32.
Use the formula (r**n - 1) / (r - 1)
"""
if n == 0:
return 0
if n == 1 or r == 0:
return 1
m = 2**32
# Split (r - 1) into common factors with the modulo 2**32 -- i.e. all
# factors of 2; and other factors which are coprime with the modulo 2**32.
other_factors = r - 1
common_factor = 1
while (other_factors % 2) == 0:
other_factors //= 2
common_factor *= 2
other_factors_inverse = pow(other_factors, m - 1, m)
numerator = pow(r, n, common_factor * m) - 1
return (numerator // common_factor * other_factors_inverse) % m
|
def function[_geom_series_uint32, parameter[r, n]]:
constant[Unsigned integer calculation of sum of geometric series:
1 + r + r^2 + r^3 + ... r^(n-1)
summed to n terms.
Calculated modulo 2**32.
Use the formula (r**n - 1) / (r - 1)
]
if compare[name[n] equal[==] constant[0]] begin[:]
return[constant[0]]
if <ast.BoolOp object at 0x7da18f722260> begin[:]
return[constant[1]]
variable[m] assign[=] binary_operation[constant[2] ** constant[32]]
variable[other_factors] assign[=] binary_operation[name[r] - constant[1]]
variable[common_factor] assign[=] constant[1]
while compare[binary_operation[name[other_factors] <ast.Mod object at 0x7da2590d6920> constant[2]] equal[==] constant[0]] begin[:]
<ast.AugAssign object at 0x7da18f723490>
<ast.AugAssign object at 0x7da18f720160>
variable[other_factors_inverse] assign[=] call[name[pow], parameter[name[other_factors], binary_operation[name[m] - constant[1]], name[m]]]
variable[numerator] assign[=] binary_operation[call[name[pow], parameter[name[r], name[n], binary_operation[name[common_factor] * name[m]]]] - constant[1]]
return[binary_operation[binary_operation[binary_operation[name[numerator] <ast.FloorDiv object at 0x7da2590d6bc0> name[common_factor]] * name[other_factors_inverse]] <ast.Mod object at 0x7da2590d6920> name[m]]]
|
keyword[def] identifier[_geom_series_uint32] ( identifier[r] , identifier[n] ):
literal[string]
keyword[if] identifier[n] == literal[int] :
keyword[return] literal[int]
keyword[if] identifier[n] == literal[int] keyword[or] identifier[r] == literal[int] :
keyword[return] literal[int]
identifier[m] = literal[int] ** literal[int]
identifier[other_factors] = identifier[r] - literal[int]
identifier[common_factor] = literal[int]
keyword[while] ( identifier[other_factors] % literal[int] )== literal[int] :
identifier[other_factors] //= literal[int]
identifier[common_factor] *= literal[int]
identifier[other_factors_inverse] = identifier[pow] ( identifier[other_factors] , identifier[m] - literal[int] , identifier[m] )
identifier[numerator] = identifier[pow] ( identifier[r] , identifier[n] , identifier[common_factor] * identifier[m] )- literal[int]
keyword[return] ( identifier[numerator] // identifier[common_factor] * identifier[other_factors_inverse] )% identifier[m]
|
def _geom_series_uint32(r, n):
"""Unsigned integer calculation of sum of geometric series:
1 + r + r^2 + r^3 + ... r^(n-1)
summed to n terms.
Calculated modulo 2**32.
Use the formula (r**n - 1) / (r - 1)
"""
if n == 0:
return 0 # depends on [control=['if'], data=[]]
if n == 1 or r == 0:
return 1 # depends on [control=['if'], data=[]]
m = 2 ** 32
# Split (r - 1) into common factors with the modulo 2**32 -- i.e. all
# factors of 2; and other factors which are coprime with the modulo 2**32.
other_factors = r - 1
common_factor = 1
while other_factors % 2 == 0:
other_factors //= 2
common_factor *= 2 # depends on [control=['while'], data=[]]
other_factors_inverse = pow(other_factors, m - 1, m)
numerator = pow(r, n, common_factor * m) - 1
return numerator // common_factor * other_factors_inverse % m
|
def contains_all(self, other, atol=0.0):
    """Return ``True`` if every point defined by ``other`` is contained.
    Parameters
    ----------
    other :
        Collection of points to be tested. Can be given as a single
        point, a ``(d, N)`` array-like where ``d`` is the
        number of dimensions, or a length-``d`` `meshgrid` tuple.
    atol : float, optional
        The maximum allowed distance in 'inf'-norm between the
        other set and this interval product.
    Returns
    -------
    contains : bool
        ``True`` if all points are contained, ``False`` otherwise.
    Examples
    --------
    >>> min_pt, max_pt = [-1, 0, 2], [-0.5, 0, 3]
    >>> rbox = IntervalProd(min_pt, max_pt)
    Arrays are expected in ``(ndim, npoints)`` shape:
    >>> arr = np.array([[-1, 0, 2],  # defining one point at a time
    ...                 [-0.5, 0, 2]])
    >>> rbox.contains_all(arr.T)
    True
    Implicit meshgrids defined by coordinate vectors:
    >>> from odl.discr.grid import sparse_meshgrid
    >>> vec1 = (-1, -0.9, -0.7)
    >>> vec2 = (0, 0, 0)
    >>> vec3 = (2.5, 2.75, 3)
    >>> mg = sparse_meshgrid(vec1, vec2, vec3)
    >>> rbox.contains_all(mg)
    True
    Works also with an arbitrary iterable:
    >>> rbox.contains_all([[-1, -0.5],  # define points by axis
    ...                    [0, 0],
    ...                    [2, 2]])
    True
    Grids are also accepted as input:
    >>> agrid = odl.uniform_grid(rbox.min_pt, rbox.max_pt, [3, 1, 3])
    >>> rbox.contains_all(agrid)
    True
    """
    atol = float(atol)
    # Cheapest check first: ``other`` may itself be a single contained point.
    if other in self:
        return True
    # Grid-like objects expose ``meshgrid``; delegate to that representation.
    if hasattr(other, 'meshgrid'):
        return self.contains_all(other.meshgrid, atol=atol)
    if is_valid_input_meshgrid(other, self.ndim):
        # Per-axis extrema of the (sparse) meshgrid vectors suffice, since
        # the grid covers the full cartesian product of the vectors.
        axis_vecs = [vec.squeeze() for vec in other]
        lower = np.fromiter((np.min(v) for v in axis_vecs), dtype=float)
        upper = np.fromiter((np.max(v) for v in axis_vecs), dtype=float)
        return (np.all(lower >= self.min_pt - atol) and
                np.all(upper <= self.max_pt + atol))
    # Fallback: interpret ``other`` as a point array and bound-check it.
    other = np.asarray(other)
    if not is_valid_input_array(other, self.ndim):
        return False
    reduce_axis = None if self.ndim == 1 else 1
    lower = np.min(other, axis=reduce_axis)
    upper = np.max(other, axis=reduce_axis)
    return np.all(lower >= self.min_pt) and np.all(upper <= self.max_pt)
|
def function[contains_all, parameter[self, other, atol]]:
constant[Return ``True`` if all points defined by ``other`` are contained.
Parameters
----------
other :
Collection of points to be tested. Can be given as a single
point, a ``(d, N)`` array-like where ``d`` is the
number of dimensions, or a length-``d`` `meshgrid` tuple.
atol : float, optional
The maximum allowed distance in 'inf'-norm between the
other set and this interval product.
Returns
-------
contains : bool
``True`` if all points are contained, ``False`` otherwise.
Examples
--------
>>> min_pt, max_pt = [-1, 0, 2], [-0.5, 0, 3]
>>> rbox = IntervalProd(min_pt, max_pt)
Arrays are expected in ``(ndim, npoints)`` shape:
>>> arr = np.array([[-1, 0, 2], # defining one point at a time
... [-0.5, 0, 2]])
>>> rbox.contains_all(arr.T)
True
Implicit meshgrids defined by coordinate vectors:
>>> from odl.discr.grid import sparse_meshgrid
>>> vec1 = (-1, -0.9, -0.7)
>>> vec2 = (0, 0, 0)
>>> vec3 = (2.5, 2.75, 3)
>>> mg = sparse_meshgrid(vec1, vec2, vec3)
>>> rbox.contains_all(mg)
True
Works also with an arbitrary iterable:
>>> rbox.contains_all([[-1, -0.5], # define points by axis
... [0, 0],
... [2, 2]])
True
Grids are also accepted as input:
>>> agrid = odl.uniform_grid(rbox.min_pt, rbox.max_pt, [3, 1, 3])
>>> rbox.contains_all(agrid)
True
]
variable[atol] assign[=] call[name[float], parameter[name[atol]]]
if compare[name[other] in name[self]] begin[:]
return[constant[True]]
if call[name[hasattr], parameter[name[other], constant[meshgrid]]] begin[:]
return[call[name[self].contains_all, parameter[name[other].meshgrid]]]
variable[other] assign[=] call[name[np].asarray, parameter[name[other]]]
if call[name[is_valid_input_array], parameter[name[other], name[self].ndim]] begin[:]
if compare[name[self].ndim equal[==] constant[1]] begin[:]
variable[mins] assign[=] call[name[np].min, parameter[name[other]]]
variable[maxs] assign[=] call[name[np].max, parameter[name[other]]]
return[<ast.BoolOp object at 0x7da1b1e952d0>]
|
keyword[def] identifier[contains_all] ( identifier[self] , identifier[other] , identifier[atol] = literal[int] ):
literal[string]
identifier[atol] = identifier[float] ( identifier[atol] )
keyword[if] identifier[other] keyword[in] identifier[self] :
keyword[return] keyword[True]
keyword[if] identifier[hasattr] ( identifier[other] , literal[string] ):
keyword[return] identifier[self] . identifier[contains_all] ( identifier[other] . identifier[meshgrid] , identifier[atol] = identifier[atol] )
keyword[elif] identifier[is_valid_input_meshgrid] ( identifier[other] , identifier[self] . identifier[ndim] ):
identifier[vecs] = identifier[tuple] ( identifier[vec] . identifier[squeeze] () keyword[for] identifier[vec] keyword[in] identifier[other] )
identifier[mins] = identifier[np] . identifier[fromiter] (( identifier[np] . identifier[min] ( identifier[vec] ) keyword[for] identifier[vec] keyword[in] identifier[vecs] ), identifier[dtype] = identifier[float] )
identifier[maxs] = identifier[np] . identifier[fromiter] (( identifier[np] . identifier[max] ( identifier[vec] ) keyword[for] identifier[vec] keyword[in] identifier[vecs] ), identifier[dtype] = identifier[float] )
keyword[return] ( identifier[np] . identifier[all] ( identifier[mins] >= identifier[self] . identifier[min_pt] - identifier[atol] ) keyword[and]
identifier[np] . identifier[all] ( identifier[maxs] <= identifier[self] . identifier[max_pt] + identifier[atol] ))
identifier[other] = identifier[np] . identifier[asarray] ( identifier[other] )
keyword[if] identifier[is_valid_input_array] ( identifier[other] , identifier[self] . identifier[ndim] ):
keyword[if] identifier[self] . identifier[ndim] == literal[int] :
identifier[mins] = identifier[np] . identifier[min] ( identifier[other] )
identifier[maxs] = identifier[np] . identifier[max] ( identifier[other] )
keyword[else] :
identifier[mins] = identifier[np] . identifier[min] ( identifier[other] , identifier[axis] = literal[int] )
identifier[maxs] = identifier[np] . identifier[max] ( identifier[other] , identifier[axis] = literal[int] )
keyword[return] identifier[np] . identifier[all] ( identifier[mins] >= identifier[self] . identifier[min_pt] ) keyword[and] identifier[np] . identifier[all] ( identifier[maxs] <= identifier[self] . identifier[max_pt] )
keyword[else] :
keyword[return] keyword[False]
|
def contains_all(self, other, atol=0.0):
    """Return ``True`` if all points defined by ``other`` are contained.
    Parameters
    ----------
    other :
        Collection of points to be tested. Can be given as a single
        point, a ``(d, N)`` array-like where ``d`` is the
        number of dimensions, or a length-``d`` `meshgrid` tuple.
    atol : float, optional
        The maximum allowed distance in 'inf'-norm between the
        other set and this interval product.
    Returns
    -------
    contains : bool
        ``True`` if all points are contained, ``False`` otherwise.
    Examples
    --------
    >>> min_pt, max_pt = [-1, 0, 2], [-0.5, 0, 3]
    >>> rbox = IntervalProd(min_pt, max_pt)
    Arrays are expected in ``(ndim, npoints)`` shape:
    >>> arr = np.array([[-1, 0, 2],  # defining one point at a time
    ...                 [-0.5, 0, 2]])
    >>> rbox.contains_all(arr.T)
    True
    Implicit meshgrids defined by coordinate vectors:
    >>> from odl.discr.grid import sparse_meshgrid
    >>> vec1 = (-1, -0.9, -0.7)
    >>> vec2 = (0, 0, 0)
    >>> vec3 = (2.5, 2.75, 3)
    >>> mg = sparse_meshgrid(vec1, vec2, vec3)
    >>> rbox.contains_all(mg)
    True
    Works also with an arbitrary iterable:
    >>> rbox.contains_all([[-1, -0.5],  # define points by axis
    ...                    [0, 0],
    ...                    [2, 2]])
    True
    Grids are also accepted as input:
    >>> agrid = odl.uniform_grid(rbox.min_pt, rbox.max_pt, [3, 1, 3])
    >>> rbox.contains_all(agrid)
    True
    """
    atol = float(atol)
    # First try optimized methods
    if other in self:
        return True # depends on [control=['if'], data=[]]
    # Grid-like objects expose ``meshgrid``; test that representation.
    if hasattr(other, 'meshgrid'):
        return self.contains_all(other.meshgrid, atol=atol) # depends on [control=['if'], data=[]]
    elif is_valid_input_meshgrid(other, self.ndim):
        # Per-axis min/max of the meshgrid vectors are enough to bound-check
        # the whole cartesian product.
        vecs = tuple((vec.squeeze() for vec in other))
        mins = np.fromiter((np.min(vec) for vec in vecs), dtype=float)
        maxs = np.fromiter((np.max(vec) for vec in vecs), dtype=float)
        return np.all(mins >= self.min_pt - atol) and np.all(maxs <= self.max_pt + atol) # depends on [control=['if'], data=[]]
    # Convert to array and check each element
    other = np.asarray(other)
    if is_valid_input_array(other, self.ndim):
        if self.ndim == 1:
            mins = np.min(other)
            maxs = np.max(other) # depends on [control=['if'], data=[]]
        else:
            mins = np.min(other, axis=1)
            maxs = np.max(other, axis=1)
        # NOTE(review): ``atol`` is not applied in this array branch, unlike
        # the meshgrid branch above -- confirm whether that is intended.
        return np.all(mins >= self.min_pt) and np.all(maxs <= self.max_pt) # depends on [control=['if'], data=[]]
    else:
        return False
|
def create_role(self, name, policies=None, ttl=None, max_ttl=None, period=None, bound_service_principal_ids=None,
                bound_group_ids=None, bound_location=None, bound_subscription_ids=None,
                bound_resource_group_names=None, bound_scale_sets=None, mount_point=DEFAULT_MOUNT_POINT):
    """Create a role in the method.
    Role types have specific entities that can perform login operations against this endpoint. Constraints specific
    to the role type must be set on the role. These are applied to the authenticated entities attempting to login.
    Supported methods:
        POST: /auth/{mount_point}/role/{name}. Produces: 204 (empty body)
    :param name: Name of the role.
    :type name: str | unicode
    :param policies: Policies to be set on tokens issued using this role.
    :type policies: list
    :param ttl: The TTL period of tokens issued using this role in seconds.
    :type ttl: str | unicode
    :param max_ttl: The maximum allowed lifetime of tokens issued in seconds using this role.
    :type max_ttl: str | unicode
    :param period: If set, indicates that the token generated using this role should never expire. The token should
        be renewed within the duration specified by this value. At each renewal, the token's TTL will be set to the
        value of this parameter.
    :type period: str | unicode
    :param bound_service_principal_ids: The list of Service Principal IDs that login is restricted to.
    :type bound_service_principal_ids: list
    :param bound_group_ids: The list of group ids that login is restricted to.
    :type bound_group_ids: list
    :param bound_location: The list of locations that login is restricted to.
    :type bound_location: list
    :param bound_subscription_ids: The list of subscription IDs that login is restricted to.
    :type bound_subscription_ids: list
    :param bound_resource_group_names: The list of resource groups that login is restricted to.
    :type bound_resource_group_names: list
    :param bound_scale_sets: The list of scale set names that the login is restricted to.
    :type bound_scale_sets: list
    :param mount_point: The "path" the azure auth method was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    :raises exceptions.ParamValidationError: If ``policies`` is not a list of strings.
    """
    # Avoid a mutable default argument; None means "no policies".
    if policies is None:
        policies = []
    # Validate early: the API expects a JSON list of policy name strings.
    # (Generator expression instead of a throwaway list inside all().)
    if not isinstance(policies, list) or not all(isinstance(p, str) for p in policies):
        error_msg = 'unsupported policies argument provided "{arg}" ({arg_type}), required type: List[str]"'
        raise exceptions.ParamValidationError(error_msg.format(
            arg=policies,
            arg_type=type(policies),
        ))
    # None-valued fields are passed through to the API unchanged.
    params = {
        'policies': policies,
        'ttl': ttl,
        'max_ttl': max_ttl,
        'period': period,
        'bound_service_principal_ids': bound_service_principal_ids,
        'bound_group_ids': bound_group_ids,
        'bound_location': bound_location,
        'bound_subscription_ids': bound_subscription_ids,
        'bound_resource_group_names': bound_resource_group_names,
        'bound_scale_sets': bound_scale_sets,
    }
    api_path = '/v1/auth/{mount_point}/role/{name}'.format(mount_point=mount_point, name=name)
    return self._adapter.post(
        url=api_path,
        json=params,
    )
|
def function[create_role, parameter[self, name, policies, ttl, max_ttl, period, bound_service_principal_ids, bound_group_ids, bound_location, bound_subscription_ids, bound_resource_group_names, bound_scale_sets, mount_point]]:
constant[Create a role in the method.
Role types have specific entities that can perform login operations against this endpoint. Constraints specific
to the role type must be set on the role. These are applied to the authenticated entities attempting to login.
Supported methods:
POST: /auth/{mount_point}/role/{name}. Produces: 204 (empty body)
:param name: Name of the role.
:type name: str | unicode
:param policies: Policies to be set on tokens issued using this role.
:type policies: list
:param ttl: The TTL period of tokens issued using this role in seconds.
:type ttl: str | unicode
:param max_ttl: The maximum allowed lifetime of tokens issued in seconds using this role.
:type max_ttl: str | unicode
:param period: If set, indicates that the token generated using this role should never expire. The token should
be renewed within the duration specified by this value. At each renewal, the token's TTL will be set to the
value of this parameter.
:type period: str | unicode
:param bound_service_principal_ids: The list of Service Principal IDs that login is restricted to.
:type bound_service_principal_ids: list
:param bound_group_ids: The list of group ids that login is restricted to.
:type bound_group_ids: list
:param bound_location: The list of locations that login is restricted to.
:type bound_location: list
:param bound_subscription_ids: The list of subscription IDs that login is restricted to.
:type bound_subscription_ids: list
:param bound_resource_group_names: The list of resource groups that login is restricted to.
:type bound_resource_group_names: list
:param bound_scale_sets: The list of scale set names that the login is restricted to.
:type bound_scale_sets: list
:param mount_point: The "path" the azure auth method was mounted on.
:type mount_point: str | unicode
:return: The response of the request.
:rtype: requests.Response
]
if compare[name[policies] is constant[None]] begin[:]
variable[policies] assign[=] list[[]]
if <ast.BoolOp object at 0x7da18bc71f60> begin[:]
variable[error_msg] assign[=] constant[unsupported policies argument provided "{arg}" ({arg_type}), required type: List[str]"]
<ast.Raise object at 0x7da18bc71570>
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da18bc72260>, <ast.Constant object at 0x7da18bc71f90>, <ast.Constant object at 0x7da18bc73040>, <ast.Constant object at 0x7da18bc711b0>, <ast.Constant object at 0x7da18bc71990>, <ast.Constant object at 0x7da18bc721d0>, <ast.Constant object at 0x7da18bc71660>, <ast.Constant object at 0x7da18bc70d90>, <ast.Constant object at 0x7da18bc70d30>, <ast.Constant object at 0x7da18bc71e70>], [<ast.Name object at 0x7da18bc70130>, <ast.Name object at 0x7da18bc70b50>, <ast.Name object at 0x7da18bc72350>, <ast.Name object at 0x7da18bc713c0>, <ast.Name object at 0x7da18bc722f0>, <ast.Name object at 0x7da18bc700d0>, <ast.Name object at 0x7da18bc739d0>, <ast.Name object at 0x7da18bc72e90>, <ast.Name object at 0x7da18bc73520>, <ast.Name object at 0x7da18bc737f0>]]
variable[api_path] assign[=] call[constant[/v1/auth/{mount_point}/role/{name}].format, parameter[]]
return[call[name[self]._adapter.post, parameter[]]]
|
keyword[def] identifier[create_role] ( identifier[self] , identifier[name] , identifier[policies] = keyword[None] , identifier[ttl] = keyword[None] , identifier[max_ttl] = keyword[None] , identifier[period] = keyword[None] , identifier[bound_service_principal_ids] = keyword[None] ,
identifier[bound_group_ids] = keyword[None] , identifier[bound_location] = keyword[None] , identifier[bound_subscription_ids] = keyword[None] ,
identifier[bound_resource_group_names] = keyword[None] , identifier[bound_scale_sets] = keyword[None] , identifier[mount_point] = identifier[DEFAULT_MOUNT_POINT] ):
literal[string]
keyword[if] identifier[policies] keyword[is] keyword[None] :
identifier[policies] =[]
keyword[if] keyword[not] identifier[isinstance] ( identifier[policies] , identifier[list] ) keyword[or] keyword[not] identifier[all] ([ identifier[isinstance] ( identifier[p] , identifier[str] ) keyword[for] identifier[p] keyword[in] identifier[policies] ]):
identifier[error_msg] = literal[string]
keyword[raise] identifier[exceptions] . identifier[ParamValidationError] ( identifier[error_msg] . identifier[format] (
identifier[arg] = identifier[policies] ,
identifier[arg_type] = identifier[type] ( identifier[policies] ),
))
identifier[params] ={
literal[string] : identifier[policies] ,
literal[string] : identifier[ttl] ,
literal[string] : identifier[max_ttl] ,
literal[string] : identifier[period] ,
literal[string] : identifier[bound_service_principal_ids] ,
literal[string] : identifier[bound_group_ids] ,
literal[string] : identifier[bound_location] ,
literal[string] : identifier[bound_subscription_ids] ,
literal[string] : identifier[bound_resource_group_names] ,
literal[string] : identifier[bound_scale_sets] ,
}
identifier[api_path] = literal[string] . identifier[format] ( identifier[mount_point] = identifier[mount_point] , identifier[name] = identifier[name] )
keyword[return] identifier[self] . identifier[_adapter] . identifier[post] (
identifier[url] = identifier[api_path] ,
identifier[json] = identifier[params] ,
)
|
def create_role(self, name, policies=None, ttl=None, max_ttl=None, period=None, bound_service_principal_ids=None, bound_group_ids=None, bound_location=None, bound_subscription_ids=None, bound_resource_group_names=None, bound_scale_sets=None, mount_point=DEFAULT_MOUNT_POINT):
    """Create a role in the method.
    Role types have specific entities that can perform login operations against this endpoint. Constraints specific
    to the role type must be set on the role. These are applied to the authenticated entities attempting to login.
    Supported methods:
        POST: /auth/{mount_point}/role/{name}. Produces: 204 (empty body)
    :param name: Name of the role.
    :type name: str | unicode
    :param policies: Policies to be set on tokens issued using this role.
    :type policies: list
    :param ttl: The TTL period of tokens issued using this role in seconds.
    :type ttl: str | unicode
    :param max_ttl: The maximum allowed lifetime of tokens issued in seconds using this role.
    :type max_ttl: str | unicode
    :param period: If set, indicates that the token generated using this role should never expire. The token should
        be renewed within the duration specified by this value. At each renewal, the token's TTL will be set to the
        value of this parameter.
    :type period: str | unicode
    :param bound_service_principal_ids: The list of Service Principal IDs that login is restricted to.
    :type bound_service_principal_ids: list
    :param bound_group_ids: The list of group ids that login is restricted to.
    :type bound_group_ids: list
    :param bound_location: The list of locations that login is restricted to.
    :type bound_location: list
    :param bound_subscription_ids: The list of subscription IDs that login is restricted to.
    :type bound_subscription_ids: list
    :param bound_resource_group_names: The list of resource groups that login is restricted to.
    :type bound_resource_group_names: list
    :param bound_scale_sets: The list of scale set names that the login is restricted to.
    :type bound_scale_sets: list
    :param mount_point: The "path" the azure auth method was mounted on.
    :type mount_point: str | unicode
    :return: The response of the request.
    :rtype: requests.Response
    """
    # Avoid mutating a shared default; None means "no policies".
    if policies is None:
        policies = [] # depends on [control=['if'], data=['policies']]
    # The API expects a JSON list of policy name strings; reject anything else.
    if not isinstance(policies, list) or not all([isinstance(p, str) for p in policies]):
        error_msg = 'unsupported policies argument provided "{arg}" ({arg_type}), required type: List[str]"'
        raise exceptions.ParamValidationError(error_msg.format(arg=policies, arg_type=type(policies))) # depends on [control=['if'], data=[]]
    # None-valued fields are passed through to the API unchanged.
    params = {'policies': policies, 'ttl': ttl, 'max_ttl': max_ttl, 'period': period, 'bound_service_principal_ids': bound_service_principal_ids, 'bound_group_ids': bound_group_ids, 'bound_location': bound_location, 'bound_subscription_ids': bound_subscription_ids, 'bound_resource_group_names': bound_resource_group_names, 'bound_scale_sets': bound_scale_sets}
    api_path = '/v1/auth/{mount_point}/role/{name}'.format(mount_point=mount_point, name=name)
    return self._adapter.post(url=api_path, json=params)
|
def import_parms(self, args):
    """Copy every key/value pair of the external mapping ``args`` into the
    internal parameter store via ``set_parm``."""
    for key in args:
        self.set_parm(key, args[key])
|
def function[import_parms, parameter[self, args]]:
constant[Import external dict to internal dict]
for taget[tuple[[<ast.Name object at 0x7da1b0123a00>, <ast.Name object at 0x7da1b0122da0>]]] in starred[call[name[args].items, parameter[]]] begin[:]
call[name[self].set_parm, parameter[name[key], name[val]]]
|
keyword[def] identifier[import_parms] ( identifier[self] , identifier[args] ):
literal[string]
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[args] . identifier[items] ():
identifier[self] . identifier[set_parm] ( identifier[key] , identifier[val] )
|
def import_parms(self, args):
    """Import external dict to internal dict.

    Each key/value pair of ``args`` is stored via ``set_parm``.
    """
    for (key, val) in args.items():
        self.set_parm(key, val) # depends on [control=['for'], data=[]]
|
def flush(self):
    """Flush the collected information: flush level 0, then record the
    pending import, if one was collected."""
    self.__flushLevel(0)
    pending = self.__lastImport
    if pending is not None:
        self.imports.append(pending)
|
def function[flush, parameter[self]]:
constant[Flushes the collected information]
call[name[self].__flushLevel, parameter[constant[0]]]
if compare[name[self].__lastImport is_not constant[None]] begin[:]
call[name[self].imports.append, parameter[name[self].__lastImport]]
|
keyword[def] identifier[flush] ( identifier[self] ):
literal[string]
identifier[self] . identifier[__flushLevel] ( literal[int] )
keyword[if] identifier[self] . identifier[__lastImport] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[imports] . identifier[append] ( identifier[self] . identifier[__lastImport] )
|
def flush(self):
    """Flushes the collected information"""
    # Flush from the outermost level (0), then record the pending import.
    self.__flushLevel(0)
    if self.__lastImport is not None:
        self.imports.append(self.__lastImport) # depends on [control=['if'], data=[]]
|
def remove_child_vault(self, vault_id, child_id):
    """Removes a child from a vault.
    arg: vault_id (osid.id.Id): the ``Id`` of a vault
    arg: child_id (osid.id.Id): the ``Id`` of the child
    raise: NotFound - ``vault_id`` not parent of ``child_id``
    raise: NullArgument - ``vault_id`` or ``child_id`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinHierarchyDesignSession.remove_child_bin_template
    catalog_session = self._catalog_session
    if catalog_session is None:
        # No pluggable catalog session configured; use the hierarchy session.
        return self._hierarchy_session.remove_child(id_=vault_id, child_id=child_id)
    return catalog_session.remove_child_catalog(catalog_id=vault_id, child_id=child_id)
|
def function[remove_child_vault, parameter[self, vault_id, child_id]]:
constant[Removes a child from a vault.
arg: vault_id (osid.id.Id): the ``Id`` of a vault
arg: child_id (osid.id.Id): the ``Id`` of the child
raise: NotFound - ``vault_id`` not parent of ``child_id``
raise: NullArgument - ``vault_id`` or ``child_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
]
if compare[name[self]._catalog_session is_not constant[None]] begin[:]
return[call[name[self]._catalog_session.remove_child_catalog, parameter[]]]
return[call[name[self]._hierarchy_session.remove_child, parameter[]]]
|
keyword[def] identifier[remove_child_vault] ( identifier[self] , identifier[vault_id] , identifier[child_id] ):
literal[string]
keyword[if] identifier[self] . identifier[_catalog_session] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[self] . identifier[_catalog_session] . identifier[remove_child_catalog] ( identifier[catalog_id] = identifier[vault_id] , identifier[child_id] = identifier[child_id] )
keyword[return] identifier[self] . identifier[_hierarchy_session] . identifier[remove_child] ( identifier[id_] = identifier[vault_id] , identifier[child_id] = identifier[child_id] )
|
def remove_child_vault(self, vault_id, child_id):
    """Removes a child from a vault.
    arg: vault_id (osid.id.Id): the ``Id`` of a vault
    arg: child_id (osid.id.Id): the ``Id`` of the child
    raise: NotFound - ``vault_id`` not parent of ``child_id``
    raise: NullArgument - ``vault_id`` or ``child_id`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinHierarchyDesignSession.remove_child_bin_template
    # Prefer the pluggable catalog session when one is configured.
    if self._catalog_session is not None:
        return self._catalog_session.remove_child_catalog(catalog_id=vault_id, child_id=child_id) # depends on [control=['if'], data=[]]
    # Otherwise fall back to the hierarchy session.
    return self._hierarchy_session.remove_child(id_=vault_id, child_id=child_id)
|
def validate_config(cls, config):
    """
    Validates a config dictionary parsed from a cluster config file.
    Checks that a discovery method is defined and that at least one of
    the balancers in the config are installed and available.

    :param config: parsed cluster configuration mapping.
    :raises ValueError: if no discovery method is defined, or if none of
        the installed balancers has an entry in ``config``.
    """
    if "discovery" not in config:
        raise ValueError("No discovery method defined.")
    # get_installed_classes() returns a mapping keyed by balancer name;
    # iterate it directly (no need to materialise .keys() or build a list
    # inside any()).
    if not any(balancer in config for balancer in Balancer.get_installed_classes()):
        raise ValueError("No available balancer configs defined.")
|
def function[validate_config, parameter[cls, config]]:
constant[
Validates a config dictionary parsed from a cluster config file.
Checks that a discovery method is defined and that at least one of
the balancers in the config are installed and available.
]
if compare[constant[discovery] <ast.NotIn object at 0x7da2590d7190> name[config]] begin[:]
<ast.Raise object at 0x7da18f09c3d0>
variable[installed_balancers] assign[=] call[call[name[Balancer].get_installed_classes, parameter[]].keys, parameter[]]
if <ast.UnaryOp object at 0x7da18f09d0f0> begin[:]
<ast.Raise object at 0x7da18f09e5f0>
|
keyword[def] identifier[validate_config] ( identifier[cls] , identifier[config] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[config] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[installed_balancers] = identifier[Balancer] . identifier[get_installed_classes] (). identifier[keys] ()
keyword[if] keyword[not] identifier[any] ([ identifier[balancer] keyword[in] identifier[config] keyword[for] identifier[balancer] keyword[in] identifier[installed_balancers] ]):
keyword[raise] identifier[ValueError] ( literal[string] )
|
def validate_config(cls, config):
    """
    Validates a config dictionary parsed from a cluster config file.

    Checks that a discovery method is defined and that at least one of
    the balancers in the config are installed and available.

    :param config: mapping parsed from a cluster config file
    :raises ValueError: if no discovery method is defined, or none of
        the installed balancers appears in the config
    """
    if 'discovery' not in config:
        raise ValueError('No discovery method defined.') # depends on [control=['if'], data=[]]
    # Iterate the dict directly (no redundant .keys()) and use a generator
    # with any() rather than building an intermediate list.
    installed_balancers = Balancer.get_installed_classes()
    if not any(balancer in config for balancer in installed_balancers):
        raise ValueError('No available balancer configs defined.') # depends on [control=['if'], data=[]]
|
def match_sr(self, svc_ref, cid=None):
    # type: (ServiceReference, Optional[Tuple[str, str]] ) -> bool
    """
    Checks whether this export registration matches the given service
    reference and, optionally, a container ID.

    :param svc_ref: A service reference
    :param cid: A container ID
    :return: True if the service matches this export registration
    """
    with self.__lock:
        reference = self.get_reference()
        if reference is None:
            # Nothing exported: nothing can match.
            return False

        same_reference = reference == svc_ref
        if cid is None:
            # No container filter requested; the reference decides alone.
            return same_reference

        container_id = self.get_export_container_id()
        if container_id is None:
            return False
        return same_reference and container_id == cid
|
def function[match_sr, parameter[self, svc_ref, cid]]:
constant[
Checks if this export registration matches the given service reference
:param svc_ref: A service reference
:param cid: A container ID
:return: True if the service matches this export registration
]
with name[self].__lock begin[:]
variable[our_sr] assign[=] call[name[self].get_reference, parameter[]]
if compare[name[our_sr] is constant[None]] begin[:]
return[constant[False]]
variable[sr_compare] assign[=] compare[name[our_sr] equal[==] name[svc_ref]]
if compare[name[cid] is constant[None]] begin[:]
return[name[sr_compare]]
variable[our_cid] assign[=] call[name[self].get_export_container_id, parameter[]]
if compare[name[our_cid] is constant[None]] begin[:]
return[constant[False]]
return[<ast.BoolOp object at 0x7da1b03931c0>]
|
keyword[def] identifier[match_sr] ( identifier[self] , identifier[svc_ref] , identifier[cid] = keyword[None] ):
literal[string]
keyword[with] identifier[self] . identifier[__lock] :
identifier[our_sr] = identifier[self] . identifier[get_reference] ()
keyword[if] identifier[our_sr] keyword[is] keyword[None] :
keyword[return] keyword[False]
identifier[sr_compare] = identifier[our_sr] == identifier[svc_ref]
keyword[if] identifier[cid] keyword[is] keyword[None] :
keyword[return] identifier[sr_compare]
identifier[our_cid] = identifier[self] . identifier[get_export_container_id] ()
keyword[if] identifier[our_cid] keyword[is] keyword[None] :
keyword[return] keyword[False]
keyword[return] identifier[sr_compare] keyword[and] identifier[our_cid] == identifier[cid]
|
def match_sr(self, svc_ref, cid=None):
    # type: (ServiceReference, Optional[Tuple[str, str]] ) -> bool
    """
    Checks if this export registration matches the given service reference

    :param svc_ref: A service reference
    :param cid: A container ID
    :return: True if the service matches this export registration
    """
    with self.__lock:
        our_sr = self.get_reference()
        if our_sr is None:
            # Nothing exported yet, so nothing can match.
            return False # depends on [control=['if'], data=[]]
        sr_compare = our_sr == svc_ref
        if cid is None:
            # No container filter requested: the reference match decides.
            return sr_compare # depends on [control=['if'], data=[]]
        our_cid = self.get_export_container_id()
        if our_cid is None:
            return False # depends on [control=['if'], data=[]]
        return sr_compare and our_cid == cid # depends on [control=['with'], data=[]]
|
def profile(self):
    """Return raster metadata.

    Opens the raster at ``self.path`` read-only and returns a deep copy
    of its ``meta`` mapping, so the caller gets an independent copy.
    """
    with rasterio.open(self.path, "r") as dataset:
        metadata = dataset.meta
        return deepcopy(metadata)
|
def function[profile, parameter[self]]:
constant[Return raster metadata.]
with call[name[rasterio].open, parameter[name[self].path, constant[r]]] begin[:]
return[call[name[deepcopy], parameter[name[src].meta]]]
|
keyword[def] identifier[profile] ( identifier[self] ):
literal[string]
keyword[with] identifier[rasterio] . identifier[open] ( identifier[self] . identifier[path] , literal[string] ) keyword[as] identifier[src] :
keyword[return] identifier[deepcopy] ( identifier[src] . identifier[meta] )
|
def profile(self):
    """Return raster metadata.

    Opens the raster at ``self.path`` read-only and returns a deep copy
    of its ``meta`` mapping (an independent copy for the caller).
    """
    with rasterio.open(self.path, 'r') as src:
        return deepcopy(src.meta) # depends on [control=['with'], data=['src']]
|
def propagate(self, date):
    """Propagate the orbit to a new date

    Args:
        date (Date)
    Return:
        Orbit
    """
    # Rebind the propagator to this orbit if it currently points elsewhere,
    # then delegate the actual propagation to it.
    currently_bound = self.propagator.orbit
    if currently_bound is not self:
        self.propagator.orbit = self
    return self.propagator.propagate(date)
|
def function[propagate, parameter[self, date]]:
constant[Propagate the orbit to a new date
Args:
date (Date)
Return:
Orbit
]
if compare[name[self].propagator.orbit is_not name[self]] begin[:]
name[self].propagator.orbit assign[=] name[self]
return[call[name[self].propagator.propagate, parameter[name[date]]]]
|
keyword[def] identifier[propagate] ( identifier[self] , identifier[date] ):
literal[string]
keyword[if] identifier[self] . identifier[propagator] . identifier[orbit] keyword[is] keyword[not] identifier[self] :
identifier[self] . identifier[propagator] . identifier[orbit] = identifier[self]
keyword[return] identifier[self] . identifier[propagator] . identifier[propagate] ( identifier[date] )
|
def propagate(self, date):
    """Propagate the orbit to a new date

    Args:
        date (Date)
    Return:
        Orbit
    """
    # Rebind the propagator to this orbit if it currently points elsewhere,
    # then delegate the actual propagation to it.
    if self.propagator.orbit is not self:
        self.propagator.orbit = self # depends on [control=['if'], data=['self']]
    return self.propagator.propagate(date)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.