code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
|---|---|---|---|
def from_vertices(cls, data):
"""
Uses Delauney triangulation to compute triangle simplices for
each point.
"""
try:
from scipy.spatial import Delaunay
except:
raise ImportError("Generating triangles from points requires, "
"SciPy to be installed.")
if not isinstance(data, Points):
data = Points(data)
if not len(data):
return cls(([], []))
tris = Delaunay(data.array([0, 1]))
return cls((tris.simplices, data))
|
def function[from_vertices, parameter[cls, data]]:
constant[
Uses Delauney triangulation to compute triangle simplices for
each point.
]
<ast.Try object at 0x7da18bcc8ee0>
if <ast.UnaryOp object at 0x7da18bcc9e10> begin[:]
variable[data] assign[=] call[name[Points], parameter[name[data]]]
if <ast.UnaryOp object at 0x7da18bcc81f0> begin[:]
return[call[name[cls], parameter[tuple[[<ast.List object at 0x7da18bcc9a20>, <ast.List object at 0x7da18bccae60>]]]]]
variable[tris] assign[=] call[name[Delaunay], parameter[call[name[data].array, parameter[list[[<ast.Constant object at 0x7da18bcc8880>, <ast.Constant object at 0x7da18bccb040>]]]]]]
return[call[name[cls], parameter[tuple[[<ast.Attribute object at 0x7da18bccb2e0>, <ast.Name object at 0x7da18bcc9090>]]]]]
|
keyword[def] identifier[from_vertices] ( identifier[cls] , identifier[data] ):
literal[string]
keyword[try] :
keyword[from] identifier[scipy] . identifier[spatial] keyword[import] identifier[Delaunay]
keyword[except] :
keyword[raise] identifier[ImportError] ( literal[string]
literal[string] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[data] , identifier[Points] ):
identifier[data] = identifier[Points] ( identifier[data] )
keyword[if] keyword[not] identifier[len] ( identifier[data] ):
keyword[return] identifier[cls] (([],[]))
identifier[tris] = identifier[Delaunay] ( identifier[data] . identifier[array] ([ literal[int] , literal[int] ]))
keyword[return] identifier[cls] (( identifier[tris] . identifier[simplices] , identifier[data] ))
|
def from_vertices(cls, data):
"""
Uses Delauney triangulation to compute triangle simplices for
each point.
"""
try:
from scipy.spatial import Delaunay # depends on [control=['try'], data=[]]
except:
raise ImportError('Generating triangles from points requires, SciPy to be installed.') # depends on [control=['except'], data=[]]
if not isinstance(data, Points):
data = Points(data) # depends on [control=['if'], data=[]]
if not len(data):
return cls(([], [])) # depends on [control=['if'], data=[]]
tris = Delaunay(data.array([0, 1]))
return cls((tris.simplices, data))
|
def reflect(x, y, x0, y0, d=1.0, a=180):
""" Returns the reflection of a point through origin (x0,y0).
"""
return coordinates(x0, y0, d * distance(x0, y0, x, y),
a + angle(x0, y0, x, y))
|
def function[reflect, parameter[x, y, x0, y0, d, a]]:
constant[ Returns the reflection of a point through origin (x0,y0).
]
return[call[name[coordinates], parameter[name[x0], name[y0], binary_operation[name[d] * call[name[distance], parameter[name[x0], name[y0], name[x], name[y]]]], binary_operation[name[a] + call[name[angle], parameter[name[x0], name[y0], name[x], name[y]]]]]]]
|
keyword[def] identifier[reflect] ( identifier[x] , identifier[y] , identifier[x0] , identifier[y0] , identifier[d] = literal[int] , identifier[a] = literal[int] ):
literal[string]
keyword[return] identifier[coordinates] ( identifier[x0] , identifier[y0] , identifier[d] * identifier[distance] ( identifier[x0] , identifier[y0] , identifier[x] , identifier[y] ),
identifier[a] + identifier[angle] ( identifier[x0] , identifier[y0] , identifier[x] , identifier[y] ))
|
def reflect(x, y, x0, y0, d=1.0, a=180):
""" Returns the reflection of a point through origin (x0,y0).
"""
return coordinates(x0, y0, d * distance(x0, y0, x, y), a + angle(x0, y0, x, y))
|
def lraise(self,message):
"""log an exception, close the log file, then raise the exception
Parameters
----------
message : str
the exception message
Raises
------
exception with message
"""
s = str(datetime.now()) + " ERROR: " + message + '\n'
print(s,end='')
if self.filename:
self.f.write(s)
self.f.flush
self.f.close()
raise Exception(message)
|
def function[lraise, parameter[self, message]]:
constant[log an exception, close the log file, then raise the exception
Parameters
----------
message : str
the exception message
Raises
------
exception with message
]
variable[s] assign[=] binary_operation[binary_operation[binary_operation[call[name[str], parameter[call[name[datetime].now, parameter[]]]] + constant[ ERROR: ]] + name[message]] + constant[
]]
call[name[print], parameter[name[s]]]
if name[self].filename begin[:]
call[name[self].f.write, parameter[name[s]]]
name[self].f.flush
call[name[self].f.close, parameter[]]
<ast.Raise object at 0x7da1b24b3130>
|
keyword[def] identifier[lraise] ( identifier[self] , identifier[message] ):
literal[string]
identifier[s] = identifier[str] ( identifier[datetime] . identifier[now] ())+ literal[string] + identifier[message] + literal[string]
identifier[print] ( identifier[s] , identifier[end] = literal[string] )
keyword[if] identifier[self] . identifier[filename] :
identifier[self] . identifier[f] . identifier[write] ( identifier[s] )
identifier[self] . identifier[f] . identifier[flush]
identifier[self] . identifier[f] . identifier[close] ()
keyword[raise] identifier[Exception] ( identifier[message] )
|
def lraise(self, message):
"""log an exception, close the log file, then raise the exception
Parameters
----------
message : str
the exception message
Raises
------
exception with message
"""
s = str(datetime.now()) + ' ERROR: ' + message + '\n'
print(s, end='')
if self.filename:
self.f.write(s)
self.f.flush
self.f.close() # depends on [control=['if'], data=[]]
raise Exception(message)
|
def stat(self):
"""
Return disk usage information for the partition in :py:class:`Fstat`
format. If disk usage information is not available, then ``None`` is
returned. Disk usage information is only available for regular
filesystems that are mounted.
"""
try:
# Always use the last mount point because it's a very common
# situation when mount points are moved, or sotorage is
# double-mounted. Last mount point is always the newest.
mp = self.mount_points[-1]
except IndexError:
return None
stat = os.statvfs(mp)
free = stat.f_frsize * stat.f_bavail
total = stat.f_frsize * stat.f_blocks
used = total - free
used_pct = round(used / total * 100)
free_pct = 100 - used_pct
return Fstat(total, used, free, used_pct, free_pct)
|
def function[stat, parameter[self]]:
constant[
Return disk usage information for the partition in :py:class:`Fstat`
format. If disk usage information is not available, then ``None`` is
returned. Disk usage information is only available for regular
filesystems that are mounted.
]
<ast.Try object at 0x7da2044c3490>
variable[stat] assign[=] call[name[os].statvfs, parameter[name[mp]]]
variable[free] assign[=] binary_operation[name[stat].f_frsize * name[stat].f_bavail]
variable[total] assign[=] binary_operation[name[stat].f_frsize * name[stat].f_blocks]
variable[used] assign[=] binary_operation[name[total] - name[free]]
variable[used_pct] assign[=] call[name[round], parameter[binary_operation[binary_operation[name[used] / name[total]] * constant[100]]]]
variable[free_pct] assign[=] binary_operation[constant[100] - name[used_pct]]
return[call[name[Fstat], parameter[name[total], name[used], name[free], name[used_pct], name[free_pct]]]]
|
keyword[def] identifier[stat] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[mp] = identifier[self] . identifier[mount_points] [- literal[int] ]
keyword[except] identifier[IndexError] :
keyword[return] keyword[None]
identifier[stat] = identifier[os] . identifier[statvfs] ( identifier[mp] )
identifier[free] = identifier[stat] . identifier[f_frsize] * identifier[stat] . identifier[f_bavail]
identifier[total] = identifier[stat] . identifier[f_frsize] * identifier[stat] . identifier[f_blocks]
identifier[used] = identifier[total] - identifier[free]
identifier[used_pct] = identifier[round] ( identifier[used] / identifier[total] * literal[int] )
identifier[free_pct] = literal[int] - identifier[used_pct]
keyword[return] identifier[Fstat] ( identifier[total] , identifier[used] , identifier[free] , identifier[used_pct] , identifier[free_pct] )
|
def stat(self):
"""
Return disk usage information for the partition in :py:class:`Fstat`
format. If disk usage information is not available, then ``None`` is
returned. Disk usage information is only available for regular
filesystems that are mounted.
"""
try:
# Always use the last mount point because it's a very common
# situation when mount points are moved, or sotorage is
# double-mounted. Last mount point is always the newest.
mp = self.mount_points[-1] # depends on [control=['try'], data=[]]
except IndexError:
return None # depends on [control=['except'], data=[]]
stat = os.statvfs(mp)
free = stat.f_frsize * stat.f_bavail
total = stat.f_frsize * stat.f_blocks
used = total - free
used_pct = round(used / total * 100)
free_pct = 100 - used_pct
return Fstat(total, used, free, used_pct, free_pct)
|
def register_filter(self, attr_name, filterimage_cls):
"""
Register a new FilteredImage subclass (`filterimage_cls`).
To be used via the attribute (filters.`attr_name`)
"""
if attr_name.startswith('_'):
raise UnallowedFilterName(
'`%s` is an unallowed Filter name. Filter names cannot begin '
'with an underscore.' % attr_name
)
if not issubclass(filterimage_cls, FilteredImage):
raise InvalidFilteredImageSubclass(
'Only subclasses of FilteredImage may be registered as '
'filters with VersatileImageFieldRegistry'
)
if attr_name in self._filter_registry:
raise AlreadyRegistered(
'A ProcessedImageMixIn class is already registered to the `%s`'
' attribute. If you would like to override this attribute, '
'use the unregister method' % attr_name
)
else:
self._filter_registry[attr_name] = filterimage_cls
|
def function[register_filter, parameter[self, attr_name, filterimage_cls]]:
constant[
Register a new FilteredImage subclass (`filterimage_cls`).
To be used via the attribute (filters.`attr_name`)
]
if call[name[attr_name].startswith, parameter[constant[_]]] begin[:]
<ast.Raise object at 0x7da1b26adc60>
if <ast.UnaryOp object at 0x7da1b26ac340> begin[:]
<ast.Raise object at 0x7da1b26ada80>
if compare[name[attr_name] in name[self]._filter_registry] begin[:]
<ast.Raise object at 0x7da1b26ac460>
|
keyword[def] identifier[register_filter] ( identifier[self] , identifier[attr_name] , identifier[filterimage_cls] ):
literal[string]
keyword[if] identifier[attr_name] . identifier[startswith] ( literal[string] ):
keyword[raise] identifier[UnallowedFilterName] (
literal[string]
literal[string] % identifier[attr_name]
)
keyword[if] keyword[not] identifier[issubclass] ( identifier[filterimage_cls] , identifier[FilteredImage] ):
keyword[raise] identifier[InvalidFilteredImageSubclass] (
literal[string]
literal[string]
)
keyword[if] identifier[attr_name] keyword[in] identifier[self] . identifier[_filter_registry] :
keyword[raise] identifier[AlreadyRegistered] (
literal[string]
literal[string]
literal[string] % identifier[attr_name]
)
keyword[else] :
identifier[self] . identifier[_filter_registry] [ identifier[attr_name] ]= identifier[filterimage_cls]
|
def register_filter(self, attr_name, filterimage_cls):
"""
Register a new FilteredImage subclass (`filterimage_cls`).
To be used via the attribute (filters.`attr_name`)
"""
if attr_name.startswith('_'):
raise UnallowedFilterName('`%s` is an unallowed Filter name. Filter names cannot begin with an underscore.' % attr_name) # depends on [control=['if'], data=[]]
if not issubclass(filterimage_cls, FilteredImage):
raise InvalidFilteredImageSubclass('Only subclasses of FilteredImage may be registered as filters with VersatileImageFieldRegistry') # depends on [control=['if'], data=[]]
if attr_name in self._filter_registry:
raise AlreadyRegistered('A ProcessedImageMixIn class is already registered to the `%s` attribute. If you would like to override this attribute, use the unregister method' % attr_name) # depends on [control=['if'], data=['attr_name']]
else:
self._filter_registry[attr_name] = filterimage_cls
|
def container_running(self, container_name):
"""
Finds out if a container with name ``container_name`` is running.
:return: :class:`Container <docker.models.containers.Container>` if it's running, ``None`` otherwise.
:rtype: Optional[docker.models.container.Container]
"""
filters = {
"name": container_name,
"status": "running",
}
for container in self.client.containers.list(filters=filters):
if container_name == container.name:
return container
return None
|
def function[container_running, parameter[self, container_name]]:
constant[
Finds out if a container with name ``container_name`` is running.
:return: :class:`Container <docker.models.containers.Container>` if it's running, ``None`` otherwise.
:rtype: Optional[docker.models.container.Container]
]
variable[filters] assign[=] dictionary[[<ast.Constant object at 0x7da20e9b1210>, <ast.Constant object at 0x7da20e9b19c0>], [<ast.Name object at 0x7da20e9b1b70>, <ast.Constant object at 0x7da20e9b27d0>]]
for taget[name[container]] in starred[call[name[self].client.containers.list, parameter[]]] begin[:]
if compare[name[container_name] equal[==] name[container].name] begin[:]
return[name[container]]
return[constant[None]]
|
keyword[def] identifier[container_running] ( identifier[self] , identifier[container_name] ):
literal[string]
identifier[filters] ={
literal[string] : identifier[container_name] ,
literal[string] : literal[string] ,
}
keyword[for] identifier[container] keyword[in] identifier[self] . identifier[client] . identifier[containers] . identifier[list] ( identifier[filters] = identifier[filters] ):
keyword[if] identifier[container_name] == identifier[container] . identifier[name] :
keyword[return] identifier[container]
keyword[return] keyword[None]
|
def container_running(self, container_name):
"""
Finds out if a container with name ``container_name`` is running.
:return: :class:`Container <docker.models.containers.Container>` if it's running, ``None`` otherwise.
:rtype: Optional[docker.models.container.Container]
"""
filters = {'name': container_name, 'status': 'running'}
for container in self.client.containers.list(filters=filters):
if container_name == container.name:
return container # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['container']]
return None
|
def data(cls, cube, weighted, prune):
"""Return ndarray representing table index by margin."""
return cls()._data(cube, weighted, prune)
|
def function[data, parameter[cls, cube, weighted, prune]]:
constant[Return ndarray representing table index by margin.]
return[call[call[name[cls], parameter[]]._data, parameter[name[cube], name[weighted], name[prune]]]]
|
keyword[def] identifier[data] ( identifier[cls] , identifier[cube] , identifier[weighted] , identifier[prune] ):
literal[string]
keyword[return] identifier[cls] (). identifier[_data] ( identifier[cube] , identifier[weighted] , identifier[prune] )
|
def data(cls, cube, weighted, prune):
"""Return ndarray representing table index by margin."""
return cls()._data(cube, weighted, prune)
|
def qs_field(
model_class,
field,
filters=None,
formatter=queryset_formatter,
manager_name='objects',
):
"""
Show computed fields based on QuerySet's.
This is a workaround since sometimes some filtering is involved to see if a user
owns and object, is a student, etc.
Example
-------
class MyModel(ModelView):
details_extra_columns = [
('courses_owned', 'Courses (Owner of)'),
]
column_formatters_detail = {
'courses_owner': qs_field(model.Course, 'owner'),
]
"""
if filters is None:
filters = {}
def _(view, context, _model, name):
filters[field] = _model # e.g. students: user
# e.g. User.objects, User.deleted_objects
manager = getattr(model_class, manager_name)
return formatter(manager(**filters))
return _
|
def function[qs_field, parameter[model_class, field, filters, formatter, manager_name]]:
constant[
Show computed fields based on QuerySet's.
This is a workaround since sometimes some filtering is involved to see if a user
owns and object, is a student, etc.
Example
-------
class MyModel(ModelView):
details_extra_columns = [
('courses_owned', 'Courses (Owner of)'),
]
column_formatters_detail = {
'courses_owner': qs_field(model.Course, 'owner'),
]
]
if compare[name[filters] is constant[None]] begin[:]
variable[filters] assign[=] dictionary[[], []]
def function[_, parameter[view, context, _model, name]]:
call[name[filters]][name[field]] assign[=] name[_model]
variable[manager] assign[=] call[name[getattr], parameter[name[model_class], name[manager_name]]]
return[call[name[formatter], parameter[call[name[manager], parameter[]]]]]
return[name[_]]
|
keyword[def] identifier[qs_field] (
identifier[model_class] ,
identifier[field] ,
identifier[filters] = keyword[None] ,
identifier[formatter] = identifier[queryset_formatter] ,
identifier[manager_name] = literal[string] ,
):
literal[string]
keyword[if] identifier[filters] keyword[is] keyword[None] :
identifier[filters] ={}
keyword[def] identifier[_] ( identifier[view] , identifier[context] , identifier[_model] , identifier[name] ):
identifier[filters] [ identifier[field] ]= identifier[_model]
identifier[manager] = identifier[getattr] ( identifier[model_class] , identifier[manager_name] )
keyword[return] identifier[formatter] ( identifier[manager] (** identifier[filters] ))
keyword[return] identifier[_]
|
def qs_field(model_class, field, filters=None, formatter=queryset_formatter, manager_name='objects'):
"""
Show computed fields based on QuerySet's.
This is a workaround since sometimes some filtering is involved to see if a user
owns and object, is a student, etc.
Example
-------
class MyModel(ModelView):
details_extra_columns = [
('courses_owned', 'Courses (Owner of)'),
]
column_formatters_detail = {
'courses_owner': qs_field(model.Course, 'owner'),
]
"""
if filters is None:
filters = {} # depends on [control=['if'], data=['filters']]
def _(view, context, _model, name):
filters[field] = _model # e.g. students: user
# e.g. User.objects, User.deleted_objects
manager = getattr(model_class, manager_name)
return formatter(manager(**filters))
return _
|
def _create_minimum_needs_action(self):
"""Create action for minimum needs dialog."""
icon = resources_path('img', 'icons', 'show-minimum-needs.svg')
self.action_minimum_needs = QAction(
QIcon(icon),
self.tr('Minimum Needs Calculator'), self.iface.mainWindow())
self.action_minimum_needs.setStatusTip(self.tr(
'Open InaSAFE minimum needs calculator'))
self.action_minimum_needs.setWhatsThis(self.tr(
'Open InaSAFE minimum needs calculator'))
self.action_minimum_needs.triggered.connect(self.show_minimum_needs)
self.add_action(
self.action_minimum_needs, add_to_toolbar=self.full_toolbar)
|
def function[_create_minimum_needs_action, parameter[self]]:
constant[Create action for minimum needs dialog.]
variable[icon] assign[=] call[name[resources_path], parameter[constant[img], constant[icons], constant[show-minimum-needs.svg]]]
name[self].action_minimum_needs assign[=] call[name[QAction], parameter[call[name[QIcon], parameter[name[icon]]], call[name[self].tr, parameter[constant[Minimum Needs Calculator]]], call[name[self].iface.mainWindow, parameter[]]]]
call[name[self].action_minimum_needs.setStatusTip, parameter[call[name[self].tr, parameter[constant[Open InaSAFE minimum needs calculator]]]]]
call[name[self].action_minimum_needs.setWhatsThis, parameter[call[name[self].tr, parameter[constant[Open InaSAFE minimum needs calculator]]]]]
call[name[self].action_minimum_needs.triggered.connect, parameter[name[self].show_minimum_needs]]
call[name[self].add_action, parameter[name[self].action_minimum_needs]]
|
keyword[def] identifier[_create_minimum_needs_action] ( identifier[self] ):
literal[string]
identifier[icon] = identifier[resources_path] ( literal[string] , literal[string] , literal[string] )
identifier[self] . identifier[action_minimum_needs] = identifier[QAction] (
identifier[QIcon] ( identifier[icon] ),
identifier[self] . identifier[tr] ( literal[string] ), identifier[self] . identifier[iface] . identifier[mainWindow] ())
identifier[self] . identifier[action_minimum_needs] . identifier[setStatusTip] ( identifier[self] . identifier[tr] (
literal[string] ))
identifier[self] . identifier[action_minimum_needs] . identifier[setWhatsThis] ( identifier[self] . identifier[tr] (
literal[string] ))
identifier[self] . identifier[action_minimum_needs] . identifier[triggered] . identifier[connect] ( identifier[self] . identifier[show_minimum_needs] )
identifier[self] . identifier[add_action] (
identifier[self] . identifier[action_minimum_needs] , identifier[add_to_toolbar] = identifier[self] . identifier[full_toolbar] )
|
def _create_minimum_needs_action(self):
"""Create action for minimum needs dialog."""
icon = resources_path('img', 'icons', 'show-minimum-needs.svg')
self.action_minimum_needs = QAction(QIcon(icon), self.tr('Minimum Needs Calculator'), self.iface.mainWindow())
self.action_minimum_needs.setStatusTip(self.tr('Open InaSAFE minimum needs calculator'))
self.action_minimum_needs.setWhatsThis(self.tr('Open InaSAFE minimum needs calculator'))
self.action_minimum_needs.triggered.connect(self.show_minimum_needs)
self.add_action(self.action_minimum_needs, add_to_toolbar=self.full_toolbar)
|
def widgetEdited(self, event=None, val=None, action='entry', skipDups=True):
""" A general method for firing any applicable triggers when
a value has been set. This is meant to be easily callable from any
part of this class (or its subclasses), so that it can be called
as soon as need be (immed. on click?). This is smart enough to
be called multiple times, itself handling the removal of any/all
duplicate successive calls (unless skipDups is False). If val is
None, it will use the GUI entry's current value via choice.get().
See teal.py for a description of action.
"""
# be as lightweight as possible if obj doesn't care about this stuff
if not self._editedCallbackObj and not self._flagNonDefaultVals:
return
# get the current value
curVal = val # take this first, if it is given
if curVal is None:
curVal = self.choice.get()
# do any flagging
self.flagThisPar(curVal, False)
# see if this is a duplicate successive call for the same value
if skipDups and curVal==self._lastWidgetEditedVal: return
# pull trigger
if not self._editedCallbackObj: return
self._editedCallbackObj.edited(self.paramInfo.scope,
self.paramInfo.name,
self.previousValue, curVal,
action)
# for our duplicate checker
self._lastWidgetEditedVal = curVal
|
def function[widgetEdited, parameter[self, event, val, action, skipDups]]:
constant[ A general method for firing any applicable triggers when
a value has been set. This is meant to be easily callable from any
part of this class (or its subclasses), so that it can be called
as soon as need be (immed. on click?). This is smart enough to
be called multiple times, itself handling the removal of any/all
duplicate successive calls (unless skipDups is False). If val is
None, it will use the GUI entry's current value via choice.get().
See teal.py for a description of action.
]
if <ast.BoolOp object at 0x7da1b0ff91e0> begin[:]
return[None]
variable[curVal] assign[=] name[val]
if compare[name[curVal] is constant[None]] begin[:]
variable[curVal] assign[=] call[name[self].choice.get, parameter[]]
call[name[self].flagThisPar, parameter[name[curVal], constant[False]]]
if <ast.BoolOp object at 0x7da1b0ff96f0> begin[:]
return[None]
if <ast.UnaryOp object at 0x7da1b0ff9fc0> begin[:]
return[None]
call[name[self]._editedCallbackObj.edited, parameter[name[self].paramInfo.scope, name[self].paramInfo.name, name[self].previousValue, name[curVal], name[action]]]
name[self]._lastWidgetEditedVal assign[=] name[curVal]
|
keyword[def] identifier[widgetEdited] ( identifier[self] , identifier[event] = keyword[None] , identifier[val] = keyword[None] , identifier[action] = literal[string] , identifier[skipDups] = keyword[True] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_editedCallbackObj] keyword[and] keyword[not] identifier[self] . identifier[_flagNonDefaultVals] :
keyword[return]
identifier[curVal] = identifier[val]
keyword[if] identifier[curVal] keyword[is] keyword[None] :
identifier[curVal] = identifier[self] . identifier[choice] . identifier[get] ()
identifier[self] . identifier[flagThisPar] ( identifier[curVal] , keyword[False] )
keyword[if] identifier[skipDups] keyword[and] identifier[curVal] == identifier[self] . identifier[_lastWidgetEditedVal] : keyword[return]
keyword[if] keyword[not] identifier[self] . identifier[_editedCallbackObj] : keyword[return]
identifier[self] . identifier[_editedCallbackObj] . identifier[edited] ( identifier[self] . identifier[paramInfo] . identifier[scope] ,
identifier[self] . identifier[paramInfo] . identifier[name] ,
identifier[self] . identifier[previousValue] , identifier[curVal] ,
identifier[action] )
identifier[self] . identifier[_lastWidgetEditedVal] = identifier[curVal]
|
def widgetEdited(self, event=None, val=None, action='entry', skipDups=True):
""" A general method for firing any applicable triggers when
a value has been set. This is meant to be easily callable from any
part of this class (or its subclasses), so that it can be called
as soon as need be (immed. on click?). This is smart enough to
be called multiple times, itself handling the removal of any/all
duplicate successive calls (unless skipDups is False). If val is
None, it will use the GUI entry's current value via choice.get().
See teal.py for a description of action.
"""
# be as lightweight as possible if obj doesn't care about this stuff
if not self._editedCallbackObj and (not self._flagNonDefaultVals):
return # depends on [control=['if'], data=[]]
# get the current value
curVal = val # take this first, if it is given
if curVal is None:
curVal = self.choice.get() # depends on [control=['if'], data=['curVal']]
# do any flagging
self.flagThisPar(curVal, False)
# see if this is a duplicate successive call for the same value
if skipDups and curVal == self._lastWidgetEditedVal:
return # depends on [control=['if'], data=[]]
# pull trigger
if not self._editedCallbackObj:
return # depends on [control=['if'], data=[]]
self._editedCallbackObj.edited(self.paramInfo.scope, self.paramInfo.name, self.previousValue, curVal, action)
# for our duplicate checker
self._lastWidgetEditedVal = curVal
|
def page_list(
request, template_name='wakawaka/page_list.html', extra_context=None
):
"""
Displays all Pages
"""
page_list = WikiPage.objects.all()
page_list = page_list.order_by('slug')
template_context = {
'page_list': page_list,
'index_slug': getattr(settings, 'WAKAWAKA_DEFAULT_INDEX', 'WikiIndex'),
}
template_context.update(extra_context or {})
return render(request, template_name, template_context)
|
def function[page_list, parameter[request, template_name, extra_context]]:
constant[
Displays all Pages
]
variable[page_list] assign[=] call[name[WikiPage].objects.all, parameter[]]
variable[page_list] assign[=] call[name[page_list].order_by, parameter[constant[slug]]]
variable[template_context] assign[=] dictionary[[<ast.Constant object at 0x7da1b10426e0>, <ast.Constant object at 0x7da1b1041570>], [<ast.Name object at 0x7da1b1043cd0>, <ast.Call object at 0x7da1b1042fb0>]]
call[name[template_context].update, parameter[<ast.BoolOp object at 0x7da1b10401c0>]]
return[call[name[render], parameter[name[request], name[template_name], name[template_context]]]]
|
keyword[def] identifier[page_list] (
identifier[request] , identifier[template_name] = literal[string] , identifier[extra_context] = keyword[None]
):
literal[string]
identifier[page_list] = identifier[WikiPage] . identifier[objects] . identifier[all] ()
identifier[page_list] = identifier[page_list] . identifier[order_by] ( literal[string] )
identifier[template_context] ={
literal[string] : identifier[page_list] ,
literal[string] : identifier[getattr] ( identifier[settings] , literal[string] , literal[string] ),
}
identifier[template_context] . identifier[update] ( identifier[extra_context] keyword[or] {})
keyword[return] identifier[render] ( identifier[request] , identifier[template_name] , identifier[template_context] )
|
def page_list(request, template_name='wakawaka/page_list.html', extra_context=None):
"""
Displays all Pages
"""
page_list = WikiPage.objects.all()
page_list = page_list.order_by('slug')
template_context = {'page_list': page_list, 'index_slug': getattr(settings, 'WAKAWAKA_DEFAULT_INDEX', 'WikiIndex')}
template_context.update(extra_context or {})
return render(request, template_name, template_context)
|
def Group(expressions, final_function, inbetweens, name=""):
""" Group expressions together with ``inbetweens`` and with the output of a ``final_functions``.
"""
lengths = []
functions = []
regex = ""
i = 0
for expression in expressions:
regex += inbetweens[i]
regex += "(?:" + expression.regex + ")"
lengths.append(sum(expression.group_lengths))
functions.append(expression.run)
i += 1
regex += inbetweens[i]
return Expression(regex, functions, lengths, final_function, name)
|
def function[Group, parameter[expressions, final_function, inbetweens, name]]:
constant[ Group expressions together with ``inbetweens`` and with the output of a ``final_functions``.
]
variable[lengths] assign[=] list[[]]
variable[functions] assign[=] list[[]]
variable[regex] assign[=] constant[]
variable[i] assign[=] constant[0]
for taget[name[expression]] in starred[name[expressions]] begin[:]
<ast.AugAssign object at 0x7da1b0f47010>
<ast.AugAssign object at 0x7da1b0f46560>
call[name[lengths].append, parameter[call[name[sum], parameter[name[expression].group_lengths]]]]
call[name[functions].append, parameter[name[expression].run]]
<ast.AugAssign object at 0x7da1b0f44fd0>
<ast.AugAssign object at 0x7da1b0f47580>
return[call[name[Expression], parameter[name[regex], name[functions], name[lengths], name[final_function], name[name]]]]
|
keyword[def] identifier[Group] ( identifier[expressions] , identifier[final_function] , identifier[inbetweens] , identifier[name] = literal[string] ):
literal[string]
identifier[lengths] =[]
identifier[functions] =[]
identifier[regex] = literal[string]
identifier[i] = literal[int]
keyword[for] identifier[expression] keyword[in] identifier[expressions] :
identifier[regex] += identifier[inbetweens] [ identifier[i] ]
identifier[regex] += literal[string] + identifier[expression] . identifier[regex] + literal[string]
identifier[lengths] . identifier[append] ( identifier[sum] ( identifier[expression] . identifier[group_lengths] ))
identifier[functions] . identifier[append] ( identifier[expression] . identifier[run] )
identifier[i] += literal[int]
identifier[regex] += identifier[inbetweens] [ identifier[i] ]
keyword[return] identifier[Expression] ( identifier[regex] , identifier[functions] , identifier[lengths] , identifier[final_function] , identifier[name] )
|
def Group(expressions, final_function, inbetweens, name=''):
""" Group expressions together with ``inbetweens`` and with the output of a ``final_functions``.
"""
lengths = []
functions = []
regex = ''
i = 0
for expression in expressions:
regex += inbetweens[i]
regex += '(?:' + expression.regex + ')'
lengths.append(sum(expression.group_lengths))
functions.append(expression.run)
i += 1 # depends on [control=['for'], data=['expression']]
regex += inbetweens[i]
return Expression(regex, functions, lengths, final_function, name)
|
def remove_volatile(type_):
"""removes volatile from the type definition
If type is not volatile type, it will be returned as is
"""
nake_type = remove_alias(type_)
if not is_volatile(nake_type):
return type_
else:
if isinstance(nake_type, cpptypes.array_t):
is_c = is_const(nake_type)
if is_c:
base_type_ = nake_type.base.base.base
else:
base_type_ = nake_type.base.base
result_type = base_type_
if is_c:
result_type = cpptypes.const_t(result_type)
return cpptypes.array_t(result_type, nake_type.size)
return nake_type.base
|
def function[remove_volatile, parameter[type_]]:
constant[removes volatile from the type definition
If type is not volatile type, it will be returned as is
]
variable[nake_type] assign[=] call[name[remove_alias], parameter[name[type_]]]
if <ast.UnaryOp object at 0x7da18dc9a7d0> begin[:]
return[name[type_]]
|
keyword[def] identifier[remove_volatile] ( identifier[type_] ):
literal[string]
identifier[nake_type] = identifier[remove_alias] ( identifier[type_] )
keyword[if] keyword[not] identifier[is_volatile] ( identifier[nake_type] ):
keyword[return] identifier[type_]
keyword[else] :
keyword[if] identifier[isinstance] ( identifier[nake_type] , identifier[cpptypes] . identifier[array_t] ):
identifier[is_c] = identifier[is_const] ( identifier[nake_type] )
keyword[if] identifier[is_c] :
identifier[base_type_] = identifier[nake_type] . identifier[base] . identifier[base] . identifier[base]
keyword[else] :
identifier[base_type_] = identifier[nake_type] . identifier[base] . identifier[base]
identifier[result_type] = identifier[base_type_]
keyword[if] identifier[is_c] :
identifier[result_type] = identifier[cpptypes] . identifier[const_t] ( identifier[result_type] )
keyword[return] identifier[cpptypes] . identifier[array_t] ( identifier[result_type] , identifier[nake_type] . identifier[size] )
keyword[return] identifier[nake_type] . identifier[base]
|
def remove_volatile(type_):
"""removes volatile from the type definition
If type is not volatile type, it will be returned as is
"""
nake_type = remove_alias(type_)
if not is_volatile(nake_type):
return type_ # depends on [control=['if'], data=[]]
else:
if isinstance(nake_type, cpptypes.array_t):
is_c = is_const(nake_type)
if is_c:
base_type_ = nake_type.base.base.base # depends on [control=['if'], data=[]]
else:
base_type_ = nake_type.base.base
result_type = base_type_
if is_c:
result_type = cpptypes.const_t(result_type) # depends on [control=['if'], data=[]]
return cpptypes.array_t(result_type, nake_type.size) # depends on [control=['if'], data=[]]
return nake_type.base
|
def lex_count(self, low, high):
"""
Count the number of members in a sorted set between a given
lexicographical range.
"""
return self.database.zlexcount(self.key, low, high)
|
def function[lex_count, parameter[self, low, high]]:
constant[
Count the number of members in a sorted set between a given
lexicographical range.
]
return[call[name[self].database.zlexcount, parameter[name[self].key, name[low], name[high]]]]
|
keyword[def] identifier[lex_count] ( identifier[self] , identifier[low] , identifier[high] ):
literal[string]
keyword[return] identifier[self] . identifier[database] . identifier[zlexcount] ( identifier[self] . identifier[key] , identifier[low] , identifier[high] )
|
def lex_count(self, low, high):
"""
Count the number of members in a sorted set between a given
lexicographical range.
"""
return self.database.zlexcount(self.key, low, high)
|
def copyidfobject(self, idfobject):
"""Add an IDF object to the IDF.
Parameters
----------
idfobject : EpBunch object
The IDF object to remove. This usually comes from another idf file,
or it can be used to copy within this idf file.
"""
return addthisbunch(self.idfobjects,
self.model,
self.idd_info,
idfobject, self)
|
def function[copyidfobject, parameter[self, idfobject]]:
constant[Add an IDF object to the IDF.
Parameters
----------
idfobject : EpBunch object
The IDF object to remove. This usually comes from another idf file,
or it can be used to copy within this idf file.
]
return[call[name[addthisbunch], parameter[name[self].idfobjects, name[self].model, name[self].idd_info, name[idfobject], name[self]]]]
|
keyword[def] identifier[copyidfobject] ( identifier[self] , identifier[idfobject] ):
literal[string]
keyword[return] identifier[addthisbunch] ( identifier[self] . identifier[idfobjects] ,
identifier[self] . identifier[model] ,
identifier[self] . identifier[idd_info] ,
identifier[idfobject] , identifier[self] )
|
def copyidfobject(self, idfobject):
"""Add an IDF object to the IDF.
Parameters
----------
idfobject : EpBunch object
The IDF object to remove. This usually comes from another idf file,
or it can be used to copy within this idf file.
"""
return addthisbunch(self.idfobjects, self.model, self.idd_info, idfobject, self)
|
def garud_h(h):
"""Compute the H1, H12, H123 and H2/H1 statistics for detecting signatures
of soft sweeps, as defined in Garud et al. (2015).
Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
Haplotype array.
Returns
-------
h1 : float
H1 statistic (sum of squares of haplotype frequencies).
h12 : float
H12 statistic (sum of squares of haplotype frequencies, combining
the two most common haplotypes into a single frequency).
h123 : float
H123 statistic (sum of squares of haplotype frequencies, combining
the three most common haplotypes into a single frequency).
h2_h1 : float
H2/H1 statistic, indicating the "softness" of a sweep.
"""
# check inputs
h = HaplotypeArray(h, copy=False)
# compute haplotype frequencies
f = h.distinct_frequencies()
# compute H1
h1 = np.sum(f**2)
# compute H12
h12 = np.sum(f[:2])**2 + np.sum(f[2:]**2)
# compute H123
h123 = np.sum(f[:3])**2 + np.sum(f[3:]**2)
# compute H2/H1
h2 = h1 - f[0]**2
h2_h1 = h2 / h1
return h1, h12, h123, h2_h1
|
def function[garud_h, parameter[h]]:
constant[Compute the H1, H12, H123 and H2/H1 statistics for detecting signatures
of soft sweeps, as defined in Garud et al. (2015).
Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
Haplotype array.
Returns
-------
h1 : float
H1 statistic (sum of squares of haplotype frequencies).
h12 : float
H12 statistic (sum of squares of haplotype frequencies, combining
the two most common haplotypes into a single frequency).
h123 : float
H123 statistic (sum of squares of haplotype frequencies, combining
the three most common haplotypes into a single frequency).
h2_h1 : float
H2/H1 statistic, indicating the "softness" of a sweep.
]
variable[h] assign[=] call[name[HaplotypeArray], parameter[name[h]]]
variable[f] assign[=] call[name[h].distinct_frequencies, parameter[]]
variable[h1] assign[=] call[name[np].sum, parameter[binary_operation[name[f] ** constant[2]]]]
variable[h12] assign[=] binary_operation[binary_operation[call[name[np].sum, parameter[call[name[f]][<ast.Slice object at 0x7da1b0538910>]]] ** constant[2]] + call[name[np].sum, parameter[binary_operation[call[name[f]][<ast.Slice object at 0x7da1b05387c0>] ** constant[2]]]]]
variable[h123] assign[=] binary_operation[binary_operation[call[name[np].sum, parameter[call[name[f]][<ast.Slice object at 0x7da1b053b4c0>]]] ** constant[2]] + call[name[np].sum, parameter[binary_operation[call[name[f]][<ast.Slice object at 0x7da1b0539090>] ** constant[2]]]]]
variable[h2] assign[=] binary_operation[name[h1] - binary_operation[call[name[f]][constant[0]] ** constant[2]]]
variable[h2_h1] assign[=] binary_operation[name[h2] / name[h1]]
return[tuple[[<ast.Name object at 0x7da1b053a8f0>, <ast.Name object at 0x7da1b053a830>, <ast.Name object at 0x7da1b053a890>, <ast.Name object at 0x7da1b053a950>]]]
|
keyword[def] identifier[garud_h] ( identifier[h] ):
literal[string]
identifier[h] = identifier[HaplotypeArray] ( identifier[h] , identifier[copy] = keyword[False] )
identifier[f] = identifier[h] . identifier[distinct_frequencies] ()
identifier[h1] = identifier[np] . identifier[sum] ( identifier[f] ** literal[int] )
identifier[h12] = identifier[np] . identifier[sum] ( identifier[f] [: literal[int] ])** literal[int] + identifier[np] . identifier[sum] ( identifier[f] [ literal[int] :]** literal[int] )
identifier[h123] = identifier[np] . identifier[sum] ( identifier[f] [: literal[int] ])** literal[int] + identifier[np] . identifier[sum] ( identifier[f] [ literal[int] :]** literal[int] )
identifier[h2] = identifier[h1] - identifier[f] [ literal[int] ]** literal[int]
identifier[h2_h1] = identifier[h2] / identifier[h1]
keyword[return] identifier[h1] , identifier[h12] , identifier[h123] , identifier[h2_h1]
|
def garud_h(h):
"""Compute the H1, H12, H123 and H2/H1 statistics for detecting signatures
of soft sweeps, as defined in Garud et al. (2015).
Parameters
----------
h : array_like, int, shape (n_variants, n_haplotypes)
Haplotype array.
Returns
-------
h1 : float
H1 statistic (sum of squares of haplotype frequencies).
h12 : float
H12 statistic (sum of squares of haplotype frequencies, combining
the two most common haplotypes into a single frequency).
h123 : float
H123 statistic (sum of squares of haplotype frequencies, combining
the three most common haplotypes into a single frequency).
h2_h1 : float
H2/H1 statistic, indicating the "softness" of a sweep.
"""
# check inputs
h = HaplotypeArray(h, copy=False)
# compute haplotype frequencies
f = h.distinct_frequencies()
# compute H1
h1 = np.sum(f ** 2)
# compute H12
h12 = np.sum(f[:2]) ** 2 + np.sum(f[2:] ** 2)
# compute H123
h123 = np.sum(f[:3]) ** 2 + np.sum(f[3:] ** 2)
# compute H2/H1
h2 = h1 - f[0] ** 2
h2_h1 = h2 / h1
return (h1, h12, h123, h2_h1)
|
def list_metafeatures(cls, group="all"):
"""
Returns a list of metafeatures computable by the Metafeatures class.
"""
# todo make group for intractable metafeatures for wide datasets or
# datasets with high cardinality categorical columns:
# PredPCA1, PredPCA2, PredPCA3, PredEigen1, PredEigen2, PredEigen3,
# PredDet, kNN1NErrRate, kNN1NKappa, LinearDiscriminantAnalysisKappa,
# LinearDiscriminantAnalysisErrRate
if group == "all":
return copy.deepcopy(cls.IDS)
elif group == "landmarking":
return list(filter(
lambda mf_id: "ErrRate" in mf_id or "Kappa" in mf_id, cls.IDS
))
elif group == "target_dependent":
return list(filter(
cls._resource_is_target_dependent, cls.IDS
))
else:
raise ValueError(f"Unknown group {group}")
|
def function[list_metafeatures, parameter[cls, group]]:
constant[
Returns a list of metafeatures computable by the Metafeatures class.
]
if compare[name[group] equal[==] constant[all]] begin[:]
return[call[name[copy].deepcopy, parameter[name[cls].IDS]]]
|
keyword[def] identifier[list_metafeatures] ( identifier[cls] , identifier[group] = literal[string] ):
literal[string]
keyword[if] identifier[group] == literal[string] :
keyword[return] identifier[copy] . identifier[deepcopy] ( identifier[cls] . identifier[IDS] )
keyword[elif] identifier[group] == literal[string] :
keyword[return] identifier[list] ( identifier[filter] (
keyword[lambda] identifier[mf_id] : literal[string] keyword[in] identifier[mf_id] keyword[or] literal[string] keyword[in] identifier[mf_id] , identifier[cls] . identifier[IDS]
))
keyword[elif] identifier[group] == literal[string] :
keyword[return] identifier[list] ( identifier[filter] (
identifier[cls] . identifier[_resource_is_target_dependent] , identifier[cls] . identifier[IDS]
))
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] )
|
def list_metafeatures(cls, group='all'):
"""
Returns a list of metafeatures computable by the Metafeatures class.
"""
# todo make group for intractable metafeatures for wide datasets or
# datasets with high cardinality categorical columns:
# PredPCA1, PredPCA2, PredPCA3, PredEigen1, PredEigen2, PredEigen3,
# PredDet, kNN1NErrRate, kNN1NKappa, LinearDiscriminantAnalysisKappa,
# LinearDiscriminantAnalysisErrRate
if group == 'all':
return copy.deepcopy(cls.IDS) # depends on [control=['if'], data=[]]
elif group == 'landmarking':
return list(filter(lambda mf_id: 'ErrRate' in mf_id or 'Kappa' in mf_id, cls.IDS)) # depends on [control=['if'], data=[]]
elif group == 'target_dependent':
return list(filter(cls._resource_is_target_dependent, cls.IDS)) # depends on [control=['if'], data=[]]
else:
raise ValueError(f'Unknown group {group}')
|
def doc(elt):
"Show `show_doc` info in preview window along with link to full docs."
global use_relative_links
use_relative_links = False
elt = getattr(elt, '__func__', elt)
md = show_doc(elt, markdown=False)
if is_fastai_class(elt):
md += f'\n\n<a href="{get_fn_link(elt)}" target="_blank" rel="noreferrer noopener">Show in docs</a>'
output = HTMLExporter().markdown2html(md)
use_relative_links = True
if IS_IN_COLAB: get_ipython().run_cell_magic(u'html', u'', output)
else:
try: page.page({'text/html': output})
except: display(Markdown(md))
|
def function[doc, parameter[elt]]:
constant[Show `show_doc` info in preview window along with link to full docs.]
<ast.Global object at 0x7da18ede4f10>
variable[use_relative_links] assign[=] constant[False]
variable[elt] assign[=] call[name[getattr], parameter[name[elt], constant[__func__], name[elt]]]
variable[md] assign[=] call[name[show_doc], parameter[name[elt]]]
if call[name[is_fastai_class], parameter[name[elt]]] begin[:]
<ast.AugAssign object at 0x7da1b2029360>
variable[output] assign[=] call[call[name[HTMLExporter], parameter[]].markdown2html, parameter[name[md]]]
variable[use_relative_links] assign[=] constant[True]
if name[IS_IN_COLAB] begin[:]
call[call[name[get_ipython], parameter[]].run_cell_magic, parameter[constant[html], constant[], name[output]]]
|
keyword[def] identifier[doc] ( identifier[elt] ):
literal[string]
keyword[global] identifier[use_relative_links]
identifier[use_relative_links] = keyword[False]
identifier[elt] = identifier[getattr] ( identifier[elt] , literal[string] , identifier[elt] )
identifier[md] = identifier[show_doc] ( identifier[elt] , identifier[markdown] = keyword[False] )
keyword[if] identifier[is_fastai_class] ( identifier[elt] ):
identifier[md] += literal[string]
identifier[output] = identifier[HTMLExporter] (). identifier[markdown2html] ( identifier[md] )
identifier[use_relative_links] = keyword[True]
keyword[if] identifier[IS_IN_COLAB] : identifier[get_ipython] (). identifier[run_cell_magic] ( literal[string] , literal[string] , identifier[output] )
keyword[else] :
keyword[try] : identifier[page] . identifier[page] ({ literal[string] : identifier[output] })
keyword[except] : identifier[display] ( identifier[Markdown] ( identifier[md] ))
|
def doc(elt):
"""Show `show_doc` info in preview window along with link to full docs."""
global use_relative_links
use_relative_links = False
elt = getattr(elt, '__func__', elt)
md = show_doc(elt, markdown=False)
if is_fastai_class(elt):
md += f'\n\n<a href="{get_fn_link(elt)}" target="_blank" rel="noreferrer noopener">Show in docs</a>' # depends on [control=['if'], data=[]]
output = HTMLExporter().markdown2html(md)
use_relative_links = True
if IS_IN_COLAB:
get_ipython().run_cell_magic(u'html', u'', output) # depends on [control=['if'], data=[]]
else:
try:
page.page({'text/html': output}) # depends on [control=['try'], data=[]]
except:
display(Markdown(md)) # depends on [control=['except'], data=[]]
|
def scaled_pressure3_send(self, time_boot_ms, press_abs, press_diff, temperature, force_mavlink1=False):
'''
Barometer readings for 3rd barometer
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
press_abs : Absolute pressure (hectopascal) (float)
press_diff : Differential pressure 1 (hectopascal) (float)
temperature : Temperature measurement (0.01 degrees celsius) (int16_t)
'''
return self.send(self.scaled_pressure3_encode(time_boot_ms, press_abs, press_diff, temperature), force_mavlink1=force_mavlink1)
|
def function[scaled_pressure3_send, parameter[self, time_boot_ms, press_abs, press_diff, temperature, force_mavlink1]]:
constant[
Barometer readings for 3rd barometer
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
press_abs : Absolute pressure (hectopascal) (float)
press_diff : Differential pressure 1 (hectopascal) (float)
temperature : Temperature measurement (0.01 degrees celsius) (int16_t)
]
return[call[name[self].send, parameter[call[name[self].scaled_pressure3_encode, parameter[name[time_boot_ms], name[press_abs], name[press_diff], name[temperature]]]]]]
|
keyword[def] identifier[scaled_pressure3_send] ( identifier[self] , identifier[time_boot_ms] , identifier[press_abs] , identifier[press_diff] , identifier[temperature] , identifier[force_mavlink1] = keyword[False] ):
literal[string]
keyword[return] identifier[self] . identifier[send] ( identifier[self] . identifier[scaled_pressure3_encode] ( identifier[time_boot_ms] , identifier[press_abs] , identifier[press_diff] , identifier[temperature] ), identifier[force_mavlink1] = identifier[force_mavlink1] )
|
def scaled_pressure3_send(self, time_boot_ms, press_abs, press_diff, temperature, force_mavlink1=False):
"""
Barometer readings for 3rd barometer
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
press_abs : Absolute pressure (hectopascal) (float)
press_diff : Differential pressure 1 (hectopascal) (float)
temperature : Temperature measurement (0.01 degrees celsius) (int16_t)
"""
return self.send(self.scaled_pressure3_encode(time_boot_ms, press_abs, press_diff, temperature), force_mavlink1=force_mavlink1)
|
def error_response(
error_type="Internal server error",
error_text="",
status=400,
participant=None,
simple=False,
request_data="",
):
"""Return a generic server error response."""
last_exception = sys.exc_info()
if last_exception[0]:
db.logger.error(
"Failure for request: {!r}".format(dict(request.args)),
exc_info=last_exception,
)
data = {"status": "error"}
if simple:
data["message"] = error_text
else:
data["html"] = (
error_page(
error_text=error_text,
error_type=error_type,
participant=participant,
request_data=request_data,
)
.get_data()
.decode("utf-8")
)
return Response(dumps(data), status=status, mimetype="application/json")
|
def function[error_response, parameter[error_type, error_text, status, participant, simple, request_data]]:
constant[Return a generic server error response.]
variable[last_exception] assign[=] call[name[sys].exc_info, parameter[]]
if call[name[last_exception]][constant[0]] begin[:]
call[name[db].logger.error, parameter[call[constant[Failure for request: {!r}].format, parameter[call[name[dict], parameter[name[request].args]]]]]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b0383040>], [<ast.Constant object at 0x7da1b0383d90>]]
if name[simple] begin[:]
call[name[data]][constant[message]] assign[=] name[error_text]
return[call[name[Response], parameter[call[name[dumps], parameter[name[data]]]]]]
|
keyword[def] identifier[error_response] (
identifier[error_type] = literal[string] ,
identifier[error_text] = literal[string] ,
identifier[status] = literal[int] ,
identifier[participant] = keyword[None] ,
identifier[simple] = keyword[False] ,
identifier[request_data] = literal[string] ,
):
literal[string]
identifier[last_exception] = identifier[sys] . identifier[exc_info] ()
keyword[if] identifier[last_exception] [ literal[int] ]:
identifier[db] . identifier[logger] . identifier[error] (
literal[string] . identifier[format] ( identifier[dict] ( identifier[request] . identifier[args] )),
identifier[exc_info] = identifier[last_exception] ,
)
identifier[data] ={ literal[string] : literal[string] }
keyword[if] identifier[simple] :
identifier[data] [ literal[string] ]= identifier[error_text]
keyword[else] :
identifier[data] [ literal[string] ]=(
identifier[error_page] (
identifier[error_text] = identifier[error_text] ,
identifier[error_type] = identifier[error_type] ,
identifier[participant] = identifier[participant] ,
identifier[request_data] = identifier[request_data] ,
)
. identifier[get_data] ()
. identifier[decode] ( literal[string] )
)
keyword[return] identifier[Response] ( identifier[dumps] ( identifier[data] ), identifier[status] = identifier[status] , identifier[mimetype] = literal[string] )
|
def error_response(error_type='Internal server error', error_text='', status=400, participant=None, simple=False, request_data=''):
"""Return a generic server error response."""
last_exception = sys.exc_info()
if last_exception[0]:
db.logger.error('Failure for request: {!r}'.format(dict(request.args)), exc_info=last_exception) # depends on [control=['if'], data=[]]
data = {'status': 'error'}
if simple:
data['message'] = error_text # depends on [control=['if'], data=[]]
else:
data['html'] = error_page(error_text=error_text, error_type=error_type, participant=participant, request_data=request_data).get_data().decode('utf-8')
return Response(dumps(data), status=status, mimetype='application/json')
|
def add_speaker(self, **kwargs):
"""
Creates a new BGPSpeaker instance.
Usage:
======= ================
Method URI
======= ================
POST /vtep/speakers
======= ================
Request parameters:
========== ============================================
Attribute Description
========== ============================================
dpid ID of Datapath binding to speaker. (e.g. 1)
as_number AS number. (e.g. 65000)
router_id Router ID. (e.g. "172.17.0.1")
========== ============================================
Example::
$ curl -X POST -d '{
"dpid": 1,
"as_number": 65000,
"router_id": "172.17.0.1"
}' http://localhost:8080/vtep/speakers | python -m json.tool
::
{
"172.17.0.1": {
"EvpnSpeaker": {
"as_number": 65000,
"dpid": 1,
"neighbors": {},
"router_id": "172.17.0.1"
}
}
}
"""
try:
body = self.vtep_app.add_speaker(**kwargs)
except DatapathNotFound as e:
return e.to_response(status=404)
return Response(content_type='application/json',
body=json.dumps(body))
|
def function[add_speaker, parameter[self]]:
constant[
Creates a new BGPSpeaker instance.
Usage:
======= ================
Method URI
======= ================
POST /vtep/speakers
======= ================
Request parameters:
========== ============================================
Attribute Description
========== ============================================
dpid ID of Datapath binding to speaker. (e.g. 1)
as_number AS number. (e.g. 65000)
router_id Router ID. (e.g. "172.17.0.1")
========== ============================================
Example::
$ curl -X POST -d '{
"dpid": 1,
"as_number": 65000,
"router_id": "172.17.0.1"
}' http://localhost:8080/vtep/speakers | python -m json.tool
::
{
"172.17.0.1": {
"EvpnSpeaker": {
"as_number": 65000,
"dpid": 1,
"neighbors": {},
"router_id": "172.17.0.1"
}
}
}
]
<ast.Try object at 0x7da1b1a37370>
return[call[name[Response], parameter[]]]
|
keyword[def] identifier[add_speaker] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
identifier[body] = identifier[self] . identifier[vtep_app] . identifier[add_speaker] (** identifier[kwargs] )
keyword[except] identifier[DatapathNotFound] keyword[as] identifier[e] :
keyword[return] identifier[e] . identifier[to_response] ( identifier[status] = literal[int] )
keyword[return] identifier[Response] ( identifier[content_type] = literal[string] ,
identifier[body] = identifier[json] . identifier[dumps] ( identifier[body] ))
|
def add_speaker(self, **kwargs):
"""
Creates a new BGPSpeaker instance.
Usage:
======= ================
Method URI
======= ================
POST /vtep/speakers
======= ================
Request parameters:
========== ============================================
Attribute Description
========== ============================================
dpid ID of Datapath binding to speaker. (e.g. 1)
as_number AS number. (e.g. 65000)
router_id Router ID. (e.g. "172.17.0.1")
========== ============================================
Example::
$ curl -X POST -d '{
"dpid": 1,
"as_number": 65000,
"router_id": "172.17.0.1"
}' http://localhost:8080/vtep/speakers | python -m json.tool
::
{
"172.17.0.1": {
"EvpnSpeaker": {
"as_number": 65000,
"dpid": 1,
"neighbors": {},
"router_id": "172.17.0.1"
}
}
}
"""
try:
body = self.vtep_app.add_speaker(**kwargs) # depends on [control=['try'], data=[]]
except DatapathNotFound as e:
return e.to_response(status=404) # depends on [control=['except'], data=['e']]
return Response(content_type='application/json', body=json.dumps(body))
|
def mrc_to_marc(mrc):
"""
Convert MRC data format to MARC XML.
Args:
mrc (str): MRC as string.
Returns:
str: XML with MARC.
"""
# ignore blank lines
lines = [
line
for line in mrc.splitlines()
if line.strip()
]
def split_to_parts(lines):
for line in lines:
first_part, second_part = line.split(" L ", 1)
yield line, first_part, second_part.lstrip()
control_lines = []
data_lines = []
for line, first_part, second_part in split_to_parts(lines):
if second_part.startswith("$"):
data_lines.append(line)
else:
control_lines.append(line)
# convert controlfield lines
record = MARCXMLRecord()
record.oai_marc = True
for line, descr, content in split_to_parts(control_lines):
record.controlfields[descr.strip()[:3]] = content
def get_subfield_dict(line):
fields = (
(field[0], field[1:])
for field in line.split("$$")[1:]
)
fields_dict = defaultdict(list)
for key, val in fields:
fields_dict[key].append(val)
return fields_dict
# convert datafield lines
for line, descr, content_line in split_to_parts(data_lines):
name = descr[:3]
i1 = descr[3]
i2 = descr[4]
record.add_data_field(
name,
i1,
i2,
get_subfield_dict(content_line)
)
return record.to_XML()
|
def function[mrc_to_marc, parameter[mrc]]:
constant[
Convert MRC data format to MARC XML.
Args:
mrc (str): MRC as string.
Returns:
str: XML with MARC.
]
variable[lines] assign[=] <ast.ListComp object at 0x7da18dc9b760>
def function[split_to_parts, parameter[lines]]:
for taget[name[line]] in starred[name[lines]] begin[:]
<ast.Tuple object at 0x7da18dc9baf0> assign[=] call[name[line].split, parameter[constant[ L ], constant[1]]]
<ast.Yield object at 0x7da18dc9bd30>
variable[control_lines] assign[=] list[[]]
variable[data_lines] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18dc99180>, <ast.Name object at 0x7da18dc9aa70>, <ast.Name object at 0x7da18dc9af80>]]] in starred[call[name[split_to_parts], parameter[name[lines]]]] begin[:]
if call[name[second_part].startswith, parameter[constant[$]]] begin[:]
call[name[data_lines].append, parameter[name[line]]]
variable[record] assign[=] call[name[MARCXMLRecord], parameter[]]
name[record].oai_marc assign[=] constant[True]
for taget[tuple[[<ast.Name object at 0x7da18dc9a5f0>, <ast.Name object at 0x7da18dc9a950>, <ast.Name object at 0x7da18dc98c10>]]] in starred[call[name[split_to_parts], parameter[name[control_lines]]]] begin[:]
call[name[record].controlfields][call[call[name[descr].strip, parameter[]]][<ast.Slice object at 0x7da18dc9b3a0>]] assign[=] name[content]
def function[get_subfield_dict, parameter[line]]:
variable[fields] assign[=] <ast.GeneratorExp object at 0x7da18dc984c0>
variable[fields_dict] assign[=] call[name[defaultdict], parameter[name[list]]]
for taget[tuple[[<ast.Name object at 0x7da1b0ae1e70>, <ast.Name object at 0x7da1b0ae1e40>]]] in starred[name[fields]] begin[:]
call[call[name[fields_dict]][name[key]].append, parameter[name[val]]]
return[name[fields_dict]]
for taget[tuple[[<ast.Name object at 0x7da1b0ae3fd0>, <ast.Name object at 0x7da1b0ae2650>, <ast.Name object at 0x7da1b0ae11e0>]]] in starred[call[name[split_to_parts], parameter[name[data_lines]]]] begin[:]
variable[name] assign[=] call[name[descr]][<ast.Slice object at 0x7da1b0ae3730>]
variable[i1] assign[=] call[name[descr]][constant[3]]
variable[i2] assign[=] call[name[descr]][constant[4]]
call[name[record].add_data_field, parameter[name[name], name[i1], name[i2], call[name[get_subfield_dict], parameter[name[content_line]]]]]
return[call[name[record].to_XML, parameter[]]]
|
keyword[def] identifier[mrc_to_marc] ( identifier[mrc] ):
literal[string]
identifier[lines] =[
identifier[line]
keyword[for] identifier[line] keyword[in] identifier[mrc] . identifier[splitlines] ()
keyword[if] identifier[line] . identifier[strip] ()
]
keyword[def] identifier[split_to_parts] ( identifier[lines] ):
keyword[for] identifier[line] keyword[in] identifier[lines] :
identifier[first_part] , identifier[second_part] = identifier[line] . identifier[split] ( literal[string] , literal[int] )
keyword[yield] identifier[line] , identifier[first_part] , identifier[second_part] . identifier[lstrip] ()
identifier[control_lines] =[]
identifier[data_lines] =[]
keyword[for] identifier[line] , identifier[first_part] , identifier[second_part] keyword[in] identifier[split_to_parts] ( identifier[lines] ):
keyword[if] identifier[second_part] . identifier[startswith] ( literal[string] ):
identifier[data_lines] . identifier[append] ( identifier[line] )
keyword[else] :
identifier[control_lines] . identifier[append] ( identifier[line] )
identifier[record] = identifier[MARCXMLRecord] ()
identifier[record] . identifier[oai_marc] = keyword[True]
keyword[for] identifier[line] , identifier[descr] , identifier[content] keyword[in] identifier[split_to_parts] ( identifier[control_lines] ):
identifier[record] . identifier[controlfields] [ identifier[descr] . identifier[strip] ()[: literal[int] ]]= identifier[content]
keyword[def] identifier[get_subfield_dict] ( identifier[line] ):
identifier[fields] =(
( identifier[field] [ literal[int] ], identifier[field] [ literal[int] :])
keyword[for] identifier[field] keyword[in] identifier[line] . identifier[split] ( literal[string] )[ literal[int] :]
)
identifier[fields_dict] = identifier[defaultdict] ( identifier[list] )
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[fields] :
identifier[fields_dict] [ identifier[key] ]. identifier[append] ( identifier[val] )
keyword[return] identifier[fields_dict]
keyword[for] identifier[line] , identifier[descr] , identifier[content_line] keyword[in] identifier[split_to_parts] ( identifier[data_lines] ):
identifier[name] = identifier[descr] [: literal[int] ]
identifier[i1] = identifier[descr] [ literal[int] ]
identifier[i2] = identifier[descr] [ literal[int] ]
identifier[record] . identifier[add_data_field] (
identifier[name] ,
identifier[i1] ,
identifier[i2] ,
identifier[get_subfield_dict] ( identifier[content_line] )
)
keyword[return] identifier[record] . identifier[to_XML] ()
|
def mrc_to_marc(mrc):
"""
Convert MRC data format to MARC XML.
Args:
mrc (str): MRC as string.
Returns:
str: XML with MARC.
"""
# ignore blank lines
lines = [line for line in mrc.splitlines() if line.strip()]
def split_to_parts(lines):
for line in lines:
(first_part, second_part) = line.split(' L ', 1)
yield (line, first_part, second_part.lstrip()) # depends on [control=['for'], data=['line']]
control_lines = []
data_lines = []
for (line, first_part, second_part) in split_to_parts(lines):
if second_part.startswith('$'):
data_lines.append(line) # depends on [control=['if'], data=[]]
else:
control_lines.append(line) # depends on [control=['for'], data=[]]
# convert controlfield lines
record = MARCXMLRecord()
record.oai_marc = True
for (line, descr, content) in split_to_parts(control_lines):
record.controlfields[descr.strip()[:3]] = content # depends on [control=['for'], data=[]]
def get_subfield_dict(line):
fields = ((field[0], field[1:]) for field in line.split('$$')[1:])
fields_dict = defaultdict(list)
for (key, val) in fields:
fields_dict[key].append(val) # depends on [control=['for'], data=[]]
return fields_dict
# convert datafield lines
for (line, descr, content_line) in split_to_parts(data_lines):
name = descr[:3]
i1 = descr[3]
i2 = descr[4]
record.add_data_field(name, i1, i2, get_subfield_dict(content_line)) # depends on [control=['for'], data=[]]
return record.to_XML()
|
def _get_domain_id(self, domain_text_element): # pylint: disable=no-self-use
"""Return the easyname id of the domain."""
try:
# Hierarchy: TR > TD > SPAN > Domain Text
tr_anchor = domain_text_element.parent.parent.parent
td_anchor = tr_anchor.find('td', {'class': 'td_2'})
link = td_anchor.find('a')['href']
domain_id = link.rsplit('/', 1)[-1]
return domain_id
except Exception as error:
errmsg = ('Cannot get the domain id even though the domain seems '
'to exist (%s).', error)
LOGGER.warning(errmsg)
raise AssertionError(errmsg)
|
def function[_get_domain_id, parameter[self, domain_text_element]]:
constant[Return the easyname id of the domain.]
<ast.Try object at 0x7da1b1d22170>
|
keyword[def] identifier[_get_domain_id] ( identifier[self] , identifier[domain_text_element] ):
literal[string]
keyword[try] :
identifier[tr_anchor] = identifier[domain_text_element] . identifier[parent] . identifier[parent] . identifier[parent]
identifier[td_anchor] = identifier[tr_anchor] . identifier[find] ( literal[string] ,{ literal[string] : literal[string] })
identifier[link] = identifier[td_anchor] . identifier[find] ( literal[string] )[ literal[string] ]
identifier[domain_id] = identifier[link] . identifier[rsplit] ( literal[string] , literal[int] )[- literal[int] ]
keyword[return] identifier[domain_id]
keyword[except] identifier[Exception] keyword[as] identifier[error] :
identifier[errmsg] =( literal[string]
literal[string] , identifier[error] )
identifier[LOGGER] . identifier[warning] ( identifier[errmsg] )
keyword[raise] identifier[AssertionError] ( identifier[errmsg] )
|
def _get_domain_id(self, domain_text_element): # pylint: disable=no-self-use
'Return the easyname id of the domain.'
try:
# Hierarchy: TR > TD > SPAN > Domain Text
tr_anchor = domain_text_element.parent.parent.parent
td_anchor = tr_anchor.find('td', {'class': 'td_2'})
link = td_anchor.find('a')['href']
domain_id = link.rsplit('/', 1)[-1]
return domain_id # depends on [control=['try'], data=[]]
except Exception as error:
errmsg = ('Cannot get the domain id even though the domain seems to exist (%s).', error)
LOGGER.warning(errmsg)
raise AssertionError(errmsg) # depends on [control=['except'], data=['error']]
|
def get_prefix():
"""Global prefix to identify ncluster created resources name used to identify ncluster created resources,
(name of EFS, VPC, keypair prefixes), can be changed through $NCLUSTER_PREFIX for debugging purposes. """
name = os.environ.get('NCLUSTER_PREFIX', DEFAULT_PREFIX)
if name != DEFAULT_PREFIX:
validate_prefix(name)
return name
|
def function[get_prefix, parameter[]]:
constant[Global prefix to identify ncluster created resources name used to identify ncluster created resources,
(name of EFS, VPC, keypair prefixes), can be changed through $NCLUSTER_PREFIX for debugging purposes. ]
variable[name] assign[=] call[name[os].environ.get, parameter[constant[NCLUSTER_PREFIX], name[DEFAULT_PREFIX]]]
if compare[name[name] not_equal[!=] name[DEFAULT_PREFIX]] begin[:]
call[name[validate_prefix], parameter[name[name]]]
return[name[name]]
|
keyword[def] identifier[get_prefix] ():
literal[string]
identifier[name] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] , identifier[DEFAULT_PREFIX] )
keyword[if] identifier[name] != identifier[DEFAULT_PREFIX] :
identifier[validate_prefix] ( identifier[name] )
keyword[return] identifier[name]
|
def get_prefix():
"""Global prefix to identify ncluster created resources name used to identify ncluster created resources,
(name of EFS, VPC, keypair prefixes), can be changed through $NCLUSTER_PREFIX for debugging purposes. """
name = os.environ.get('NCLUSTER_PREFIX', DEFAULT_PREFIX)
if name != DEFAULT_PREFIX:
validate_prefix(name) # depends on [control=['if'], data=['name']]
return name
|
def do_scan_all(self, line):
"""Call ScanAllObjects. Command syntax is: scan_all"""
self.application.master.ScanAllObjects(opendnp3.GroupVariationID(2, 1), opendnp3.TaskConfig().Default())
|
def function[do_scan_all, parameter[self, line]]:
constant[Call ScanAllObjects. Command syntax is: scan_all]
call[name[self].application.master.ScanAllObjects, parameter[call[name[opendnp3].GroupVariationID, parameter[constant[2], constant[1]]], call[call[name[opendnp3].TaskConfig, parameter[]].Default, parameter[]]]]
|
keyword[def] identifier[do_scan_all] ( identifier[self] , identifier[line] ):
literal[string]
identifier[self] . identifier[application] . identifier[master] . identifier[ScanAllObjects] ( identifier[opendnp3] . identifier[GroupVariationID] ( literal[int] , literal[int] ), identifier[opendnp3] . identifier[TaskConfig] (). identifier[Default] ())
|
def do_scan_all(self, line):
"""Call ScanAllObjects. Command syntax is: scan_all"""
self.application.master.ScanAllObjects(opendnp3.GroupVariationID(2, 1), opendnp3.TaskConfig().Default())
|
def intersection(self, table):
"""
Select nuclei which also belong to ``table``
Parameters
----------
table: Table, Table object
Example:
----------
Table('AME2003').intersection(Table('AME1995'))
"""
idx = self.df.index & table.df.index
return Table(df=self.df[idx], name=self.name)
|
def function[intersection, parameter[self, table]]:
constant[
Select nuclei which also belong to ``table``
Parameters
----------
table: Table, Table object
Example:
----------
Table('AME2003').intersection(Table('AME1995'))
]
variable[idx] assign[=] binary_operation[name[self].df.index <ast.BitAnd object at 0x7da2590d6b60> name[table].df.index]
return[call[name[Table], parameter[]]]
|
keyword[def] identifier[intersection] ( identifier[self] , identifier[table] ):
literal[string]
identifier[idx] = identifier[self] . identifier[df] . identifier[index] & identifier[table] . identifier[df] . identifier[index]
keyword[return] identifier[Table] ( identifier[df] = identifier[self] . identifier[df] [ identifier[idx] ], identifier[name] = identifier[self] . identifier[name] )
|
def intersection(self, table):
"""
Select nuclei which also belong to ``table``
Parameters
----------
table: Table, Table object
Example:
----------
Table('AME2003').intersection(Table('AME1995'))
"""
idx = self.df.index & table.df.index
return Table(df=self.df[idx], name=self.name)
|
def makeOrmValuesSubqueryCondition(ormSession, column, values: List[Union[int, str]]):
""" Make Orm Values Subquery
:param ormSession: The orm session instance
:param column: The column from the Declarative table, eg TableItem.colName
:param values: A list of string or int values
"""
if isPostGreSQLDialect(ormSession.bind):
return column.in_(values)
if not isMssqlDialect(ormSession.bind):
raise NotImplementedError()
sql = _createMssqlSqlText(values)
sub_qry = ormSession.query(column) # Any column, it just assigns a name
sub_qry = sub_qry.from_statement(sql)
return column.in_(sub_qry)
|
def function[makeOrmValuesSubqueryCondition, parameter[ormSession, column, values]]:
constant[ Make Orm Values Subquery
:param ormSession: The orm session instance
:param column: The column from the Declarative table, eg TableItem.colName
:param values: A list of string or int values
]
if call[name[isPostGreSQLDialect], parameter[name[ormSession].bind]] begin[:]
return[call[name[column].in_, parameter[name[values]]]]
if <ast.UnaryOp object at 0x7da20c990490> begin[:]
<ast.Raise object at 0x7da20c992350>
variable[sql] assign[=] call[name[_createMssqlSqlText], parameter[name[values]]]
variable[sub_qry] assign[=] call[name[ormSession].query, parameter[name[column]]]
variable[sub_qry] assign[=] call[name[sub_qry].from_statement, parameter[name[sql]]]
return[call[name[column].in_, parameter[name[sub_qry]]]]
|
keyword[def] identifier[makeOrmValuesSubqueryCondition] ( identifier[ormSession] , identifier[column] , identifier[values] : identifier[List] [ identifier[Union] [ identifier[int] , identifier[str] ]]):
literal[string]
keyword[if] identifier[isPostGreSQLDialect] ( identifier[ormSession] . identifier[bind] ):
keyword[return] identifier[column] . identifier[in_] ( identifier[values] )
keyword[if] keyword[not] identifier[isMssqlDialect] ( identifier[ormSession] . identifier[bind] ):
keyword[raise] identifier[NotImplementedError] ()
identifier[sql] = identifier[_createMssqlSqlText] ( identifier[values] )
identifier[sub_qry] = identifier[ormSession] . identifier[query] ( identifier[column] )
identifier[sub_qry] = identifier[sub_qry] . identifier[from_statement] ( identifier[sql] )
keyword[return] identifier[column] . identifier[in_] ( identifier[sub_qry] )
|
def makeOrmValuesSubqueryCondition(ormSession, column, values: List[Union[int, str]]):
""" Make Orm Values Subquery
:param ormSession: The orm session instance
:param column: The column from the Declarative table, eg TableItem.colName
:param values: A list of string or int values
"""
if isPostGreSQLDialect(ormSession.bind):
return column.in_(values) # depends on [control=['if'], data=[]]
if not isMssqlDialect(ormSession.bind):
raise NotImplementedError() # depends on [control=['if'], data=[]]
sql = _createMssqlSqlText(values)
sub_qry = ormSession.query(column) # Any column, it just assigns a name
sub_qry = sub_qry.from_statement(sql)
return column.in_(sub_qry)
|
def authenticate_credentials(self, payload):
"""Get or create an active user with the username contained in the payload."""
username = payload.get('preferred_username') or payload.get('username')
if username is None:
raise exceptions.AuthenticationFailed('JWT must include a preferred_username or username claim!')
else:
try:
user, __ = get_user_model().objects.get_or_create(username=username)
attributes_updated = False
for claim, attr in self.get_jwt_claim_attribute_map().items():
payload_value = payload.get(claim)
if getattr(user, attr) != payload_value and payload_value is not None:
setattr(user, attr, payload_value)
attributes_updated = True
if attributes_updated:
user.save()
except:
msg = 'User retrieval failed.'
logger.exception(msg)
raise exceptions.AuthenticationFailed(msg)
return user
|
def function[authenticate_credentials, parameter[self, payload]]:
constant[Get or create an active user with the username contained in the payload.]
variable[username] assign[=] <ast.BoolOp object at 0x7da1b04d5990>
if compare[name[username] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b04d6650>
return[name[user]]
|
keyword[def] identifier[authenticate_credentials] ( identifier[self] , identifier[payload] ):
literal[string]
identifier[username] = identifier[payload] . identifier[get] ( literal[string] ) keyword[or] identifier[payload] . identifier[get] ( literal[string] )
keyword[if] identifier[username] keyword[is] keyword[None] :
keyword[raise] identifier[exceptions] . identifier[AuthenticationFailed] ( literal[string] )
keyword[else] :
keyword[try] :
identifier[user] , identifier[__] = identifier[get_user_model] (). identifier[objects] . identifier[get_or_create] ( identifier[username] = identifier[username] )
identifier[attributes_updated] = keyword[False]
keyword[for] identifier[claim] , identifier[attr] keyword[in] identifier[self] . identifier[get_jwt_claim_attribute_map] (). identifier[items] ():
identifier[payload_value] = identifier[payload] . identifier[get] ( identifier[claim] )
keyword[if] identifier[getattr] ( identifier[user] , identifier[attr] )!= identifier[payload_value] keyword[and] identifier[payload_value] keyword[is] keyword[not] keyword[None] :
identifier[setattr] ( identifier[user] , identifier[attr] , identifier[payload_value] )
identifier[attributes_updated] = keyword[True]
keyword[if] identifier[attributes_updated] :
identifier[user] . identifier[save] ()
keyword[except] :
identifier[msg] = literal[string]
identifier[logger] . identifier[exception] ( identifier[msg] )
keyword[raise] identifier[exceptions] . identifier[AuthenticationFailed] ( identifier[msg] )
keyword[return] identifier[user]
|
def authenticate_credentials(self, payload):
"""Get or create an active user with the username contained in the payload."""
username = payload.get('preferred_username') or payload.get('username')
if username is None:
raise exceptions.AuthenticationFailed('JWT must include a preferred_username or username claim!') # depends on [control=['if'], data=[]]
else:
try:
(user, __) = get_user_model().objects.get_or_create(username=username)
attributes_updated = False
for (claim, attr) in self.get_jwt_claim_attribute_map().items():
payload_value = payload.get(claim)
if getattr(user, attr) != payload_value and payload_value is not None:
setattr(user, attr, payload_value)
attributes_updated = True # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if attributes_updated:
user.save() # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except:
msg = 'User retrieval failed.'
logger.exception(msg)
raise exceptions.AuthenticationFailed(msg) # depends on [control=['except'], data=[]]
return user
|
def exists(self, relpath, rsc=None, useFilepath=None):
"""
Checks to see if the inputed path represents an existing file or directory.
:param relpath | <str>
rsc | <str>
useFilepath | <bool> or None
"""
path = self.find(relpath, rsc, useFilepath)
if path.startswith(':'):
return QtCore.QResource(path).isValid()
else:
return os.path.exists(path)
|
def function[exists, parameter[self, relpath, rsc, useFilepath]]:
constant[
Checks to see if the inputed path represents an existing file or directory.
:param relpath | <str>
rsc | <str>
useFilepath | <bool> or None
]
variable[path] assign[=] call[name[self].find, parameter[name[relpath], name[rsc], name[useFilepath]]]
if call[name[path].startswith, parameter[constant[:]]] begin[:]
return[call[call[name[QtCore].QResource, parameter[name[path]]].isValid, parameter[]]]
|
keyword[def] identifier[exists] ( identifier[self] , identifier[relpath] , identifier[rsc] = keyword[None] , identifier[useFilepath] = keyword[None] ):
literal[string]
identifier[path] = identifier[self] . identifier[find] ( identifier[relpath] , identifier[rsc] , identifier[useFilepath] )
keyword[if] identifier[path] . identifier[startswith] ( literal[string] ):
keyword[return] identifier[QtCore] . identifier[QResource] ( identifier[path] ). identifier[isValid] ()
keyword[else] :
keyword[return] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] )
|
def exists(self, relpath, rsc=None, useFilepath=None):
"""
Checks to see if the inputed path represents an existing file or directory.
:param relpath | <str>
rsc | <str>
useFilepath | <bool> or None
"""
path = self.find(relpath, rsc, useFilepath)
if path.startswith(':'):
return QtCore.QResource(path).isValid() # depends on [control=['if'], data=[]]
else:
return os.path.exists(path)
|
def restore_env(env_dict):
'''Set environment variables in the current python process from a dict
containing envvars and values.'''
if hasattr(sys, 'real_prefix'):
sys.prefix = sys.real_prefix
del(sys.real_prefix)
replace_osenviron(expand_envvars(dict_to_env(env_dict)))
|
def function[restore_env, parameter[env_dict]]:
constant[Set environment variables in the current python process from a dict
containing envvars and values.]
if call[name[hasattr], parameter[name[sys], constant[real_prefix]]] begin[:]
name[sys].prefix assign[=] name[sys].real_prefix
<ast.Delete object at 0x7da1b0049d80>
call[name[replace_osenviron], parameter[call[name[expand_envvars], parameter[call[name[dict_to_env], parameter[name[env_dict]]]]]]]
|
keyword[def] identifier[restore_env] ( identifier[env_dict] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[sys] , literal[string] ):
identifier[sys] . identifier[prefix] = identifier[sys] . identifier[real_prefix]
keyword[del] ( identifier[sys] . identifier[real_prefix] )
identifier[replace_osenviron] ( identifier[expand_envvars] ( identifier[dict_to_env] ( identifier[env_dict] )))
|
def restore_env(env_dict):
"""Set environment variables in the current python process from a dict
containing envvars and values."""
if hasattr(sys, 'real_prefix'):
sys.prefix = sys.real_prefix
del sys.real_prefix # depends on [control=['if'], data=[]]
replace_osenviron(expand_envvars(dict_to_env(env_dict)))
|
def _get_text(self):
"""
Get the current metadatas
"""
device = self._get_device()
if device is None:
return (UNKNOWN_DEVICE, self.py3.COLOR_BAD)
if not device["isReachable"] or not device["isTrusted"]:
return (
self.py3.safe_format(
self.format_disconnected, {"name": device["name"]}
),
self.py3.COLOR_BAD,
)
battery = self._get_battery()
(charge, bat_status, color) = self._get_battery_status(battery)
notif = self._get_notifications()
(notif_size, notif_status) = self._get_notifications_status(notif)
return (
self.py3.safe_format(
self.format,
dict(
name=device["name"],
charge=charge,
bat_status=bat_status,
notif_size=notif_size,
notif_status=notif_status,
),
),
color,
)
|
def function[_get_text, parameter[self]]:
constant[
Get the current metadatas
]
variable[device] assign[=] call[name[self]._get_device, parameter[]]
if compare[name[device] is constant[None]] begin[:]
return[tuple[[<ast.Name object at 0x7da2041d9b40>, <ast.Attribute object at 0x7da2041dbeb0>]]]
if <ast.BoolOp object at 0x7da2041d9630> begin[:]
return[tuple[[<ast.Call object at 0x7da2041dacb0>, <ast.Attribute object at 0x7da1b20885e0>]]]
variable[battery] assign[=] call[name[self]._get_battery, parameter[]]
<ast.Tuple object at 0x7da1b208ba30> assign[=] call[name[self]._get_battery_status, parameter[name[battery]]]
variable[notif] assign[=] call[name[self]._get_notifications, parameter[]]
<ast.Tuple object at 0x7da1b2088ee0> assign[=] call[name[self]._get_notifications_status, parameter[name[notif]]]
return[tuple[[<ast.Call object at 0x7da1b208ac20>, <ast.Name object at 0x7da1b208a9e0>]]]
|
keyword[def] identifier[_get_text] ( identifier[self] ):
literal[string]
identifier[device] = identifier[self] . identifier[_get_device] ()
keyword[if] identifier[device] keyword[is] keyword[None] :
keyword[return] ( identifier[UNKNOWN_DEVICE] , identifier[self] . identifier[py3] . identifier[COLOR_BAD] )
keyword[if] keyword[not] identifier[device] [ literal[string] ] keyword[or] keyword[not] identifier[device] [ literal[string] ]:
keyword[return] (
identifier[self] . identifier[py3] . identifier[safe_format] (
identifier[self] . identifier[format_disconnected] ,{ literal[string] : identifier[device] [ literal[string] ]}
),
identifier[self] . identifier[py3] . identifier[COLOR_BAD] ,
)
identifier[battery] = identifier[self] . identifier[_get_battery] ()
( identifier[charge] , identifier[bat_status] , identifier[color] )= identifier[self] . identifier[_get_battery_status] ( identifier[battery] )
identifier[notif] = identifier[self] . identifier[_get_notifications] ()
( identifier[notif_size] , identifier[notif_status] )= identifier[self] . identifier[_get_notifications_status] ( identifier[notif] )
keyword[return] (
identifier[self] . identifier[py3] . identifier[safe_format] (
identifier[self] . identifier[format] ,
identifier[dict] (
identifier[name] = identifier[device] [ literal[string] ],
identifier[charge] = identifier[charge] ,
identifier[bat_status] = identifier[bat_status] ,
identifier[notif_size] = identifier[notif_size] ,
identifier[notif_status] = identifier[notif_status] ,
),
),
identifier[color] ,
)
|
def _get_text(self):
"""
Get the current metadatas
"""
device = self._get_device()
if device is None:
return (UNKNOWN_DEVICE, self.py3.COLOR_BAD) # depends on [control=['if'], data=[]]
if not device['isReachable'] or not device['isTrusted']:
return (self.py3.safe_format(self.format_disconnected, {'name': device['name']}), self.py3.COLOR_BAD) # depends on [control=['if'], data=[]]
battery = self._get_battery()
(charge, bat_status, color) = self._get_battery_status(battery)
notif = self._get_notifications()
(notif_size, notif_status) = self._get_notifications_status(notif)
return (self.py3.safe_format(self.format, dict(name=device['name'], charge=charge, bat_status=bat_status, notif_size=notif_size, notif_status=notif_status)), color)
|
def get_mutations_size(self):
""" Gets the total mutations size for current row
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_get_mutations_size]
:end-before: [END bigtable_row_get_mutations_size]
"""
mutation_size = 0
for mutation in self._get_mutations():
mutation_size += mutation.ByteSize()
return mutation_size
|
def function[get_mutations_size, parameter[self]]:
constant[ Gets the total mutations size for current row
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_get_mutations_size]
:end-before: [END bigtable_row_get_mutations_size]
]
variable[mutation_size] assign[=] constant[0]
for taget[name[mutation]] in starred[call[name[self]._get_mutations, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da1b2345ea0>
return[name[mutation_size]]
|
keyword[def] identifier[get_mutations_size] ( identifier[self] ):
literal[string]
identifier[mutation_size] = literal[int]
keyword[for] identifier[mutation] keyword[in] identifier[self] . identifier[_get_mutations] ():
identifier[mutation_size] += identifier[mutation] . identifier[ByteSize] ()
keyword[return] identifier[mutation_size]
|
def get_mutations_size(self):
""" Gets the total mutations size for current row
For example:
.. literalinclude:: snippets_table.py
:start-after: [START bigtable_row_get_mutations_size]
:end-before: [END bigtable_row_get_mutations_size]
"""
mutation_size = 0
for mutation in self._get_mutations():
mutation_size += mutation.ByteSize() # depends on [control=['for'], data=['mutation']]
return mutation_size
|
def run_display_profile(self, program_main):
"""Print profile name with programMain.
Args:
program_main (str): The executable name.
"""
install_json = self.profile.get('install_json')
output = 'Profile: '
output += '{}{}{}{} '.format(
c.Style.BRIGHT, c.Fore.CYAN, self.profile.get('profile_name'), c.Style.RESET_ALL
)
output += '[{}{}{}{}'.format(
c.Style.BRIGHT, c.Fore.MAGENTA, program_main, c.Style.RESET_ALL
)
if install_json.get('programVersion') is not None:
output += '{}:{}'.format(c.Style.BRIGHT, c.Style.RESET_ALL)
output += '{}{}{}{}'.format(
c.Style.BRIGHT,
c.Fore.MAGENTA,
install_json.get('programVersion'),
c.Style.RESET_ALL,
)
output += ']'
print(output)
|
def function[run_display_profile, parameter[self, program_main]]:
constant[Print profile name with programMain.
Args:
program_main (str): The executable name.
]
variable[install_json] assign[=] call[name[self].profile.get, parameter[constant[install_json]]]
variable[output] assign[=] constant[Profile: ]
<ast.AugAssign object at 0x7da2044c1f30>
<ast.AugAssign object at 0x7da2044c2680>
if compare[call[name[install_json].get, parameter[constant[programVersion]]] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da2044c0250>
<ast.AugAssign object at 0x7da2044c3e80>
<ast.AugAssign object at 0x7da2044c1390>
call[name[print], parameter[name[output]]]
|
keyword[def] identifier[run_display_profile] ( identifier[self] , identifier[program_main] ):
literal[string]
identifier[install_json] = identifier[self] . identifier[profile] . identifier[get] ( literal[string] )
identifier[output] = literal[string]
identifier[output] += literal[string] . identifier[format] (
identifier[c] . identifier[Style] . identifier[BRIGHT] , identifier[c] . identifier[Fore] . identifier[CYAN] , identifier[self] . identifier[profile] . identifier[get] ( literal[string] ), identifier[c] . identifier[Style] . identifier[RESET_ALL]
)
identifier[output] += literal[string] . identifier[format] (
identifier[c] . identifier[Style] . identifier[BRIGHT] , identifier[c] . identifier[Fore] . identifier[MAGENTA] , identifier[program_main] , identifier[c] . identifier[Style] . identifier[RESET_ALL]
)
keyword[if] identifier[install_json] . identifier[get] ( literal[string] ) keyword[is] keyword[not] keyword[None] :
identifier[output] += literal[string] . identifier[format] ( identifier[c] . identifier[Style] . identifier[BRIGHT] , identifier[c] . identifier[Style] . identifier[RESET_ALL] )
identifier[output] += literal[string] . identifier[format] (
identifier[c] . identifier[Style] . identifier[BRIGHT] ,
identifier[c] . identifier[Fore] . identifier[MAGENTA] ,
identifier[install_json] . identifier[get] ( literal[string] ),
identifier[c] . identifier[Style] . identifier[RESET_ALL] ,
)
identifier[output] += literal[string]
identifier[print] ( identifier[output] )
|
def run_display_profile(self, program_main):
"""Print profile name with programMain.
Args:
program_main (str): The executable name.
"""
install_json = self.profile.get('install_json')
output = 'Profile: '
output += '{}{}{}{} '.format(c.Style.BRIGHT, c.Fore.CYAN, self.profile.get('profile_name'), c.Style.RESET_ALL)
output += '[{}{}{}{}'.format(c.Style.BRIGHT, c.Fore.MAGENTA, program_main, c.Style.RESET_ALL)
if install_json.get('programVersion') is not None:
output += '{}:{}'.format(c.Style.BRIGHT, c.Style.RESET_ALL)
output += '{}{}{}{}'.format(c.Style.BRIGHT, c.Fore.MAGENTA, install_json.get('programVersion'), c.Style.RESET_ALL) # depends on [control=['if'], data=[]]
output += ']'
print(output)
|
def call_for_each_tower(
towers, func, devices=None, use_vs=None):
"""
Run `func` on all GPUs (towers) and return the results.
Args:
towers (list[int]): a list of GPU id.
func: a lambda to be called inside each tower
devices: a list of devices to be used. By default will use '/gpu:{tower}'
use_vs (list[bool]): list of use_vs to passed to TowerContext
Returns:
List of outputs of ``func``, evaluated on each tower.
"""
ret = []
if devices is not None:
assert len(devices) == len(towers)
if use_vs is not None:
assert len(use_vs) == len(towers)
tower_names = ['tower{}'.format(idx) for idx in range(len(towers))]
for idx, t in enumerate(towers):
device = devices[idx] if devices is not None else '/gpu:{}'.format(t)
usevs = use_vs[idx] if use_vs is not None else False
reuse = not usevs and idx > 0
with tfv1.device(device), _maybe_reuse_vs(reuse), TrainTowerContext(
tower_names[idx],
vs_name=tower_names[idx] if usevs else '',
index=idx, total=len(towers)):
if len(str(device)) < 10: # a device function doesn't have good string description
logger.info("Building graph for training tower {} on device {} ...".format(idx, device))
else:
logger.info("Building graph for training tower {} ...".format(idx))
# When use_vs is True, use LOCAL_VARIABLES,
# so these duplicated variables won't be saved by default.
with override_to_local_variable(enable=usevs):
ret.append(func())
return ret
|
def function[call_for_each_tower, parameter[towers, func, devices, use_vs]]:
constant[
Run `func` on all GPUs (towers) and return the results.
Args:
towers (list[int]): a list of GPU id.
func: a lambda to be called inside each tower
devices: a list of devices to be used. By default will use '/gpu:{tower}'
use_vs (list[bool]): list of use_vs to passed to TowerContext
Returns:
List of outputs of ``func``, evaluated on each tower.
]
variable[ret] assign[=] list[[]]
if compare[name[devices] is_not constant[None]] begin[:]
assert[compare[call[name[len], parameter[name[devices]]] equal[==] call[name[len], parameter[name[towers]]]]]
if compare[name[use_vs] is_not constant[None]] begin[:]
assert[compare[call[name[len], parameter[name[use_vs]]] equal[==] call[name[len], parameter[name[towers]]]]]
variable[tower_names] assign[=] <ast.ListComp object at 0x7da18ede6830>
for taget[tuple[[<ast.Name object at 0x7da18ede4e50>, <ast.Name object at 0x7da18ede5b40>]]] in starred[call[name[enumerate], parameter[name[towers]]]] begin[:]
variable[device] assign[=] <ast.IfExp object at 0x7da18ede49d0>
variable[usevs] assign[=] <ast.IfExp object at 0x7da18ede48e0>
variable[reuse] assign[=] <ast.BoolOp object at 0x7da18ede7850>
with call[name[tfv1].device, parameter[name[device]]] begin[:]
if compare[call[name[len], parameter[call[name[str], parameter[name[device]]]]] less[<] constant[10]] begin[:]
call[name[logger].info, parameter[call[constant[Building graph for training tower {} on device {} ...].format, parameter[name[idx], name[device]]]]]
with call[name[override_to_local_variable], parameter[]] begin[:]
call[name[ret].append, parameter[call[name[func], parameter[]]]]
return[name[ret]]
|
keyword[def] identifier[call_for_each_tower] (
identifier[towers] , identifier[func] , identifier[devices] = keyword[None] , identifier[use_vs] = keyword[None] ):
literal[string]
identifier[ret] =[]
keyword[if] identifier[devices] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[len] ( identifier[devices] )== identifier[len] ( identifier[towers] )
keyword[if] identifier[use_vs] keyword[is] keyword[not] keyword[None] :
keyword[assert] identifier[len] ( identifier[use_vs] )== identifier[len] ( identifier[towers] )
identifier[tower_names] =[ literal[string] . identifier[format] ( identifier[idx] ) keyword[for] identifier[idx] keyword[in] identifier[range] ( identifier[len] ( identifier[towers] ))]
keyword[for] identifier[idx] , identifier[t] keyword[in] identifier[enumerate] ( identifier[towers] ):
identifier[device] = identifier[devices] [ identifier[idx] ] keyword[if] identifier[devices] keyword[is] keyword[not] keyword[None] keyword[else] literal[string] . identifier[format] ( identifier[t] )
identifier[usevs] = identifier[use_vs] [ identifier[idx] ] keyword[if] identifier[use_vs] keyword[is] keyword[not] keyword[None] keyword[else] keyword[False]
identifier[reuse] = keyword[not] identifier[usevs] keyword[and] identifier[idx] > literal[int]
keyword[with] identifier[tfv1] . identifier[device] ( identifier[device] ), identifier[_maybe_reuse_vs] ( identifier[reuse] ), identifier[TrainTowerContext] (
identifier[tower_names] [ identifier[idx] ],
identifier[vs_name] = identifier[tower_names] [ identifier[idx] ] keyword[if] identifier[usevs] keyword[else] literal[string] ,
identifier[index] = identifier[idx] , identifier[total] = identifier[len] ( identifier[towers] )):
keyword[if] identifier[len] ( identifier[str] ( identifier[device] ))< literal[int] :
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[idx] , identifier[device] ))
keyword[else] :
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[idx] ))
keyword[with] identifier[override_to_local_variable] ( identifier[enable] = identifier[usevs] ):
identifier[ret] . identifier[append] ( identifier[func] ())
keyword[return] identifier[ret]
|
def call_for_each_tower(towers, func, devices=None, use_vs=None):
"""
Run `func` on all GPUs (towers) and return the results.
Args:
towers (list[int]): a list of GPU id.
func: a lambda to be called inside each tower
devices: a list of devices to be used. By default will use '/gpu:{tower}'
use_vs (list[bool]): list of use_vs to passed to TowerContext
Returns:
List of outputs of ``func``, evaluated on each tower.
"""
ret = []
if devices is not None:
assert len(devices) == len(towers) # depends on [control=['if'], data=['devices']]
if use_vs is not None:
assert len(use_vs) == len(towers) # depends on [control=['if'], data=['use_vs']]
tower_names = ['tower{}'.format(idx) for idx in range(len(towers))]
for (idx, t) in enumerate(towers):
device = devices[idx] if devices is not None else '/gpu:{}'.format(t)
usevs = use_vs[idx] if use_vs is not None else False
reuse = not usevs and idx > 0
with tfv1.device(device), _maybe_reuse_vs(reuse), TrainTowerContext(tower_names[idx], vs_name=tower_names[idx] if usevs else '', index=idx, total=len(towers)):
if len(str(device)) < 10: # a device function doesn't have good string description
logger.info('Building graph for training tower {} on device {} ...'.format(idx, device)) # depends on [control=['if'], data=[]]
else:
logger.info('Building graph for training tower {} ...'.format(idx))
# When use_vs is True, use LOCAL_VARIABLES,
# so these duplicated variables won't be saved by default.
with override_to_local_variable(enable=usevs):
ret.append(func()) # depends on [control=['with'], data=[]] # depends on [control=['with'], data=[]] # depends on [control=['for'], data=[]]
return ret
|
def _calc_block_mean_variance(image, mask, blocksize):
"""Adaptively determines image background.
Args:
image: image converted 1-channel image.
mask: 1-channel mask, same size as image.
blocksize: adaptive algorithm parameter.
Returns:
image of same size as input with foreground inpainted with background.
"""
I = image.copy()
I_f = I.astype(np.float32) / 255. # Used for mean and std.
result = np.zeros(
(image.shape[0] / blocksize, image.shape[1] / blocksize),
dtype=np.float32)
for i in xrange(0, image.shape[0] - blocksize, blocksize):
for j in xrange(0, image.shape[1] - blocksize, blocksize):
patch = I_f[i:i+blocksize+1, j:j+blocksize+1]
mask_patch = mask[i:i+blocksize+1, j:j+blocksize+1]
tmp1 = np.zeros((blocksize, blocksize))
tmp2 = np.zeros((blocksize, blocksize))
mean, std_dev = cv2.meanStdDev(patch, tmp1, tmp2, mask_patch)
value = 0
if std_dev[0][0] > MEAN_VARIANCE_THRESHOLD:
value = mean[0][0]
result[i/blocksize, j/blocksize] = value
small_image = cv2.resize(I, (image.shape[1] / blocksize,
image.shape[0] / blocksize))
res, inpaintmask = cv2.threshold(result, 0.02, 1, cv2.THRESH_BINARY)
inpainted = cv2.inpaint(small_image, inpaintmask.astype(np.uint8), 5,
cv2.INPAINT_TELEA)
res = cv2.resize(inpainted, (image.shape[1], image.shape[0]))
return res
|
def function[_calc_block_mean_variance, parameter[image, mask, blocksize]]:
constant[Adaptively determines image background.
Args:
image: image converted 1-channel image.
mask: 1-channel mask, same size as image.
blocksize: adaptive algorithm parameter.
Returns:
image of same size as input with foreground inpainted with background.
]
variable[I] assign[=] call[name[image].copy, parameter[]]
variable[I_f] assign[=] binary_operation[call[name[I].astype, parameter[name[np].float32]] / constant[255.0]]
variable[result] assign[=] call[name[np].zeros, parameter[tuple[[<ast.BinOp object at 0x7da20c7cbd30>, <ast.BinOp object at 0x7da20c7ca8f0>]]]]
for taget[name[i]] in starred[call[name[xrange], parameter[constant[0], binary_operation[call[name[image].shape][constant[0]] - name[blocksize]], name[blocksize]]]] begin[:]
for taget[name[j]] in starred[call[name[xrange], parameter[constant[0], binary_operation[call[name[image].shape][constant[1]] - name[blocksize]], name[blocksize]]]] begin[:]
variable[patch] assign[=] call[name[I_f]][tuple[[<ast.Slice object at 0x7da20c7ca8c0>, <ast.Slice object at 0x7da20c7c9810>]]]
variable[mask_patch] assign[=] call[name[mask]][tuple[[<ast.Slice object at 0x7da20c7cb640>, <ast.Slice object at 0x7da20c7c96c0>]]]
variable[tmp1] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da20c7c8700>, <ast.Name object at 0x7da20c7cb190>]]]]
variable[tmp2] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da20c7cbe50>, <ast.Name object at 0x7da20c7ca680>]]]]
<ast.Tuple object at 0x7da20c7cae30> assign[=] call[name[cv2].meanStdDev, parameter[name[patch], name[tmp1], name[tmp2], name[mask_patch]]]
variable[value] assign[=] constant[0]
if compare[call[call[name[std_dev]][constant[0]]][constant[0]] greater[>] name[MEAN_VARIANCE_THRESHOLD]] begin[:]
variable[value] assign[=] call[call[name[mean]][constant[0]]][constant[0]]
call[name[result]][tuple[[<ast.BinOp object at 0x7da20c7cbc70>, <ast.BinOp object at 0x7da20c7cb790>]]] assign[=] name[value]
variable[small_image] assign[=] call[name[cv2].resize, parameter[name[I], tuple[[<ast.BinOp object at 0x7da20c7cba60>, <ast.BinOp object at 0x7da20c7cb4f0>]]]]
<ast.Tuple object at 0x7da20c7ca890> assign[=] call[name[cv2].threshold, parameter[name[result], constant[0.02], constant[1], name[cv2].THRESH_BINARY]]
variable[inpainted] assign[=] call[name[cv2].inpaint, parameter[name[small_image], call[name[inpaintmask].astype, parameter[name[np].uint8]], constant[5], name[cv2].INPAINT_TELEA]]
variable[res] assign[=] call[name[cv2].resize, parameter[name[inpainted], tuple[[<ast.Subscript object at 0x7da204565c30>, <ast.Subscript object at 0x7da2045674f0>]]]]
return[name[res]]
|
keyword[def] identifier[_calc_block_mean_variance] ( identifier[image] , identifier[mask] , identifier[blocksize] ):
literal[string]
identifier[I] = identifier[image] . identifier[copy] ()
identifier[I_f] = identifier[I] . identifier[astype] ( identifier[np] . identifier[float32] )/ literal[int]
identifier[result] = identifier[np] . identifier[zeros] (
( identifier[image] . identifier[shape] [ literal[int] ]/ identifier[blocksize] , identifier[image] . identifier[shape] [ literal[int] ]/ identifier[blocksize] ),
identifier[dtype] = identifier[np] . identifier[float32] )
keyword[for] identifier[i] keyword[in] identifier[xrange] ( literal[int] , identifier[image] . identifier[shape] [ literal[int] ]- identifier[blocksize] , identifier[blocksize] ):
keyword[for] identifier[j] keyword[in] identifier[xrange] ( literal[int] , identifier[image] . identifier[shape] [ literal[int] ]- identifier[blocksize] , identifier[blocksize] ):
identifier[patch] = identifier[I_f] [ identifier[i] : identifier[i] + identifier[blocksize] + literal[int] , identifier[j] : identifier[j] + identifier[blocksize] + literal[int] ]
identifier[mask_patch] = identifier[mask] [ identifier[i] : identifier[i] + identifier[blocksize] + literal[int] , identifier[j] : identifier[j] + identifier[blocksize] + literal[int] ]
identifier[tmp1] = identifier[np] . identifier[zeros] (( identifier[blocksize] , identifier[blocksize] ))
identifier[tmp2] = identifier[np] . identifier[zeros] (( identifier[blocksize] , identifier[blocksize] ))
identifier[mean] , identifier[std_dev] = identifier[cv2] . identifier[meanStdDev] ( identifier[patch] , identifier[tmp1] , identifier[tmp2] , identifier[mask_patch] )
identifier[value] = literal[int]
keyword[if] identifier[std_dev] [ literal[int] ][ literal[int] ]> identifier[MEAN_VARIANCE_THRESHOLD] :
identifier[value] = identifier[mean] [ literal[int] ][ literal[int] ]
identifier[result] [ identifier[i] / identifier[blocksize] , identifier[j] / identifier[blocksize] ]= identifier[value]
identifier[small_image] = identifier[cv2] . identifier[resize] ( identifier[I] ,( identifier[image] . identifier[shape] [ literal[int] ]/ identifier[blocksize] ,
identifier[image] . identifier[shape] [ literal[int] ]/ identifier[blocksize] ))
identifier[res] , identifier[inpaintmask] = identifier[cv2] . identifier[threshold] ( identifier[result] , literal[int] , literal[int] , identifier[cv2] . identifier[THRESH_BINARY] )
identifier[inpainted] = identifier[cv2] . identifier[inpaint] ( identifier[small_image] , identifier[inpaintmask] . identifier[astype] ( identifier[np] . identifier[uint8] ), literal[int] ,
identifier[cv2] . identifier[INPAINT_TELEA] )
identifier[res] = identifier[cv2] . identifier[resize] ( identifier[inpainted] ,( identifier[image] . identifier[shape] [ literal[int] ], identifier[image] . identifier[shape] [ literal[int] ]))
keyword[return] identifier[res]
|
def _calc_block_mean_variance(image, mask, blocksize):
"""Adaptively determines image background.
Args:
image: image converted 1-channel image.
mask: 1-channel mask, same size as image.
blocksize: adaptive algorithm parameter.
Returns:
image of same size as input with foreground inpainted with background.
"""
I = image.copy()
I_f = I.astype(np.float32) / 255.0 # Used for mean and std.
result = np.zeros((image.shape[0] / blocksize, image.shape[1] / blocksize), dtype=np.float32)
for i in xrange(0, image.shape[0] - blocksize, blocksize):
for j in xrange(0, image.shape[1] - blocksize, blocksize):
patch = I_f[i:i + blocksize + 1, j:j + blocksize + 1]
mask_patch = mask[i:i + blocksize + 1, j:j + blocksize + 1]
tmp1 = np.zeros((blocksize, blocksize))
tmp2 = np.zeros((blocksize, blocksize))
(mean, std_dev) = cv2.meanStdDev(patch, tmp1, tmp2, mask_patch)
value = 0
if std_dev[0][0] > MEAN_VARIANCE_THRESHOLD:
value = mean[0][0] # depends on [control=['if'], data=[]]
result[i / blocksize, j / blocksize] = value # depends on [control=['for'], data=['j']] # depends on [control=['for'], data=['i']]
small_image = cv2.resize(I, (image.shape[1] / blocksize, image.shape[0] / blocksize))
(res, inpaintmask) = cv2.threshold(result, 0.02, 1, cv2.THRESH_BINARY)
inpainted = cv2.inpaint(small_image, inpaintmask.astype(np.uint8), 5, cv2.INPAINT_TELEA)
res = cv2.resize(inpainted, (image.shape[1], image.shape[0]))
return res
|
def cached_property():
"""
Handy utility to build caching properties in your classes.
Decorated code will be run only once and then result will be stored in private class property
with the given name. When called for the second time, property will return cached value.
:param storage_var_name: Name of the class property to store cached data.
:type storage_var_name: str
:return: Decorator for the class property
"""
def _stored_value(f):
storage_var_name = "__{}".format(f.__name__)
def _wrapper(self, *args, **kwargs):
value_in_cache = getattr(self, storage_var_name, Sentinel)
if value_in_cache is not Sentinel:
return value_in_cache
calculated_value = f(self, *args, **kwargs)
setattr(self, storage_var_name, calculated_value)
return calculated_value
return _wrapper
return _stored_value
|
def function[cached_property, parameter[]]:
constant[
Handy utility to build caching properties in your classes.
Decorated code will be run only once and then result will be stored in private class property
with the given name. When called for the second time, property will return cached value.
:param storage_var_name: Name of the class property to store cached data.
:type storage_var_name: str
:return: Decorator for the class property
]
def function[_stored_value, parameter[f]]:
variable[storage_var_name] assign[=] call[constant[__{}].format, parameter[name[f].__name__]]
def function[_wrapper, parameter[self]]:
variable[value_in_cache] assign[=] call[name[getattr], parameter[name[self], name[storage_var_name], name[Sentinel]]]
if compare[name[value_in_cache] is_not name[Sentinel]] begin[:]
return[name[value_in_cache]]
variable[calculated_value] assign[=] call[name[f], parameter[name[self], <ast.Starred object at 0x7da2043445b0>]]
call[name[setattr], parameter[name[self], name[storage_var_name], name[calculated_value]]]
return[name[calculated_value]]
return[name[_wrapper]]
return[name[_stored_value]]
|
keyword[def] identifier[cached_property] ():
literal[string]
keyword[def] identifier[_stored_value] ( identifier[f] ):
identifier[storage_var_name] = literal[string] . identifier[format] ( identifier[f] . identifier[__name__] )
keyword[def] identifier[_wrapper] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
identifier[value_in_cache] = identifier[getattr] ( identifier[self] , identifier[storage_var_name] , identifier[Sentinel] )
keyword[if] identifier[value_in_cache] keyword[is] keyword[not] identifier[Sentinel] :
keyword[return] identifier[value_in_cache]
identifier[calculated_value] = identifier[f] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] )
identifier[setattr] ( identifier[self] , identifier[storage_var_name] , identifier[calculated_value] )
keyword[return] identifier[calculated_value]
keyword[return] identifier[_wrapper]
keyword[return] identifier[_stored_value]
|
def cached_property():
"""
Handy utility to build caching properties in your classes.
Decorated code will be run only once and then result will be stored in private class property
with the given name. When called for the second time, property will return cached value.
:param storage_var_name: Name of the class property to store cached data.
:type storage_var_name: str
:return: Decorator for the class property
"""
def _stored_value(f):
storage_var_name = '__{}'.format(f.__name__)
def _wrapper(self, *args, **kwargs):
value_in_cache = getattr(self, storage_var_name, Sentinel)
if value_in_cache is not Sentinel:
return value_in_cache # depends on [control=['if'], data=['value_in_cache']]
calculated_value = f(self, *args, **kwargs)
setattr(self, storage_var_name, calculated_value)
return calculated_value
return _wrapper
return _stored_value
|
def start_task(self, task_tag, skip_unresolved=False):
""" Check dependency for the given task_tag and start task. For dependency checking see
:meth:`.WTaskDependencyRegistryStorage.dependency_check`. If task is already started then it must be
stopped before it will be started again.
:param task_tag: task to start. Any required dependencies will be started automatically.
:param skip_unresolved: flag controls this method behaviour for tasks that could not be found. \
When False, method will raise an exception if task tag was set in dependency and the related task \
wasn't found in registry. When True that unresolvable task will be omitted
:return: None
"""
if self.started_tasks(task_registry_id=task_tag) is not None:
return
task_cls = self.tasks_by_tag(task_tag)
if task_cls is None:
raise RuntimeError("Task '%s' wasn't found" % task_tag)
self.dependency_check(task_cls, skip_unresolved=skip_unresolved)
def start_dependency(start_task_cls):
for dependency in start_task_cls.__dependency__:
if self.started_tasks(task_registry_id=dependency) is not None:
continue
dependent_task = self.tasks_by_tag(dependency)
if dependent_task is not None:
start_dependency(dependent_task)
self.__started.append(start_task_cls.start_dependent_task())
start_dependency(task_cls)
|
def function[start_task, parameter[self, task_tag, skip_unresolved]]:
constant[ Check dependency for the given task_tag and start task. For dependency checking see
:meth:`.WTaskDependencyRegistryStorage.dependency_check`. If task is already started then it must be
stopped before it will be started again.
:param task_tag: task to start. Any required dependencies will be started automatically.
:param skip_unresolved: flag controls this method behaviour for tasks that could not be found. When False, method will raise an exception if task tag was set in dependency and the related task wasn't found in registry. When True that unresolvable task will be omitted
:return: None
]
if compare[call[name[self].started_tasks, parameter[]] is_not constant[None]] begin[:]
return[None]
variable[task_cls] assign[=] call[name[self].tasks_by_tag, parameter[name[task_tag]]]
if compare[name[task_cls] is constant[None]] begin[:]
<ast.Raise object at 0x7da20c992d40>
call[name[self].dependency_check, parameter[name[task_cls]]]
def function[start_dependency, parameter[start_task_cls]]:
for taget[name[dependency]] in starred[name[start_task_cls].__dependency__] begin[:]
if compare[call[name[self].started_tasks, parameter[]] is_not constant[None]] begin[:]
continue
variable[dependent_task] assign[=] call[name[self].tasks_by_tag, parameter[name[dependency]]]
if compare[name[dependent_task] is_not constant[None]] begin[:]
call[name[start_dependency], parameter[name[dependent_task]]]
call[name[self].__started.append, parameter[call[name[start_task_cls].start_dependent_task, parameter[]]]]
call[name[start_dependency], parameter[name[task_cls]]]
|
keyword[def] identifier[start_task] ( identifier[self] , identifier[task_tag] , identifier[skip_unresolved] = keyword[False] ):
literal[string]
keyword[if] identifier[self] . identifier[started_tasks] ( identifier[task_registry_id] = identifier[task_tag] ) keyword[is] keyword[not] keyword[None] :
keyword[return]
identifier[task_cls] = identifier[self] . identifier[tasks_by_tag] ( identifier[task_tag] )
keyword[if] identifier[task_cls] keyword[is] keyword[None] :
keyword[raise] identifier[RuntimeError] ( literal[string] % identifier[task_tag] )
identifier[self] . identifier[dependency_check] ( identifier[task_cls] , identifier[skip_unresolved] = identifier[skip_unresolved] )
keyword[def] identifier[start_dependency] ( identifier[start_task_cls] ):
keyword[for] identifier[dependency] keyword[in] identifier[start_task_cls] . identifier[__dependency__] :
keyword[if] identifier[self] . identifier[started_tasks] ( identifier[task_registry_id] = identifier[dependency] ) keyword[is] keyword[not] keyword[None] :
keyword[continue]
identifier[dependent_task] = identifier[self] . identifier[tasks_by_tag] ( identifier[dependency] )
keyword[if] identifier[dependent_task] keyword[is] keyword[not] keyword[None] :
identifier[start_dependency] ( identifier[dependent_task] )
identifier[self] . identifier[__started] . identifier[append] ( identifier[start_task_cls] . identifier[start_dependent_task] ())
identifier[start_dependency] ( identifier[task_cls] )
|
def start_task(self, task_tag, skip_unresolved=False):
""" Check dependency for the given task_tag and start task. For dependency checking see
:meth:`.WTaskDependencyRegistryStorage.dependency_check`. If task is already started then it must be
stopped before it will be started again.
:param task_tag: task to start. Any required dependencies will be started automatically.
:param skip_unresolved: flag controls this method behaviour for tasks that could not be found. When False, method will raise an exception if task tag was set in dependency and the related task wasn't found in registry. When True that unresolvable task will be omitted
:return: None
"""
if self.started_tasks(task_registry_id=task_tag) is not None:
return # depends on [control=['if'], data=[]]
task_cls = self.tasks_by_tag(task_tag)
if task_cls is None:
raise RuntimeError("Task '%s' wasn't found" % task_tag) # depends on [control=['if'], data=[]]
self.dependency_check(task_cls, skip_unresolved=skip_unresolved)
def start_dependency(start_task_cls):
for dependency in start_task_cls.__dependency__:
if self.started_tasks(task_registry_id=dependency) is not None:
continue # depends on [control=['if'], data=[]]
dependent_task = self.tasks_by_tag(dependency)
if dependent_task is not None:
start_dependency(dependent_task) # depends on [control=['if'], data=['dependent_task']] # depends on [control=['for'], data=['dependency']]
self.__started.append(start_task_cls.start_dependent_task())
start_dependency(task_cls)
|
def get_all(self, page=None, per_page=None, include_totals=False):
"""Retrieves all resource servers
Args:
page (int, optional): The result's page number (zero based).
per_page (int, optional): The amount of entries per page.
include_totals (bool, optional): True if the query summary is
to be included in the result, False otherwise.
See: https://auth0.com/docs/api/management/v2#!/Resource_Servers/get_resource_servers
"""
params = {
'page': page,
'per_page': per_page,
'include_totals': str(include_totals).lower()
}
return self.client.get(self._url(), params=params)
|
def function[get_all, parameter[self, page, per_page, include_totals]]:
constant[Retrieves all resource servers
Args:
page (int, optional): The result's page number (zero based).
per_page (int, optional): The amount of entries per page.
include_totals (bool, optional): True if the query summary is
to be included in the result, False otherwise.
See: https://auth0.com/docs/api/management/v2#!/Resource_Servers/get_resource_servers
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b07996c0>, <ast.Constant object at 0x7da1b079bd30>, <ast.Constant object at 0x7da1b079b550>], [<ast.Name object at 0x7da1b07980d0>, <ast.Name object at 0x7da1b0798f70>, <ast.Call object at 0x7da1b0799d50>]]
return[call[name[self].client.get, parameter[call[name[self]._url, parameter[]]]]]
|
keyword[def] identifier[get_all] ( identifier[self] , identifier[page] = keyword[None] , identifier[per_page] = keyword[None] , identifier[include_totals] = keyword[False] ):
literal[string]
identifier[params] ={
literal[string] : identifier[page] ,
literal[string] : identifier[per_page] ,
literal[string] : identifier[str] ( identifier[include_totals] ). identifier[lower] ()
}
keyword[return] identifier[self] . identifier[client] . identifier[get] ( identifier[self] . identifier[_url] (), identifier[params] = identifier[params] )
|
def get_all(self, page=None, per_page=None, include_totals=False):
"""Retrieves all resource servers
Args:
page (int, optional): The result's page number (zero based).
per_page (int, optional): The amount of entries per page.
include_totals (bool, optional): True if the query summary is
to be included in the result, False otherwise.
See: https://auth0.com/docs/api/management/v2#!/Resource_Servers/get_resource_servers
"""
params = {'page': page, 'per_page': per_page, 'include_totals': str(include_totals).lower()}
return self.client.get(self._url(), params=params)
|
def sampleVRVT(self,R,n=1,nsigma=None,target=True):
"""
NAME:
sampleVRVT
PURPOSE:
sample a radial and azimuthal velocity at R
INPUT:
R - Galactocentric distance (can be Quantity)
n= number of distances to sample
nsigma= number of sigma to rejection-sample on
target= if True, sample using the 'target' sigma_R rather than the actual sigma_R (default=True)
OUTPUT:
list of samples
BUGS:
should use the fact that vR and vT separate
HISTORY:
2011-03-24 - Written - Bovy (NYU)
"""
#Determine where the max of the v-distribution is using asymmetric drift
maxVR= 0.
maxVT= optimize.brentq(_vtmaxEq,0.,R**self._beta+0.2,(R,self))
maxVD= self(Orbit([R,maxVR,maxVT]))
#Now rejection-sample
if nsigma == None:
nsigma= _NSIGMA
out= []
if target:
sigma= math.sqrt(self.targetSigma2(R,use_physical=False))
else:
sigma= math.sqrt(self.sigma2(R,use_physical=False))
while len(out) < n:
#sample
vrg, vtg= nu.random.normal(), nu.random.normal()
propvR= vrg*nsigma*sigma
propvT= vtg*nsigma*sigma/self._gamma+maxVT
VDatprop= self(Orbit([R,propvR,propvT]))
if VDatprop/maxVD > nu.random.uniform()*nu.exp(-0.5*(vrg**2.+vtg**2.)): #accept
out.append(sc.array([propvR,propvT]))
return nu.array(out)
|
def function[sampleVRVT, parameter[self, R, n, nsigma, target]]:
constant[
NAME:
sampleVRVT
PURPOSE:
sample a radial and azimuthal velocity at R
INPUT:
R - Galactocentric distance (can be Quantity)
n= number of distances to sample
nsigma= number of sigma to rejection-sample on
target= if True, sample using the 'target' sigma_R rather than the actual sigma_R (default=True)
OUTPUT:
list of samples
BUGS:
should use the fact that vR and vT separate
HISTORY:
2011-03-24 - Written - Bovy (NYU)
]
variable[maxVR] assign[=] constant[0.0]
variable[maxVT] assign[=] call[name[optimize].brentq, parameter[name[_vtmaxEq], constant[0.0], binary_operation[binary_operation[name[R] ** name[self]._beta] + constant[0.2]], tuple[[<ast.Name object at 0x7da1b0ddf1c0>, <ast.Name object at 0x7da1b0ddf700>]]]]
variable[maxVD] assign[=] call[name[self], parameter[call[name[Orbit], parameter[list[[<ast.Name object at 0x7da1b0ddf280>, <ast.Name object at 0x7da1b0ddf1f0>, <ast.Name object at 0x7da1b0ddf130>]]]]]]
if compare[name[nsigma] equal[==] constant[None]] begin[:]
variable[nsigma] assign[=] name[_NSIGMA]
variable[out] assign[=] list[[]]
if name[target] begin[:]
variable[sigma] assign[=] call[name[math].sqrt, parameter[call[name[self].targetSigma2, parameter[name[R]]]]]
while compare[call[name[len], parameter[name[out]]] less[<] name[n]] begin[:]
<ast.Tuple object at 0x7da1b0da2770> assign[=] tuple[[<ast.Call object at 0x7da1b0da2830>, <ast.Call object at 0x7da1b0da0d90>]]
variable[propvR] assign[=] binary_operation[binary_operation[name[vrg] * name[nsigma]] * name[sigma]]
variable[propvT] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[name[vtg] * name[nsigma]] * name[sigma]] / name[self]._gamma] + name[maxVT]]
variable[VDatprop] assign[=] call[name[self], parameter[call[name[Orbit], parameter[list[[<ast.Name object at 0x7da1b0e15cf0>, <ast.Name object at 0x7da1b0e15c90>, <ast.Name object at 0x7da1b0e16470>]]]]]]
if compare[binary_operation[name[VDatprop] / name[maxVD]] greater[>] binary_operation[call[name[nu].random.uniform, parameter[]] * call[name[nu].exp, parameter[binary_operation[<ast.UnaryOp object at 0x7da1b0e154e0> * binary_operation[binary_operation[name[vrg] ** constant[2.0]] + binary_operation[name[vtg] ** constant[2.0]]]]]]]] begin[:]
call[name[out].append, parameter[call[name[sc].array, parameter[list[[<ast.Name object at 0x7da1b0e65750>, <ast.Name object at 0x7da1b0e679a0>]]]]]]
return[call[name[nu].array, parameter[name[out]]]]
|
keyword[def] identifier[sampleVRVT] ( identifier[self] , identifier[R] , identifier[n] = literal[int] , identifier[nsigma] = keyword[None] , identifier[target] = keyword[True] ):
literal[string]
identifier[maxVR] = literal[int]
identifier[maxVT] = identifier[optimize] . identifier[brentq] ( identifier[_vtmaxEq] , literal[int] , identifier[R] ** identifier[self] . identifier[_beta] + literal[int] ,( identifier[R] , identifier[self] ))
identifier[maxVD] = identifier[self] ( identifier[Orbit] ([ identifier[R] , identifier[maxVR] , identifier[maxVT] ]))
keyword[if] identifier[nsigma] == keyword[None] :
identifier[nsigma] = identifier[_NSIGMA]
identifier[out] =[]
keyword[if] identifier[target] :
identifier[sigma] = identifier[math] . identifier[sqrt] ( identifier[self] . identifier[targetSigma2] ( identifier[R] , identifier[use_physical] = keyword[False] ))
keyword[else] :
identifier[sigma] = identifier[math] . identifier[sqrt] ( identifier[self] . identifier[sigma2] ( identifier[R] , identifier[use_physical] = keyword[False] ))
keyword[while] identifier[len] ( identifier[out] )< identifier[n] :
identifier[vrg] , identifier[vtg] = identifier[nu] . identifier[random] . identifier[normal] (), identifier[nu] . identifier[random] . identifier[normal] ()
identifier[propvR] = identifier[vrg] * identifier[nsigma] * identifier[sigma]
identifier[propvT] = identifier[vtg] * identifier[nsigma] * identifier[sigma] / identifier[self] . identifier[_gamma] + identifier[maxVT]
identifier[VDatprop] = identifier[self] ( identifier[Orbit] ([ identifier[R] , identifier[propvR] , identifier[propvT] ]))
keyword[if] identifier[VDatprop] / identifier[maxVD] > identifier[nu] . identifier[random] . identifier[uniform] ()* identifier[nu] . identifier[exp] (- literal[int] *( identifier[vrg] ** literal[int] + identifier[vtg] ** literal[int] )):
identifier[out] . identifier[append] ( identifier[sc] . identifier[array] ([ identifier[propvR] , identifier[propvT] ]))
keyword[return] identifier[nu] . identifier[array] ( identifier[out] )
|
def sampleVRVT(self, R, n=1, nsigma=None, target=True):
"""
NAME:
sampleVRVT
PURPOSE:
sample a radial and azimuthal velocity at R
INPUT:
R - Galactocentric distance (can be Quantity)
n= number of distances to sample
nsigma= number of sigma to rejection-sample on
target= if True, sample using the 'target' sigma_R rather than the actual sigma_R (default=True)
OUTPUT:
list of samples
BUGS:
should use the fact that vR and vT separate
HISTORY:
2011-03-24 - Written - Bovy (NYU)
"""
#Determine where the max of the v-distribution is using asymmetric drift
maxVR = 0.0
maxVT = optimize.brentq(_vtmaxEq, 0.0, R ** self._beta + 0.2, (R, self))
maxVD = self(Orbit([R, maxVR, maxVT]))
#Now rejection-sample
if nsigma == None:
nsigma = _NSIGMA # depends on [control=['if'], data=['nsigma']]
out = []
if target:
sigma = math.sqrt(self.targetSigma2(R, use_physical=False)) # depends on [control=['if'], data=[]]
else:
sigma = math.sqrt(self.sigma2(R, use_physical=False))
while len(out) < n:
#sample
(vrg, vtg) = (nu.random.normal(), nu.random.normal())
propvR = vrg * nsigma * sigma
propvT = vtg * nsigma * sigma / self._gamma + maxVT
VDatprop = self(Orbit([R, propvR, propvT]))
if VDatprop / maxVD > nu.random.uniform() * nu.exp(-0.5 * (vrg ** 2.0 + vtg ** 2.0)): #accept
out.append(sc.array([propvR, propvT])) # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]]
return nu.array(out)
|
def issub(path):
"""
Returns #True if *path* is a relative path that does not point outside
of its parent directory or is equal to its parent directory (thus, this
function will also return False for a path like `./`).
"""
if isabs(path):
return False
if path.startswith(curdir + sep) or path.startswith(pardir + sep) or \
path == curdir or path == pardir:
return False
return True
|
def function[issub, parameter[path]]:
constant[
Returns #True if *path* is a relative path that does not point outside
of its parent directory or is equal to its parent directory (thus, this
function will also return False for a path like `./`).
]
if call[name[isabs], parameter[name[path]]] begin[:]
return[constant[False]]
if <ast.BoolOp object at 0x7da18f09c7f0> begin[:]
return[constant[False]]
return[constant[True]]
|
keyword[def] identifier[issub] ( identifier[path] ):
literal[string]
keyword[if] identifier[isabs] ( identifier[path] ):
keyword[return] keyword[False]
keyword[if] identifier[path] . identifier[startswith] ( identifier[curdir] + identifier[sep] ) keyword[or] identifier[path] . identifier[startswith] ( identifier[pardir] + identifier[sep] ) keyword[or] identifier[path] == identifier[curdir] keyword[or] identifier[path] == identifier[pardir] :
keyword[return] keyword[False]
keyword[return] keyword[True]
|
def issub(path):
"""
Returns #True if *path* is a relative path that does not point outside
of its parent directory or is equal to its parent directory (thus, this
function will also return False for a path like `./`).
"""
if isabs(path):
return False # depends on [control=['if'], data=[]]
if path.startswith(curdir + sep) or path.startswith(pardir + sep) or path == curdir or (path == pardir):
return False # depends on [control=['if'], data=[]]
return True
|
def discretize(self, intervals, slope_thresh=1500, cents_thresh=50):
"""
This function takes the pitch data and returns it quantized to given
set of intervals. All transactions must happen in cent scale.
slope_thresh is the bound beyond which the pitch contour is said to transit
from one svara to another. It is specified in cents/sec.
cents_thresh is a limit within which two pitch values are considered the same.
This is what pushes the quantization limit.
The function returns quantized pitch data.
"""
#eps = np.finfo(float).eps
#pitch = median_filter(pitch, 7)+eps
self.pitch = median_filter(self.pitch, 7)
pitch_quantized = np.zeros(len(self.pitch))
pitch_quantized[0] = utils.find_nearest_index(intervals, self.pitch[0])
pitch_quantized[-1] = utils.find_nearest_index(intervals, self.pitch[-1])
for i in xrange(1, len(self.pitch)-1):
if self.pitch[i] == -10000:
pitch_quantized[i] = -10000
continue
slope_back = abs((self.pitch[i] - self.pitch[i-1])/(self.timestamps[i] - self.timestamps[i-1]))
slope_front = abs((self.pitch[i+1] - self.pitch[i])/(self.timestamps[i+1] - self.timestamps[i]))
if slope_front < slope_thresh or slope_back < slope_thresh:
ind = utils.find_nearest_index(intervals, self.pitch[i])
cents_diff = abs(self.pitch[i] - intervals[ind])
if cents_diff <= cents_thresh:
pitch_quantized[i] = intervals[ind]
else:
pitch_quantized[i] = -10000
else:
pitch_quantized[i] = -10000
self.pitch = pitch_quantized
|
def function[discretize, parameter[self, intervals, slope_thresh, cents_thresh]]:
constant[
This function takes the pitch data and returns it quantized to given
set of intervals. All transactions must happen in cent scale.
slope_thresh is the bound beyond which the pitch contour is said to transit
from one svara to another. It is specified in cents/sec.
cents_thresh is a limit within which two pitch values are considered the same.
This is what pushes the quantization limit.
The function returns quantized pitch data.
]
name[self].pitch assign[=] call[name[median_filter], parameter[name[self].pitch, constant[7]]]
variable[pitch_quantized] assign[=] call[name[np].zeros, parameter[call[name[len], parameter[name[self].pitch]]]]
call[name[pitch_quantized]][constant[0]] assign[=] call[name[utils].find_nearest_index, parameter[name[intervals], call[name[self].pitch][constant[0]]]]
call[name[pitch_quantized]][<ast.UnaryOp object at 0x7da18eb560e0>] assign[=] call[name[utils].find_nearest_index, parameter[name[intervals], call[name[self].pitch][<ast.UnaryOp object at 0x7da18eb552d0>]]]
for taget[name[i]] in starred[call[name[xrange], parameter[constant[1], binary_operation[call[name[len], parameter[name[self].pitch]] - constant[1]]]]] begin[:]
if compare[call[name[self].pitch][name[i]] equal[==] <ast.UnaryOp object at 0x7da18eb55f60>] begin[:]
call[name[pitch_quantized]][name[i]] assign[=] <ast.UnaryOp object at 0x7da18eb55960>
continue
variable[slope_back] assign[=] call[name[abs], parameter[binary_operation[binary_operation[call[name[self].pitch][name[i]] - call[name[self].pitch][binary_operation[name[i] - constant[1]]]] / binary_operation[call[name[self].timestamps][name[i]] - call[name[self].timestamps][binary_operation[name[i] - constant[1]]]]]]]
variable[slope_front] assign[=] call[name[abs], parameter[binary_operation[binary_operation[call[name[self].pitch][binary_operation[name[i] + constant[1]]] - call[name[self].pitch][name[i]]] / binary_operation[call[name[self].timestamps][binary_operation[name[i] + constant[1]]] - call[name[self].timestamps][name[i]]]]]]
if <ast.BoolOp object at 0x7da18f813760> begin[:]
variable[ind] assign[=] call[name[utils].find_nearest_index, parameter[name[intervals], call[name[self].pitch][name[i]]]]
variable[cents_diff] assign[=] call[name[abs], parameter[binary_operation[call[name[self].pitch][name[i]] - call[name[intervals]][name[ind]]]]]
if compare[name[cents_diff] less_or_equal[<=] name[cents_thresh]] begin[:]
call[name[pitch_quantized]][name[i]] assign[=] call[name[intervals]][name[ind]]
name[self].pitch assign[=] name[pitch_quantized]
|
keyword[def] identifier[discretize] ( identifier[self] , identifier[intervals] , identifier[slope_thresh] = literal[int] , identifier[cents_thresh] = literal[int] ):
literal[string]
identifier[self] . identifier[pitch] = identifier[median_filter] ( identifier[self] . identifier[pitch] , literal[int] )
identifier[pitch_quantized] = identifier[np] . identifier[zeros] ( identifier[len] ( identifier[self] . identifier[pitch] ))
identifier[pitch_quantized] [ literal[int] ]= identifier[utils] . identifier[find_nearest_index] ( identifier[intervals] , identifier[self] . identifier[pitch] [ literal[int] ])
identifier[pitch_quantized] [- literal[int] ]= identifier[utils] . identifier[find_nearest_index] ( identifier[intervals] , identifier[self] . identifier[pitch] [- literal[int] ])
keyword[for] identifier[i] keyword[in] identifier[xrange] ( literal[int] , identifier[len] ( identifier[self] . identifier[pitch] )- literal[int] ):
keyword[if] identifier[self] . identifier[pitch] [ identifier[i] ]==- literal[int] :
identifier[pitch_quantized] [ identifier[i] ]=- literal[int]
keyword[continue]
identifier[slope_back] = identifier[abs] (( identifier[self] . identifier[pitch] [ identifier[i] ]- identifier[self] . identifier[pitch] [ identifier[i] - literal[int] ])/( identifier[self] . identifier[timestamps] [ identifier[i] ]- identifier[self] . identifier[timestamps] [ identifier[i] - literal[int] ]))
identifier[slope_front] = identifier[abs] (( identifier[self] . identifier[pitch] [ identifier[i] + literal[int] ]- identifier[self] . identifier[pitch] [ identifier[i] ])/( identifier[self] . identifier[timestamps] [ identifier[i] + literal[int] ]- identifier[self] . identifier[timestamps] [ identifier[i] ]))
keyword[if] identifier[slope_front] < identifier[slope_thresh] keyword[or] identifier[slope_back] < identifier[slope_thresh] :
identifier[ind] = identifier[utils] . identifier[find_nearest_index] ( identifier[intervals] , identifier[self] . identifier[pitch] [ identifier[i] ])
identifier[cents_diff] = identifier[abs] ( identifier[self] . identifier[pitch] [ identifier[i] ]- identifier[intervals] [ identifier[ind] ])
keyword[if] identifier[cents_diff] <= identifier[cents_thresh] :
identifier[pitch_quantized] [ identifier[i] ]= identifier[intervals] [ identifier[ind] ]
keyword[else] :
identifier[pitch_quantized] [ identifier[i] ]=- literal[int]
keyword[else] :
identifier[pitch_quantized] [ identifier[i] ]=- literal[int]
identifier[self] . identifier[pitch] = identifier[pitch_quantized]
|
def discretize(self, intervals, slope_thresh=1500, cents_thresh=50):
"""
This function takes the pitch data and returns it quantized to given
set of intervals. All transactions must happen in cent scale.
slope_thresh is the bound beyond which the pitch contour is said to transit
from one svara to another. It is specified in cents/sec.
cents_thresh is a limit within which two pitch values are considered the same.
This is what pushes the quantization limit.
The function returns quantized pitch data.
"""
#eps = np.finfo(float).eps
#pitch = median_filter(pitch, 7)+eps
self.pitch = median_filter(self.pitch, 7)
pitch_quantized = np.zeros(len(self.pitch))
pitch_quantized[0] = utils.find_nearest_index(intervals, self.pitch[0])
pitch_quantized[-1] = utils.find_nearest_index(intervals, self.pitch[-1])
for i in xrange(1, len(self.pitch) - 1):
if self.pitch[i] == -10000:
pitch_quantized[i] = -10000
continue # depends on [control=['if'], data=[]]
slope_back = abs((self.pitch[i] - self.pitch[i - 1]) / (self.timestamps[i] - self.timestamps[i - 1]))
slope_front = abs((self.pitch[i + 1] - self.pitch[i]) / (self.timestamps[i + 1] - self.timestamps[i]))
if slope_front < slope_thresh or slope_back < slope_thresh:
ind = utils.find_nearest_index(intervals, self.pitch[i])
cents_diff = abs(self.pitch[i] - intervals[ind])
if cents_diff <= cents_thresh:
pitch_quantized[i] = intervals[ind] # depends on [control=['if'], data=[]]
else:
pitch_quantized[i] = -10000 # depends on [control=['if'], data=[]]
else:
pitch_quantized[i] = -10000 # depends on [control=['for'], data=['i']]
self.pitch = pitch_quantized
|
def _read_wrapper(data):
"""Ensure unicode always returned on read."""
# Paramiko (strangely) in PY3 returns an int here.
if isinstance(data, int):
data = chr(data)
# Ensure unicode
return py23_compat.text_type(data)
|
def function[_read_wrapper, parameter[data]]:
constant[Ensure unicode always returned on read.]
if call[name[isinstance], parameter[name[data], name[int]]] begin[:]
variable[data] assign[=] call[name[chr], parameter[name[data]]]
return[call[name[py23_compat].text_type, parameter[name[data]]]]
|
keyword[def] identifier[_read_wrapper] ( identifier[data] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[data] , identifier[int] ):
identifier[data] = identifier[chr] ( identifier[data] )
keyword[return] identifier[py23_compat] . identifier[text_type] ( identifier[data] )
|
def _read_wrapper(data):
"""Ensure unicode always returned on read."""
# Paramiko (strangely) in PY3 returns an int here.
if isinstance(data, int):
data = chr(data) # depends on [control=['if'], data=[]]
# Ensure unicode
return py23_compat.text_type(data)
|
def printcodelist(codelist, to=sys.stdout):
"""Get a code list. Print it nicely."""
labeldict = {}
pendinglabels = []
for i, (op, arg) in enumerate(codelist):
if isinstance(op, Label):
pendinglabels.append(op)
elif op is SetLineno:
pass
else:
while pendinglabels:
labeldict[pendinglabels.pop()] = i
lineno = None
islabel = False
for i, (op, arg) in enumerate(codelist):
if op is SetLineno:
lineno = arg
print >> to
continue
if isinstance(op, Label):
islabel = True
continue
if lineno is None:
linenostr = ''
else:
linenostr = str(lineno)
lineno = None
if islabel:
islabelstr = '>>'
islabel = False
else:
islabelstr = ''
if op in hasconst:
argstr = repr(arg)
elif op in hasjump:
try:
argstr = 'to ' + str(labeldict[arg])
except KeyError:
argstr = repr(arg)
elif op in hasarg:
argstr = str(arg)
else:
argstr = ''
print >> to, '%3s %2s %4d %-20s %s' % (
linenostr,
islabelstr,
i,
op,
argstr)
|
def function[printcodelist, parameter[codelist, to]]:
constant[Get a code list. Print it nicely.]
variable[labeldict] assign[=] dictionary[[], []]
variable[pendinglabels] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b11bc1f0>, <ast.Tuple object at 0x7da1b11bf250>]]] in starred[call[name[enumerate], parameter[name[codelist]]]] begin[:]
if call[name[isinstance], parameter[name[op], name[Label]]] begin[:]
call[name[pendinglabels].append, parameter[name[op]]]
variable[lineno] assign[=] constant[None]
variable[islabel] assign[=] constant[False]
for taget[tuple[[<ast.Name object at 0x7da1b11bee30>, <ast.Tuple object at 0x7da1b11be590>]]] in starred[call[name[enumerate], parameter[name[codelist]]]] begin[:]
if compare[name[op] is name[SetLineno]] begin[:]
variable[lineno] assign[=] name[arg]
binary_operation[name[print] <ast.RShift object at 0x7da2590d6a40> name[to]]
continue
if call[name[isinstance], parameter[name[op], name[Label]]] begin[:]
variable[islabel] assign[=] constant[True]
continue
if compare[name[lineno] is constant[None]] begin[:]
variable[linenostr] assign[=] constant[]
if name[islabel] begin[:]
variable[islabelstr] assign[=] constant[>>]
variable[islabel] assign[=] constant[False]
if compare[name[op] in name[hasconst]] begin[:]
variable[argstr] assign[=] call[name[repr], parameter[name[arg]]]
tuple[[<ast.BinOp object at 0x7da1b11bd1e0>, <ast.BinOp object at 0x7da1b11bca00>]]
|
keyword[def] identifier[printcodelist] ( identifier[codelist] , identifier[to] = identifier[sys] . identifier[stdout] ):
literal[string]
identifier[labeldict] ={}
identifier[pendinglabels] =[]
keyword[for] identifier[i] ,( identifier[op] , identifier[arg] ) keyword[in] identifier[enumerate] ( identifier[codelist] ):
keyword[if] identifier[isinstance] ( identifier[op] , identifier[Label] ):
identifier[pendinglabels] . identifier[append] ( identifier[op] )
keyword[elif] identifier[op] keyword[is] identifier[SetLineno] :
keyword[pass]
keyword[else] :
keyword[while] identifier[pendinglabels] :
identifier[labeldict] [ identifier[pendinglabels] . identifier[pop] ()]= identifier[i]
identifier[lineno] = keyword[None]
identifier[islabel] = keyword[False]
keyword[for] identifier[i] ,( identifier[op] , identifier[arg] ) keyword[in] identifier[enumerate] ( identifier[codelist] ):
keyword[if] identifier[op] keyword[is] identifier[SetLineno] :
identifier[lineno] = identifier[arg]
identifier[print] >> identifier[to]
keyword[continue]
keyword[if] identifier[isinstance] ( identifier[op] , identifier[Label] ):
identifier[islabel] = keyword[True]
keyword[continue]
keyword[if] identifier[lineno] keyword[is] keyword[None] :
identifier[linenostr] = literal[string]
keyword[else] :
identifier[linenostr] = identifier[str] ( identifier[lineno] )
identifier[lineno] = keyword[None]
keyword[if] identifier[islabel] :
identifier[islabelstr] = literal[string]
identifier[islabel] = keyword[False]
keyword[else] :
identifier[islabelstr] = literal[string]
keyword[if] identifier[op] keyword[in] identifier[hasconst] :
identifier[argstr] = identifier[repr] ( identifier[arg] )
keyword[elif] identifier[op] keyword[in] identifier[hasjump] :
keyword[try] :
identifier[argstr] = literal[string] + identifier[str] ( identifier[labeldict] [ identifier[arg] ])
keyword[except] identifier[KeyError] :
identifier[argstr] = identifier[repr] ( identifier[arg] )
keyword[elif] identifier[op] keyword[in] identifier[hasarg] :
identifier[argstr] = identifier[str] ( identifier[arg] )
keyword[else] :
identifier[argstr] = literal[string]
identifier[print] >> identifier[to] , literal[string] %(
identifier[linenostr] ,
identifier[islabelstr] ,
identifier[i] ,
identifier[op] ,
identifier[argstr] )
|
def printcodelist(codelist, to=sys.stdout):
"""Get a code list. Print it nicely."""
labeldict = {}
pendinglabels = []
for (i, (op, arg)) in enumerate(codelist):
if isinstance(op, Label):
pendinglabels.append(op) # depends on [control=['if'], data=[]]
elif op is SetLineno:
pass # depends on [control=['if'], data=[]]
else:
while pendinglabels:
labeldict[pendinglabels.pop()] = i # depends on [control=['while'], data=[]] # depends on [control=['for'], data=[]]
lineno = None
islabel = False
for (i, (op, arg)) in enumerate(codelist):
if op is SetLineno:
lineno = arg
print >> to
continue # depends on [control=['if'], data=[]]
if isinstance(op, Label):
islabel = True
continue # depends on [control=['if'], data=[]]
if lineno is None:
linenostr = '' # depends on [control=['if'], data=[]]
else:
linenostr = str(lineno)
lineno = None
if islabel:
islabelstr = '>>'
islabel = False # depends on [control=['if'], data=[]]
else:
islabelstr = ''
if op in hasconst:
argstr = repr(arg) # depends on [control=['if'], data=[]]
elif op in hasjump:
try:
argstr = 'to ' + str(labeldict[arg]) # depends on [control=['try'], data=[]]
except KeyError:
argstr = repr(arg) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif op in hasarg:
argstr = str(arg) # depends on [control=['if'], data=[]]
else:
argstr = ''
(print >> to, '%3s %2s %4d %-20s %s' % (linenostr, islabelstr, i, op, argstr)) # depends on [control=['for'], data=[]]
|
def get_next_step(self):
"""Find the proper step when user clicks the Next button.
:returns: The step to be switched to
:rtype: WizardStep
"""
layer_purpose = self.parent.step_kw_purpose.selected_purpose()
if layer_purpose != layer_purpose_aggregation:
subcategory = self.parent.step_kw_subcategory.\
selected_subcategory()
else:
subcategory = {'key': None}
# Has layer groups, go to field mapping
field_groups = get_field_groups(
layer_purpose['key'], subcategory['key'])
compulsory_field = get_compulsory_fields(
layer_purpose['key'], subcategory['key'])
# It's aggregation and has field_groups.
if field_groups and layer_purpose == layer_purpose_aggregation:
return self.parent.step_kw_fields_mapping
# It has field_groups and the compulsory field is population count.
if field_groups and compulsory_field == population_count_field:
return self.parent.step_kw_fields_mapping
# Has classifications, go to multi classifications
if subcategory.get('classifications'):
if layer_purpose == layer_purpose_hazard:
return self.parent.step_kw_multi_classifications
elif layer_purpose == layer_purpose_exposure:
return self.parent.step_kw_classification
# Check if it can go to inasafe field step
non_compulsory_fields = get_non_compulsory_fields(
layer_purpose['key'], subcategory['key'])
if not skip_inasafe_field(self.parent.layer, non_compulsory_fields):
return self.parent.step_kw_inasafe_fields
# Check if it can go to inasafe default field step
default_inasafe_fields = get_fields(
layer_purpose['key'],
subcategory['key'],
replace_null=True,
in_group=False)
if default_inasafe_fields:
return self.parent.step_kw_default_inasafe_fields
# Any other case
return self.parent.step_kw_source
|
def function[get_next_step, parameter[self]]:
constant[Find the proper step when user clicks the Next button.
:returns: The step to be switched to
:rtype: WizardStep
]
variable[layer_purpose] assign[=] call[name[self].parent.step_kw_purpose.selected_purpose, parameter[]]
if compare[name[layer_purpose] not_equal[!=] name[layer_purpose_aggregation]] begin[:]
variable[subcategory] assign[=] call[name[self].parent.step_kw_subcategory.selected_subcategory, parameter[]]
variable[field_groups] assign[=] call[name[get_field_groups], parameter[call[name[layer_purpose]][constant[key]], call[name[subcategory]][constant[key]]]]
variable[compulsory_field] assign[=] call[name[get_compulsory_fields], parameter[call[name[layer_purpose]][constant[key]], call[name[subcategory]][constant[key]]]]
if <ast.BoolOp object at 0x7da1b0c3f010> begin[:]
return[name[self].parent.step_kw_fields_mapping]
if <ast.BoolOp object at 0x7da1b0c3d4e0> begin[:]
return[name[self].parent.step_kw_fields_mapping]
if call[name[subcategory].get, parameter[constant[classifications]]] begin[:]
if compare[name[layer_purpose] equal[==] name[layer_purpose_hazard]] begin[:]
return[name[self].parent.step_kw_multi_classifications]
variable[non_compulsory_fields] assign[=] call[name[get_non_compulsory_fields], parameter[call[name[layer_purpose]][constant[key]], call[name[subcategory]][constant[key]]]]
if <ast.UnaryOp object at 0x7da1b0c3ded0> begin[:]
return[name[self].parent.step_kw_inasafe_fields]
variable[default_inasafe_fields] assign[=] call[name[get_fields], parameter[call[name[layer_purpose]][constant[key]], call[name[subcategory]][constant[key]]]]
if name[default_inasafe_fields] begin[:]
return[name[self].parent.step_kw_default_inasafe_fields]
return[name[self].parent.step_kw_source]
|
keyword[def] identifier[get_next_step] ( identifier[self] ):
literal[string]
identifier[layer_purpose] = identifier[self] . identifier[parent] . identifier[step_kw_purpose] . identifier[selected_purpose] ()
keyword[if] identifier[layer_purpose] != identifier[layer_purpose_aggregation] :
identifier[subcategory] = identifier[self] . identifier[parent] . identifier[step_kw_subcategory] . identifier[selected_subcategory] ()
keyword[else] :
identifier[subcategory] ={ literal[string] : keyword[None] }
identifier[field_groups] = identifier[get_field_groups] (
identifier[layer_purpose] [ literal[string] ], identifier[subcategory] [ literal[string] ])
identifier[compulsory_field] = identifier[get_compulsory_fields] (
identifier[layer_purpose] [ literal[string] ], identifier[subcategory] [ literal[string] ])
keyword[if] identifier[field_groups] keyword[and] identifier[layer_purpose] == identifier[layer_purpose_aggregation] :
keyword[return] identifier[self] . identifier[parent] . identifier[step_kw_fields_mapping]
keyword[if] identifier[field_groups] keyword[and] identifier[compulsory_field] == identifier[population_count_field] :
keyword[return] identifier[self] . identifier[parent] . identifier[step_kw_fields_mapping]
keyword[if] identifier[subcategory] . identifier[get] ( literal[string] ):
keyword[if] identifier[layer_purpose] == identifier[layer_purpose_hazard] :
keyword[return] identifier[self] . identifier[parent] . identifier[step_kw_multi_classifications]
keyword[elif] identifier[layer_purpose] == identifier[layer_purpose_exposure] :
keyword[return] identifier[self] . identifier[parent] . identifier[step_kw_classification]
identifier[non_compulsory_fields] = identifier[get_non_compulsory_fields] (
identifier[layer_purpose] [ literal[string] ], identifier[subcategory] [ literal[string] ])
keyword[if] keyword[not] identifier[skip_inasafe_field] ( identifier[self] . identifier[parent] . identifier[layer] , identifier[non_compulsory_fields] ):
keyword[return] identifier[self] . identifier[parent] . identifier[step_kw_inasafe_fields]
identifier[default_inasafe_fields] = identifier[get_fields] (
identifier[layer_purpose] [ literal[string] ],
identifier[subcategory] [ literal[string] ],
identifier[replace_null] = keyword[True] ,
identifier[in_group] = keyword[False] )
keyword[if] identifier[default_inasafe_fields] :
keyword[return] identifier[self] . identifier[parent] . identifier[step_kw_default_inasafe_fields]
keyword[return] identifier[self] . identifier[parent] . identifier[step_kw_source]
|
def get_next_step(self):
"""Find the proper step when user clicks the Next button.
:returns: The step to be switched to
:rtype: WizardStep
"""
layer_purpose = self.parent.step_kw_purpose.selected_purpose()
if layer_purpose != layer_purpose_aggregation:
subcategory = self.parent.step_kw_subcategory.selected_subcategory() # depends on [control=['if'], data=[]]
else:
subcategory = {'key': None}
# Has layer groups, go to field mapping
field_groups = get_field_groups(layer_purpose['key'], subcategory['key'])
compulsory_field = get_compulsory_fields(layer_purpose['key'], subcategory['key'])
# It's aggregation and has field_groups.
if field_groups and layer_purpose == layer_purpose_aggregation:
return self.parent.step_kw_fields_mapping # depends on [control=['if'], data=[]]
# It has field_groups and the compulsory field is population count.
if field_groups and compulsory_field == population_count_field:
return self.parent.step_kw_fields_mapping # depends on [control=['if'], data=[]]
# Has classifications, go to multi classifications
if subcategory.get('classifications'):
if layer_purpose == layer_purpose_hazard:
return self.parent.step_kw_multi_classifications # depends on [control=['if'], data=[]]
elif layer_purpose == layer_purpose_exposure:
return self.parent.step_kw_classification # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Check if it can go to inasafe field step
non_compulsory_fields = get_non_compulsory_fields(layer_purpose['key'], subcategory['key'])
if not skip_inasafe_field(self.parent.layer, non_compulsory_fields):
return self.parent.step_kw_inasafe_fields # depends on [control=['if'], data=[]]
# Check if it can go to inasafe default field step
default_inasafe_fields = get_fields(layer_purpose['key'], subcategory['key'], replace_null=True, in_group=False)
if default_inasafe_fields:
return self.parent.step_kw_default_inasafe_fields # depends on [control=['if'], data=[]]
# Any other case
return self.parent.step_kw_source
|
def get_store_credit_by_id(cls, store_credit_id, **kwargs):
"""Find StoreCredit
Return single instance of StoreCredit by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_store_credit_by_id(store_credit_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_id: ID of storeCredit to return (required)
:return: StoreCredit
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_store_credit_by_id_with_http_info(store_credit_id, **kwargs)
else:
(data) = cls._get_store_credit_by_id_with_http_info(store_credit_id, **kwargs)
return data
|
def function[get_store_credit_by_id, parameter[cls, store_credit_id]]:
constant[Find StoreCredit
Return single instance of StoreCredit by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_store_credit_by_id(store_credit_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_id: ID of storeCredit to return (required)
:return: StoreCredit
If the method is called asynchronously,
returns the request thread.
]
call[name[kwargs]][constant[_return_http_data_only]] assign[=] constant[True]
if call[name[kwargs].get, parameter[constant[async]]] begin[:]
return[call[name[cls]._get_store_credit_by_id_with_http_info, parameter[name[store_credit_id]]]]
|
keyword[def] identifier[get_store_credit_by_id] ( identifier[cls] , identifier[store_credit_id] ,** identifier[kwargs] ):
literal[string]
identifier[kwargs] [ literal[string] ]= keyword[True]
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] identifier[cls] . identifier[_get_store_credit_by_id_with_http_info] ( identifier[store_credit_id] ,** identifier[kwargs] )
keyword[else] :
( identifier[data] )= identifier[cls] . identifier[_get_store_credit_by_id_with_http_info] ( identifier[store_credit_id] ,** identifier[kwargs] )
keyword[return] identifier[data]
|
def get_store_credit_by_id(cls, store_credit_id, **kwargs):
"""Find StoreCredit
Return single instance of StoreCredit by its ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_store_credit_by_id(store_credit_id, async=True)
>>> result = thread.get()
:param async bool
:param str store_credit_id: ID of storeCredit to return (required)
:return: StoreCredit
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return cls._get_store_credit_by_id_with_http_info(store_credit_id, **kwargs) # depends on [control=['if'], data=[]]
else:
data = cls._get_store_credit_by_id_with_http_info(store_credit_id, **kwargs)
return data
|
def static_workflow_declaration(job, config, normal_bam, normal_bai, tumor_bam, tumor_bai):
"""
Statically declare workflow so sections can be modularly repurposed
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
:param str normal_bam: Normal BAM FileStoreID
:param str normal_bai: Normal BAM index FileStoreID
:param str tumor_bam: Tumor BAM FileStoreID
:param str tumor_bai: Tumor BAM Index FileStoreID
"""
# Mutation and indel tool wiring
memory = '1G' if config.ci_test else '10G'
disk = '1G' if config.ci_test else '75G'
mutect_results, pindel_results, muse_results = None, None, None
if config.run_mutect:
mutect_results = job.addChildJobFn(run_mutect, normal_bam, normal_bai, tumor_bam, tumor_bai, config.reference,
config.dict, config.fai, config.cosmic, config.dbsnp,
cores=1, memory=memory, disk=disk).rv()
if config.run_pindel:
pindel_results = job.addChildJobFn(run_pindel, normal_bam, normal_bai, tumor_bam, tumor_bai,
config.reference, config.fai,
cores=config.cores, memory=memory, disk=disk).rv()
if config.run_muse:
muse_results = job.addChildJobFn(run_muse, normal_bam, normal_bai, tumor_bam, tumor_bai,
config.reference, config.dict, config.fai, config.dbsnp,
cores=config.cores, memory=memory, disk=disk).rv()
# Pass tool results (whether None or a promised return value) to consolidation step
consolidation = job.wrapJobFn(consolidate_output, config, mutect_results, pindel_results, muse_results)
job.addFollowOn(consolidation)
|
def function[static_workflow_declaration, parameter[job, config, normal_bam, normal_bai, tumor_bam, tumor_bai]]:
constant[
Statically declare workflow so sections can be modularly repurposed
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
:param str normal_bam: Normal BAM FileStoreID
:param str normal_bai: Normal BAM index FileStoreID
:param str tumor_bam: Tumor BAM FileStoreID
:param str tumor_bai: Tumor BAM Index FileStoreID
]
variable[memory] assign[=] <ast.IfExp object at 0x7da20c6c4670>
variable[disk] assign[=] <ast.IfExp object at 0x7da20c6c4940>
<ast.Tuple object at 0x7da20c6c7100> assign[=] tuple[[<ast.Constant object at 0x7da20c6c6950>, <ast.Constant object at 0x7da20c6c53c0>, <ast.Constant object at 0x7da20c6c4190>]]
if name[config].run_mutect begin[:]
variable[mutect_results] assign[=] call[call[name[job].addChildJobFn, parameter[name[run_mutect], name[normal_bam], name[normal_bai], name[tumor_bam], name[tumor_bai], name[config].reference, name[config].dict, name[config].fai, name[config].cosmic, name[config].dbsnp]].rv, parameter[]]
if name[config].run_pindel begin[:]
variable[pindel_results] assign[=] call[call[name[job].addChildJobFn, parameter[name[run_pindel], name[normal_bam], name[normal_bai], name[tumor_bam], name[tumor_bai], name[config].reference, name[config].fai]].rv, parameter[]]
if name[config].run_muse begin[:]
variable[muse_results] assign[=] call[call[name[job].addChildJobFn, parameter[name[run_muse], name[normal_bam], name[normal_bai], name[tumor_bam], name[tumor_bai], name[config].reference, name[config].dict, name[config].fai, name[config].dbsnp]].rv, parameter[]]
variable[consolidation] assign[=] call[name[job].wrapJobFn, parameter[name[consolidate_output], name[config], name[mutect_results], name[pindel_results], name[muse_results]]]
call[name[job].addFollowOn, parameter[name[consolidation]]]
|
keyword[def] identifier[static_workflow_declaration] ( identifier[job] , identifier[config] , identifier[normal_bam] , identifier[normal_bai] , identifier[tumor_bam] , identifier[tumor_bai] ):
literal[string]
identifier[memory] = literal[string] keyword[if] identifier[config] . identifier[ci_test] keyword[else] literal[string]
identifier[disk] = literal[string] keyword[if] identifier[config] . identifier[ci_test] keyword[else] literal[string]
identifier[mutect_results] , identifier[pindel_results] , identifier[muse_results] = keyword[None] , keyword[None] , keyword[None]
keyword[if] identifier[config] . identifier[run_mutect] :
identifier[mutect_results] = identifier[job] . identifier[addChildJobFn] ( identifier[run_mutect] , identifier[normal_bam] , identifier[normal_bai] , identifier[tumor_bam] , identifier[tumor_bai] , identifier[config] . identifier[reference] ,
identifier[config] . identifier[dict] , identifier[config] . identifier[fai] , identifier[config] . identifier[cosmic] , identifier[config] . identifier[dbsnp] ,
identifier[cores] = literal[int] , identifier[memory] = identifier[memory] , identifier[disk] = identifier[disk] ). identifier[rv] ()
keyword[if] identifier[config] . identifier[run_pindel] :
identifier[pindel_results] = identifier[job] . identifier[addChildJobFn] ( identifier[run_pindel] , identifier[normal_bam] , identifier[normal_bai] , identifier[tumor_bam] , identifier[tumor_bai] ,
identifier[config] . identifier[reference] , identifier[config] . identifier[fai] ,
identifier[cores] = identifier[config] . identifier[cores] , identifier[memory] = identifier[memory] , identifier[disk] = identifier[disk] ). identifier[rv] ()
keyword[if] identifier[config] . identifier[run_muse] :
identifier[muse_results] = identifier[job] . identifier[addChildJobFn] ( identifier[run_muse] , identifier[normal_bam] , identifier[normal_bai] , identifier[tumor_bam] , identifier[tumor_bai] ,
identifier[config] . identifier[reference] , identifier[config] . identifier[dict] , identifier[config] . identifier[fai] , identifier[config] . identifier[dbsnp] ,
identifier[cores] = identifier[config] . identifier[cores] , identifier[memory] = identifier[memory] , identifier[disk] = identifier[disk] ). identifier[rv] ()
identifier[consolidation] = identifier[job] . identifier[wrapJobFn] ( identifier[consolidate_output] , identifier[config] , identifier[mutect_results] , identifier[pindel_results] , identifier[muse_results] )
identifier[job] . identifier[addFollowOn] ( identifier[consolidation] )
|
def static_workflow_declaration(job, config, normal_bam, normal_bai, tumor_bam, tumor_bai):
"""
Statically declare workflow so sections can be modularly repurposed
:param JobFunctionWrappingJob job: passed automatically by Toil
:param Namespace config: Argparse Namespace object containing argument inputs
:param str normal_bam: Normal BAM FileStoreID
:param str normal_bai: Normal BAM index FileStoreID
:param str tumor_bam: Tumor BAM FileStoreID
:param str tumor_bai: Tumor BAM Index FileStoreID
"""
# Mutation and indel tool wiring
memory = '1G' if config.ci_test else '10G'
disk = '1G' if config.ci_test else '75G'
(mutect_results, pindel_results, muse_results) = (None, None, None)
if config.run_mutect:
mutect_results = job.addChildJobFn(run_mutect, normal_bam, normal_bai, tumor_bam, tumor_bai, config.reference, config.dict, config.fai, config.cosmic, config.dbsnp, cores=1, memory=memory, disk=disk).rv() # depends on [control=['if'], data=[]]
if config.run_pindel:
pindel_results = job.addChildJobFn(run_pindel, normal_bam, normal_bai, tumor_bam, tumor_bai, config.reference, config.fai, cores=config.cores, memory=memory, disk=disk).rv() # depends on [control=['if'], data=[]]
if config.run_muse:
muse_results = job.addChildJobFn(run_muse, normal_bam, normal_bai, tumor_bam, tumor_bai, config.reference, config.dict, config.fai, config.dbsnp, cores=config.cores, memory=memory, disk=disk).rv() # depends on [control=['if'], data=[]]
# Pass tool results (whether None or a promised return value) to consolidation step
consolidation = job.wrapJobFn(consolidate_output, config, mutect_results, pindel_results, muse_results)
job.addFollowOn(consolidation)
|
def process (self, input_symbol):
'''This is the main method that you call to process input. This may
cause the FSM to change state and call an action. This method calls
get_transition() to find the action and next_state associated with the
input_symbol and current_state. If the action is None then the action
is not called and only the current state is changed. This method
processes one complete input symbol. You can process a list of symbols
(or a string) by calling process_list(). '''
self.input_symbol = input_symbol
(self.action, self.next_state) = self.get_transition (self.input_symbol, self.current_state)
if self.action is not None:
self.action (self)
self.current_state = self.next_state
self.next_state = None
|
def function[process, parameter[self, input_symbol]]:
constant[This is the main method that you call to process input. This may
cause the FSM to change state and call an action. This method calls
get_transition() to find the action and next_state associated with the
input_symbol and current_state. If the action is None then the action
is not called and only the current state is changed. This method
processes one complete input symbol. You can process a list of symbols
(or a string) by calling process_list(). ]
name[self].input_symbol assign[=] name[input_symbol]
<ast.Tuple object at 0x7da1b1e1acb0> assign[=] call[name[self].get_transition, parameter[name[self].input_symbol, name[self].current_state]]
if compare[name[self].action is_not constant[None]] begin[:]
call[name[self].action, parameter[name[self]]]
name[self].current_state assign[=] name[self].next_state
name[self].next_state assign[=] constant[None]
|
keyword[def] identifier[process] ( identifier[self] , identifier[input_symbol] ):
literal[string]
identifier[self] . identifier[input_symbol] = identifier[input_symbol]
( identifier[self] . identifier[action] , identifier[self] . identifier[next_state] )= identifier[self] . identifier[get_transition] ( identifier[self] . identifier[input_symbol] , identifier[self] . identifier[current_state] )
keyword[if] identifier[self] . identifier[action] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[action] ( identifier[self] )
identifier[self] . identifier[current_state] = identifier[self] . identifier[next_state]
identifier[self] . identifier[next_state] = keyword[None]
|
def process(self, input_symbol):
"""This is the main method that you call to process input. This may
cause the FSM to change state and call an action. This method calls
get_transition() to find the action and next_state associated with the
input_symbol and current_state. If the action is None then the action
is not called and only the current state is changed. This method
processes one complete input symbol. You can process a list of symbols
(or a string) by calling process_list(). """
self.input_symbol = input_symbol
(self.action, self.next_state) = self.get_transition(self.input_symbol, self.current_state)
if self.action is not None:
self.action(self) # depends on [control=['if'], data=[]]
self.current_state = self.next_state
self.next_state = None
|
def create_sink(
self,
parent,
sink,
unique_writer_identity=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a sink that exports specified log entries to a destination. The
export of newly-ingested log entries begins immediately, unless the
sink's ``writer_identity`` is not permitted to write to the destination.
A sink can export log entries only from the resource owning the sink.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.ConfigServiceV2Client()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `sink`:
>>> sink = {}
>>>
>>> response = client.create_sink(parent, sink)
Args:
parent (str): Required. The resource in which to create the sink:
::
"projects/[PROJECT_ID]"
"organizations/[ORGANIZATION_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]"
"folders/[FOLDER_ID]"
Examples: ``"projects/my-logging-project"``,
``"organizations/123456789"``.
sink (Union[dict, ~google.cloud.logging_v2.types.LogSink]): Required. The new sink, whose ``name`` parameter is a sink identifier
that is not already in use.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.logging_v2.types.LogSink`
unique_writer_identity (bool): Optional. Determines the kind of IAM identity returned as
``writer_identity`` in the new sink. If this value is omitted or set to
false, and if the sink's parent is a project, then the value returned as
``writer_identity`` is the same group or service account used by Logging
before the addition of writer identities to this API. The sink's
destination must be in the same project as the sink itself.
If this field is set to true, or if the sink is owned by a non-project
resource such as an organization, then the value of ``writer_identity``
will be a unique service account used only for exports from the new
sink. For more information, see ``writer_identity`` in ``LogSink``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.logging_v2.types.LogSink` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "create_sink" not in self._inner_api_calls:
self._inner_api_calls[
"create_sink"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.create_sink,
default_retry=self._method_configs["CreateSink"].retry,
default_timeout=self._method_configs["CreateSink"].timeout,
client_info=self._client_info,
)
request = logging_config_pb2.CreateSinkRequest(
parent=parent, sink=sink, unique_writer_identity=unique_writer_identity
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["create_sink"](
request, retry=retry, timeout=timeout, metadata=metadata
)
|
def function[create_sink, parameter[self, parent, sink, unique_writer_identity, retry, timeout, metadata]]:
constant[
Creates a sink that exports specified log entries to a destination. The
export of newly-ingested log entries begins immediately, unless the
sink's ``writer_identity`` is not permitted to write to the destination.
A sink can export log entries only from the resource owning the sink.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.ConfigServiceV2Client()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `sink`:
>>> sink = {}
>>>
>>> response = client.create_sink(parent, sink)
Args:
parent (str): Required. The resource in which to create the sink:
::
"projects/[PROJECT_ID]"
"organizations/[ORGANIZATION_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]"
"folders/[FOLDER_ID]"
Examples: ``"projects/my-logging-project"``,
``"organizations/123456789"``.
sink (Union[dict, ~google.cloud.logging_v2.types.LogSink]): Required. The new sink, whose ``name`` parameter is a sink identifier
that is not already in use.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.logging_v2.types.LogSink`
unique_writer_identity (bool): Optional. Determines the kind of IAM identity returned as
``writer_identity`` in the new sink. If this value is omitted or set to
false, and if the sink's parent is a project, then the value returned as
``writer_identity`` is the same group or service account used by Logging
before the addition of writer identities to this API. The sink's
destination must be in the same project as the sink itself.
If this field is set to true, or if the sink is owned by a non-project
resource such as an organization, then the value of ``writer_identity``
will be a unique service account used only for exports from the new
sink. For more information, see ``writer_identity`` in ``LogSink``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.logging_v2.types.LogSink` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
]
if compare[constant[create_sink] <ast.NotIn object at 0x7da2590d7190> name[self]._inner_api_calls] begin[:]
call[name[self]._inner_api_calls][constant[create_sink]] assign[=] call[name[google].api_core.gapic_v1.method.wrap_method, parameter[name[self].transport.create_sink]]
variable[request] assign[=] call[name[logging_config_pb2].CreateSinkRequest, parameter[]]
if compare[name[metadata] is constant[None]] begin[:]
variable[metadata] assign[=] list[[]]
variable[metadata] assign[=] call[name[list], parameter[name[metadata]]]
<ast.Try object at 0x7da1b23475e0>
return[call[call[name[self]._inner_api_calls][constant[create_sink]], parameter[name[request]]]]
|
keyword[def] identifier[create_sink] (
identifier[self] ,
identifier[parent] ,
identifier[sink] ,
identifier[unique_writer_identity] = keyword[None] ,
identifier[retry] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[timeout] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[metadata] = keyword[None] ,
):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_inner_api_calls] :
identifier[self] . identifier[_inner_api_calls] [
literal[string]
]= identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[wrap_method] (
identifier[self] . identifier[transport] . identifier[create_sink] ,
identifier[default_retry] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[retry] ,
identifier[default_timeout] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[timeout] ,
identifier[client_info] = identifier[self] . identifier[_client_info] ,
)
identifier[request] = identifier[logging_config_pb2] . identifier[CreateSinkRequest] (
identifier[parent] = identifier[parent] , identifier[sink] = identifier[sink] , identifier[unique_writer_identity] = identifier[unique_writer_identity]
)
keyword[if] identifier[metadata] keyword[is] keyword[None] :
identifier[metadata] =[]
identifier[metadata] = identifier[list] ( identifier[metadata] )
keyword[try] :
identifier[routing_header] =[( literal[string] , identifier[parent] )]
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[else] :
identifier[routing_metadata] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[routing_header] . identifier[to_grpc_metadata] (
identifier[routing_header]
)
identifier[metadata] . identifier[append] ( identifier[routing_metadata] )
keyword[return] identifier[self] . identifier[_inner_api_calls] [ literal[string] ](
identifier[request] , identifier[retry] = identifier[retry] , identifier[timeout] = identifier[timeout] , identifier[metadata] = identifier[metadata]
)
|
def create_sink(self, parent, sink, unique_writer_identity=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None):
"""
Creates a sink that exports specified log entries to a destination. The
export of newly-ingested log entries begins immediately, unless the
sink's ``writer_identity`` is not permitted to write to the destination.
A sink can export log entries only from the resource owning the sink.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.ConfigServiceV2Client()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> # TODO: Initialize `sink`:
>>> sink = {}
>>>
>>> response = client.create_sink(parent, sink)
Args:
parent (str): Required. The resource in which to create the sink:
::
"projects/[PROJECT_ID]"
"organizations/[ORGANIZATION_ID]"
"billingAccounts/[BILLING_ACCOUNT_ID]"
"folders/[FOLDER_ID]"
Examples: ``"projects/my-logging-project"``,
``"organizations/123456789"``.
sink (Union[dict, ~google.cloud.logging_v2.types.LogSink]): Required. The new sink, whose ``name`` parameter is a sink identifier
that is not already in use.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.logging_v2.types.LogSink`
unique_writer_identity (bool): Optional. Determines the kind of IAM identity returned as
``writer_identity`` in the new sink. If this value is omitted or set to
false, and if the sink's parent is a project, then the value returned as
``writer_identity`` is the same group or service account used by Logging
before the addition of writer identities to this API. The sink's
destination must be in the same project as the sink itself.
If this field is set to true, or if the sink is owned by a non-project
resource such as an organization, then the value of ``writer_identity``
will be a unique service account used only for exports from the new
sink. For more information, see ``writer_identity`` in ``LogSink``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.logging_v2.types.LogSink` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'create_sink' not in self._inner_api_calls:
self._inner_api_calls['create_sink'] = google.api_core.gapic_v1.method.wrap_method(self.transport.create_sink, default_retry=self._method_configs['CreateSink'].retry, default_timeout=self._method_configs['CreateSink'].timeout, client_info=self._client_info) # depends on [control=['if'], data=[]]
request = logging_config_pb2.CreateSinkRequest(parent=parent, sink=sink, unique_writer_identity=unique_writer_identity)
if metadata is None:
metadata = [] # depends on [control=['if'], data=['metadata']]
metadata = list(metadata)
try:
routing_header = [('parent', parent)] # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(routing_header)
metadata.append(routing_metadata)
return self._inner_api_calls['create_sink'](request, retry=retry, timeout=timeout, metadata=metadata)
|
def swap(args):
"""
%prog swap agpfile
Swap objects and components. Will add gap lines. This is often used in
conjuction with formats.chain.fromagp() to convert between different
coordinate systems.
"""
from jcvi.utils.range import range_interleave
p = OptionParser(swap.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
agpfile, = args
agp = AGP(agpfile, nogaps=True, validate=False)
agp.sort(key=lambda x: (x.component_id, x.component_beg))
newagpfile = agpfile.rsplit(".", 1)[0] + ".swapped.agp"
fw = open(newagpfile, "w")
agp.transfer_header(fw)
for cid, aa in groupby(agp, key=(lambda x: x.component_id)):
aa = list(aa)
aranges = [(x.component_id, x.component_beg, x.component_end) \
for x in aa]
gaps = range_interleave(aranges)
for a, g in zip_longest(aa, gaps):
a.object, a.component_id = a.component_id, a.object
a.component_beg = a.object_beg
a.component_end = a.object_end
print(a, file=fw)
if not g:
continue
aline = [cid, 0, 0, 0]
gseq, ga, gb = g
cspan = gb - ga + 1
aline += ["N", cspan, "fragment", "yes"]
print("\t".join(str(x) for x in aline), file=fw)
fw.close()
# Reindex
idxagpfile = reindex([newagpfile, "--inplace"])
return newagpfile
|
def function[swap, parameter[args]]:
constant[
%prog swap agpfile
Swap objects and components. Will add gap lines. This is often used in
conjuction with formats.chain.fromagp() to convert between different
coordinate systems.
]
from relative_module[jcvi.utils.range] import module[range_interleave]
variable[p] assign[=] call[name[OptionParser], parameter[name[swap].__doc__]]
<ast.Tuple object at 0x7da207f025f0> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da207f005e0>]]
<ast.Tuple object at 0x7da207f01ab0> assign[=] name[args]
variable[agp] assign[=] call[name[AGP], parameter[name[agpfile]]]
call[name[agp].sort, parameter[]]
variable[newagpfile] assign[=] binary_operation[call[call[name[agpfile].rsplit, parameter[constant[.], constant[1]]]][constant[0]] + constant[.swapped.agp]]
variable[fw] assign[=] call[name[open], parameter[name[newagpfile], constant[w]]]
call[name[agp].transfer_header, parameter[name[fw]]]
for taget[tuple[[<ast.Name object at 0x7da207f00be0>, <ast.Name object at 0x7da207f030d0>]]] in starred[call[name[groupby], parameter[name[agp]]]] begin[:]
variable[aa] assign[=] call[name[list], parameter[name[aa]]]
variable[aranges] assign[=] <ast.ListComp object at 0x7da207f03c70>
variable[gaps] assign[=] call[name[range_interleave], parameter[name[aranges]]]
for taget[tuple[[<ast.Name object at 0x7da207f99150>, <ast.Name object at 0x7da207f9b400>]]] in starred[call[name[zip_longest], parameter[name[aa], name[gaps]]]] begin[:]
<ast.Tuple object at 0x7da207f98dc0> assign[=] tuple[[<ast.Attribute object at 0x7da207f9af80>, <ast.Attribute object at 0x7da207f983a0>]]
name[a].component_beg assign[=] name[a].object_beg
name[a].component_end assign[=] name[a].object_end
call[name[print], parameter[name[a]]]
if <ast.UnaryOp object at 0x7da207f9a110> begin[:]
continue
variable[aline] assign[=] list[[<ast.Name object at 0x7da207f99720>, <ast.Constant object at 0x7da207f98040>, <ast.Constant object at 0x7da207f98250>, <ast.Constant object at 0x7da207f9b340>]]
<ast.Tuple object at 0x7da207f995a0> assign[=] name[g]
variable[cspan] assign[=] binary_operation[binary_operation[name[gb] - name[ga]] + constant[1]]
<ast.AugAssign object at 0x7da207f98ac0>
call[name[print], parameter[call[constant[ ].join, parameter[<ast.GeneratorExp object at 0x7da207f99a80>]]]]
call[name[fw].close, parameter[]]
variable[idxagpfile] assign[=] call[name[reindex], parameter[list[[<ast.Name object at 0x7da207f9b850>, <ast.Constant object at 0x7da207f9a9e0>]]]]
return[name[newagpfile]]
|
keyword[def] identifier[swap] ( identifier[args] ):
literal[string]
keyword[from] identifier[jcvi] . identifier[utils] . identifier[range] keyword[import] identifier[range_interleave]
identifier[p] = identifier[OptionParser] ( identifier[swap] . identifier[__doc__] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[agpfile] ,= identifier[args]
identifier[agp] = identifier[AGP] ( identifier[agpfile] , identifier[nogaps] = keyword[True] , identifier[validate] = keyword[False] )
identifier[agp] . identifier[sort] ( identifier[key] = keyword[lambda] identifier[x] :( identifier[x] . identifier[component_id] , identifier[x] . identifier[component_beg] ))
identifier[newagpfile] = identifier[agpfile] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]+ literal[string]
identifier[fw] = identifier[open] ( identifier[newagpfile] , literal[string] )
identifier[agp] . identifier[transfer_header] ( identifier[fw] )
keyword[for] identifier[cid] , identifier[aa] keyword[in] identifier[groupby] ( identifier[agp] , identifier[key] =( keyword[lambda] identifier[x] : identifier[x] . identifier[component_id] )):
identifier[aa] = identifier[list] ( identifier[aa] )
identifier[aranges] =[( identifier[x] . identifier[component_id] , identifier[x] . identifier[component_beg] , identifier[x] . identifier[component_end] ) keyword[for] identifier[x] keyword[in] identifier[aa] ]
identifier[gaps] = identifier[range_interleave] ( identifier[aranges] )
keyword[for] identifier[a] , identifier[g] keyword[in] identifier[zip_longest] ( identifier[aa] , identifier[gaps] ):
identifier[a] . identifier[object] , identifier[a] . identifier[component_id] = identifier[a] . identifier[component_id] , identifier[a] . identifier[object]
identifier[a] . identifier[component_beg] = identifier[a] . identifier[object_beg]
identifier[a] . identifier[component_end] = identifier[a] . identifier[object_end]
identifier[print] ( identifier[a] , identifier[file] = identifier[fw] )
keyword[if] keyword[not] identifier[g] :
keyword[continue]
identifier[aline] =[ identifier[cid] , literal[int] , literal[int] , literal[int] ]
identifier[gseq] , identifier[ga] , identifier[gb] = identifier[g]
identifier[cspan] = identifier[gb] - identifier[ga] + literal[int]
identifier[aline] +=[ literal[string] , identifier[cspan] , literal[string] , literal[string] ]
identifier[print] ( literal[string] . identifier[join] ( identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[aline] ), identifier[file] = identifier[fw] )
identifier[fw] . identifier[close] ()
identifier[idxagpfile] = identifier[reindex] ([ identifier[newagpfile] , literal[string] ])
keyword[return] identifier[newagpfile]
|
def swap(args):
"""
%prog swap agpfile
Swap objects and components. Will add gap lines. This is often used in
conjuction with formats.chain.fromagp() to convert between different
coordinate systems.
"""
from jcvi.utils.range import range_interleave
p = OptionParser(swap.__doc__)
(opts, args) = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(agpfile,) = args
agp = AGP(agpfile, nogaps=True, validate=False)
agp.sort(key=lambda x: (x.component_id, x.component_beg))
newagpfile = agpfile.rsplit('.', 1)[0] + '.swapped.agp'
fw = open(newagpfile, 'w')
agp.transfer_header(fw)
for (cid, aa) in groupby(agp, key=lambda x: x.component_id):
aa = list(aa)
aranges = [(x.component_id, x.component_beg, x.component_end) for x in aa]
gaps = range_interleave(aranges)
for (a, g) in zip_longest(aa, gaps):
(a.object, a.component_id) = (a.component_id, a.object)
a.component_beg = a.object_beg
a.component_end = a.object_end
print(a, file=fw)
if not g:
continue # depends on [control=['if'], data=[]]
aline = [cid, 0, 0, 0]
(gseq, ga, gb) = g
cspan = gb - ga + 1
aline += ['N', cspan, 'fragment', 'yes']
print('\t'.join((str(x) for x in aline)), file=fw) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
fw.close()
# Reindex
idxagpfile = reindex([newagpfile, '--inplace'])
return newagpfile
|
def visibility_changed(self, enable):
"""DockWidget visibility has changed"""
SpyderPluginWidget.visibility_changed(self, enable)
if self.dockwidget is None:
return
if self.dockwidget.isWindow():
self.dock_toolbar.show()
else:
self.dock_toolbar.hide()
if enable:
self.refresh_plugin()
self.sig_update_plugin_title.emit()
|
def function[visibility_changed, parameter[self, enable]]:
constant[DockWidget visibility has changed]
call[name[SpyderPluginWidget].visibility_changed, parameter[name[self], name[enable]]]
if compare[name[self].dockwidget is constant[None]] begin[:]
return[None]
if call[name[self].dockwidget.isWindow, parameter[]] begin[:]
call[name[self].dock_toolbar.show, parameter[]]
if name[enable] begin[:]
call[name[self].refresh_plugin, parameter[]]
call[name[self].sig_update_plugin_title.emit, parameter[]]
|
keyword[def] identifier[visibility_changed] ( identifier[self] , identifier[enable] ):
literal[string]
identifier[SpyderPluginWidget] . identifier[visibility_changed] ( identifier[self] , identifier[enable] )
keyword[if] identifier[self] . identifier[dockwidget] keyword[is] keyword[None] :
keyword[return]
keyword[if] identifier[self] . identifier[dockwidget] . identifier[isWindow] ():
identifier[self] . identifier[dock_toolbar] . identifier[show] ()
keyword[else] :
identifier[self] . identifier[dock_toolbar] . identifier[hide] ()
keyword[if] identifier[enable] :
identifier[self] . identifier[refresh_plugin] ()
identifier[self] . identifier[sig_update_plugin_title] . identifier[emit] ()
|
def visibility_changed(self, enable):
"""DockWidget visibility has changed"""
SpyderPluginWidget.visibility_changed(self, enable)
if self.dockwidget is None:
return # depends on [control=['if'], data=[]]
if self.dockwidget.isWindow():
self.dock_toolbar.show() # depends on [control=['if'], data=[]]
else:
self.dock_toolbar.hide()
if enable:
self.refresh_plugin() # depends on [control=['if'], data=[]]
self.sig_update_plugin_title.emit()
|
def retrieve_seq_length_op(data):
"""An op to compute the length of a sequence from input shape of [batch_size, n_step(max), n_features],
it can be used when the features of padding (on right hand side) are all zeros.
Parameters
-----------
data : tensor
[batch_size, n_step(max), n_features] with zero padding on right hand side.
Examples
---------
>>> data = [[[1],[2],[0],[0],[0]],
... [[1],[2],[3],[0],[0]],
... [[1],[2],[6],[1],[0]]]
>>> data = np.asarray(data)
>>> print(data.shape)
(3, 5, 1)
>>> data = tf.constant(data)
>>> sl = retrieve_seq_length_op(data)
>>> sess = tf.InteractiveSession()
>>> tl.layers.initialize_global_variables(sess)
>>> y = sl.eval()
[2 3 4]
Multiple features
>>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]],
... [[2,3],[2,4],[3,2],[0,0],[0,0]],
... [[3,3],[2,2],[5,3],[1,2],[0,0]]]
>>> print(sl)
[4 3 4]
References
------------
Borrow from `TFlearn <https://github.com/tflearn/tflearn/blob/master/tflearn/layers/recurrent.py>`__.
"""
with tf.name_scope('GetLength'):
used = tf.sign(tf.reduce_max(tf.abs(data), 2))
length = tf.reduce_sum(used, 1)
return tf.cast(length, tf.int32)
|
def function[retrieve_seq_length_op, parameter[data]]:
constant[An op to compute the length of a sequence from input shape of [batch_size, n_step(max), n_features],
it can be used when the features of padding (on right hand side) are all zeros.
Parameters
-----------
data : tensor
[batch_size, n_step(max), n_features] with zero padding on right hand side.
Examples
---------
>>> data = [[[1],[2],[0],[0],[0]],
... [[1],[2],[3],[0],[0]],
... [[1],[2],[6],[1],[0]]]
>>> data = np.asarray(data)
>>> print(data.shape)
(3, 5, 1)
>>> data = tf.constant(data)
>>> sl = retrieve_seq_length_op(data)
>>> sess = tf.InteractiveSession()
>>> tl.layers.initialize_global_variables(sess)
>>> y = sl.eval()
[2 3 4]
Multiple features
>>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]],
... [[2,3],[2,4],[3,2],[0,0],[0,0]],
... [[3,3],[2,2],[5,3],[1,2],[0,0]]]
>>> print(sl)
[4 3 4]
References
------------
Borrow from `TFlearn <https://github.com/tflearn/tflearn/blob/master/tflearn/layers/recurrent.py>`__.
]
with call[name[tf].name_scope, parameter[constant[GetLength]]] begin[:]
variable[used] assign[=] call[name[tf].sign, parameter[call[name[tf].reduce_max, parameter[call[name[tf].abs, parameter[name[data]]], constant[2]]]]]
variable[length] assign[=] call[name[tf].reduce_sum, parameter[name[used], constant[1]]]
return[call[name[tf].cast, parameter[name[length], name[tf].int32]]]
|
keyword[def] identifier[retrieve_seq_length_op] ( identifier[data] ):
literal[string]
keyword[with] identifier[tf] . identifier[name_scope] ( literal[string] ):
identifier[used] = identifier[tf] . identifier[sign] ( identifier[tf] . identifier[reduce_max] ( identifier[tf] . identifier[abs] ( identifier[data] ), literal[int] ))
identifier[length] = identifier[tf] . identifier[reduce_sum] ( identifier[used] , literal[int] )
keyword[return] identifier[tf] . identifier[cast] ( identifier[length] , identifier[tf] . identifier[int32] )
|
def retrieve_seq_length_op(data):
"""An op to compute the length of a sequence from input shape of [batch_size, n_step(max), n_features],
it can be used when the features of padding (on right hand side) are all zeros.
Parameters
-----------
data : tensor
[batch_size, n_step(max), n_features] with zero padding on right hand side.
Examples
---------
>>> data = [[[1],[2],[0],[0],[0]],
... [[1],[2],[3],[0],[0]],
... [[1],[2],[6],[1],[0]]]
>>> data = np.asarray(data)
>>> print(data.shape)
(3, 5, 1)
>>> data = tf.constant(data)
>>> sl = retrieve_seq_length_op(data)
>>> sess = tf.InteractiveSession()
>>> tl.layers.initialize_global_variables(sess)
>>> y = sl.eval()
[2 3 4]
Multiple features
>>> data = [[[1,2],[2,2],[1,2],[1,2],[0,0]],
... [[2,3],[2,4],[3,2],[0,0],[0,0]],
... [[3,3],[2,2],[5,3],[1,2],[0,0]]]
>>> print(sl)
[4 3 4]
References
------------
Borrow from `TFlearn <https://github.com/tflearn/tflearn/blob/master/tflearn/layers/recurrent.py>`__.
"""
with tf.name_scope('GetLength'):
used = tf.sign(tf.reduce_max(tf.abs(data), 2))
length = tf.reduce_sum(used, 1)
return tf.cast(length, tf.int32) # depends on [control=['with'], data=[]]
|
def load_and_dump(create_loader, create_dumper, load_and_dump_):
""":return: a function that has the doc string of
:paramref:`load_and_dump_`
additional arguments to this function are passed on to
:paramref:`load_and_dump_`.
:param create_loader: a loader, e.g.
:class:`knittingpattern.Loader.PathLoader`
:param create_dumper: a dumper, e.g.
:class:`knittingpattern.Dumper.ContentDumper`
:param load_and_dump_: a function to call with the loaded content.
The arguments to both, :paramref:`create_dumper` and,
:paramref:`create_loader`
will be passed to :paramref:`load_and_dump_`.
Any additional arguments to the return value are also passed to
:paramref:`load_and_dump_`.
The return value of :paramref:`load_and_dump_` is passed back to the
:paramref:`Dumper`.
.. seealso:: :func:`decorate_load_and_dump`
"""
@wraps(load_and_dump_)
def load_and_dump__(*args1, **kw):
"""Return the loader."""
def load(*args2):
"""Return the dumper."""
def dump(*args3):
"""Dump the object."""
return load_and_dump_(*(args2 + args3 + args1), **kw)
return create_dumper(dump)
return create_loader(load)
return load_and_dump__
|
def function[load_and_dump, parameter[create_loader, create_dumper, load_and_dump_]]:
constant[:return: a function that has the doc string of
:paramref:`load_and_dump_`
additional arguments to this function are passed on to
:paramref:`load_and_dump_`.
:param create_loader: a loader, e.g.
:class:`knittingpattern.Loader.PathLoader`
:param create_dumper: a dumper, e.g.
:class:`knittingpattern.Dumper.ContentDumper`
:param load_and_dump_: a function to call with the loaded content.
The arguments to both, :paramref:`create_dumper` and,
:paramref:`create_loader`
will be passed to :paramref:`load_and_dump_`.
Any additional arguments to the return value are also passed to
:paramref:`load_and_dump_`.
The return value of :paramref:`load_and_dump_` is passed back to the
:paramref:`Dumper`.
.. seealso:: :func:`decorate_load_and_dump`
]
def function[load_and_dump__, parameter[]]:
constant[Return the loader.]
def function[load, parameter[]]:
constant[Return the dumper.]
def function[dump, parameter[]]:
constant[Dump the object.]
return[call[name[load_and_dump_], parameter[<ast.Starred object at 0x7da18ede5480>]]]
return[call[name[create_dumper], parameter[name[dump]]]]
return[call[name[create_loader], parameter[name[load]]]]
return[name[load_and_dump__]]
|
keyword[def] identifier[load_and_dump] ( identifier[create_loader] , identifier[create_dumper] , identifier[load_and_dump_] ):
literal[string]
@ identifier[wraps] ( identifier[load_and_dump_] )
keyword[def] identifier[load_and_dump__] (* identifier[args1] ,** identifier[kw] ):
literal[string]
keyword[def] identifier[load] (* identifier[args2] ):
literal[string]
keyword[def] identifier[dump] (* identifier[args3] ):
literal[string]
keyword[return] identifier[load_and_dump_] (*( identifier[args2] + identifier[args3] + identifier[args1] ),** identifier[kw] )
keyword[return] identifier[create_dumper] ( identifier[dump] )
keyword[return] identifier[create_loader] ( identifier[load] )
keyword[return] identifier[load_and_dump__]
|
def load_and_dump(create_loader, create_dumper, load_and_dump_):
""":return: a function that has the doc string of
:paramref:`load_and_dump_`
additional arguments to this function are passed on to
:paramref:`load_and_dump_`.
:param create_loader: a loader, e.g.
:class:`knittingpattern.Loader.PathLoader`
:param create_dumper: a dumper, e.g.
:class:`knittingpattern.Dumper.ContentDumper`
:param load_and_dump_: a function to call with the loaded content.
The arguments to both, :paramref:`create_dumper` and,
:paramref:`create_loader`
will be passed to :paramref:`load_and_dump_`.
Any additional arguments to the return value are also passed to
:paramref:`load_and_dump_`.
The return value of :paramref:`load_and_dump_` is passed back to the
:paramref:`Dumper`.
.. seealso:: :func:`decorate_load_and_dump`
"""
@wraps(load_and_dump_)
def load_and_dump__(*args1, **kw):
"""Return the loader."""
def load(*args2):
"""Return the dumper."""
def dump(*args3):
"""Dump the object."""
return load_and_dump_(*args2 + args3 + args1, **kw)
return create_dumper(dump)
return create_loader(load)
return load_and_dump__
|
def check(self, options=None):
"""check for ambiguous keys and move attributes into dict"""
self.check_values(options)
self.check_attributes(options)
self.check_values(options)
return self
|
def function[check, parameter[self, options]]:
constant[check for ambiguous keys and move attributes into dict]
call[name[self].check_values, parameter[name[options]]]
call[name[self].check_attributes, parameter[name[options]]]
call[name[self].check_values, parameter[name[options]]]
return[name[self]]
|
keyword[def] identifier[check] ( identifier[self] , identifier[options] = keyword[None] ):
literal[string]
identifier[self] . identifier[check_values] ( identifier[options] )
identifier[self] . identifier[check_attributes] ( identifier[options] )
identifier[self] . identifier[check_values] ( identifier[options] )
keyword[return] identifier[self]
|
def check(self, options=None):
"""check for ambiguous keys and move attributes into dict"""
self.check_values(options)
self.check_attributes(options)
self.check_values(options)
return self
|
def save_state_machine_as(path=None, recent_opened_notification=False, as_copy=False):
""" Store selected state machine to path
If there is no handed path the interface dialog "create folder" is used to collect one. The state machine finally
is stored by the save_state_machine function.
:param str path: Path of state machine folder where selected state machine should be stored
:param bool recent_opened_notification: Flag to insert path of state machine into recent opened state machine paths
:param bool as_copy: Store state machine as copy flag e.g. without assigning path to state_machine.file_system_path
:return: True if successfully stored, False if the storing process was canceled or stopped by condition fail
:rtype bool:
"""
state_machine_manager_model = rafcon.gui.singleton.state_machine_manager_model
selected_state_machine_model = state_machine_manager_model.get_selected_state_machine_model()
if selected_state_machine_model is None:
logger.warning("Can not 'save state machine as' because no state machine is selected.")
return False
if path is None:
if interface.create_folder_func is None:
logger.error("No function defined for creating a folder")
return False
folder_name = selected_state_machine_model.state_machine.root_state.name
path = interface.create_folder_func("Please choose a root folder and a folder name for the state-machine. "
"The default folder name is the name of the root state.",
format_default_folder_name(folder_name))
if path is None:
logger.warning("No valid path specified")
return False
previous_path = selected_state_machine_model.state_machine.file_system_path
if not as_copy:
marked_dirty = selected_state_machine_model.state_machine.marked_dirty
recent_opened_notification = recent_opened_notification and (not previous_path == path or marked_dirty)
selected_state_machine_model.state_machine.file_system_path = path
result = save_state_machine(delete_old_state_machine=True,
recent_opened_notification=recent_opened_notification,
as_copy=as_copy, copy_path=path)
library_manager_model.state_machine_was_stored(selected_state_machine_model, previous_path)
return result
|
def function[save_state_machine_as, parameter[path, recent_opened_notification, as_copy]]:
constant[ Store selected state machine to path
If there is no handed path the interface dialog "create folder" is used to collect one. The state machine finally
is stored by the save_state_machine function.
:param str path: Path of state machine folder where selected state machine should be stored
:param bool recent_opened_notification: Flag to insert path of state machine into recent opened state machine paths
:param bool as_copy: Store state machine as copy flag e.g. without assigning path to state_machine.file_system_path
:return: True if successfully stored, False if the storing process was canceled or stopped by condition fail
:rtype bool:
]
variable[state_machine_manager_model] assign[=] name[rafcon].gui.singleton.state_machine_manager_model
variable[selected_state_machine_model] assign[=] call[name[state_machine_manager_model].get_selected_state_machine_model, parameter[]]
if compare[name[selected_state_machine_model] is constant[None]] begin[:]
call[name[logger].warning, parameter[constant[Can not 'save state machine as' because no state machine is selected.]]]
return[constant[False]]
if compare[name[path] is constant[None]] begin[:]
if compare[name[interface].create_folder_func is constant[None]] begin[:]
call[name[logger].error, parameter[constant[No function defined for creating a folder]]]
return[constant[False]]
variable[folder_name] assign[=] name[selected_state_machine_model].state_machine.root_state.name
variable[path] assign[=] call[name[interface].create_folder_func, parameter[constant[Please choose a root folder and a folder name for the state-machine. The default folder name is the name of the root state.], call[name[format_default_folder_name], parameter[name[folder_name]]]]]
if compare[name[path] is constant[None]] begin[:]
call[name[logger].warning, parameter[constant[No valid path specified]]]
return[constant[False]]
variable[previous_path] assign[=] name[selected_state_machine_model].state_machine.file_system_path
if <ast.UnaryOp object at 0x7da1b26afe50> begin[:]
variable[marked_dirty] assign[=] name[selected_state_machine_model].state_machine.marked_dirty
variable[recent_opened_notification] assign[=] <ast.BoolOp object at 0x7da1b26ac250>
name[selected_state_machine_model].state_machine.file_system_path assign[=] name[path]
variable[result] assign[=] call[name[save_state_machine], parameter[]]
call[name[library_manager_model].state_machine_was_stored, parameter[name[selected_state_machine_model], name[previous_path]]]
return[name[result]]
|
keyword[def] identifier[save_state_machine_as] ( identifier[path] = keyword[None] , identifier[recent_opened_notification] = keyword[False] , identifier[as_copy] = keyword[False] ):
literal[string]
identifier[state_machine_manager_model] = identifier[rafcon] . identifier[gui] . identifier[singleton] . identifier[state_machine_manager_model]
identifier[selected_state_machine_model] = identifier[state_machine_manager_model] . identifier[get_selected_state_machine_model] ()
keyword[if] identifier[selected_state_machine_model] keyword[is] keyword[None] :
identifier[logger] . identifier[warning] ( literal[string] )
keyword[return] keyword[False]
keyword[if] identifier[path] keyword[is] keyword[None] :
keyword[if] identifier[interface] . identifier[create_folder_func] keyword[is] keyword[None] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[return] keyword[False]
identifier[folder_name] = identifier[selected_state_machine_model] . identifier[state_machine] . identifier[root_state] . identifier[name]
identifier[path] = identifier[interface] . identifier[create_folder_func] ( literal[string]
literal[string] ,
identifier[format_default_folder_name] ( identifier[folder_name] ))
keyword[if] identifier[path] keyword[is] keyword[None] :
identifier[logger] . identifier[warning] ( literal[string] )
keyword[return] keyword[False]
identifier[previous_path] = identifier[selected_state_machine_model] . identifier[state_machine] . identifier[file_system_path]
keyword[if] keyword[not] identifier[as_copy] :
identifier[marked_dirty] = identifier[selected_state_machine_model] . identifier[state_machine] . identifier[marked_dirty]
identifier[recent_opened_notification] = identifier[recent_opened_notification] keyword[and] ( keyword[not] identifier[previous_path] == identifier[path] keyword[or] identifier[marked_dirty] )
identifier[selected_state_machine_model] . identifier[state_machine] . identifier[file_system_path] = identifier[path]
identifier[result] = identifier[save_state_machine] ( identifier[delete_old_state_machine] = keyword[True] ,
identifier[recent_opened_notification] = identifier[recent_opened_notification] ,
identifier[as_copy] = identifier[as_copy] , identifier[copy_path] = identifier[path] )
identifier[library_manager_model] . identifier[state_machine_was_stored] ( identifier[selected_state_machine_model] , identifier[previous_path] )
keyword[return] identifier[result]
|
def save_state_machine_as(path=None, recent_opened_notification=False, as_copy=False):
""" Store selected state machine to path
If there is no handed path the interface dialog "create folder" is used to collect one. The state machine finally
is stored by the save_state_machine function.
:param str path: Path of state machine folder where selected state machine should be stored
:param bool recent_opened_notification: Flag to insert path of state machine into recent opened state machine paths
:param bool as_copy: Store state machine as copy flag e.g. without assigning path to state_machine.file_system_path
:return: True if successfully stored, False if the storing process was canceled or stopped by condition fail
:rtype bool:
"""
state_machine_manager_model = rafcon.gui.singleton.state_machine_manager_model
selected_state_machine_model = state_machine_manager_model.get_selected_state_machine_model()
if selected_state_machine_model is None:
logger.warning("Can not 'save state machine as' because no state machine is selected.")
return False # depends on [control=['if'], data=[]]
if path is None:
if interface.create_folder_func is None:
logger.error('No function defined for creating a folder')
return False # depends on [control=['if'], data=[]]
folder_name = selected_state_machine_model.state_machine.root_state.name
path = interface.create_folder_func('Please choose a root folder and a folder name for the state-machine. The default folder name is the name of the root state.', format_default_folder_name(folder_name))
if path is None:
logger.warning('No valid path specified')
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['path']]
previous_path = selected_state_machine_model.state_machine.file_system_path
if not as_copy:
marked_dirty = selected_state_machine_model.state_machine.marked_dirty
recent_opened_notification = recent_opened_notification and (not previous_path == path or marked_dirty)
selected_state_machine_model.state_machine.file_system_path = path # depends on [control=['if'], data=[]]
result = save_state_machine(delete_old_state_machine=True, recent_opened_notification=recent_opened_notification, as_copy=as_copy, copy_path=path)
library_manager_model.state_machine_was_stored(selected_state_machine_model, previous_path)
return result
|
def _setup_axes(cls, axes, info_axis=None, stat_axis=None, aliases=None,
slicers=None, axes_are_reversed=False, build_axes=True,
ns=None, docs=None):
"""Provide axes setup for the major PandasObjects.
Parameters
----------
axes : the names of the axes in order (lowest to highest)
info_axis_num : the axis of the selector dimension (int)
stat_axis_num : the number of axis for the default stats (int)
aliases : other names for a single axis (dict)
slicers : how axes slice to others (dict)
axes_are_reversed : boolean whether to treat passed axes as
reversed (DataFrame)
build_axes : setup the axis properties (default True)
"""
cls._AXIS_ORDERS = axes
cls._AXIS_NUMBERS = {a: i for i, a in enumerate(axes)}
cls._AXIS_LEN = len(axes)
cls._AXIS_ALIASES = aliases or dict()
cls._AXIS_IALIASES = {v: k for k, v in cls._AXIS_ALIASES.items()}
cls._AXIS_NAMES = dict(enumerate(axes))
cls._AXIS_SLICEMAP = slicers or None
cls._AXIS_REVERSED = axes_are_reversed
# typ
setattr(cls, '_typ', cls.__name__.lower())
# indexing support
cls._ix = None
if info_axis is not None:
cls._info_axis_number = info_axis
cls._info_axis_name = axes[info_axis]
if stat_axis is not None:
cls._stat_axis_number = stat_axis
cls._stat_axis_name = axes[stat_axis]
# setup the actual axis
if build_axes:
def set_axis(a, i):
setattr(cls, a, properties.AxisProperty(i, docs.get(a, a)))
cls._internal_names_set.add(a)
if axes_are_reversed:
m = cls._AXIS_LEN - 1
for i, a in cls._AXIS_NAMES.items():
set_axis(a, m - i)
else:
for i, a in cls._AXIS_NAMES.items():
set_axis(a, i)
assert not isinstance(ns, dict)
|
def function[_setup_axes, parameter[cls, axes, info_axis, stat_axis, aliases, slicers, axes_are_reversed, build_axes, ns, docs]]:
constant[Provide axes setup for the major PandasObjects.
Parameters
----------
axes : the names of the axes in order (lowest to highest)
info_axis_num : the axis of the selector dimension (int)
stat_axis_num : the number of axis for the default stats (int)
aliases : other names for a single axis (dict)
slicers : how axes slice to others (dict)
axes_are_reversed : boolean whether to treat passed axes as
reversed (DataFrame)
build_axes : setup the axis properties (default True)
]
name[cls]._AXIS_ORDERS assign[=] name[axes]
name[cls]._AXIS_NUMBERS assign[=] <ast.DictComp object at 0x7da18ede72e0>
name[cls]._AXIS_LEN assign[=] call[name[len], parameter[name[axes]]]
name[cls]._AXIS_ALIASES assign[=] <ast.BoolOp object at 0x7da18ede4850>
name[cls]._AXIS_IALIASES assign[=] <ast.DictComp object at 0x7da18ede7760>
name[cls]._AXIS_NAMES assign[=] call[name[dict], parameter[call[name[enumerate], parameter[name[axes]]]]]
name[cls]._AXIS_SLICEMAP assign[=] <ast.BoolOp object at 0x7da18ede78e0>
name[cls]._AXIS_REVERSED assign[=] name[axes_are_reversed]
call[name[setattr], parameter[name[cls], constant[_typ], call[name[cls].__name__.lower, parameter[]]]]
name[cls]._ix assign[=] constant[None]
if compare[name[info_axis] is_not constant[None]] begin[:]
name[cls]._info_axis_number assign[=] name[info_axis]
name[cls]._info_axis_name assign[=] call[name[axes]][name[info_axis]]
if compare[name[stat_axis] is_not constant[None]] begin[:]
name[cls]._stat_axis_number assign[=] name[stat_axis]
name[cls]._stat_axis_name assign[=] call[name[axes]][name[stat_axis]]
if name[build_axes] begin[:]
def function[set_axis, parameter[a, i]]:
call[name[setattr], parameter[name[cls], name[a], call[name[properties].AxisProperty, parameter[name[i], call[name[docs].get, parameter[name[a], name[a]]]]]]]
call[name[cls]._internal_names_set.add, parameter[name[a]]]
if name[axes_are_reversed] begin[:]
variable[m] assign[=] binary_operation[name[cls]._AXIS_LEN - constant[1]]
for taget[tuple[[<ast.Name object at 0x7da18ede5780>, <ast.Name object at 0x7da18ede4b20>]]] in starred[call[name[cls]._AXIS_NAMES.items, parameter[]]] begin[:]
call[name[set_axis], parameter[name[a], binary_operation[name[m] - name[i]]]]
assert[<ast.UnaryOp object at 0x7da18ede6b00>]
|
keyword[def] identifier[_setup_axes] ( identifier[cls] , identifier[axes] , identifier[info_axis] = keyword[None] , identifier[stat_axis] = keyword[None] , identifier[aliases] = keyword[None] ,
identifier[slicers] = keyword[None] , identifier[axes_are_reversed] = keyword[False] , identifier[build_axes] = keyword[True] ,
identifier[ns] = keyword[None] , identifier[docs] = keyword[None] ):
literal[string]
identifier[cls] . identifier[_AXIS_ORDERS] = identifier[axes]
identifier[cls] . identifier[_AXIS_NUMBERS] ={ identifier[a] : identifier[i] keyword[for] identifier[i] , identifier[a] keyword[in] identifier[enumerate] ( identifier[axes] )}
identifier[cls] . identifier[_AXIS_LEN] = identifier[len] ( identifier[axes] )
identifier[cls] . identifier[_AXIS_ALIASES] = identifier[aliases] keyword[or] identifier[dict] ()
identifier[cls] . identifier[_AXIS_IALIASES] ={ identifier[v] : identifier[k] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[cls] . identifier[_AXIS_ALIASES] . identifier[items] ()}
identifier[cls] . identifier[_AXIS_NAMES] = identifier[dict] ( identifier[enumerate] ( identifier[axes] ))
identifier[cls] . identifier[_AXIS_SLICEMAP] = identifier[slicers] keyword[or] keyword[None]
identifier[cls] . identifier[_AXIS_REVERSED] = identifier[axes_are_reversed]
identifier[setattr] ( identifier[cls] , literal[string] , identifier[cls] . identifier[__name__] . identifier[lower] ())
identifier[cls] . identifier[_ix] = keyword[None]
keyword[if] identifier[info_axis] keyword[is] keyword[not] keyword[None] :
identifier[cls] . identifier[_info_axis_number] = identifier[info_axis]
identifier[cls] . identifier[_info_axis_name] = identifier[axes] [ identifier[info_axis] ]
keyword[if] identifier[stat_axis] keyword[is] keyword[not] keyword[None] :
identifier[cls] . identifier[_stat_axis_number] = identifier[stat_axis]
identifier[cls] . identifier[_stat_axis_name] = identifier[axes] [ identifier[stat_axis] ]
keyword[if] identifier[build_axes] :
keyword[def] identifier[set_axis] ( identifier[a] , identifier[i] ):
identifier[setattr] ( identifier[cls] , identifier[a] , identifier[properties] . identifier[AxisProperty] ( identifier[i] , identifier[docs] . identifier[get] ( identifier[a] , identifier[a] )))
identifier[cls] . identifier[_internal_names_set] . identifier[add] ( identifier[a] )
keyword[if] identifier[axes_are_reversed] :
identifier[m] = identifier[cls] . identifier[_AXIS_LEN] - literal[int]
keyword[for] identifier[i] , identifier[a] keyword[in] identifier[cls] . identifier[_AXIS_NAMES] . identifier[items] ():
identifier[set_axis] ( identifier[a] , identifier[m] - identifier[i] )
keyword[else] :
keyword[for] identifier[i] , identifier[a] keyword[in] identifier[cls] . identifier[_AXIS_NAMES] . identifier[items] ():
identifier[set_axis] ( identifier[a] , identifier[i] )
keyword[assert] keyword[not] identifier[isinstance] ( identifier[ns] , identifier[dict] )
|
def _setup_axes(cls, axes, info_axis=None, stat_axis=None, aliases=None, slicers=None, axes_are_reversed=False, build_axes=True, ns=None, docs=None):
"""Provide axes setup for the major PandasObjects.
Parameters
----------
axes : the names of the axes in order (lowest to highest)
info_axis_num : the axis of the selector dimension (int)
stat_axis_num : the number of axis for the default stats (int)
aliases : other names for a single axis (dict)
slicers : how axes slice to others (dict)
axes_are_reversed : boolean whether to treat passed axes as
reversed (DataFrame)
build_axes : setup the axis properties (default True)
"""
cls._AXIS_ORDERS = axes
cls._AXIS_NUMBERS = {a: i for (i, a) in enumerate(axes)}
cls._AXIS_LEN = len(axes)
cls._AXIS_ALIASES = aliases or dict()
cls._AXIS_IALIASES = {v: k for (k, v) in cls._AXIS_ALIASES.items()}
cls._AXIS_NAMES = dict(enumerate(axes))
cls._AXIS_SLICEMAP = slicers or None
cls._AXIS_REVERSED = axes_are_reversed
# typ
setattr(cls, '_typ', cls.__name__.lower())
# indexing support
cls._ix = None
if info_axis is not None:
cls._info_axis_number = info_axis
cls._info_axis_name = axes[info_axis] # depends on [control=['if'], data=['info_axis']]
if stat_axis is not None:
cls._stat_axis_number = stat_axis
cls._stat_axis_name = axes[stat_axis] # depends on [control=['if'], data=['stat_axis']]
# setup the actual axis
if build_axes:
def set_axis(a, i):
setattr(cls, a, properties.AxisProperty(i, docs.get(a, a)))
cls._internal_names_set.add(a)
if axes_are_reversed:
m = cls._AXIS_LEN - 1
for (i, a) in cls._AXIS_NAMES.items():
set_axis(a, m - i) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
for (i, a) in cls._AXIS_NAMES.items():
set_axis(a, i) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
assert not isinstance(ns, dict)
|
def from_dict(d):
"""
Re-create the noise model from a dictionary representation.
:param Dict[str,Any] d: The dictionary representation.
:return: The restored noise model.
:rtype: NoiseModel
"""
return NoiseModel(
gates=[KrausModel.from_dict(t) for t in d["gates"]],
assignment_probs={int(qid): np.array(a) for qid, a in d["assignment_probs"].items()},
)
|
def function[from_dict, parameter[d]]:
constant[
Re-create the noise model from a dictionary representation.
:param Dict[str,Any] d: The dictionary representation.
:return: The restored noise model.
:rtype: NoiseModel
]
return[call[name[NoiseModel], parameter[]]]
|
keyword[def] identifier[from_dict] ( identifier[d] ):
literal[string]
keyword[return] identifier[NoiseModel] (
identifier[gates] =[ identifier[KrausModel] . identifier[from_dict] ( identifier[t] ) keyword[for] identifier[t] keyword[in] identifier[d] [ literal[string] ]],
identifier[assignment_probs] ={ identifier[int] ( identifier[qid] ): identifier[np] . identifier[array] ( identifier[a] ) keyword[for] identifier[qid] , identifier[a] keyword[in] identifier[d] [ literal[string] ]. identifier[items] ()},
)
|
def from_dict(d):
"""
Re-create the noise model from a dictionary representation.
:param Dict[str,Any] d: The dictionary representation.
:return: The restored noise model.
:rtype: NoiseModel
"""
return NoiseModel(gates=[KrausModel.from_dict(t) for t in d['gates']], assignment_probs={int(qid): np.array(a) for (qid, a) in d['assignment_probs'].items()})
|
def r(self,*args,**kwargs):
"""
NAME:
r
PURPOSE:
return spherical radius at time t
INPUT:
t - (optional) time at which to get the radius
ro= (Object-wide default) physical scale for distances to use to convert
use_physical= use to override Object-wide default for using a physical scale for output
OUTPUT:
r(t)
HISTORY:
2016-04-19 - Written - Bovy (UofT)
"""
thiso= self(*args,**kwargs)
onet= (len(thiso.shape) == 1)
if onet: return nu.sqrt(thiso[0]**2.+thiso[3]**2.)
else: return nu.sqrt(thiso[0,:]**2.+thiso[3,:]**2.)
|
def function[r, parameter[self]]:
constant[
NAME:
r
PURPOSE:
return spherical radius at time t
INPUT:
t - (optional) time at which to get the radius
ro= (Object-wide default) physical scale for distances to use to convert
use_physical= use to override Object-wide default for using a physical scale for output
OUTPUT:
r(t)
HISTORY:
2016-04-19 - Written - Bovy (UofT)
]
variable[thiso] assign[=] call[name[self], parameter[<ast.Starred object at 0x7da20c992260>]]
variable[onet] assign[=] compare[call[name[len], parameter[name[thiso].shape]] equal[==] constant[1]]
if name[onet] begin[:]
return[call[name[nu].sqrt, parameter[binary_operation[binary_operation[call[name[thiso]][constant[0]] ** constant[2.0]] + binary_operation[call[name[thiso]][constant[3]] ** constant[2.0]]]]]]
|
keyword[def] identifier[r] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[thiso] = identifier[self] (* identifier[args] ,** identifier[kwargs] )
identifier[onet] =( identifier[len] ( identifier[thiso] . identifier[shape] )== literal[int] )
keyword[if] identifier[onet] : keyword[return] identifier[nu] . identifier[sqrt] ( identifier[thiso] [ literal[int] ]** literal[int] + identifier[thiso] [ literal[int] ]** literal[int] )
keyword[else] : keyword[return] identifier[nu] . identifier[sqrt] ( identifier[thiso] [ literal[int] ,:]** literal[int] + identifier[thiso] [ literal[int] ,:]** literal[int] )
|
def r(self, *args, **kwargs):
"""
NAME:
r
PURPOSE:
return spherical radius at time t
INPUT:
t - (optional) time at which to get the radius
ro= (Object-wide default) physical scale for distances to use to convert
use_physical= use to override Object-wide default for using a physical scale for output
OUTPUT:
r(t)
HISTORY:
2016-04-19 - Written - Bovy (UofT)
"""
thiso = self(*args, **kwargs)
onet = len(thiso.shape) == 1
if onet:
return nu.sqrt(thiso[0] ** 2.0 + thiso[3] ** 2.0) # depends on [control=['if'], data=[]]
else:
return nu.sqrt(thiso[0, :] ** 2.0 + thiso[3, :] ** 2.0)
|
def preorder_iter_with_position(expression):
"""Iterate over the expression in preorder.
Also yields the position of each subexpression.
"""
yield expression, ()
if isinstance(expression, Operation):
for i, operand in enumerate(op_iter(expression)):
for child, pos in preorder_iter_with_position(operand):
yield child, (i, ) + pos
|
def function[preorder_iter_with_position, parameter[expression]]:
constant[Iterate over the expression in preorder.
Also yields the position of each subexpression.
]
<ast.Yield object at 0x7da207f005e0>
if call[name[isinstance], parameter[name[expression], name[Operation]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da207f00310>, <ast.Name object at 0x7da207f010f0>]]] in starred[call[name[enumerate], parameter[call[name[op_iter], parameter[name[expression]]]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da207f001f0>, <ast.Name object at 0x7da207f016c0>]]] in starred[call[name[preorder_iter_with_position], parameter[name[operand]]]] begin[:]
<ast.Yield object at 0x7da207f00d00>
|
keyword[def] identifier[preorder_iter_with_position] ( identifier[expression] ):
literal[string]
keyword[yield] identifier[expression] ,()
keyword[if] identifier[isinstance] ( identifier[expression] , identifier[Operation] ):
keyword[for] identifier[i] , identifier[operand] keyword[in] identifier[enumerate] ( identifier[op_iter] ( identifier[expression] )):
keyword[for] identifier[child] , identifier[pos] keyword[in] identifier[preorder_iter_with_position] ( identifier[operand] ):
keyword[yield] identifier[child] ,( identifier[i] ,)+ identifier[pos]
|
def preorder_iter_with_position(expression):
"""Iterate over the expression in preorder.
Also yields the position of each subexpression.
"""
yield (expression, ())
if isinstance(expression, Operation):
for (i, operand) in enumerate(op_iter(expression)):
for (child, pos) in preorder_iter_with_position(operand):
yield (child, (i,) + pos) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
|
def iou(preds, labels, C, EMPTY=1., ignore=None, per_image=False):
"""
Array of IoU for each (non ignored) class
"""
if not per_image:
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
iou = []
for i in range(C):
if i != ignore: # The ignored label is sometimes among predicted classes (ENet - CityScapes)
intersection = ((label == i) & (pred == i)).sum()
union = ((label == i) | ((pred == i) & (label != ignore))).sum()
if not union:
iou.append(EMPTY)
else:
iou.append(float(intersection) / union)
ious.append(iou)
ious = map(mean, zip(*ious)) # mean accross images if per_image
return 100 * np.array(ious)
|
def function[iou, parameter[preds, labels, C, EMPTY, ignore, per_image]]:
constant[
Array of IoU for each (non ignored) class
]
if <ast.UnaryOp object at 0x7da1b216cd90> begin[:]
<ast.Tuple object at 0x7da1b216f760> assign[=] tuple[[<ast.Tuple object at 0x7da1b216d7e0>, <ast.Tuple object at 0x7da1b216d0c0>]]
variable[ious] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b216e6b0>, <ast.Name object at 0x7da1b216d510>]]] in starred[call[name[zip], parameter[name[preds], name[labels]]]] begin[:]
variable[iou] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[name[C]]]] begin[:]
if compare[name[i] not_equal[!=] name[ignore]] begin[:]
variable[intersection] assign[=] call[binary_operation[compare[name[label] equal[==] name[i]] <ast.BitAnd object at 0x7da2590d6b60> compare[name[pred] equal[==] name[i]]].sum, parameter[]]
variable[union] assign[=] call[binary_operation[compare[name[label] equal[==] name[i]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[compare[name[pred] equal[==] name[i]] <ast.BitAnd object at 0x7da2590d6b60> compare[name[label] not_equal[!=] name[ignore]]]].sum, parameter[]]
if <ast.UnaryOp object at 0x7da1b1f19b10> begin[:]
call[name[iou].append, parameter[name[EMPTY]]]
call[name[ious].append, parameter[name[iou]]]
variable[ious] assign[=] call[name[map], parameter[name[mean], call[name[zip], parameter[<ast.Starred object at 0x7da1b1f188e0>]]]]
return[binary_operation[constant[100] * call[name[np].array, parameter[name[ious]]]]]
|
keyword[def] identifier[iou] ( identifier[preds] , identifier[labels] , identifier[C] , identifier[EMPTY] = literal[int] , identifier[ignore] = keyword[None] , identifier[per_image] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[per_image] :
identifier[preds] , identifier[labels] =( identifier[preds] ,),( identifier[labels] ,)
identifier[ious] =[]
keyword[for] identifier[pred] , identifier[label] keyword[in] identifier[zip] ( identifier[preds] , identifier[labels] ):
identifier[iou] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[C] ):
keyword[if] identifier[i] != identifier[ignore] :
identifier[intersection] =(( identifier[label] == identifier[i] )&( identifier[pred] == identifier[i] )). identifier[sum] ()
identifier[union] =(( identifier[label] == identifier[i] )|(( identifier[pred] == identifier[i] )&( identifier[label] != identifier[ignore] ))). identifier[sum] ()
keyword[if] keyword[not] identifier[union] :
identifier[iou] . identifier[append] ( identifier[EMPTY] )
keyword[else] :
identifier[iou] . identifier[append] ( identifier[float] ( identifier[intersection] )/ identifier[union] )
identifier[ious] . identifier[append] ( identifier[iou] )
identifier[ious] = identifier[map] ( identifier[mean] , identifier[zip] (* identifier[ious] ))
keyword[return] literal[int] * identifier[np] . identifier[array] ( identifier[ious] )
|
def iou(preds, labels, C, EMPTY=1.0, ignore=None, per_image=False):
"""
Array of IoU for each (non ignored) class
"""
if not per_image:
(preds, labels) = ((preds,), (labels,)) # depends on [control=['if'], data=[]]
ious = []
for (pred, label) in zip(preds, labels):
iou = []
for i in range(C):
if i != ignore: # The ignored label is sometimes among predicted classes (ENet - CityScapes)
intersection = ((label == i) & (pred == i)).sum()
union = ((label == i) | (pred == i) & (label != ignore)).sum()
if not union:
iou.append(EMPTY) # depends on [control=['if'], data=[]]
else:
iou.append(float(intersection) / union) # depends on [control=['if'], data=['i', 'ignore']] # depends on [control=['for'], data=['i']]
ious.append(iou) # depends on [control=['for'], data=[]]
ious = map(mean, zip(*ious)) # mean accross images if per_image
return 100 * np.array(ious)
|
def gen_chunks(self, gen):
"""Generates byte chunks of a given size.
Takes a bytes generator and yields chunks of a maximum of
``chunk_size`` bytes.
Parameters
----------
gen : generator
The bytes generator that produces the bytes
"""
for data in gen:
size = len(data)
if size < self.chunk_size:
yield data
else:
mv = buffer(data)
offset = 0
while offset < size:
nb = min(self.chunk_size, size - offset)
yield mv[offset:offset + nb]
offset += nb
|
def function[gen_chunks, parameter[self, gen]]:
constant[Generates byte chunks of a given size.
Takes a bytes generator and yields chunks of a maximum of
``chunk_size`` bytes.
Parameters
----------
gen : generator
The bytes generator that produces the bytes
]
for taget[name[data]] in starred[name[gen]] begin[:]
variable[size] assign[=] call[name[len], parameter[name[data]]]
if compare[name[size] less[<] name[self].chunk_size] begin[:]
<ast.Yield object at 0x7da18fe933d0>
|
keyword[def] identifier[gen_chunks] ( identifier[self] , identifier[gen] ):
literal[string]
keyword[for] identifier[data] keyword[in] identifier[gen] :
identifier[size] = identifier[len] ( identifier[data] )
keyword[if] identifier[size] < identifier[self] . identifier[chunk_size] :
keyword[yield] identifier[data]
keyword[else] :
identifier[mv] = identifier[buffer] ( identifier[data] )
identifier[offset] = literal[int]
keyword[while] identifier[offset] < identifier[size] :
identifier[nb] = identifier[min] ( identifier[self] . identifier[chunk_size] , identifier[size] - identifier[offset] )
keyword[yield] identifier[mv] [ identifier[offset] : identifier[offset] + identifier[nb] ]
identifier[offset] += identifier[nb]
|
def gen_chunks(self, gen):
"""Generates byte chunks of a given size.
Takes a bytes generator and yields chunks of a maximum of
``chunk_size`` bytes.
Parameters
----------
gen : generator
The bytes generator that produces the bytes
"""
for data in gen:
size = len(data)
if size < self.chunk_size:
yield data # depends on [control=['if'], data=[]]
else:
mv = buffer(data)
offset = 0
while offset < size:
nb = min(self.chunk_size, size - offset)
yield mv[offset:offset + nb]
offset += nb # depends on [control=['while'], data=['offset', 'size']] # depends on [control=['for'], data=['data']]
|
def parse_manifest(cls, session, url_or_manifest, **args):
"""
Attempt to parse a DASH manifest file and return its streams
:param session: Streamlink session instance
:param url_or_manifest: URL of the manifest file or an XML manifest string
:return: a dict of name -> DASHStream instances
"""
ret = {}
if url_or_manifest.startswith('<?xml'):
mpd = MPD(parse_xml(url_or_manifest, ignore_ns=True))
else:
res = session.http.get(url_or_manifest, **args)
url = res.url
urlp = list(urlparse(url))
urlp[2], _ = urlp[2].rsplit("/", 1)
mpd = MPD(session.http.xml(res, ignore_ns=True), base_url=urlunparse(urlp), url=url)
video, audio = [], []
# Search for suitable video and audio representations
for aset in mpd.periods[0].adaptationSets:
if aset.contentProtection:
raise PluginError("{} is protected by DRM".format(url))
for rep in aset.representations:
if rep.mimeType.startswith("video"):
video.append(rep)
elif rep.mimeType.startswith("audio"):
audio.append(rep)
if not video:
video = [None]
if not audio:
audio = [None]
locale = session.localization
locale_lang = locale.language
lang = None
available_languages = set()
# if the locale is explicitly set, prefer that language over others
for aud in audio:
if aud and aud.lang:
available_languages.add(aud.lang)
try:
if locale.explicit and aud.lang and Language.get(aud.lang) == locale_lang:
lang = aud.lang
except LookupError:
continue
if not lang:
# filter by the first language that appears
lang = audio[0] and audio[0].lang
log.debug("Available languages for DASH audio streams: {0} (using: {1})".format(", ".join(available_languages) or "NONE", lang or "n/a"))
# if the language is given by the stream, filter out other languages that do not match
if len(available_languages) > 1:
audio = list(filter(lambda a: a.lang is None or a.lang == lang, audio))
for vid, aud in itertools.product(video, audio):
stream = DASHStream(session, mpd, vid, aud, **args)
stream_name = []
if vid:
stream_name.append("{:0.0f}{}".format(vid.height or vid.bandwidth_rounded, "p" if vid.height else "k"))
if audio and len(audio) > 1:
stream_name.append("a{:0.0f}k".format(aud.bandwidth))
ret['+'.join(stream_name)] = stream
return ret
|
def function[parse_manifest, parameter[cls, session, url_or_manifest]]:
constant[
Attempt to parse a DASH manifest file and return its streams
:param session: Streamlink session instance
:param url_or_manifest: URL of the manifest file or an XML manifest string
:return: a dict of name -> DASHStream instances
]
variable[ret] assign[=] dictionary[[], []]
if call[name[url_or_manifest].startswith, parameter[constant[<?xml]]] begin[:]
variable[mpd] assign[=] call[name[MPD], parameter[call[name[parse_xml], parameter[name[url_or_manifest]]]]]
<ast.Tuple object at 0x7da20c76fe80> assign[=] tuple[[<ast.List object at 0x7da20c76fb80>, <ast.List object at 0x7da20c76fb20>]]
for taget[name[aset]] in starred[call[name[mpd].periods][constant[0]].adaptationSets] begin[:]
if name[aset].contentProtection begin[:]
<ast.Raise object at 0x7da207f02e60>
for taget[name[rep]] in starred[name[aset].representations] begin[:]
if call[name[rep].mimeType.startswith, parameter[constant[video]]] begin[:]
call[name[video].append, parameter[name[rep]]]
if <ast.UnaryOp object at 0x7da207f011b0> begin[:]
variable[video] assign[=] list[[<ast.Constant object at 0x7da207f03790>]]
if <ast.UnaryOp object at 0x7da207f03a60> begin[:]
variable[audio] assign[=] list[[<ast.Constant object at 0x7da207f031f0>]]
variable[locale] assign[=] name[session].localization
variable[locale_lang] assign[=] name[locale].language
variable[lang] assign[=] constant[None]
variable[available_languages] assign[=] call[name[set], parameter[]]
for taget[name[aud]] in starred[name[audio]] begin[:]
if <ast.BoolOp object at 0x7da207f00c10> begin[:]
call[name[available_languages].add, parameter[name[aud].lang]]
<ast.Try object at 0x7da207f03310>
if <ast.UnaryOp object at 0x7da207f00430> begin[:]
variable[lang] assign[=] <ast.BoolOp object at 0x7da207f03d60>
call[name[log].debug, parameter[call[constant[Available languages for DASH audio streams: {0} (using: {1})].format, parameter[<ast.BoolOp object at 0x7da207f010c0>, <ast.BoolOp object at 0x7da207f02d40>]]]]
if compare[call[name[len], parameter[name[available_languages]]] greater[>] constant[1]] begin[:]
variable[audio] assign[=] call[name[list], parameter[call[name[filter], parameter[<ast.Lambda object at 0x7da20c6e5600>, name[audio]]]]]
for taget[tuple[[<ast.Name object at 0x7da20c6e68c0>, <ast.Name object at 0x7da20c6e5e40>]]] in starred[call[name[itertools].product, parameter[name[video], name[audio]]]] begin[:]
variable[stream] assign[=] call[name[DASHStream], parameter[name[session], name[mpd], name[vid], name[aud]]]
variable[stream_name] assign[=] list[[]]
if name[vid] begin[:]
call[name[stream_name].append, parameter[call[constant[{:0.0f}{}].format, parameter[<ast.BoolOp object at 0x7da20c6e7070>, <ast.IfExp object at 0x7da20c6e5480>]]]]
if <ast.BoolOp object at 0x7da20c6e62f0> begin[:]
call[name[stream_name].append, parameter[call[constant[a{:0.0f}k].format, parameter[name[aud].bandwidth]]]]
call[name[ret]][call[constant[+].join, parameter[name[stream_name]]]] assign[=] name[stream]
return[name[ret]]
|
keyword[def] identifier[parse_manifest] ( identifier[cls] , identifier[session] , identifier[url_or_manifest] ,** identifier[args] ):
literal[string]
identifier[ret] ={}
keyword[if] identifier[url_or_manifest] . identifier[startswith] ( literal[string] ):
identifier[mpd] = identifier[MPD] ( identifier[parse_xml] ( identifier[url_or_manifest] , identifier[ignore_ns] = keyword[True] ))
keyword[else] :
identifier[res] = identifier[session] . identifier[http] . identifier[get] ( identifier[url_or_manifest] ,** identifier[args] )
identifier[url] = identifier[res] . identifier[url]
identifier[urlp] = identifier[list] ( identifier[urlparse] ( identifier[url] ))
identifier[urlp] [ literal[int] ], identifier[_] = identifier[urlp] [ literal[int] ]. identifier[rsplit] ( literal[string] , literal[int] )
identifier[mpd] = identifier[MPD] ( identifier[session] . identifier[http] . identifier[xml] ( identifier[res] , identifier[ignore_ns] = keyword[True] ), identifier[base_url] = identifier[urlunparse] ( identifier[urlp] ), identifier[url] = identifier[url] )
identifier[video] , identifier[audio] =[],[]
keyword[for] identifier[aset] keyword[in] identifier[mpd] . identifier[periods] [ literal[int] ]. identifier[adaptationSets] :
keyword[if] identifier[aset] . identifier[contentProtection] :
keyword[raise] identifier[PluginError] ( literal[string] . identifier[format] ( identifier[url] ))
keyword[for] identifier[rep] keyword[in] identifier[aset] . identifier[representations] :
keyword[if] identifier[rep] . identifier[mimeType] . identifier[startswith] ( literal[string] ):
identifier[video] . identifier[append] ( identifier[rep] )
keyword[elif] identifier[rep] . identifier[mimeType] . identifier[startswith] ( literal[string] ):
identifier[audio] . identifier[append] ( identifier[rep] )
keyword[if] keyword[not] identifier[video] :
identifier[video] =[ keyword[None] ]
keyword[if] keyword[not] identifier[audio] :
identifier[audio] =[ keyword[None] ]
identifier[locale] = identifier[session] . identifier[localization]
identifier[locale_lang] = identifier[locale] . identifier[language]
identifier[lang] = keyword[None]
identifier[available_languages] = identifier[set] ()
keyword[for] identifier[aud] keyword[in] identifier[audio] :
keyword[if] identifier[aud] keyword[and] identifier[aud] . identifier[lang] :
identifier[available_languages] . identifier[add] ( identifier[aud] . identifier[lang] )
keyword[try] :
keyword[if] identifier[locale] . identifier[explicit] keyword[and] identifier[aud] . identifier[lang] keyword[and] identifier[Language] . identifier[get] ( identifier[aud] . identifier[lang] )== identifier[locale_lang] :
identifier[lang] = identifier[aud] . identifier[lang]
keyword[except] identifier[LookupError] :
keyword[continue]
keyword[if] keyword[not] identifier[lang] :
identifier[lang] = identifier[audio] [ literal[int] ] keyword[and] identifier[audio] [ literal[int] ]. identifier[lang]
identifier[log] . identifier[debug] ( literal[string] . identifier[format] ( literal[string] . identifier[join] ( identifier[available_languages] ) keyword[or] literal[string] , identifier[lang] keyword[or] literal[string] ))
keyword[if] identifier[len] ( identifier[available_languages] )> literal[int] :
identifier[audio] = identifier[list] ( identifier[filter] ( keyword[lambda] identifier[a] : identifier[a] . identifier[lang] keyword[is] keyword[None] keyword[or] identifier[a] . identifier[lang] == identifier[lang] , identifier[audio] ))
keyword[for] identifier[vid] , identifier[aud] keyword[in] identifier[itertools] . identifier[product] ( identifier[video] , identifier[audio] ):
identifier[stream] = identifier[DASHStream] ( identifier[session] , identifier[mpd] , identifier[vid] , identifier[aud] ,** identifier[args] )
identifier[stream_name] =[]
keyword[if] identifier[vid] :
identifier[stream_name] . identifier[append] ( literal[string] . identifier[format] ( identifier[vid] . identifier[height] keyword[or] identifier[vid] . identifier[bandwidth_rounded] , literal[string] keyword[if] identifier[vid] . identifier[height] keyword[else] literal[string] ))
keyword[if] identifier[audio] keyword[and] identifier[len] ( identifier[audio] )> literal[int] :
identifier[stream_name] . identifier[append] ( literal[string] . identifier[format] ( identifier[aud] . identifier[bandwidth] ))
identifier[ret] [ literal[string] . identifier[join] ( identifier[stream_name] )]= identifier[stream]
keyword[return] identifier[ret]
|
def parse_manifest(cls, session, url_or_manifest, **args):
"""
Attempt to parse a DASH manifest file and return its streams
:param session: Streamlink session instance
:param url_or_manifest: URL of the manifest file or an XML manifest string
:return: a dict of name -> DASHStream instances
"""
ret = {}
if url_or_manifest.startswith('<?xml'):
mpd = MPD(parse_xml(url_or_manifest, ignore_ns=True)) # depends on [control=['if'], data=[]]
else:
res = session.http.get(url_or_manifest, **args)
url = res.url
urlp = list(urlparse(url))
(urlp[2], _) = urlp[2].rsplit('/', 1)
mpd = MPD(session.http.xml(res, ignore_ns=True), base_url=urlunparse(urlp), url=url)
(video, audio) = ([], [])
# Search for suitable video and audio representations
for aset in mpd.periods[0].adaptationSets:
if aset.contentProtection:
raise PluginError('{} is protected by DRM'.format(url)) # depends on [control=['if'], data=[]]
for rep in aset.representations:
if rep.mimeType.startswith('video'):
video.append(rep) # depends on [control=['if'], data=[]]
elif rep.mimeType.startswith('audio'):
audio.append(rep) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['rep']] # depends on [control=['for'], data=['aset']]
if not video:
video = [None] # depends on [control=['if'], data=[]]
if not audio:
audio = [None] # depends on [control=['if'], data=[]]
locale = session.localization
locale_lang = locale.language
lang = None
available_languages = set()
# if the locale is explicitly set, prefer that language over others
for aud in audio:
if aud and aud.lang:
available_languages.add(aud.lang)
try:
if locale.explicit and aud.lang and (Language.get(aud.lang) == locale_lang):
lang = aud.lang # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except LookupError:
continue # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['aud']]
if not lang:
# filter by the first language that appears
lang = audio[0] and audio[0].lang # depends on [control=['if'], data=[]]
log.debug('Available languages for DASH audio streams: {0} (using: {1})'.format(', '.join(available_languages) or 'NONE', lang or 'n/a'))
# if the language is given by the stream, filter out other languages that do not match
if len(available_languages) > 1:
audio = list(filter(lambda a: a.lang is None or a.lang == lang, audio)) # depends on [control=['if'], data=[]]
for (vid, aud) in itertools.product(video, audio):
stream = DASHStream(session, mpd, vid, aud, **args)
stream_name = []
if vid:
stream_name.append('{:0.0f}{}'.format(vid.height or vid.bandwidth_rounded, 'p' if vid.height else 'k')) # depends on [control=['if'], data=[]]
if audio and len(audio) > 1:
stream_name.append('a{:0.0f}k'.format(aud.bandwidth)) # depends on [control=['if'], data=[]]
ret['+'.join(stream_name)] = stream # depends on [control=['for'], data=[]]
return ret
|
def git_list_tags(repo_dir, with_messages=False):
"""Return a list of git tags for the git repo in `repo_dir`."""
command = ['git', 'tag', '-l']
if with_messages:
command.append('-n1')
raw = execute_git_command(command, repo_dir=repo_dir).splitlines()
output = [l.strip() for l in raw if l.strip()]
if with_messages:
output = [tuple(j.strip() for j in line.split(None, 1))
for line in output]
return output
|
def function[git_list_tags, parameter[repo_dir, with_messages]]:
constant[Return a list of git tags for the git repo in `repo_dir`.]
variable[command] assign[=] list[[<ast.Constant object at 0x7da1b09e85e0>, <ast.Constant object at 0x7da1b09eb220>, <ast.Constant object at 0x7da1b09ea890>]]
if name[with_messages] begin[:]
call[name[command].append, parameter[constant[-n1]]]
variable[raw] assign[=] call[call[name[execute_git_command], parameter[name[command]]].splitlines, parameter[]]
variable[output] assign[=] <ast.ListComp object at 0x7da1b09eae60>
if name[with_messages] begin[:]
variable[output] assign[=] <ast.ListComp object at 0x7da1b09eb9a0>
return[name[output]]
|
keyword[def] identifier[git_list_tags] ( identifier[repo_dir] , identifier[with_messages] = keyword[False] ):
literal[string]
identifier[command] =[ literal[string] , literal[string] , literal[string] ]
keyword[if] identifier[with_messages] :
identifier[command] . identifier[append] ( literal[string] )
identifier[raw] = identifier[execute_git_command] ( identifier[command] , identifier[repo_dir] = identifier[repo_dir] ). identifier[splitlines] ()
identifier[output] =[ identifier[l] . identifier[strip] () keyword[for] identifier[l] keyword[in] identifier[raw] keyword[if] identifier[l] . identifier[strip] ()]
keyword[if] identifier[with_messages] :
identifier[output] =[ identifier[tuple] ( identifier[j] . identifier[strip] () keyword[for] identifier[j] keyword[in] identifier[line] . identifier[split] ( keyword[None] , literal[int] ))
keyword[for] identifier[line] keyword[in] identifier[output] ]
keyword[return] identifier[output]
|
def git_list_tags(repo_dir, with_messages=False):
"""Return a list of git tags for the git repo in `repo_dir`."""
command = ['git', 'tag', '-l']
if with_messages:
command.append('-n1') # depends on [control=['if'], data=[]]
raw = execute_git_command(command, repo_dir=repo_dir).splitlines()
output = [l.strip() for l in raw if l.strip()]
if with_messages:
output = [tuple((j.strip() for j in line.split(None, 1))) for line in output] # depends on [control=['if'], data=[]]
return output
|
def _read_frame(self):
"""Read one frame"""
# Read the first line, ignore the title and try to get the time. The
# time field is optional.
line = self._get_line()
pos = line.rfind("t=")
if pos >= 0:
time = float(line[pos+2:])*picosecond
else:
time = 0.0
# Read the second line, the number of atoms must match with the first
# frame.
num_atoms = int(self._get_line())
if self.num_atoms is not None and self.num_atoms != num_atoms:
raise ValueError("The number of atoms must be the same over the entire file.")
# Read the atom lines
pos = np.zeros((num_atoms, 3), np.float32)
vel = np.zeros((num_atoms, 3), np.float32)
for i in range(num_atoms):
words = self._get_line()[22:].split()
pos[i, 0] = float(words[0])
pos[i, 1] = float(words[1])
pos[i, 2] = float(words[2])
vel[i, 0] = float(words[3])
vel[i, 1] = float(words[4])
vel[i, 2] = float(words[5])
pos *= nanometer
vel *= nanometer/picosecond
# Read the cell line
cell = np.zeros((3, 3), np.float32)
words = self._get_line().split()
if len(words) >= 3:
cell[0, 0] = float(words[0])
cell[1, 1] = float(words[1])
cell[2, 2] = float(words[2])
if len(words) == 9:
cell[1, 0] = float(words[3])
cell[2, 0] = float(words[4])
cell[0, 1] = float(words[5])
cell[2, 1] = float(words[6])
cell[0, 2] = float(words[7])
cell[1, 2] = float(words[8])
cell *= nanometer
return time, pos, vel, cell
|
def function[_read_frame, parameter[self]]:
constant[Read one frame]
variable[line] assign[=] call[name[self]._get_line, parameter[]]
variable[pos] assign[=] call[name[line].rfind, parameter[constant[t=]]]
if compare[name[pos] greater_or_equal[>=] constant[0]] begin[:]
variable[time] assign[=] binary_operation[call[name[float], parameter[call[name[line]][<ast.Slice object at 0x7da20c6c5030>]]] * name[picosecond]]
variable[num_atoms] assign[=] call[name[int], parameter[call[name[self]._get_line, parameter[]]]]
if <ast.BoolOp object at 0x7da20c6c7490> begin[:]
<ast.Raise object at 0x7da20c6c7070>
variable[pos] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da20c6c4520>, <ast.Constant object at 0x7da20c6c7910>]], name[np].float32]]
variable[vel] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Name object at 0x7da20c6c5630>, <ast.Constant object at 0x7da20c6c5150>]], name[np].float32]]
for taget[name[i]] in starred[call[name[range], parameter[name[num_atoms]]]] begin[:]
variable[words] assign[=] call[call[call[name[self]._get_line, parameter[]]][<ast.Slice object at 0x7da20c6c7400>].split, parameter[]]
call[name[pos]][tuple[[<ast.Name object at 0x7da20c6c6590>, <ast.Constant object at 0x7da20c6c5360>]]] assign[=] call[name[float], parameter[call[name[words]][constant[0]]]]
call[name[pos]][tuple[[<ast.Name object at 0x7da20c6e6650>, <ast.Constant object at 0x7da20c6e5e10>]]] assign[=] call[name[float], parameter[call[name[words]][constant[1]]]]
call[name[pos]][tuple[[<ast.Name object at 0x7da20c6e4d00>, <ast.Constant object at 0x7da20c6e4dc0>]]] assign[=] call[name[float], parameter[call[name[words]][constant[2]]]]
call[name[vel]][tuple[[<ast.Name object at 0x7da20c6e69e0>, <ast.Constant object at 0x7da20c6e6620>]]] assign[=] call[name[float], parameter[call[name[words]][constant[3]]]]
call[name[vel]][tuple[[<ast.Name object at 0x7da20c6e6e00>, <ast.Constant object at 0x7da20c6e4970>]]] assign[=] call[name[float], parameter[call[name[words]][constant[4]]]]
call[name[vel]][tuple[[<ast.Name object at 0x7da20c6e63b0>, <ast.Constant object at 0x7da20c6e7e50>]]] assign[=] call[name[float], parameter[call[name[words]][constant[5]]]]
<ast.AugAssign object at 0x7da20c6e6a40>
<ast.AugAssign object at 0x7da20c6e5b40>
variable[cell] assign[=] call[name[np].zeros, parameter[tuple[[<ast.Constant object at 0x7da20c6e4820>, <ast.Constant object at 0x7da20c6e6aa0>]], name[np].float32]]
variable[words] assign[=] call[call[name[self]._get_line, parameter[]].split, parameter[]]
if compare[call[name[len], parameter[name[words]]] greater_or_equal[>=] constant[3]] begin[:]
call[name[cell]][tuple[[<ast.Constant object at 0x7da20c6e4af0>, <ast.Constant object at 0x7da20c6e5ba0>]]] assign[=] call[name[float], parameter[call[name[words]][constant[0]]]]
call[name[cell]][tuple[[<ast.Constant object at 0x7da20c6e61d0>, <ast.Constant object at 0x7da20c6e43d0>]]] assign[=] call[name[float], parameter[call[name[words]][constant[1]]]]
call[name[cell]][tuple[[<ast.Constant object at 0x7da20c6e7130>, <ast.Constant object at 0x7da20c6e7910>]]] assign[=] call[name[float], parameter[call[name[words]][constant[2]]]]
if compare[call[name[len], parameter[name[words]]] equal[==] constant[9]] begin[:]
call[name[cell]][tuple[[<ast.Constant object at 0x7da20c6e5690>, <ast.Constant object at 0x7da20c6e6ec0>]]] assign[=] call[name[float], parameter[call[name[words]][constant[3]]]]
call[name[cell]][tuple[[<ast.Constant object at 0x7da20c6e4d30>, <ast.Constant object at 0x7da20c6e5630>]]] assign[=] call[name[float], parameter[call[name[words]][constant[4]]]]
call[name[cell]][tuple[[<ast.Constant object at 0x7da20c6e6950>, <ast.Constant object at 0x7da20c6e5330>]]] assign[=] call[name[float], parameter[call[name[words]][constant[5]]]]
call[name[cell]][tuple[[<ast.Constant object at 0x7da20c6e4ca0>, <ast.Constant object at 0x7da20c6e7550>]]] assign[=] call[name[float], parameter[call[name[words]][constant[6]]]]
call[name[cell]][tuple[[<ast.Constant object at 0x7da20c7c8e80>, <ast.Constant object at 0x7da20c7cb280>]]] assign[=] call[name[float], parameter[call[name[words]][constant[7]]]]
call[name[cell]][tuple[[<ast.Constant object at 0x7da20c7c97b0>, <ast.Constant object at 0x7da20c7c8af0>]]] assign[=] call[name[float], parameter[call[name[words]][constant[8]]]]
<ast.AugAssign object at 0x7da20c7ca2c0>
return[tuple[[<ast.Name object at 0x7da20c7cb5e0>, <ast.Name object at 0x7da20c7cbf40>, <ast.Name object at 0x7da20c7cb6a0>, <ast.Name object at 0x7da20c7c80a0>]]]
|
keyword[def] identifier[_read_frame] ( identifier[self] ):
literal[string]
identifier[line] = identifier[self] . identifier[_get_line] ()
identifier[pos] = identifier[line] . identifier[rfind] ( literal[string] )
keyword[if] identifier[pos] >= literal[int] :
identifier[time] = identifier[float] ( identifier[line] [ identifier[pos] + literal[int] :])* identifier[picosecond]
keyword[else] :
identifier[time] = literal[int]
identifier[num_atoms] = identifier[int] ( identifier[self] . identifier[_get_line] ())
keyword[if] identifier[self] . identifier[num_atoms] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[num_atoms] != identifier[num_atoms] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[pos] = identifier[np] . identifier[zeros] (( identifier[num_atoms] , literal[int] ), identifier[np] . identifier[float32] )
identifier[vel] = identifier[np] . identifier[zeros] (( identifier[num_atoms] , literal[int] ), identifier[np] . identifier[float32] )
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[num_atoms] ):
identifier[words] = identifier[self] . identifier[_get_line] ()[ literal[int] :]. identifier[split] ()
identifier[pos] [ identifier[i] , literal[int] ]= identifier[float] ( identifier[words] [ literal[int] ])
identifier[pos] [ identifier[i] , literal[int] ]= identifier[float] ( identifier[words] [ literal[int] ])
identifier[pos] [ identifier[i] , literal[int] ]= identifier[float] ( identifier[words] [ literal[int] ])
identifier[vel] [ identifier[i] , literal[int] ]= identifier[float] ( identifier[words] [ literal[int] ])
identifier[vel] [ identifier[i] , literal[int] ]= identifier[float] ( identifier[words] [ literal[int] ])
identifier[vel] [ identifier[i] , literal[int] ]= identifier[float] ( identifier[words] [ literal[int] ])
identifier[pos] *= identifier[nanometer]
identifier[vel] *= identifier[nanometer] / identifier[picosecond]
identifier[cell] = identifier[np] . identifier[zeros] (( literal[int] , literal[int] ), identifier[np] . identifier[float32] )
identifier[words] = identifier[self] . identifier[_get_line] (). identifier[split] ()
keyword[if] identifier[len] ( identifier[words] )>= literal[int] :
identifier[cell] [ literal[int] , literal[int] ]= identifier[float] ( identifier[words] [ literal[int] ])
identifier[cell] [ literal[int] , literal[int] ]= identifier[float] ( identifier[words] [ literal[int] ])
identifier[cell] [ literal[int] , literal[int] ]= identifier[float] ( identifier[words] [ literal[int] ])
keyword[if] identifier[len] ( identifier[words] )== literal[int] :
identifier[cell] [ literal[int] , literal[int] ]= identifier[float] ( identifier[words] [ literal[int] ])
identifier[cell] [ literal[int] , literal[int] ]= identifier[float] ( identifier[words] [ literal[int] ])
identifier[cell] [ literal[int] , literal[int] ]= identifier[float] ( identifier[words] [ literal[int] ])
identifier[cell] [ literal[int] , literal[int] ]= identifier[float] ( identifier[words] [ literal[int] ])
identifier[cell] [ literal[int] , literal[int] ]= identifier[float] ( identifier[words] [ literal[int] ])
identifier[cell] [ literal[int] , literal[int] ]= identifier[float] ( identifier[words] [ literal[int] ])
identifier[cell] *= identifier[nanometer]
keyword[return] identifier[time] , identifier[pos] , identifier[vel] , identifier[cell]
|
def _read_frame(self):
"""Read one frame"""
# Read the first line, ignore the title and try to get the time. The
# time field is optional.
line = self._get_line()
pos = line.rfind('t=')
if pos >= 0:
time = float(line[pos + 2:]) * picosecond # depends on [control=['if'], data=['pos']]
else:
time = 0.0
# Read the second line, the number of atoms must match with the first
# frame.
num_atoms = int(self._get_line())
if self.num_atoms is not None and self.num_atoms != num_atoms:
raise ValueError('The number of atoms must be the same over the entire file.') # depends on [control=['if'], data=[]]
# Read the atom lines
pos = np.zeros((num_atoms, 3), np.float32)
vel = np.zeros((num_atoms, 3), np.float32)
for i in range(num_atoms):
words = self._get_line()[22:].split()
pos[i, 0] = float(words[0])
pos[i, 1] = float(words[1])
pos[i, 2] = float(words[2])
vel[i, 0] = float(words[3])
vel[i, 1] = float(words[4])
vel[i, 2] = float(words[5]) # depends on [control=['for'], data=['i']]
pos *= nanometer
vel *= nanometer / picosecond
# Read the cell line
cell = np.zeros((3, 3), np.float32)
words = self._get_line().split()
if len(words) >= 3:
cell[0, 0] = float(words[0])
cell[1, 1] = float(words[1])
cell[2, 2] = float(words[2]) # depends on [control=['if'], data=[]]
if len(words) == 9:
cell[1, 0] = float(words[3])
cell[2, 0] = float(words[4])
cell[0, 1] = float(words[5])
cell[2, 1] = float(words[6])
cell[0, 2] = float(words[7])
cell[1, 2] = float(words[8]) # depends on [control=['if'], data=[]]
cell *= nanometer
return (time, pos, vel, cell)
|
def from_int(cls, integer):
"""
Constructs a `Deleted` using the `tinyint` value of the `rev_deleted`
column of the `revision` MariaDB table.
* DELETED_TEXT = 1
* DELETED_COMMENT = 2
* DELETED_USER = 4
* DELETED_RESTRICTED = 8
"""
bin_string = bin(integer)
return cls(
text=len(bin_string) >= 1 and bin_string[-1] == "1",
comment=len(bin_string) >= 2 and bin_string[-2] == "1",
user=len(bin_string) >= 3 and bin_string[-3] == "1",
restricted=len(bin_string) >= 4 and bin_string[-4] == "1"
)
|
def function[from_int, parameter[cls, integer]]:
constant[
Constructs a `Deleted` using the `tinyint` value of the `rev_deleted`
column of the `revision` MariaDB table.
* DELETED_TEXT = 1
* DELETED_COMMENT = 2
* DELETED_USER = 4
* DELETED_RESTRICTED = 8
]
variable[bin_string] assign[=] call[name[bin], parameter[name[integer]]]
return[call[name[cls], parameter[]]]
|
keyword[def] identifier[from_int] ( identifier[cls] , identifier[integer] ):
literal[string]
identifier[bin_string] = identifier[bin] ( identifier[integer] )
keyword[return] identifier[cls] (
identifier[text] = identifier[len] ( identifier[bin_string] )>= literal[int] keyword[and] identifier[bin_string] [- literal[int] ]== literal[string] ,
identifier[comment] = identifier[len] ( identifier[bin_string] )>= literal[int] keyword[and] identifier[bin_string] [- literal[int] ]== literal[string] ,
identifier[user] = identifier[len] ( identifier[bin_string] )>= literal[int] keyword[and] identifier[bin_string] [- literal[int] ]== literal[string] ,
identifier[restricted] = identifier[len] ( identifier[bin_string] )>= literal[int] keyword[and] identifier[bin_string] [- literal[int] ]== literal[string]
)
|
def from_int(cls, integer):
"""
Constructs a `Deleted` using the `tinyint` value of the `rev_deleted`
column of the `revision` MariaDB table.
* DELETED_TEXT = 1
* DELETED_COMMENT = 2
* DELETED_USER = 4
* DELETED_RESTRICTED = 8
"""
bin_string = bin(integer)
return cls(text=len(bin_string) >= 1 and bin_string[-1] == '1', comment=len(bin_string) >= 2 and bin_string[-2] == '1', user=len(bin_string) >= 3 and bin_string[-3] == '1', restricted=len(bin_string) >= 4 and bin_string[-4] == '1')
|
def serve(path=None, host=None, port=None, user_content=False, context=None,
username=None, password=None, render_offline=False,
render_wide=False, render_inline=False, api_url=None, title=None,
autorefresh=True, browser=False, quiet=None, grip_class=None):
"""
Starts a server to render the specified file or directory containing
a README.
"""
app = create_app(path, user_content, context, username, password,
render_offline, render_wide, render_inline, api_url,
title, None, autorefresh, quiet, grip_class)
app.run(host, port, open_browser=browser)
|
def function[serve, parameter[path, host, port, user_content, context, username, password, render_offline, render_wide, render_inline, api_url, title, autorefresh, browser, quiet, grip_class]]:
constant[
Starts a server to render the specified file or directory containing
a README.
]
variable[app] assign[=] call[name[create_app], parameter[name[path], name[user_content], name[context], name[username], name[password], name[render_offline], name[render_wide], name[render_inline], name[api_url], name[title], constant[None], name[autorefresh], name[quiet], name[grip_class]]]
call[name[app].run, parameter[name[host], name[port]]]
|
keyword[def] identifier[serve] ( identifier[path] = keyword[None] , identifier[host] = keyword[None] , identifier[port] = keyword[None] , identifier[user_content] = keyword[False] , identifier[context] = keyword[None] ,
identifier[username] = keyword[None] , identifier[password] = keyword[None] , identifier[render_offline] = keyword[False] ,
identifier[render_wide] = keyword[False] , identifier[render_inline] = keyword[False] , identifier[api_url] = keyword[None] , identifier[title] = keyword[None] ,
identifier[autorefresh] = keyword[True] , identifier[browser] = keyword[False] , identifier[quiet] = keyword[None] , identifier[grip_class] = keyword[None] ):
literal[string]
identifier[app] = identifier[create_app] ( identifier[path] , identifier[user_content] , identifier[context] , identifier[username] , identifier[password] ,
identifier[render_offline] , identifier[render_wide] , identifier[render_inline] , identifier[api_url] ,
identifier[title] , keyword[None] , identifier[autorefresh] , identifier[quiet] , identifier[grip_class] )
identifier[app] . identifier[run] ( identifier[host] , identifier[port] , identifier[open_browser] = identifier[browser] )
|
def serve(path=None, host=None, port=None, user_content=False, context=None, username=None, password=None, render_offline=False, render_wide=False, render_inline=False, api_url=None, title=None, autorefresh=True, browser=False, quiet=None, grip_class=None):
"""
Starts a server to render the specified file or directory containing
a README.
"""
app = create_app(path, user_content, context, username, password, render_offline, render_wide, render_inline, api_url, title, None, autorefresh, quiet, grip_class)
app.run(host, port, open_browser=browser)
|
def DynamicCmd(name, plugins):
"""
Returns a cmd with the added plugins,
:param name: TODO:
:param plugins: list of plugins
"""
exec('class %s(cmd.Cmd):\n prompt="cm> "' % name)
plugin_objects = []
for plugin in plugins:
classprefix = plugin['class']
plugin_list = plugin['plugins']
plugin_objects = plugin_objects + \
load_plugins(classprefix, plugin_list)
exec_command = make_cmd_class(name, *plugin_objects)()
return (exec_command, plugin_objects)
|
def function[DynamicCmd, parameter[name, plugins]]:
constant[
Returns a cmd with the added plugins,
:param name: TODO:
:param plugins: list of plugins
]
call[name[exec], parameter[binary_operation[constant[class %s(cmd.Cmd):
prompt="cm> "] <ast.Mod object at 0x7da2590d6920> name[name]]]]
variable[plugin_objects] assign[=] list[[]]
for taget[name[plugin]] in starred[name[plugins]] begin[:]
variable[classprefix] assign[=] call[name[plugin]][constant[class]]
variable[plugin_list] assign[=] call[name[plugin]][constant[plugins]]
variable[plugin_objects] assign[=] binary_operation[name[plugin_objects] + call[name[load_plugins], parameter[name[classprefix], name[plugin_list]]]]
variable[exec_command] assign[=] call[call[name[make_cmd_class], parameter[name[name], <ast.Starred object at 0x7da2045643a0>]], parameter[]]
return[tuple[[<ast.Name object at 0x7da204567ca0>, <ast.Name object at 0x7da204566cb0>]]]
|
keyword[def] identifier[DynamicCmd] ( identifier[name] , identifier[plugins] ):
literal[string]
identifier[exec] ( literal[string] % identifier[name] )
identifier[plugin_objects] =[]
keyword[for] identifier[plugin] keyword[in] identifier[plugins] :
identifier[classprefix] = identifier[plugin] [ literal[string] ]
identifier[plugin_list] = identifier[plugin] [ literal[string] ]
identifier[plugin_objects] = identifier[plugin_objects] + identifier[load_plugins] ( identifier[classprefix] , identifier[plugin_list] )
identifier[exec_command] = identifier[make_cmd_class] ( identifier[name] ,* identifier[plugin_objects] )()
keyword[return] ( identifier[exec_command] , identifier[plugin_objects] )
|
def DynamicCmd(name, plugins):
"""
Returns a cmd with the added plugins,
:param name: TODO:
:param plugins: list of plugins
"""
exec('class %s(cmd.Cmd):\n prompt="cm> "' % name)
plugin_objects = []
for plugin in plugins:
classprefix = plugin['class']
plugin_list = plugin['plugins']
plugin_objects = plugin_objects + load_plugins(classprefix, plugin_list) # depends on [control=['for'], data=['plugin']]
exec_command = make_cmd_class(name, *plugin_objects)()
return (exec_command, plugin_objects)
|
def csv_dumper(**kwargs):
"""dump data to csv"""
logging.info("dumping to csv")
barn = kwargs["barn"]
farms = kwargs["farms"]
experiments = kwargs["experiments"]
for experiment, farm in zip(experiments, farms):
name = experiment.journal.name
project = experiment.journal.project
project_dir, batch_dir, raw_dir = \
experiment.journal.paginate()
if batch_dir is None:
logging.info("have to generate folder-name on the fly")
out_data_dir, project_dir, batch_dir, raw_dir = \
generate_folder_names(name, project)
if barn == "batch_dir":
out_dir = batch_dir
elif barn == "project_dir":
out_dir = project_dir
elif barn == "raw_dir":
out_dir = raw_dir
else:
out_dir = barn
for animal in farm:
file_name = os.path.join(
out_dir, "summary_%s_%s.csv" % (
animal.name,
name
)
)
logging.info(f"> {file_name}")
animal.to_csv(file_name, sep=prms.Reader.sep)
|
def function[csv_dumper, parameter[]]:
constant[dump data to csv]
call[name[logging].info, parameter[constant[dumping to csv]]]
variable[barn] assign[=] call[name[kwargs]][constant[barn]]
variable[farms] assign[=] call[name[kwargs]][constant[farms]]
variable[experiments] assign[=] call[name[kwargs]][constant[experiments]]
for taget[tuple[[<ast.Name object at 0x7da1b1b9fa90>, <ast.Name object at 0x7da1b1b9e7d0>]]] in starred[call[name[zip], parameter[name[experiments], name[farms]]]] begin[:]
variable[name] assign[=] name[experiment].journal.name
variable[project] assign[=] name[experiment].journal.project
<ast.Tuple object at 0x7da1b1b9e860> assign[=] call[name[experiment].journal.paginate, parameter[]]
if compare[name[batch_dir] is constant[None]] begin[:]
call[name[logging].info, parameter[constant[have to generate folder-name on the fly]]]
<ast.Tuple object at 0x7da1b1b9e650> assign[=] call[name[generate_folder_names], parameter[name[name], name[project]]]
if compare[name[barn] equal[==] constant[batch_dir]] begin[:]
variable[out_dir] assign[=] name[batch_dir]
for taget[name[animal]] in starred[name[farm]] begin[:]
variable[file_name] assign[=] call[name[os].path.join, parameter[name[out_dir], binary_operation[constant[summary_%s_%s.csv] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b198ece0>, <ast.Name object at 0x7da1b198de70>]]]]]
call[name[logging].info, parameter[<ast.JoinedStr object at 0x7da1b198ee30>]]
call[name[animal].to_csv, parameter[name[file_name]]]
|
keyword[def] identifier[csv_dumper] (** identifier[kwargs] ):
literal[string]
identifier[logging] . identifier[info] ( literal[string] )
identifier[barn] = identifier[kwargs] [ literal[string] ]
identifier[farms] = identifier[kwargs] [ literal[string] ]
identifier[experiments] = identifier[kwargs] [ literal[string] ]
keyword[for] identifier[experiment] , identifier[farm] keyword[in] identifier[zip] ( identifier[experiments] , identifier[farms] ):
identifier[name] = identifier[experiment] . identifier[journal] . identifier[name]
identifier[project] = identifier[experiment] . identifier[journal] . identifier[project]
identifier[project_dir] , identifier[batch_dir] , identifier[raw_dir] = identifier[experiment] . identifier[journal] . identifier[paginate] ()
keyword[if] identifier[batch_dir] keyword[is] keyword[None] :
identifier[logging] . identifier[info] ( literal[string] )
identifier[out_data_dir] , identifier[project_dir] , identifier[batch_dir] , identifier[raw_dir] = identifier[generate_folder_names] ( identifier[name] , identifier[project] )
keyword[if] identifier[barn] == literal[string] :
identifier[out_dir] = identifier[batch_dir]
keyword[elif] identifier[barn] == literal[string] :
identifier[out_dir] = identifier[project_dir]
keyword[elif] identifier[barn] == literal[string] :
identifier[out_dir] = identifier[raw_dir]
keyword[else] :
identifier[out_dir] = identifier[barn]
keyword[for] identifier[animal] keyword[in] identifier[farm] :
identifier[file_name] = identifier[os] . identifier[path] . identifier[join] (
identifier[out_dir] , literal[string] %(
identifier[animal] . identifier[name] ,
identifier[name]
)
)
identifier[logging] . identifier[info] ( literal[string] )
identifier[animal] . identifier[to_csv] ( identifier[file_name] , identifier[sep] = identifier[prms] . identifier[Reader] . identifier[sep] )
|
def csv_dumper(**kwargs):
"""dump data to csv"""
logging.info('dumping to csv')
barn = kwargs['barn']
farms = kwargs['farms']
experiments = kwargs['experiments']
for (experiment, farm) in zip(experiments, farms):
name = experiment.journal.name
project = experiment.journal.project
(project_dir, batch_dir, raw_dir) = experiment.journal.paginate()
if batch_dir is None:
logging.info('have to generate folder-name on the fly')
(out_data_dir, project_dir, batch_dir, raw_dir) = generate_folder_names(name, project) # depends on [control=['if'], data=['batch_dir']]
if barn == 'batch_dir':
out_dir = batch_dir # depends on [control=['if'], data=[]]
elif barn == 'project_dir':
out_dir = project_dir # depends on [control=['if'], data=[]]
elif barn == 'raw_dir':
out_dir = raw_dir # depends on [control=['if'], data=[]]
else:
out_dir = barn
for animal in farm:
file_name = os.path.join(out_dir, 'summary_%s_%s.csv' % (animal.name, name))
logging.info(f'> {file_name}')
animal.to_csv(file_name, sep=prms.Reader.sep) # depends on [control=['for'], data=['animal']] # depends on [control=['for'], data=[]]
|
def update(self, other):
"""
Updates this cache mixin with results discovered by the other split off one.
"""
acceptable_models = [ m for m in other._models if set(m.model.keys()) == self.variables ]
self._models.update(acceptable_models)
self._eval_exhausted.update(other._eval_exhausted)
self._max_exhausted.update(other._max_exhausted)
self._min_exhausted.update(other._min_exhausted)
|
def function[update, parameter[self, other]]:
constant[
Updates this cache mixin with results discovered by the other split off one.
]
variable[acceptable_models] assign[=] <ast.ListComp object at 0x7da18dc99960>
call[name[self]._models.update, parameter[name[acceptable_models]]]
call[name[self]._eval_exhausted.update, parameter[name[other]._eval_exhausted]]
call[name[self]._max_exhausted.update, parameter[name[other]._max_exhausted]]
call[name[self]._min_exhausted.update, parameter[name[other]._min_exhausted]]
|
keyword[def] identifier[update] ( identifier[self] , identifier[other] ):
literal[string]
identifier[acceptable_models] =[ identifier[m] keyword[for] identifier[m] keyword[in] identifier[other] . identifier[_models] keyword[if] identifier[set] ( identifier[m] . identifier[model] . identifier[keys] ())== identifier[self] . identifier[variables] ]
identifier[self] . identifier[_models] . identifier[update] ( identifier[acceptable_models] )
identifier[self] . identifier[_eval_exhausted] . identifier[update] ( identifier[other] . identifier[_eval_exhausted] )
identifier[self] . identifier[_max_exhausted] . identifier[update] ( identifier[other] . identifier[_max_exhausted] )
identifier[self] . identifier[_min_exhausted] . identifier[update] ( identifier[other] . identifier[_min_exhausted] )
|
def update(self, other):
"""
Updates this cache mixin with results discovered by the other split off one.
"""
acceptable_models = [m for m in other._models if set(m.model.keys()) == self.variables]
self._models.update(acceptable_models)
self._eval_exhausted.update(other._eval_exhausted)
self._max_exhausted.update(other._max_exhausted)
self._min_exhausted.update(other._min_exhausted)
|
def load_scoring_function(scoring_func):
"""
converts mymodule.myfunc in the myfunc
object itself so tpot receives a scoring function
"""
if scoring_func and ("." in scoring_func):
try:
module_name, func_name = scoring_func.rsplit('.', 1)
module_path = os.getcwd()
sys.path.insert(0, module_path)
scoring_func = getattr(import_module(module_name), func_name)
sys.path.pop(0)
print('manual scoring function: {}'.format(scoring_func))
print('taken from module: {}'.format(module_name))
except Exception as e:
print('failed importing custom scoring function, error: {}'.format(str(e)))
raise ValueError(e)
return scoring_func
|
def function[load_scoring_function, parameter[scoring_func]]:
constant[
converts mymodule.myfunc in the myfunc
object itself so tpot receives a scoring function
]
if <ast.BoolOp object at 0x7da2043467a0> begin[:]
<ast.Try object at 0x7da2043454b0>
return[name[scoring_func]]
|
keyword[def] identifier[load_scoring_function] ( identifier[scoring_func] ):
literal[string]
keyword[if] identifier[scoring_func] keyword[and] ( literal[string] keyword[in] identifier[scoring_func] ):
keyword[try] :
identifier[module_name] , identifier[func_name] = identifier[scoring_func] . identifier[rsplit] ( literal[string] , literal[int] )
identifier[module_path] = identifier[os] . identifier[getcwd] ()
identifier[sys] . identifier[path] . identifier[insert] ( literal[int] , identifier[module_path] )
identifier[scoring_func] = identifier[getattr] ( identifier[import_module] ( identifier[module_name] ), identifier[func_name] )
identifier[sys] . identifier[path] . identifier[pop] ( literal[int] )
identifier[print] ( literal[string] . identifier[format] ( identifier[scoring_func] ))
identifier[print] ( literal[string] . identifier[format] ( identifier[module_name] ))
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[print] ( literal[string] . identifier[format] ( identifier[str] ( identifier[e] )))
keyword[raise] identifier[ValueError] ( identifier[e] )
keyword[return] identifier[scoring_func]
|
def load_scoring_function(scoring_func):
"""
converts mymodule.myfunc in the myfunc
object itself so tpot receives a scoring function
"""
if scoring_func and '.' in scoring_func:
try:
(module_name, func_name) = scoring_func.rsplit('.', 1)
module_path = os.getcwd()
sys.path.insert(0, module_path)
scoring_func = getattr(import_module(module_name), func_name)
sys.path.pop(0)
print('manual scoring function: {}'.format(scoring_func))
print('taken from module: {}'.format(module_name)) # depends on [control=['try'], data=[]]
except Exception as e:
print('failed importing custom scoring function, error: {}'.format(str(e)))
raise ValueError(e) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
return scoring_func
|
def draw_chimera_yield(G, **kwargs):
"""Draws the given graph G with highlighted faults, according to layout.
Parameters
----------
G : NetworkX graph
The graph to be parsed for faults
unused_color : tuple or color string (optional, default (0.9,0.9,0.9,1.0))
The color to use for nodes and edges of G which are not faults.
If unused_color is None, these nodes and edges will not be shown at all.
fault_color : tuple or color string (optional, default (1.0,0.0,0.0,1.0))
A color to represent nodes absent from the graph G. Colors should be
length-4 tuples of floats between 0 and 1 inclusive.
fault_shape : string, optional (default='x')
The shape of the fault nodes. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8'.
fault_style : string, optional (default='dashed')
Edge fault line style (solid|dashed|dotted,dashdot)
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the `pos` parameter which is not used by this
function. If `linear_biases` or `quadratic_biases` are provided,
any provided `node_color` or `edge_color` arguments are ignored.
"""
try:
assert(G.graph["family"] == "chimera")
m = G.graph["columns"]
n = G.graph["rows"]
t = G.graph["tile"]
coordinates = G.graph["labels"] == "coordinate"
except:
raise ValueError("Target chimera graph needs to have columns, rows, \
tile, and label attributes to be able to identify faulty qubits.")
perfect_graph = chimera_graph(m,n,t, coordinates=coordinates)
draw_yield(G, chimera_layout(perfect_graph), perfect_graph, **kwargs)
|
def function[draw_chimera_yield, parameter[G]]:
constant[Draws the given graph G with highlighted faults, according to layout.
Parameters
----------
G : NetworkX graph
The graph to be parsed for faults
unused_color : tuple or color string (optional, default (0.9,0.9,0.9,1.0))
The color to use for nodes and edges of G which are not faults.
If unused_color is None, these nodes and edges will not be shown at all.
fault_color : tuple or color string (optional, default (1.0,0.0,0.0,1.0))
A color to represent nodes absent from the graph G. Colors should be
length-4 tuples of floats between 0 and 1 inclusive.
fault_shape : string, optional (default='x')
The shape of the fault nodes. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8'.
fault_style : string, optional (default='dashed')
Edge fault line style (solid|dashed|dotted,dashdot)
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the `pos` parameter which is not used by this
function. If `linear_biases` or `quadratic_biases` are provided,
any provided `node_color` or `edge_color` arguments are ignored.
]
<ast.Try object at 0x7da1b08ba6e0>
variable[perfect_graph] assign[=] call[name[chimera_graph], parameter[name[m], name[n], name[t]]]
call[name[draw_yield], parameter[name[G], call[name[chimera_layout], parameter[name[perfect_graph]]], name[perfect_graph]]]
|
keyword[def] identifier[draw_chimera_yield] ( identifier[G] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
keyword[assert] ( identifier[G] . identifier[graph] [ literal[string] ]== literal[string] )
identifier[m] = identifier[G] . identifier[graph] [ literal[string] ]
identifier[n] = identifier[G] . identifier[graph] [ literal[string] ]
identifier[t] = identifier[G] . identifier[graph] [ literal[string] ]
identifier[coordinates] = identifier[G] . identifier[graph] [ literal[string] ]== literal[string]
keyword[except] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[perfect_graph] = identifier[chimera_graph] ( identifier[m] , identifier[n] , identifier[t] , identifier[coordinates] = identifier[coordinates] )
identifier[draw_yield] ( identifier[G] , identifier[chimera_layout] ( identifier[perfect_graph] ), identifier[perfect_graph] ,** identifier[kwargs] )
|
def draw_chimera_yield(G, **kwargs):
"""Draws the given graph G with highlighted faults, according to layout.
Parameters
----------
G : NetworkX graph
The graph to be parsed for faults
unused_color : tuple or color string (optional, default (0.9,0.9,0.9,1.0))
The color to use for nodes and edges of G which are not faults.
If unused_color is None, these nodes and edges will not be shown at all.
fault_color : tuple or color string (optional, default (1.0,0.0,0.0,1.0))
A color to represent nodes absent from the graph G. Colors should be
length-4 tuples of floats between 0 and 1 inclusive.
fault_shape : string, optional (default='x')
The shape of the fault nodes. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8'.
fault_style : string, optional (default='dashed')
Edge fault line style (solid|dashed|dotted,dashdot)
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the `pos` parameter which is not used by this
function. If `linear_biases` or `quadratic_biases` are provided,
any provided `node_color` or `edge_color` arguments are ignored.
"""
try:
assert G.graph['family'] == 'chimera'
m = G.graph['columns']
n = G.graph['rows']
t = G.graph['tile']
coordinates = G.graph['labels'] == 'coordinate' # depends on [control=['try'], data=[]]
except:
raise ValueError('Target chimera graph needs to have columns, rows, tile, and label attributes to be able to identify faulty qubits.') # depends on [control=['except'], data=[]]
perfect_graph = chimera_graph(m, n, t, coordinates=coordinates)
draw_yield(G, chimera_layout(perfect_graph), perfect_graph, **kwargs)
|
def WriteVarBytes(self, value, endian="<"):
"""
Write an integer value in a space saving way to the stream.
Read more about variable size encoding here: http://docs.neo.org/en-us/node/network-protocol.html#convention
Args:
value (bytes):
endian (str): specify the endianness. (Default) Little endian ('<'). Use '>' for big endian.
Returns:
int: the number of bytes written.
"""
length = len(value)
self.WriteVarInt(length, endian)
return self.WriteBytes(value, unhex=False)
|
def function[WriteVarBytes, parameter[self, value, endian]]:
constant[
Write an integer value in a space saving way to the stream.
Read more about variable size encoding here: http://docs.neo.org/en-us/node/network-protocol.html#convention
Args:
value (bytes):
endian (str): specify the endianness. (Default) Little endian ('<'). Use '>' for big endian.
Returns:
int: the number of bytes written.
]
variable[length] assign[=] call[name[len], parameter[name[value]]]
call[name[self].WriteVarInt, parameter[name[length], name[endian]]]
return[call[name[self].WriteBytes, parameter[name[value]]]]
|
keyword[def] identifier[WriteVarBytes] ( identifier[self] , identifier[value] , identifier[endian] = literal[string] ):
literal[string]
identifier[length] = identifier[len] ( identifier[value] )
identifier[self] . identifier[WriteVarInt] ( identifier[length] , identifier[endian] )
keyword[return] identifier[self] . identifier[WriteBytes] ( identifier[value] , identifier[unhex] = keyword[False] )
|
def WriteVarBytes(self, value, endian='<'):
"""
Write an integer value in a space saving way to the stream.
Read more about variable size encoding here: http://docs.neo.org/en-us/node/network-protocol.html#convention
Args:
value (bytes):
endian (str): specify the endianness. (Default) Little endian ('<'). Use '>' for big endian.
Returns:
int: the number of bytes written.
"""
length = len(value)
self.WriteVarInt(length, endian)
return self.WriteBytes(value, unhex=False)
|
def error(self, msg, n):
"""Raise a SyntaxError with the lineno and col_offset set to n's."""
raise SyntaxError(msg, n.lineno, n.col_offset,
filename=self.compile_info.filename)
|
def function[error, parameter[self, msg, n]]:
constant[Raise a SyntaxError with the lineno and col_offset set to n's.]
<ast.Raise object at 0x7da2054a78e0>
|
keyword[def] identifier[error] ( identifier[self] , identifier[msg] , identifier[n] ):
literal[string]
keyword[raise] identifier[SyntaxError] ( identifier[msg] , identifier[n] . identifier[lineno] , identifier[n] . identifier[col_offset] ,
identifier[filename] = identifier[self] . identifier[compile_info] . identifier[filename] )
|
def error(self, msg, n):
"""Raise a SyntaxError with the lineno and col_offset set to n's."""
raise SyntaxError(msg, n.lineno, n.col_offset, filename=self.compile_info.filename)
|
def basic_word_sim(word1, word2):
"""
Simple measure of similarity: Number of letters in common / max length
"""
return sum([1 for c in word1 if c in word2]) / max(len(word1), len(word2))
|
def function[basic_word_sim, parameter[word1, word2]]:
constant[
Simple measure of similarity: Number of letters in common / max length
]
return[binary_operation[call[name[sum], parameter[<ast.ListComp object at 0x7da2054a7790>]] / call[name[max], parameter[call[name[len], parameter[name[word1]]], call[name[len], parameter[name[word2]]]]]]]
|
keyword[def] identifier[basic_word_sim] ( identifier[word1] , identifier[word2] ):
literal[string]
keyword[return] identifier[sum] ([ literal[int] keyword[for] identifier[c] keyword[in] identifier[word1] keyword[if] identifier[c] keyword[in] identifier[word2] ])/ identifier[max] ( identifier[len] ( identifier[word1] ), identifier[len] ( identifier[word2] ))
|
def basic_word_sim(word1, word2):
"""
Simple measure of similarity: Number of letters in common / max length
"""
return sum([1 for c in word1 if c in word2]) / max(len(word1), len(word2))
|
def route_sns_task(event, context):
"""
Gets SNS Message, deserialises the message,
imports the function, calls the function with args
"""
record = event['Records'][0]
message = json.loads(
record['Sns']['Message']
)
return run_message(message)
|
def function[route_sns_task, parameter[event, context]]:
constant[
Gets SNS Message, deserialises the message,
imports the function, calls the function with args
]
variable[record] assign[=] call[call[name[event]][constant[Records]]][constant[0]]
variable[message] assign[=] call[name[json].loads, parameter[call[call[name[record]][constant[Sns]]][constant[Message]]]]
return[call[name[run_message], parameter[name[message]]]]
|
keyword[def] identifier[route_sns_task] ( identifier[event] , identifier[context] ):
literal[string]
identifier[record] = identifier[event] [ literal[string] ][ literal[int] ]
identifier[message] = identifier[json] . identifier[loads] (
identifier[record] [ literal[string] ][ literal[string] ]
)
keyword[return] identifier[run_message] ( identifier[message] )
|
def route_sns_task(event, context):
"""
Gets SNS Message, deserialises the message,
imports the function, calls the function with args
"""
record = event['Records'][0]
message = json.loads(record['Sns']['Message'])
return run_message(message)
|
def getElementType(self, elementKw):
""" return type name for given element keyword,
e.g. getElementType('Q01') should return string: 'QUAD'
"""
try:
etype = list(self.all_elements.get(elementKw.upper()).keys())[0]
except:
etype = self.all_elements.get(elementKw.upper())
return etype.upper()
|
def function[getElementType, parameter[self, elementKw]]:
constant[ return type name for given element keyword,
e.g. getElementType('Q01') should return string: 'QUAD'
]
<ast.Try object at 0x7da1b09bece0>
return[call[name[etype].upper, parameter[]]]
|
keyword[def] identifier[getElementType] ( identifier[self] , identifier[elementKw] ):
literal[string]
keyword[try] :
identifier[etype] = identifier[list] ( identifier[self] . identifier[all_elements] . identifier[get] ( identifier[elementKw] . identifier[upper] ()). identifier[keys] ())[ literal[int] ]
keyword[except] :
identifier[etype] = identifier[self] . identifier[all_elements] . identifier[get] ( identifier[elementKw] . identifier[upper] ())
keyword[return] identifier[etype] . identifier[upper] ()
|
def getElementType(self, elementKw):
""" return type name for given element keyword,
e.g. getElementType('Q01') should return string: 'QUAD'
"""
try:
etype = list(self.all_elements.get(elementKw.upper()).keys())[0] # depends on [control=['try'], data=[]]
except:
etype = self.all_elements.get(elementKw.upper()) # depends on [control=['except'], data=[]]
return etype.upper()
|
def expand(self, normalization='4pi', csphase=1, **kwargs):
"""
Expand the grid into spherical harmonics.
Usage
-----
clm = x.expand([normalization, csphase, lmax_calc])
Returns
-------
clm : SHCoeffs class instance
Parameters
----------
normalization : str, optional, default = '4pi'
Normalization of the output class: '4pi', 'ortho', 'schmidt', or
'unnorm', for geodesy 4pi normalized, orthonormalized, Schmidt
semi-normalized, or unnormalized coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
lmax_calc : int, optional, default = x.lmax
Maximum spherical harmonic degree to return.
"""
if type(normalization) != str:
raise ValueError('normalization must be a string. ' +
'Input type was {:s}'
.format(str(type(normalization))))
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError(
"The normalization must be '4pi', 'ortho', 'schmidt', " +
"or 'unnorm'. Input value was {:s}."
.format(repr(normalization))
)
if csphase != 1 and csphase != -1:
raise ValueError(
"csphase must be either 1 or -1. Input value was {:s}."
.format(repr(csphase))
)
return self._expand(normalization=normalization, csphase=csphase,
**kwargs)
|
def function[expand, parameter[self, normalization, csphase]]:
constant[
Expand the grid into spherical harmonics.
Usage
-----
clm = x.expand([normalization, csphase, lmax_calc])
Returns
-------
clm : SHCoeffs class instance
Parameters
----------
normalization : str, optional, default = '4pi'
Normalization of the output class: '4pi', 'ortho', 'schmidt', or
'unnorm', for geodesy 4pi normalized, orthonormalized, Schmidt
semi-normalized, or unnormalized coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
lmax_calc : int, optional, default = x.lmax
Maximum spherical harmonic degree to return.
]
if compare[call[name[type], parameter[name[normalization]]] not_equal[!=] name[str]] begin[:]
<ast.Raise object at 0x7da18bcc84c0>
if compare[call[name[normalization].lower, parameter[]] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da18bcc96c0>, <ast.Constant object at 0x7da18bcc8e50>, <ast.Constant object at 0x7da18bcc8bb0>, <ast.Constant object at 0x7da18bcc9a20>]]] begin[:]
<ast.Raise object at 0x7da18bcc82e0>
if <ast.BoolOp object at 0x7da18bcc8910> begin[:]
<ast.Raise object at 0x7da18bcc9270>
return[call[name[self]._expand, parameter[]]]
|
keyword[def] identifier[expand] ( identifier[self] , identifier[normalization] = literal[string] , identifier[csphase] = literal[int] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[type] ( identifier[normalization] )!= identifier[str] :
keyword[raise] identifier[ValueError] ( literal[string] +
literal[string]
. identifier[format] ( identifier[str] ( identifier[type] ( identifier[normalization] ))))
keyword[if] identifier[normalization] . identifier[lower] () keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ):
keyword[raise] identifier[ValueError] (
literal[string] +
literal[string]
. identifier[format] ( identifier[repr] ( identifier[normalization] ))
)
keyword[if] identifier[csphase] != literal[int] keyword[and] identifier[csphase] !=- literal[int] :
keyword[raise] identifier[ValueError] (
literal[string]
. identifier[format] ( identifier[repr] ( identifier[csphase] ))
)
keyword[return] identifier[self] . identifier[_expand] ( identifier[normalization] = identifier[normalization] , identifier[csphase] = identifier[csphase] ,
** identifier[kwargs] )
|
def expand(self, normalization='4pi', csphase=1, **kwargs):
"""
Expand the grid into spherical harmonics.
Usage
-----
clm = x.expand([normalization, csphase, lmax_calc])
Returns
-------
clm : SHCoeffs class instance
Parameters
----------
normalization : str, optional, default = '4pi'
Normalization of the output class: '4pi', 'ortho', 'schmidt', or
'unnorm', for geodesy 4pi normalized, orthonormalized, Schmidt
semi-normalized, or unnormalized coefficients, respectively.
csphase : int, optional, default = 1
Condon-Shortley phase convention: 1 to exclude the phase factor,
or -1 to include it.
lmax_calc : int, optional, default = x.lmax
Maximum spherical harmonic degree to return.
"""
if type(normalization) != str:
raise ValueError('normalization must be a string. ' + 'Input type was {:s}'.format(str(type(normalization)))) # depends on [control=['if'], data=['str']]
if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
raise ValueError("The normalization must be '4pi', 'ortho', 'schmidt', " + "or 'unnorm'. Input value was {:s}.".format(repr(normalization))) # depends on [control=['if'], data=[]]
if csphase != 1 and csphase != -1:
raise ValueError('csphase must be either 1 or -1. Input value was {:s}.'.format(repr(csphase))) # depends on [control=['if'], data=[]]
return self._expand(normalization=normalization, csphase=csphase, **kwargs)
|
def alter_field(self, model, old_field, new_field, strict=False):
"""Ran when the configuration on a field changed."""
is_old_field_hstore = isinstance(old_field, HStoreField)
is_new_field_hstore = isinstance(new_field, HStoreField)
if not is_old_field_hstore and not is_new_field_hstore:
return
old_uniqueness = getattr(old_field, 'uniqueness', []) or []
new_uniqueness = getattr(new_field, 'uniqueness', []) or []
# handle field renames before moving on
if str(old_field.column) != str(new_field.column):
for keys in self._iterate_uniqueness_keys(old_field):
self._rename_hstore_unique(
model._meta.db_table,
model._meta.db_table,
old_field,
new_field,
keys
)
# drop the indexes for keys that have been removed
for keys in old_uniqueness:
if keys not in new_uniqueness:
self._drop_hstore_unique(
model,
old_field,
self._compose_keys(keys)
)
# create new indexes for keys that have been added
for keys in new_uniqueness:
if keys not in old_uniqueness:
self._create_hstore_unique(
model,
new_field,
self._compose_keys(keys)
)
|
def function[alter_field, parameter[self, model, old_field, new_field, strict]]:
constant[Ran when the configuration on a field changed.]
variable[is_old_field_hstore] assign[=] call[name[isinstance], parameter[name[old_field], name[HStoreField]]]
variable[is_new_field_hstore] assign[=] call[name[isinstance], parameter[name[new_field], name[HStoreField]]]
if <ast.BoolOp object at 0x7da1b03a4580> begin[:]
return[None]
variable[old_uniqueness] assign[=] <ast.BoolOp object at 0x7da1b03a5db0>
variable[new_uniqueness] assign[=] <ast.BoolOp object at 0x7da1b03a5d20>
if compare[call[name[str], parameter[name[old_field].column]] not_equal[!=] call[name[str], parameter[name[new_field].column]]] begin[:]
for taget[name[keys]] in starred[call[name[self]._iterate_uniqueness_keys, parameter[name[old_field]]]] begin[:]
call[name[self]._rename_hstore_unique, parameter[name[model]._meta.db_table, name[model]._meta.db_table, name[old_field], name[new_field], name[keys]]]
for taget[name[keys]] in starred[name[old_uniqueness]] begin[:]
if compare[name[keys] <ast.NotIn object at 0x7da2590d7190> name[new_uniqueness]] begin[:]
call[name[self]._drop_hstore_unique, parameter[name[model], name[old_field], call[name[self]._compose_keys, parameter[name[keys]]]]]
for taget[name[keys]] in starred[name[new_uniqueness]] begin[:]
if compare[name[keys] <ast.NotIn object at 0x7da2590d7190> name[old_uniqueness]] begin[:]
call[name[self]._create_hstore_unique, parameter[name[model], name[new_field], call[name[self]._compose_keys, parameter[name[keys]]]]]
|
keyword[def] identifier[alter_field] ( identifier[self] , identifier[model] , identifier[old_field] , identifier[new_field] , identifier[strict] = keyword[False] ):
literal[string]
identifier[is_old_field_hstore] = identifier[isinstance] ( identifier[old_field] , identifier[HStoreField] )
identifier[is_new_field_hstore] = identifier[isinstance] ( identifier[new_field] , identifier[HStoreField] )
keyword[if] keyword[not] identifier[is_old_field_hstore] keyword[and] keyword[not] identifier[is_new_field_hstore] :
keyword[return]
identifier[old_uniqueness] = identifier[getattr] ( identifier[old_field] , literal[string] ,[]) keyword[or] []
identifier[new_uniqueness] = identifier[getattr] ( identifier[new_field] , literal[string] ,[]) keyword[or] []
keyword[if] identifier[str] ( identifier[old_field] . identifier[column] )!= identifier[str] ( identifier[new_field] . identifier[column] ):
keyword[for] identifier[keys] keyword[in] identifier[self] . identifier[_iterate_uniqueness_keys] ( identifier[old_field] ):
identifier[self] . identifier[_rename_hstore_unique] (
identifier[model] . identifier[_meta] . identifier[db_table] ,
identifier[model] . identifier[_meta] . identifier[db_table] ,
identifier[old_field] ,
identifier[new_field] ,
identifier[keys]
)
keyword[for] identifier[keys] keyword[in] identifier[old_uniqueness] :
keyword[if] identifier[keys] keyword[not] keyword[in] identifier[new_uniqueness] :
identifier[self] . identifier[_drop_hstore_unique] (
identifier[model] ,
identifier[old_field] ,
identifier[self] . identifier[_compose_keys] ( identifier[keys] )
)
keyword[for] identifier[keys] keyword[in] identifier[new_uniqueness] :
keyword[if] identifier[keys] keyword[not] keyword[in] identifier[old_uniqueness] :
identifier[self] . identifier[_create_hstore_unique] (
identifier[model] ,
identifier[new_field] ,
identifier[self] . identifier[_compose_keys] ( identifier[keys] )
)
|
def alter_field(self, model, old_field, new_field, strict=False):
"""Ran when the configuration on a field changed."""
is_old_field_hstore = isinstance(old_field, HStoreField)
is_new_field_hstore = isinstance(new_field, HStoreField)
if not is_old_field_hstore and (not is_new_field_hstore):
return # depends on [control=['if'], data=[]]
old_uniqueness = getattr(old_field, 'uniqueness', []) or []
new_uniqueness = getattr(new_field, 'uniqueness', []) or []
# handle field renames before moving on
if str(old_field.column) != str(new_field.column):
for keys in self._iterate_uniqueness_keys(old_field):
self._rename_hstore_unique(model._meta.db_table, model._meta.db_table, old_field, new_field, keys) # depends on [control=['for'], data=['keys']] # depends on [control=['if'], data=[]]
# drop the indexes for keys that have been removed
for keys in old_uniqueness:
if keys not in new_uniqueness:
self._drop_hstore_unique(model, old_field, self._compose_keys(keys)) # depends on [control=['if'], data=['keys']] # depends on [control=['for'], data=['keys']]
# create new indexes for keys that have been added
for keys in new_uniqueness:
if keys not in old_uniqueness:
self._create_hstore_unique(model, new_field, self._compose_keys(keys)) # depends on [control=['if'], data=['keys']] # depends on [control=['for'], data=['keys']]
|
def conf_int(self, alpha=0.05, **kwargs):
r"""Returns the confidence interval of the fitted parameters.
Returns
-------
alpha : float, optional (default=0.05)
The significance level for the confidence interval. ie.,
the default alpha = .05 returns a 95% confidence interval.
**kwargs : keyword args or dict
Keyword arguments to pass to the confidence interval function.
Could include 'cols' or 'method'
"""
return self.arima_res_.conf_int(alpha=alpha, **kwargs)
|
def function[conf_int, parameter[self, alpha]]:
constant[Returns the confidence interval of the fitted parameters.
Returns
-------
alpha : float, optional (default=0.05)
The significance level for the confidence interval. ie.,
the default alpha = .05 returns a 95% confidence interval.
**kwargs : keyword args or dict
Keyword arguments to pass to the confidence interval function.
Could include 'cols' or 'method'
]
return[call[name[self].arima_res_.conf_int, parameter[]]]
|
keyword[def] identifier[conf_int] ( identifier[self] , identifier[alpha] = literal[int] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[arima_res_] . identifier[conf_int] ( identifier[alpha] = identifier[alpha] ,** identifier[kwargs] )
|
def conf_int(self, alpha=0.05, **kwargs):
"""Returns the confidence interval of the fitted parameters.
Returns
-------
alpha : float, optional (default=0.05)
The significance level for the confidence interval. ie.,
the default alpha = .05 returns a 95% confidence interval.
**kwargs : keyword args or dict
Keyword arguments to pass to the confidence interval function.
Could include 'cols' or 'method'
"""
return self.arima_res_.conf_int(alpha=alpha, **kwargs)
|
def download_object(self, obj_name, directory, structure=True):
"""
Alias for self.download(); included for backwards compatibility
"""
return self.download(obj=obj_name, directory=directory,
structure=structure)
|
def function[download_object, parameter[self, obj_name, directory, structure]]:
constant[
Alias for self.download(); included for backwards compatibility
]
return[call[name[self].download, parameter[]]]
|
keyword[def] identifier[download_object] ( identifier[self] , identifier[obj_name] , identifier[directory] , identifier[structure] = keyword[True] ):
literal[string]
keyword[return] identifier[self] . identifier[download] ( identifier[obj] = identifier[obj_name] , identifier[directory] = identifier[directory] ,
identifier[structure] = identifier[structure] )
|
def download_object(self, obj_name, directory, structure=True):
"""
Alias for self.download(); included for backwards compatibility
"""
return self.download(obj=obj_name, directory=directory, structure=structure)
|
def delete_files_within_dir(directory: str, filenames: List[str]) -> None:
"""
Delete files within ``directory`` whose filename *exactly* matches one of
``filenames``.
"""
for dirpath, dirnames, fnames in os.walk(directory):
for f in fnames:
if f in filenames:
fullpath = os.path.join(dirpath, f)
log.debug("Deleting {!r}", fullpath)
os.remove(fullpath)
|
def function[delete_files_within_dir, parameter[directory, filenames]]:
constant[
Delete files within ``directory`` whose filename *exactly* matches one of
``filenames``.
]
for taget[tuple[[<ast.Name object at 0x7da1b189e2f0>, <ast.Name object at 0x7da1b189d990>, <ast.Name object at 0x7da1b189eaa0>]]] in starred[call[name[os].walk, parameter[name[directory]]]] begin[:]
for taget[name[f]] in starred[name[fnames]] begin[:]
if compare[name[f] in name[filenames]] begin[:]
variable[fullpath] assign[=] call[name[os].path.join, parameter[name[dirpath], name[f]]]
call[name[log].debug, parameter[constant[Deleting {!r}], name[fullpath]]]
call[name[os].remove, parameter[name[fullpath]]]
|
keyword[def] identifier[delete_files_within_dir] ( identifier[directory] : identifier[str] , identifier[filenames] : identifier[List] [ identifier[str] ])-> keyword[None] :
literal[string]
keyword[for] identifier[dirpath] , identifier[dirnames] , identifier[fnames] keyword[in] identifier[os] . identifier[walk] ( identifier[directory] ):
keyword[for] identifier[f] keyword[in] identifier[fnames] :
keyword[if] identifier[f] keyword[in] identifier[filenames] :
identifier[fullpath] = identifier[os] . identifier[path] . identifier[join] ( identifier[dirpath] , identifier[f] )
identifier[log] . identifier[debug] ( literal[string] , identifier[fullpath] )
identifier[os] . identifier[remove] ( identifier[fullpath] )
|
def delete_files_within_dir(directory: str, filenames: List[str]) -> None:
"""
Delete files within ``directory`` whose filename *exactly* matches one of
``filenames``.
"""
for (dirpath, dirnames, fnames) in os.walk(directory):
for f in fnames:
if f in filenames:
fullpath = os.path.join(dirpath, f)
log.debug('Deleting {!r}', fullpath)
os.remove(fullpath) # depends on [control=['if'], data=['f']] # depends on [control=['for'], data=['f']] # depends on [control=['for'], data=[]]
|
def _normalize_tags_type(self, tags, device_name=None, metric_name=None):
"""
Normalize tags contents and type:
- append `device_name` as `device:` tag
- normalize tags type
- doesn't mutate the passed list, returns a new list
"""
normalized_tags = []
if device_name:
self._log_deprecation('device_name')
normalized_tags.append('device:{}'.format(ensure_unicode(device_name)))
if tags is not None:
for tag in tags:
if tag is None:
continue
if not isinstance(tag, str):
try:
tag = tag.decode('utf-8')
except Exception:
self.log.warning(
'Error decoding tag `{}` as utf-8 for metric `{}`, ignoring tag'.format(tag, metric_name)
)
continue
normalized_tags.append(tag)
return normalized_tags
|
def function[_normalize_tags_type, parameter[self, tags, device_name, metric_name]]:
constant[
Normalize tags contents and type:
- append `device_name` as `device:` tag
- normalize tags type
- doesn't mutate the passed list, returns a new list
]
variable[normalized_tags] assign[=] list[[]]
if name[device_name] begin[:]
call[name[self]._log_deprecation, parameter[constant[device_name]]]
call[name[normalized_tags].append, parameter[call[constant[device:{}].format, parameter[call[name[ensure_unicode], parameter[name[device_name]]]]]]]
if compare[name[tags] is_not constant[None]] begin[:]
for taget[name[tag]] in starred[name[tags]] begin[:]
if compare[name[tag] is constant[None]] begin[:]
continue
if <ast.UnaryOp object at 0x7da18f811f30> begin[:]
<ast.Try object at 0x7da18f813940>
call[name[normalized_tags].append, parameter[name[tag]]]
return[name[normalized_tags]]
|
keyword[def] identifier[_normalize_tags_type] ( identifier[self] , identifier[tags] , identifier[device_name] = keyword[None] , identifier[metric_name] = keyword[None] ):
literal[string]
identifier[normalized_tags] =[]
keyword[if] identifier[device_name] :
identifier[self] . identifier[_log_deprecation] ( literal[string] )
identifier[normalized_tags] . identifier[append] ( literal[string] . identifier[format] ( identifier[ensure_unicode] ( identifier[device_name] )))
keyword[if] identifier[tags] keyword[is] keyword[not] keyword[None] :
keyword[for] identifier[tag] keyword[in] identifier[tags] :
keyword[if] identifier[tag] keyword[is] keyword[None] :
keyword[continue]
keyword[if] keyword[not] identifier[isinstance] ( identifier[tag] , identifier[str] ):
keyword[try] :
identifier[tag] = identifier[tag] . identifier[decode] ( literal[string] )
keyword[except] identifier[Exception] :
identifier[self] . identifier[log] . identifier[warning] (
literal[string] . identifier[format] ( identifier[tag] , identifier[metric_name] )
)
keyword[continue]
identifier[normalized_tags] . identifier[append] ( identifier[tag] )
keyword[return] identifier[normalized_tags]
|
def _normalize_tags_type(self, tags, device_name=None, metric_name=None):
"""
Normalize tags contents and type:
- append `device_name` as `device:` tag
- normalize tags type
- doesn't mutate the passed list, returns a new list
"""
normalized_tags = []
if device_name:
self._log_deprecation('device_name')
normalized_tags.append('device:{}'.format(ensure_unicode(device_name))) # depends on [control=['if'], data=[]]
if tags is not None:
for tag in tags:
if tag is None:
continue # depends on [control=['if'], data=[]]
if not isinstance(tag, str):
try:
tag = tag.decode('utf-8') # depends on [control=['try'], data=[]]
except Exception:
self.log.warning('Error decoding tag `{}` as utf-8 for metric `{}`, ignoring tag'.format(tag, metric_name))
continue # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
normalized_tags.append(tag) # depends on [control=['for'], data=['tag']] # depends on [control=['if'], data=['tags']]
return normalized_tags
|
def assess_itx_resistance(job, gene_expression, univ_options, reports_options):
"""
Assess the prevalence of the various genes in various cancer pathways and return a report in the txt
format.
:param toil.fileStore.FileID gene_expression: fsID for the rsem gene expression file
:param dict univ_options: Dict of universal options used by almost all tools
:param dict reports_options: Options specific to reporting modules
:return: The fsID for the itx resistance report file
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
tumor_type = univ_options['tumor_type']
# Get the input files
input_files = {
'rsem_quant.tsv': gene_expression,
'itx_resistance.tsv.tar.gz': reports_options['itx_resistance_file'],
'immune_resistance_pathways.json.tar.gz': reports_options['immune_resistance_pathways_file']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
input_files['itx_resistance.tsv'] = untargz(input_files['itx_resistance.tsv.tar.gz'], work_dir)
input_files['immune_resistance_pathways.json'] = untargz(input_files['immune_resistance_pathways.json.tar.gz'], work_dir)
full_data = pd.read_table(input_files['itx_resistance.tsv'], index_col=0)
# Read pathways descriptions and cancer pathway data
with open(input_files['immune_resistance_pathways.json']) as json_file:
json_data = json.load(json_file)
# Read patient file
patient_df = pd.read_csv('rsem_quant.tsv', sep=' ', delimiter='\t', header='infer', index_col=0)
patient_df.index = (patient_df.index).str.replace('\\..*$', '')
with open('immunotherapy_resistance_report.txt', 'w') as report_file:
# Check if data exsits for specified tumor type
try:
pathways = json_data['Cancer_to_pathway'][tumor_type]
except KeyError:
print('Data not available for ' + tumor_type, file=report_file)
else:
# If data exists, write a report
for pathway in pathways:
up_is_good = json_data['Pathways'][pathway]['up_is_good']
if up_is_good:
comp_fn = lambda x, y: x >= y
else:
comp_fn = lambda x, y: x < y
# Describe pathway and genes for it
print('Pathway: ' + pathway + '\n', file=report_file)
print ('Papers: ' + json_data['Pathways'][pathway]['paper'], file=report_file)
description = json_data['Pathways'][pathway]['description']
print('Description of pathway:\n' + textwrap.fill(description, width=100),
file=report_file)
print('Pathway genes: ', file=report_file)
print('\t{:10}{:<20}{:<20}{:<12}'.format('Gene', 'GTEX Median',
'TCGA N Median', 'Observed'),
file=report_file)
status = []
# Write TCGA, GTEX, and observed values
for gene in json_data['Pathways'][pathway]['genes']:
gtex = '{0:.2f}'.format(
float(full_data.loc[gene, TCGAToGTEx[tumor_type]])) \
if gene in full_data.index else 'NA'
tcga = '{0:.2f}'.format(
float(full_data.loc[gene, tumor_type + ' normal'])) \
if gene in full_data.index else 'NA'
tpm_value = '{0:.2f}'.format(float(patient_df.loc[gene, 'TPM'])) \
if gene in patient_df.index else 'NA'
ensg = json_data['Pathways'][pathway]['genes'][gene]
print('\t{:10}{:<20}{:<20}{:<12}'.format(ensg, gtex, tcga, tpm_value),
file=report_file)
if gtex != 'NA' and tpm_value != 'NA':
tcga_bool = comp_fn(float(tpm_value), float(tcga))
gtex_bool = comp_fn(float(tpm_value), float(gtex))
status.append(tcga_bool and gtex_bool)
else:
status.append(False)
# Based on the number of genes with expression values above normal, assess the status
print ('Status: ' + json_data['Pathways'][pathway]['status'][
str(sum(status) >= 0.75 * len(status))] + '\n', file=report_file)
output_file = job.fileStore.writeGlobalFile(report_file.name)
export_results(job, output_file, report_file.name, univ_options, subfolder='reports')
job.fileStore.logToMaster('Ran create immunotherapy resistance report on %s successfully'
% univ_options['patient'])
return output_file
|
def function[assess_itx_resistance, parameter[job, gene_expression, univ_options, reports_options]]:
constant[
Assess the prevalence of the various genes in various cancer pathways and return a report in the txt
format.
:param toil.fileStore.FileID gene_expression: fsID for the rsem gene expression file
:param dict univ_options: Dict of universal options used by almost all tools
:param dict reports_options: Options specific to reporting modules
:return: The fsID for the itx resistance report file
:rtype: toil.fileStore.FileID
]
variable[work_dir] assign[=] call[name[os].getcwd, parameter[]]
variable[tumor_type] assign[=] call[name[univ_options]][constant[tumor_type]]
variable[input_files] assign[=] dictionary[[<ast.Constant object at 0x7da18f810100>, <ast.Constant object at 0x7da18f812f50>, <ast.Constant object at 0x7da18f8102b0>], [<ast.Name object at 0x7da18f812050>, <ast.Subscript object at 0x7da18f812110>, <ast.Subscript object at 0x7da18f813700>]]
variable[input_files] assign[=] call[name[get_files_from_filestore], parameter[name[job], name[input_files], name[work_dir]]]
call[name[input_files]][constant[itx_resistance.tsv]] assign[=] call[name[untargz], parameter[call[name[input_files]][constant[itx_resistance.tsv.tar.gz]], name[work_dir]]]
call[name[input_files]][constant[immune_resistance_pathways.json]] assign[=] call[name[untargz], parameter[call[name[input_files]][constant[immune_resistance_pathways.json.tar.gz]], name[work_dir]]]
variable[full_data] assign[=] call[name[pd].read_table, parameter[call[name[input_files]][constant[itx_resistance.tsv]]]]
with call[name[open], parameter[call[name[input_files]][constant[immune_resistance_pathways.json]]]] begin[:]
variable[json_data] assign[=] call[name[json].load, parameter[name[json_file]]]
variable[patient_df] assign[=] call[name[pd].read_csv, parameter[constant[rsem_quant.tsv]]]
name[patient_df].index assign[=] call[name[patient_df].index.str.replace, parameter[constant[\..*$], constant[]]]
with call[name[open], parameter[constant[immunotherapy_resistance_report.txt], constant[w]]] begin[:]
<ast.Try object at 0x7da18f813760>
variable[output_file] assign[=] call[name[job].fileStore.writeGlobalFile, parameter[name[report_file].name]]
call[name[export_results], parameter[name[job], name[output_file], name[report_file].name, name[univ_options]]]
call[name[job].fileStore.logToMaster, parameter[binary_operation[constant[Ran create immunotherapy resistance report on %s successfully] <ast.Mod object at 0x7da2590d6920> call[name[univ_options]][constant[patient]]]]]
return[name[output_file]]
|
keyword[def] identifier[assess_itx_resistance] ( identifier[job] , identifier[gene_expression] , identifier[univ_options] , identifier[reports_options] ):
literal[string]
identifier[work_dir] = identifier[os] . identifier[getcwd] ()
identifier[tumor_type] = identifier[univ_options] [ literal[string] ]
identifier[input_files] ={
literal[string] : identifier[gene_expression] ,
literal[string] : identifier[reports_options] [ literal[string] ],
literal[string] : identifier[reports_options] [ literal[string] ]}
identifier[input_files] = identifier[get_files_from_filestore] ( identifier[job] , identifier[input_files] , identifier[work_dir] , identifier[docker] = keyword[False] )
identifier[input_files] [ literal[string] ]= identifier[untargz] ( identifier[input_files] [ literal[string] ], identifier[work_dir] )
identifier[input_files] [ literal[string] ]= identifier[untargz] ( identifier[input_files] [ literal[string] ], identifier[work_dir] )
identifier[full_data] = identifier[pd] . identifier[read_table] ( identifier[input_files] [ literal[string] ], identifier[index_col] = literal[int] )
keyword[with] identifier[open] ( identifier[input_files] [ literal[string] ]) keyword[as] identifier[json_file] :
identifier[json_data] = identifier[json] . identifier[load] ( identifier[json_file] )
identifier[patient_df] = identifier[pd] . identifier[read_csv] ( literal[string] , identifier[sep] = literal[string] , identifier[delimiter] = literal[string] , identifier[header] = literal[string] , identifier[index_col] = literal[int] )
identifier[patient_df] . identifier[index] =( identifier[patient_df] . identifier[index] ). identifier[str] . identifier[replace] ( literal[string] , literal[string] )
keyword[with] identifier[open] ( literal[string] , literal[string] ) keyword[as] identifier[report_file] :
keyword[try] :
identifier[pathways] = identifier[json_data] [ literal[string] ][ identifier[tumor_type] ]
keyword[except] identifier[KeyError] :
identifier[print] ( literal[string] + identifier[tumor_type] , identifier[file] = identifier[report_file] )
keyword[else] :
keyword[for] identifier[pathway] keyword[in] identifier[pathways] :
identifier[up_is_good] = identifier[json_data] [ literal[string] ][ identifier[pathway] ][ literal[string] ]
keyword[if] identifier[up_is_good] :
identifier[comp_fn] = keyword[lambda] identifier[x] , identifier[y] : identifier[x] >= identifier[y]
keyword[else] :
identifier[comp_fn] = keyword[lambda] identifier[x] , identifier[y] : identifier[x] < identifier[y]
identifier[print] ( literal[string] + identifier[pathway] + literal[string] , identifier[file] = identifier[report_file] )
identifier[print] ( literal[string] + identifier[json_data] [ literal[string] ][ identifier[pathway] ][ literal[string] ], identifier[file] = identifier[report_file] )
identifier[description] = identifier[json_data] [ literal[string] ][ identifier[pathway] ][ literal[string] ]
identifier[print] ( literal[string] + identifier[textwrap] . identifier[fill] ( identifier[description] , identifier[width] = literal[int] ),
identifier[file] = identifier[report_file] )
identifier[print] ( literal[string] , identifier[file] = identifier[report_file] )
identifier[print] ( literal[string] . identifier[format] ( literal[string] , literal[string] ,
literal[string] , literal[string] ),
identifier[file] = identifier[report_file] )
identifier[status] =[]
keyword[for] identifier[gene] keyword[in] identifier[json_data] [ literal[string] ][ identifier[pathway] ][ literal[string] ]:
identifier[gtex] = literal[string] . identifier[format] (
identifier[float] ( identifier[full_data] . identifier[loc] [ identifier[gene] , identifier[TCGAToGTEx] [ identifier[tumor_type] ]])) keyword[if] identifier[gene] keyword[in] identifier[full_data] . identifier[index] keyword[else] literal[string]
identifier[tcga] = literal[string] . identifier[format] (
identifier[float] ( identifier[full_data] . identifier[loc] [ identifier[gene] , identifier[tumor_type] + literal[string] ])) keyword[if] identifier[gene] keyword[in] identifier[full_data] . identifier[index] keyword[else] literal[string]
identifier[tpm_value] = literal[string] . identifier[format] ( identifier[float] ( identifier[patient_df] . identifier[loc] [ identifier[gene] , literal[string] ])) keyword[if] identifier[gene] keyword[in] identifier[patient_df] . identifier[index] keyword[else] literal[string]
identifier[ensg] = identifier[json_data] [ literal[string] ][ identifier[pathway] ][ literal[string] ][ identifier[gene] ]
identifier[print] ( literal[string] . identifier[format] ( identifier[ensg] , identifier[gtex] , identifier[tcga] , identifier[tpm_value] ),
identifier[file] = identifier[report_file] )
keyword[if] identifier[gtex] != literal[string] keyword[and] identifier[tpm_value] != literal[string] :
identifier[tcga_bool] = identifier[comp_fn] ( identifier[float] ( identifier[tpm_value] ), identifier[float] ( identifier[tcga] ))
identifier[gtex_bool] = identifier[comp_fn] ( identifier[float] ( identifier[tpm_value] ), identifier[float] ( identifier[gtex] ))
identifier[status] . identifier[append] ( identifier[tcga_bool] keyword[and] identifier[gtex_bool] )
keyword[else] :
identifier[status] . identifier[append] ( keyword[False] )
identifier[print] ( literal[string] + identifier[json_data] [ literal[string] ][ identifier[pathway] ][ literal[string] ][
identifier[str] ( identifier[sum] ( identifier[status] )>= literal[int] * identifier[len] ( identifier[status] ))]+ literal[string] , identifier[file] = identifier[report_file] )
identifier[output_file] = identifier[job] . identifier[fileStore] . identifier[writeGlobalFile] ( identifier[report_file] . identifier[name] )
identifier[export_results] ( identifier[job] , identifier[output_file] , identifier[report_file] . identifier[name] , identifier[univ_options] , identifier[subfolder] = literal[string] )
identifier[job] . identifier[fileStore] . identifier[logToMaster] ( literal[string]
% identifier[univ_options] [ literal[string] ])
keyword[return] identifier[output_file]
|
def assess_itx_resistance(job, gene_expression, univ_options, reports_options):
"""
Assess the prevalence of the various genes in various cancer pathways and return a report in the txt
format.
:param toil.fileStore.FileID gene_expression: fsID for the rsem gene expression file
:param dict univ_options: Dict of universal options used by almost all tools
:param dict reports_options: Options specific to reporting modules
:return: The fsID for the itx resistance report file
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
tumor_type = univ_options['tumor_type']
# Get the input files
input_files = {'rsem_quant.tsv': gene_expression, 'itx_resistance.tsv.tar.gz': reports_options['itx_resistance_file'], 'immune_resistance_pathways.json.tar.gz': reports_options['immune_resistance_pathways_file']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
input_files['itx_resistance.tsv'] = untargz(input_files['itx_resistance.tsv.tar.gz'], work_dir)
input_files['immune_resistance_pathways.json'] = untargz(input_files['immune_resistance_pathways.json.tar.gz'], work_dir)
full_data = pd.read_table(input_files['itx_resistance.tsv'], index_col=0)
# Read pathways descriptions and cancer pathway data
with open(input_files['immune_resistance_pathways.json']) as json_file:
json_data = json.load(json_file) # depends on [control=['with'], data=['json_file']]
# Read patient file
patient_df = pd.read_csv('rsem_quant.tsv', sep=' ', delimiter='\t', header='infer', index_col=0)
patient_df.index = patient_df.index.str.replace('\\..*$', '')
with open('immunotherapy_resistance_report.txt', 'w') as report_file:
# Check if data exsits for specified tumor type
try:
pathways = json_data['Cancer_to_pathway'][tumor_type] # depends on [control=['try'], data=[]]
except KeyError:
print('Data not available for ' + tumor_type, file=report_file) # depends on [control=['except'], data=[]]
else:
# If data exists, write a report
for pathway in pathways:
up_is_good = json_data['Pathways'][pathway]['up_is_good']
if up_is_good:
comp_fn = lambda x, y: x >= y # depends on [control=['if'], data=[]]
else:
comp_fn = lambda x, y: x < y
# Describe pathway and genes for it
print('Pathway: ' + pathway + '\n', file=report_file)
print('Papers: ' + json_data['Pathways'][pathway]['paper'], file=report_file)
description = json_data['Pathways'][pathway]['description']
print('Description of pathway:\n' + textwrap.fill(description, width=100), file=report_file)
print('Pathway genes: ', file=report_file)
print('\t{:10}{:<20}{:<20}{:<12}'.format('Gene', 'GTEX Median', 'TCGA N Median', 'Observed'), file=report_file)
status = []
# Write TCGA, GTEX, and observed values
for gene in json_data['Pathways'][pathway]['genes']:
gtex = '{0:.2f}'.format(float(full_data.loc[gene, TCGAToGTEx[tumor_type]])) if gene in full_data.index else 'NA'
tcga = '{0:.2f}'.format(float(full_data.loc[gene, tumor_type + ' normal'])) if gene in full_data.index else 'NA'
tpm_value = '{0:.2f}'.format(float(patient_df.loc[gene, 'TPM'])) if gene in patient_df.index else 'NA'
ensg = json_data['Pathways'][pathway]['genes'][gene]
print('\t{:10}{:<20}{:<20}{:<12}'.format(ensg, gtex, tcga, tpm_value), file=report_file)
if gtex != 'NA' and tpm_value != 'NA':
tcga_bool = comp_fn(float(tpm_value), float(tcga))
gtex_bool = comp_fn(float(tpm_value), float(gtex))
status.append(tcga_bool and gtex_bool) # depends on [control=['if'], data=[]]
else:
status.append(False) # depends on [control=['for'], data=['gene']]
# Based on the number of genes with expression values above normal, assess the status
print('Status: ' + json_data['Pathways'][pathway]['status'][str(sum(status) >= 0.75 * len(status))] + '\n', file=report_file) # depends on [control=['for'], data=['pathway']] # depends on [control=['with'], data=['report_file']]
output_file = job.fileStore.writeGlobalFile(report_file.name)
export_results(job, output_file, report_file.name, univ_options, subfolder='reports')
job.fileStore.logToMaster('Ran create immunotherapy resistance report on %s successfully' % univ_options['patient'])
return output_file
|
def to_add_link(self, ):
'''
To add link
'''
if self.check_post_role()['ADD']:
pass
else:
return False
kwd = {
'pager': '',
'uid': '',
}
self.render('misc/link/link_add.html',
topmenu='',
kwd=kwd,
userinfo=self.userinfo, )
|
def function[to_add_link, parameter[self]]:
constant[
To add link
]
if call[call[name[self].check_post_role, parameter[]]][constant[ADD]] begin[:]
pass
variable[kwd] assign[=] dictionary[[<ast.Constant object at 0x7da1b04f8820>, <ast.Constant object at 0x7da1b04f9e10>], [<ast.Constant object at 0x7da1b04f9f30>, <ast.Constant object at 0x7da1b04f86a0>]]
call[name[self].render, parameter[constant[misc/link/link_add.html]]]
|
keyword[def] identifier[to_add_link] ( identifier[self] ,):
literal[string]
keyword[if] identifier[self] . identifier[check_post_role] ()[ literal[string] ]:
keyword[pass]
keyword[else] :
keyword[return] keyword[False]
identifier[kwd] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
identifier[self] . identifier[render] ( literal[string] ,
identifier[topmenu] = literal[string] ,
identifier[kwd] = identifier[kwd] ,
identifier[userinfo] = identifier[self] . identifier[userinfo] ,)
|
def to_add_link(self):
"""
To add link
"""
if self.check_post_role()['ADD']:
pass # depends on [control=['if'], data=[]]
else:
return False
kwd = {'pager': '', 'uid': ''}
self.render('misc/link/link_add.html', topmenu='', kwd=kwd, userinfo=self.userinfo)
|
def add_price_entity(self, price: dal.Price):
""" Adds the price """
from decimal import Decimal
# check if the price already exists in db.
repo = self.get_price_repository()
existing = (
repo.query
.filter(dal.Price.namespace == price.namespace)
.filter(dal.Price.symbol == price.symbol)
.filter(dal.Price.date == price.date)
.filter(dal.Price.time == price.time)
.first()
)
if existing:
# Update existing price.
new_value = Decimal(price.value) / Decimal(price.denom)
self.logger.info(f"Exists: {price}")
if price.currency != existing.currency:
raise ValueError(
f"The currency is different for price {price}!")
if existing.value != price.value:
existing.value = price.value
self.logger.info(f"Updating to {new_value}.")
if existing.denom != price.denom:
existing.denom = price.denom
else:
# Insert new price
self.session.add(price)
self.logger.info(f"Added {price}")
|
def function[add_price_entity, parameter[self, price]]:
constant[ Adds the price ]
from relative_module[decimal] import module[Decimal]
variable[repo] assign[=] call[name[self].get_price_repository, parameter[]]
variable[existing] assign[=] call[call[call[call[call[name[repo].query.filter, parameter[compare[name[dal].Price.namespace equal[==] name[price].namespace]]].filter, parameter[compare[name[dal].Price.symbol equal[==] name[price].symbol]]].filter, parameter[compare[name[dal].Price.date equal[==] name[price].date]]].filter, parameter[compare[name[dal].Price.time equal[==] name[price].time]]].first, parameter[]]
if name[existing] begin[:]
variable[new_value] assign[=] binary_operation[call[name[Decimal], parameter[name[price].value]] / call[name[Decimal], parameter[name[price].denom]]]
call[name[self].logger.info, parameter[<ast.JoinedStr object at 0x7da1b03b91b0>]]
if compare[name[price].currency not_equal[!=] name[existing].currency] begin[:]
<ast.Raise object at 0x7da1b03ba3e0>
if compare[name[existing].value not_equal[!=] name[price].value] begin[:]
name[existing].value assign[=] name[price].value
call[name[self].logger.info, parameter[<ast.JoinedStr object at 0x7da1b03b8580>]]
if compare[name[existing].denom not_equal[!=] name[price].denom] begin[:]
name[existing].denom assign[=] name[price].denom
|
keyword[def] identifier[add_price_entity] ( identifier[self] , identifier[price] : identifier[dal] . identifier[Price] ):
literal[string]
keyword[from] identifier[decimal] keyword[import] identifier[Decimal]
identifier[repo] = identifier[self] . identifier[get_price_repository] ()
identifier[existing] =(
identifier[repo] . identifier[query]
. identifier[filter] ( identifier[dal] . identifier[Price] . identifier[namespace] == identifier[price] . identifier[namespace] )
. identifier[filter] ( identifier[dal] . identifier[Price] . identifier[symbol] == identifier[price] . identifier[symbol] )
. identifier[filter] ( identifier[dal] . identifier[Price] . identifier[date] == identifier[price] . identifier[date] )
. identifier[filter] ( identifier[dal] . identifier[Price] . identifier[time] == identifier[price] . identifier[time] )
. identifier[first] ()
)
keyword[if] identifier[existing] :
identifier[new_value] = identifier[Decimal] ( identifier[price] . identifier[value] )/ identifier[Decimal] ( identifier[price] . identifier[denom] )
identifier[self] . identifier[logger] . identifier[info] ( literal[string] )
keyword[if] identifier[price] . identifier[currency] != identifier[existing] . identifier[currency] :
keyword[raise] identifier[ValueError] (
literal[string] )
keyword[if] identifier[existing] . identifier[value] != identifier[price] . identifier[value] :
identifier[existing] . identifier[value] = identifier[price] . identifier[value]
identifier[self] . identifier[logger] . identifier[info] ( literal[string] )
keyword[if] identifier[existing] . identifier[denom] != identifier[price] . identifier[denom] :
identifier[existing] . identifier[denom] = identifier[price] . identifier[denom]
keyword[else] :
identifier[self] . identifier[session] . identifier[add] ( identifier[price] )
identifier[self] . identifier[logger] . identifier[info] ( literal[string] )
|
def add_price_entity(self, price: dal.Price):
""" Adds the price """
from decimal import Decimal
# check if the price already exists in db.
repo = self.get_price_repository()
existing = repo.query.filter(dal.Price.namespace == price.namespace).filter(dal.Price.symbol == price.symbol).filter(dal.Price.date == price.date).filter(dal.Price.time == price.time).first()
if existing:
# Update existing price.
new_value = Decimal(price.value) / Decimal(price.denom)
self.logger.info(f'Exists: {price}')
if price.currency != existing.currency:
raise ValueError(f'The currency is different for price {price}!') # depends on [control=['if'], data=[]]
if existing.value != price.value:
existing.value = price.value
self.logger.info(f'Updating to {new_value}.') # depends on [control=['if'], data=[]]
if existing.denom != price.denom:
existing.denom = price.denom # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# Insert new price
self.session.add(price)
self.logger.info(f'Added {price}')
|
def advanced_wrap(f, wrapper):
"""
Wrap a decorated function while keeping the same keyword arguments
"""
f_sig = list(inspect.getargspec(f))
wrap_sig = list(inspect.getargspec(wrapper))
# Update the keyword arguments of the wrapper
if f_sig[3] is None or f_sig[3] == []:
f_sig[3], f_kwargs = [], []
else:
f_kwargs = f_sig[0][-len(f_sig[3]):]
for key, default in zip(f_kwargs, f_sig[3]):
wrap_sig[0].append(key)
wrap_sig[3] = wrap_sig[3] + (default, )
wrap_sig[2] = None # Remove kwargs
src = "lambda %s: " % (inspect.formatargspec(*wrap_sig)[1:-1])
new_args = inspect.formatargspec(
wrap_sig[0], wrap_sig[1], wrap_sig[2], f_kwargs,
formatvalue=lambda x: '=' + x)
src += 'wrapper%s\n' % new_args
decorated = eval(src, locals())
decorated.func = f
return update_wrapper(decorated, f)
|
def function[advanced_wrap, parameter[f, wrapper]]:
constant[
Wrap a decorated function while keeping the same keyword arguments
]
variable[f_sig] assign[=] call[name[list], parameter[call[name[inspect].getargspec, parameter[name[f]]]]]
variable[wrap_sig] assign[=] call[name[list], parameter[call[name[inspect].getargspec, parameter[name[wrapper]]]]]
if <ast.BoolOp object at 0x7da1b0d3eb60> begin[:]
<ast.Tuple object at 0x7da1b0d3e470> assign[=] tuple[[<ast.List object at 0x7da1b0d3e650>, <ast.List object at 0x7da1b0d3e590>]]
for taget[tuple[[<ast.Name object at 0x7da1b0d3e860>, <ast.Name object at 0x7da1b0d3e920>]]] in starred[call[name[zip], parameter[name[f_kwargs], call[name[f_sig]][constant[3]]]]] begin[:]
call[call[name[wrap_sig]][constant[0]].append, parameter[name[key]]]
call[name[wrap_sig]][constant[3]] assign[=] binary_operation[call[name[wrap_sig]][constant[3]] + tuple[[<ast.Name object at 0x7da1b0d3c280>]]]
call[name[wrap_sig]][constant[2]] assign[=] constant[None]
variable[src] assign[=] binary_operation[constant[lambda %s: ] <ast.Mod object at 0x7da2590d6920> call[call[name[inspect].formatargspec, parameter[<ast.Starred object at 0x7da1b0d3c970>]]][<ast.Slice object at 0x7da1b0d3c910>]]
variable[new_args] assign[=] call[name[inspect].formatargspec, parameter[call[name[wrap_sig]][constant[0]], call[name[wrap_sig]][constant[1]], call[name[wrap_sig]][constant[2]], name[f_kwargs]]]
<ast.AugAssign object at 0x7da1b0df7d90>
variable[decorated] assign[=] call[name[eval], parameter[name[src], call[name[locals], parameter[]]]]
name[decorated].func assign[=] name[f]
return[call[name[update_wrapper], parameter[name[decorated], name[f]]]]
|
keyword[def] identifier[advanced_wrap] ( identifier[f] , identifier[wrapper] ):
literal[string]
identifier[f_sig] = identifier[list] ( identifier[inspect] . identifier[getargspec] ( identifier[f] ))
identifier[wrap_sig] = identifier[list] ( identifier[inspect] . identifier[getargspec] ( identifier[wrapper] ))
keyword[if] identifier[f_sig] [ literal[int] ] keyword[is] keyword[None] keyword[or] identifier[f_sig] [ literal[int] ]==[]:
identifier[f_sig] [ literal[int] ], identifier[f_kwargs] =[],[]
keyword[else] :
identifier[f_kwargs] = identifier[f_sig] [ literal[int] ][- identifier[len] ( identifier[f_sig] [ literal[int] ]):]
keyword[for] identifier[key] , identifier[default] keyword[in] identifier[zip] ( identifier[f_kwargs] , identifier[f_sig] [ literal[int] ]):
identifier[wrap_sig] [ literal[int] ]. identifier[append] ( identifier[key] )
identifier[wrap_sig] [ literal[int] ]= identifier[wrap_sig] [ literal[int] ]+( identifier[default] ,)
identifier[wrap_sig] [ literal[int] ]= keyword[None]
identifier[src] = literal[string] %( identifier[inspect] . identifier[formatargspec] (* identifier[wrap_sig] )[ literal[int] :- literal[int] ])
identifier[new_args] = identifier[inspect] . identifier[formatargspec] (
identifier[wrap_sig] [ literal[int] ], identifier[wrap_sig] [ literal[int] ], identifier[wrap_sig] [ literal[int] ], identifier[f_kwargs] ,
identifier[formatvalue] = keyword[lambda] identifier[x] : literal[string] + identifier[x] )
identifier[src] += literal[string] % identifier[new_args]
identifier[decorated] = identifier[eval] ( identifier[src] , identifier[locals] ())
identifier[decorated] . identifier[func] = identifier[f]
keyword[return] identifier[update_wrapper] ( identifier[decorated] , identifier[f] )
|
def advanced_wrap(f, wrapper):
"""
Wrap a decorated function while keeping the same keyword arguments
"""
f_sig = list(inspect.getargspec(f))
wrap_sig = list(inspect.getargspec(wrapper))
# Update the keyword arguments of the wrapper
if f_sig[3] is None or f_sig[3] == []:
(f_sig[3], f_kwargs) = ([], []) # depends on [control=['if'], data=[]]
else:
f_kwargs = f_sig[0][-len(f_sig[3]):]
for (key, default) in zip(f_kwargs, f_sig[3]):
wrap_sig[0].append(key)
wrap_sig[3] = wrap_sig[3] + (default,) # depends on [control=['for'], data=[]]
wrap_sig[2] = None # Remove kwargs
src = 'lambda %s: ' % inspect.formatargspec(*wrap_sig)[1:-1]
new_args = inspect.formatargspec(wrap_sig[0], wrap_sig[1], wrap_sig[2], f_kwargs, formatvalue=lambda x: '=' + x)
src += 'wrapper%s\n' % new_args
decorated = eval(src, locals())
decorated.func = f
return update_wrapper(decorated, f)
|
def import_module(module_name):
"""
Given a dotted Python path, imports & returns the module.
If not found, raises ``UnknownModuleError``.
Ex::
mod = import_module('random')
:param module_name: The dotted Python path
:type module_name: string
:returns: module
"""
try:
return importlib.import_module(module_name)
except ImportError as err:
raise UnknownModuleError(str(err))
|
def function[import_module, parameter[module_name]]:
constant[
Given a dotted Python path, imports & returns the module.
If not found, raises ``UnknownModuleError``.
Ex::
mod = import_module('random')
:param module_name: The dotted Python path
:type module_name: string
:returns: module
]
<ast.Try object at 0x7da20e9619f0>
|
keyword[def] identifier[import_module] ( identifier[module_name] ):
literal[string]
keyword[try] :
keyword[return] identifier[importlib] . identifier[import_module] ( identifier[module_name] )
keyword[except] identifier[ImportError] keyword[as] identifier[err] :
keyword[raise] identifier[UnknownModuleError] ( identifier[str] ( identifier[err] ))
|
def import_module(module_name):
"""
Given a dotted Python path, imports & returns the module.
If not found, raises ``UnknownModuleError``.
Ex::
mod = import_module('random')
:param module_name: The dotted Python path
:type module_name: string
:returns: module
"""
try:
return importlib.import_module(module_name) # depends on [control=['try'], data=[]]
except ImportError as err:
raise UnknownModuleError(str(err)) # depends on [control=['except'], data=['err']]
|
def flattened(value, split=None):
"""
Args:
value: Possibly nested arguments (sequence of lists, nested lists)
split (int | str | unicode | (str | unicode, int) | None): How to split values:
- None: simply flatten, no further processing
- one char string: split() on specified char
- SANITIZED: discard all None items
- UNIQUE: each value will appear only once
- SHELL: filter out sequences of the form ["-f", None] (handy for simplified cmd line specification)
Returns:
list: 'value' flattened out (leaves from all involved lists/tuples)
"""
result = []
separator = None
mode = 0
if isinstance(split, tuple):
separator, mode = split
elif isinstance(split, int):
mode = split
else:
separator = split
_flatten(result, value, separator, mode)
return result
|
def function[flattened, parameter[value, split]]:
constant[
Args:
value: Possibly nested arguments (sequence of lists, nested lists)
split (int | str | unicode | (str | unicode, int) | None): How to split values:
- None: simply flatten, no further processing
- one char string: split() on specified char
- SANITIZED: discard all None items
- UNIQUE: each value will appear only once
- SHELL: filter out sequences of the form ["-f", None] (handy for simplified cmd line specification)
Returns:
list: 'value' flattened out (leaves from all involved lists/tuples)
]
variable[result] assign[=] list[[]]
variable[separator] assign[=] constant[None]
variable[mode] assign[=] constant[0]
if call[name[isinstance], parameter[name[split], name[tuple]]] begin[:]
<ast.Tuple object at 0x7da1b23edb70> assign[=] name[split]
call[name[_flatten], parameter[name[result], name[value], name[separator], name[mode]]]
return[name[result]]
|
keyword[def] identifier[flattened] ( identifier[value] , identifier[split] = keyword[None] ):
literal[string]
identifier[result] =[]
identifier[separator] = keyword[None]
identifier[mode] = literal[int]
keyword[if] identifier[isinstance] ( identifier[split] , identifier[tuple] ):
identifier[separator] , identifier[mode] = identifier[split]
keyword[elif] identifier[isinstance] ( identifier[split] , identifier[int] ):
identifier[mode] = identifier[split]
keyword[else] :
identifier[separator] = identifier[split]
identifier[_flatten] ( identifier[result] , identifier[value] , identifier[separator] , identifier[mode] )
keyword[return] identifier[result]
|
def flattened(value, split=None):
"""
Args:
value: Possibly nested arguments (sequence of lists, nested lists)
split (int | str | unicode | (str | unicode, int) | None): How to split values:
- None: simply flatten, no further processing
- one char string: split() on specified char
- SANITIZED: discard all None items
- UNIQUE: each value will appear only once
- SHELL: filter out sequences of the form ["-f", None] (handy for simplified cmd line specification)
Returns:
list: 'value' flattened out (leaves from all involved lists/tuples)
"""
result = []
separator = None
mode = 0
if isinstance(split, tuple):
(separator, mode) = split # depends on [control=['if'], data=[]]
elif isinstance(split, int):
mode = split # depends on [control=['if'], data=[]]
else:
separator = split
_flatten(result, value, separator, mode)
return result
|
def write_fpga_reg(self, fpga_num, addr, value, cabinet, frame, board):
"""Write the value of an FPGA (SPI) register.
See the SpI/O project's spinnaker_fpga design's `README`_ for a listing
of FPGA registers. The SpI/O project can be found on GitHub at:
https://github.com/SpiNNakerManchester/spio/
.. _README: https://github.com/SpiNNakerManchester/spio/\
blob/master/designs/spinnaker_fpgas/README.md#spi-interface
Parameters
----------
fpga_num : int
FPGA number (0, 1 or 2) to communicate with.
addr : int
Register address to read or write to (will be rounded down to the
nearest 32-bit word boundary).
value : int
A 32-bit int value to write to the register
"""
arg1 = addr & (~0x3)
arg2 = 4 # Write a 32-bit value
arg3 = fpga_num
self._send_scp(cabinet, frame, board, SCPCommands.link_write,
arg1=arg1, arg2=arg2, arg3=arg3,
data=struct.pack("<I", value), expected_args=0)
|
def function[write_fpga_reg, parameter[self, fpga_num, addr, value, cabinet, frame, board]]:
constant[Write the value of an FPGA (SPI) register.
See the SpI/O project's spinnaker_fpga design's `README`_ for a listing
of FPGA registers. The SpI/O project can be found on GitHub at:
https://github.com/SpiNNakerManchester/spio/
.. _README: https://github.com/SpiNNakerManchester/spio/ blob/master/designs/spinnaker_fpgas/README.md#spi-interface
Parameters
----------
fpga_num : int
FPGA number (0, 1 or 2) to communicate with.
addr : int
Register address to read or write to (will be rounded down to the
nearest 32-bit word boundary).
value : int
A 32-bit int value to write to the register
]
variable[arg1] assign[=] binary_operation[name[addr] <ast.BitAnd object at 0x7da2590d6b60> <ast.UnaryOp object at 0x7da1b196a3e0>]
variable[arg2] assign[=] constant[4]
variable[arg3] assign[=] name[fpga_num]
call[name[self]._send_scp, parameter[name[cabinet], name[frame], name[board], name[SCPCommands].link_write]]
|
keyword[def] identifier[write_fpga_reg] ( identifier[self] , identifier[fpga_num] , identifier[addr] , identifier[value] , identifier[cabinet] , identifier[frame] , identifier[board] ):
literal[string]
identifier[arg1] = identifier[addr] &(~ literal[int] )
identifier[arg2] = literal[int]
identifier[arg3] = identifier[fpga_num]
identifier[self] . identifier[_send_scp] ( identifier[cabinet] , identifier[frame] , identifier[board] , identifier[SCPCommands] . identifier[link_write] ,
identifier[arg1] = identifier[arg1] , identifier[arg2] = identifier[arg2] , identifier[arg3] = identifier[arg3] ,
identifier[data] = identifier[struct] . identifier[pack] ( literal[string] , identifier[value] ), identifier[expected_args] = literal[int] )
|
def write_fpga_reg(self, fpga_num, addr, value, cabinet, frame, board):
"""Write the value of an FPGA (SPI) register.
See the SpI/O project's spinnaker_fpga design's `README`_ for a listing
of FPGA registers. The SpI/O project can be found on GitHub at:
https://github.com/SpiNNakerManchester/spio/
.. _README: https://github.com/SpiNNakerManchester/spio/ blob/master/designs/spinnaker_fpgas/README.md#spi-interface
Parameters
----------
fpga_num : int
FPGA number (0, 1 or 2) to communicate with.
addr : int
Register address to read or write to (will be rounded down to the
nearest 32-bit word boundary).
value : int
A 32-bit int value to write to the register
"""
arg1 = addr & ~3
arg2 = 4 # Write a 32-bit value
arg3 = fpga_num
self._send_scp(cabinet, frame, board, SCPCommands.link_write, arg1=arg1, arg2=arg2, arg3=arg3, data=struct.pack('<I', value), expected_args=0)
|
def rpc_get_usages(self, filename, source, offset):
"""Return the uses of the symbol at offset.
Returns a list of occurrences of the symbol, as dicts with the
fields name, filename, and offset.
"""
line, column = pos_to_linecol(source, offset)
uses = run_with_debug(jedi, 'usages',
source=source, line=line, column=column,
path=filename, encoding='utf-8')
if uses is None:
return None
result = []
for use in uses:
if use.module_path == filename:
offset = linecol_to_pos(source, use.line, use.column)
elif use.module_path is not None:
with open(use.module_path) as f:
text = f.read()
offset = linecol_to_pos(text, use.line, use.column)
result.append({"name": use.name,
"filename": use.module_path,
"offset": offset})
return result
|
def function[rpc_get_usages, parameter[self, filename, source, offset]]:
constant[Return the uses of the symbol at offset.
Returns a list of occurrences of the symbol, as dicts with the
fields name, filename, and offset.
]
<ast.Tuple object at 0x7da1b16be1d0> assign[=] call[name[pos_to_linecol], parameter[name[source], name[offset]]]
variable[uses] assign[=] call[name[run_with_debug], parameter[name[jedi], constant[usages]]]
if compare[name[uses] is constant[None]] begin[:]
return[constant[None]]
variable[result] assign[=] list[[]]
for taget[name[use]] in starred[name[uses]] begin[:]
if compare[name[use].module_path equal[==] name[filename]] begin[:]
variable[offset] assign[=] call[name[linecol_to_pos], parameter[name[source], name[use].line, name[use].column]]
call[name[result].append, parameter[dictionary[[<ast.Constant object at 0x7da1b16bf2b0>, <ast.Constant object at 0x7da1b16bef80>, <ast.Constant object at 0x7da1b16bd8a0>], [<ast.Attribute object at 0x7da1b16bf730>, <ast.Attribute object at 0x7da1b16bcb50>, <ast.Name object at 0x7da1b16bfb50>]]]]
return[name[result]]
|
keyword[def] identifier[rpc_get_usages] ( identifier[self] , identifier[filename] , identifier[source] , identifier[offset] ):
literal[string]
identifier[line] , identifier[column] = identifier[pos_to_linecol] ( identifier[source] , identifier[offset] )
identifier[uses] = identifier[run_with_debug] ( identifier[jedi] , literal[string] ,
identifier[source] = identifier[source] , identifier[line] = identifier[line] , identifier[column] = identifier[column] ,
identifier[path] = identifier[filename] , identifier[encoding] = literal[string] )
keyword[if] identifier[uses] keyword[is] keyword[None] :
keyword[return] keyword[None]
identifier[result] =[]
keyword[for] identifier[use] keyword[in] identifier[uses] :
keyword[if] identifier[use] . identifier[module_path] == identifier[filename] :
identifier[offset] = identifier[linecol_to_pos] ( identifier[source] , identifier[use] . identifier[line] , identifier[use] . identifier[column] )
keyword[elif] identifier[use] . identifier[module_path] keyword[is] keyword[not] keyword[None] :
keyword[with] identifier[open] ( identifier[use] . identifier[module_path] ) keyword[as] identifier[f] :
identifier[text] = identifier[f] . identifier[read] ()
identifier[offset] = identifier[linecol_to_pos] ( identifier[text] , identifier[use] . identifier[line] , identifier[use] . identifier[column] )
identifier[result] . identifier[append] ({ literal[string] : identifier[use] . identifier[name] ,
literal[string] : identifier[use] . identifier[module_path] ,
literal[string] : identifier[offset] })
keyword[return] identifier[result]
|
def rpc_get_usages(self, filename, source, offset):
"""Return the uses of the symbol at offset.
Returns a list of occurrences of the symbol, as dicts with the
fields name, filename, and offset.
"""
(line, column) = pos_to_linecol(source, offset)
uses = run_with_debug(jedi, 'usages', source=source, line=line, column=column, path=filename, encoding='utf-8')
if uses is None:
return None # depends on [control=['if'], data=[]]
result = []
for use in uses:
if use.module_path == filename:
offset = linecol_to_pos(source, use.line, use.column) # depends on [control=['if'], data=[]]
elif use.module_path is not None:
with open(use.module_path) as f:
text = f.read() # depends on [control=['with'], data=['f']]
offset = linecol_to_pos(text, use.line, use.column) # depends on [control=['if'], data=[]]
result.append({'name': use.name, 'filename': use.module_path, 'offset': offset}) # depends on [control=['for'], data=['use']]
return result
|
def get_map_location(self):
"""Get the location of the player, converted to world coordinates.
:return: a tuple (x, y, z).
"""
map_data = self.get_map()
(bounds_e, bounds_n), (bounds_w, bounds_s) = map_data["continent_rect"]
(map_e, map_n), (map_w, map_s) = map_data["map_rect"]
assert bounds_w < bounds_e
assert bounds_n < bounds_s
assert map_w < map_e
assert map_n < map_s
meters_to_inches = 39.3701
x, y, z = self.fAvatarPosition
map_x = bounds_w + ((x * meters_to_inches - map_w) /
(map_e - map_w) * (bounds_e - bounds_w))
map_y = bounds_n + ((-z * meters_to_inches - map_n) /
(map_s - map_n) * (bounds_s - bounds_n))
map_z = y * meters_to_inches
return map_x, map_y, map_z
|
def function[get_map_location, parameter[self]]:
constant[Get the location of the player, converted to world coordinates.
:return: a tuple (x, y, z).
]
variable[map_data] assign[=] call[name[self].get_map, parameter[]]
<ast.Tuple object at 0x7da204347040> assign[=] call[name[map_data]][constant[continent_rect]]
<ast.Tuple object at 0x7da2043476a0> assign[=] call[name[map_data]][constant[map_rect]]
assert[compare[name[bounds_w] less[<] name[bounds_e]]]
assert[compare[name[bounds_n] less[<] name[bounds_s]]]
assert[compare[name[map_w] less[<] name[map_e]]]
assert[compare[name[map_n] less[<] name[map_s]]]
variable[meters_to_inches] assign[=] constant[39.3701]
<ast.Tuple object at 0x7da204347160> assign[=] name[self].fAvatarPosition
variable[map_x] assign[=] binary_operation[name[bounds_w] + binary_operation[binary_operation[binary_operation[binary_operation[name[x] * name[meters_to_inches]] - name[map_w]] / binary_operation[name[map_e] - name[map_w]]] * binary_operation[name[bounds_e] - name[bounds_w]]]]
variable[map_y] assign[=] binary_operation[name[bounds_n] + binary_operation[binary_operation[binary_operation[binary_operation[<ast.UnaryOp object at 0x7da204346740> * name[meters_to_inches]] - name[map_n]] / binary_operation[name[map_s] - name[map_n]]] * binary_operation[name[bounds_s] - name[bounds_n]]]]
variable[map_z] assign[=] binary_operation[name[y] * name[meters_to_inches]]
return[tuple[[<ast.Name object at 0x7da2043440a0>, <ast.Name object at 0x7da2043469b0>, <ast.Name object at 0x7da2043463e0>]]]
|
keyword[def] identifier[get_map_location] ( identifier[self] ):
literal[string]
identifier[map_data] = identifier[self] . identifier[get_map] ()
( identifier[bounds_e] , identifier[bounds_n] ),( identifier[bounds_w] , identifier[bounds_s] )= identifier[map_data] [ literal[string] ]
( identifier[map_e] , identifier[map_n] ),( identifier[map_w] , identifier[map_s] )= identifier[map_data] [ literal[string] ]
keyword[assert] identifier[bounds_w] < identifier[bounds_e]
keyword[assert] identifier[bounds_n] < identifier[bounds_s]
keyword[assert] identifier[map_w] < identifier[map_e]
keyword[assert] identifier[map_n] < identifier[map_s]
identifier[meters_to_inches] = literal[int]
identifier[x] , identifier[y] , identifier[z] = identifier[self] . identifier[fAvatarPosition]
identifier[map_x] = identifier[bounds_w] +(( identifier[x] * identifier[meters_to_inches] - identifier[map_w] )/
( identifier[map_e] - identifier[map_w] )*( identifier[bounds_e] - identifier[bounds_w] ))
identifier[map_y] = identifier[bounds_n] +((- identifier[z] * identifier[meters_to_inches] - identifier[map_n] )/
( identifier[map_s] - identifier[map_n] )*( identifier[bounds_s] - identifier[bounds_n] ))
identifier[map_z] = identifier[y] * identifier[meters_to_inches]
keyword[return] identifier[map_x] , identifier[map_y] , identifier[map_z]
|
def get_map_location(self):
"""Get the location of the player, converted to world coordinates.
:return: a tuple (x, y, z).
"""
map_data = self.get_map()
((bounds_e, bounds_n), (bounds_w, bounds_s)) = map_data['continent_rect']
((map_e, map_n), (map_w, map_s)) = map_data['map_rect']
assert bounds_w < bounds_e
assert bounds_n < bounds_s
assert map_w < map_e
assert map_n < map_s
meters_to_inches = 39.3701
(x, y, z) = self.fAvatarPosition
map_x = bounds_w + (x * meters_to_inches - map_w) / (map_e - map_w) * (bounds_e - bounds_w)
map_y = bounds_n + (-z * meters_to_inches - map_n) / (map_s - map_n) * (bounds_s - bounds_n)
map_z = y * meters_to_inches
return (map_x, map_y, map_z)
|
def com_google_fonts_check_monospace(ttFont, glyph_metrics_stats):
"""Checking correctness of monospaced metadata.
There are various metadata in the OpenType spec to specify if
a font is monospaced or not. If the font is not trully monospaced,
then no monospaced metadata should be set (as sometimes
they mistakenly are...)
Monospace fonts must:
* post.isFixedWidth "Set to 0 if the font is proportionally spaced,
non-zero if the font is not proportionally spaced (monospaced)"
www.microsoft.com/typography/otspec/post.htm
* hhea.advanceWidthMax must be correct, meaning no glyph's
width value is greater.
www.microsoft.com/typography/otspec/hhea.htm
* OS/2.panose.bProportion must be set to 9 (monospace). Spec says:
"The PANOSE definition contains ten digits each of which currently
describes up to sixteen variations. Windows uses bFamilyType,
bSerifStyle and bProportion in the font mapper to determine
family type. It also uses bProportion to determine if the font
is monospaced."
www.microsoft.com/typography/otspec/os2.htm#pan
monotypecom-test.monotype.de/services/pan2
* OS/2.xAverageWidth must be set accurately.
"OS/2.xAverageWidth IS used when rendering monospaced fonts,
at least by Windows GDI"
http://typedrawers.com/discussion/comment/15397/#Comment_15397
Also we should report an error for glyphs not of average width
"""
from fontbakery.constants import (IsFixedWidth,
PANOSE_Proportion)
failed = False
# Note: These values are read from the dict here only to
# reduce the max line length in the check implementation below:
seems_monospaced = glyph_metrics_stats["seems_monospaced"]
most_common_width = glyph_metrics_stats["most_common_width"]
width_max = glyph_metrics_stats['width_max']
if ttFont['hhea'].advanceWidthMax != width_max:
failed = True
yield FAIL, Message("bad-advanceWidthMax",
("Value of hhea.advanceWidthMax"
" should be set to {} but got"
" {} instead."
"").format(width_max, ttFont['hhea'].advanceWidthMax))
if seems_monospaced:
if ttFont['post'].isFixedPitch == IsFixedWidth.NOT_MONOSPACED:
failed = True
yield FAIL, Message("mono-bad-post-isFixedPitch",
("On monospaced fonts, the value of"
" post.isFixedPitch must be set to a non-zero value"
" (meaning 'fixed width monospaced'),"
" but got {} instead."
"").format(ttFont['post'].isFixedPitch))
if ttFont['OS/2'].panose.bProportion != PANOSE_Proportion.MONOSPACED:
failed = True
yield FAIL, Message("mono-bad-panose-proportion",
("On monospaced fonts, the value of"
" OS/2.panose.bProportion must be set to {}"
" (proportion: monospaced), but got"
" {} instead."
"").format(PANOSE_Proportion.MONOSPACED,
ttFont['OS/2'].panose.bProportion))
num_glyphs = len(ttFont['glyf'].glyphs)
unusually_spaced_glyphs = [
g for g in ttFont['glyf'].glyphs
if g not in ['.notdef', '.null', 'NULL'] and
ttFont['hmtx'].metrics[g][0] != most_common_width
]
outliers_ratio = float(len(unusually_spaced_glyphs)) / num_glyphs
if outliers_ratio > 0:
failed = True
yield WARN, Message("mono-outliers",
("Font is monospaced but {} glyphs"
" ({}%) have a different width."
" You should check the widths of:"
" {}").format(
len(unusually_spaced_glyphs),
100.0 * outliers_ratio, unusually_spaced_glyphs))
if not failed:
yield PASS, Message("mono-good", ("Font is monospaced and all"
" related metadata look good."))
else:
# it is a non-monospaced font, so lets make sure
# that all monospace-related metadata is properly unset.
if ttFont['post'].isFixedPitch != IsFixedWidth.NOT_MONOSPACED:
failed = True
yield FAIL, Message("bad-post-isFixedPitch",
("On non-monospaced fonts, the"
" post.isFixedPitch value must be set to {}"
" (not monospaced), but got {} instead."
"").format(IsFixedWidth.NOT_MONOSPACED,
ttFont['post'].isFixedPitch))
if ttFont['OS/2'].panose.bProportion == PANOSE_Proportion.MONOSPACED:
failed = True
yield FAIL, Message("bad-panose-proportion",
("On non-monospaced fonts, the"
" OS/2.panose.bProportion value can be set to "
" any value except 9 (proportion: monospaced)"
" which is the bad value we got in this font."))
if not failed:
yield PASS, Message("good", ("Font is not monospaced and"
" all related metadata look good."))
|
def function[com_google_fonts_check_monospace, parameter[ttFont, glyph_metrics_stats]]:
constant[Checking correctness of monospaced metadata.
There are various metadata in the OpenType spec to specify if
a font is monospaced or not. If the font is not trully monospaced,
then no monospaced metadata should be set (as sometimes
they mistakenly are...)
Monospace fonts must:
* post.isFixedWidth "Set to 0 if the font is proportionally spaced,
non-zero if the font is not proportionally spaced (monospaced)"
www.microsoft.com/typography/otspec/post.htm
* hhea.advanceWidthMax must be correct, meaning no glyph's
width value is greater.
www.microsoft.com/typography/otspec/hhea.htm
* OS/2.panose.bProportion must be set to 9 (monospace). Spec says:
"The PANOSE definition contains ten digits each of which currently
describes up to sixteen variations. Windows uses bFamilyType,
bSerifStyle and bProportion in the font mapper to determine
family type. It also uses bProportion to determine if the font
is monospaced."
www.microsoft.com/typography/otspec/os2.htm#pan
monotypecom-test.monotype.de/services/pan2
* OS/2.xAverageWidth must be set accurately.
"OS/2.xAverageWidth IS used when rendering monospaced fonts,
at least by Windows GDI"
http://typedrawers.com/discussion/comment/15397/#Comment_15397
Also we should report an error for glyphs not of average width
]
from relative_module[fontbakery.constants] import module[IsFixedWidth], module[PANOSE_Proportion]
variable[failed] assign[=] constant[False]
variable[seems_monospaced] assign[=] call[name[glyph_metrics_stats]][constant[seems_monospaced]]
variable[most_common_width] assign[=] call[name[glyph_metrics_stats]][constant[most_common_width]]
variable[width_max] assign[=] call[name[glyph_metrics_stats]][constant[width_max]]
if compare[call[name[ttFont]][constant[hhea]].advanceWidthMax not_equal[!=] name[width_max]] begin[:]
variable[failed] assign[=] constant[True]
<ast.Yield object at 0x7da1b1213190>
if name[seems_monospaced] begin[:]
if compare[call[name[ttFont]][constant[post]].isFixedPitch equal[==] name[IsFixedWidth].NOT_MONOSPACED] begin[:]
variable[failed] assign[=] constant[True]
<ast.Yield object at 0x7da1b1212c20>
if compare[call[name[ttFont]][constant[OS/2]].panose.bProportion not_equal[!=] name[PANOSE_Proportion].MONOSPACED] begin[:]
variable[failed] assign[=] constant[True]
<ast.Yield object at 0x7da1b1212860>
variable[num_glyphs] assign[=] call[name[len], parameter[call[name[ttFont]][constant[glyf]].glyphs]]
variable[unusually_spaced_glyphs] assign[=] <ast.ListComp object at 0x7da1b12127d0>
variable[outliers_ratio] assign[=] binary_operation[call[name[float], parameter[call[name[len], parameter[name[unusually_spaced_glyphs]]]]] / name[num_glyphs]]
if compare[name[outliers_ratio] greater[>] constant[0]] begin[:]
variable[failed] assign[=] constant[True]
<ast.Yield object at 0x7da1b12504c0>
if <ast.UnaryOp object at 0x7da1b1251060> begin[:]
<ast.Yield object at 0x7da1b1252a10>
|
keyword[def] identifier[com_google_fonts_check_monospace] ( identifier[ttFont] , identifier[glyph_metrics_stats] ):
literal[string]
keyword[from] identifier[fontbakery] . identifier[constants] keyword[import] ( identifier[IsFixedWidth] ,
identifier[PANOSE_Proportion] )
identifier[failed] = keyword[False]
identifier[seems_monospaced] = identifier[glyph_metrics_stats] [ literal[string] ]
identifier[most_common_width] = identifier[glyph_metrics_stats] [ literal[string] ]
identifier[width_max] = identifier[glyph_metrics_stats] [ literal[string] ]
keyword[if] identifier[ttFont] [ literal[string] ]. identifier[advanceWidthMax] != identifier[width_max] :
identifier[failed] = keyword[True]
keyword[yield] identifier[FAIL] , identifier[Message] ( literal[string] ,
( literal[string]
literal[string]
literal[string]
literal[string] ). identifier[format] ( identifier[width_max] , identifier[ttFont] [ literal[string] ]. identifier[advanceWidthMax] ))
keyword[if] identifier[seems_monospaced] :
keyword[if] identifier[ttFont] [ literal[string] ]. identifier[isFixedPitch] == identifier[IsFixedWidth] . identifier[NOT_MONOSPACED] :
identifier[failed] = keyword[True]
keyword[yield] identifier[FAIL] , identifier[Message] ( literal[string] ,
( literal[string]
literal[string]
literal[string]
literal[string]
literal[string] ). identifier[format] ( identifier[ttFont] [ literal[string] ]. identifier[isFixedPitch] ))
keyword[if] identifier[ttFont] [ literal[string] ]. identifier[panose] . identifier[bProportion] != identifier[PANOSE_Proportion] . identifier[MONOSPACED] :
identifier[failed] = keyword[True]
keyword[yield] identifier[FAIL] , identifier[Message] ( literal[string] ,
( literal[string]
literal[string]
literal[string]
literal[string]
literal[string] ). identifier[format] ( identifier[PANOSE_Proportion] . identifier[MONOSPACED] ,
identifier[ttFont] [ literal[string] ]. identifier[panose] . identifier[bProportion] ))
identifier[num_glyphs] = identifier[len] ( identifier[ttFont] [ literal[string] ]. identifier[glyphs] )
identifier[unusually_spaced_glyphs] =[
identifier[g] keyword[for] identifier[g] keyword[in] identifier[ttFont] [ literal[string] ]. identifier[glyphs]
keyword[if] identifier[g] keyword[not] keyword[in] [ literal[string] , literal[string] , literal[string] ] keyword[and]
identifier[ttFont] [ literal[string] ]. identifier[metrics] [ identifier[g] ][ literal[int] ]!= identifier[most_common_width]
]
identifier[outliers_ratio] = identifier[float] ( identifier[len] ( identifier[unusually_spaced_glyphs] ))/ identifier[num_glyphs]
keyword[if] identifier[outliers_ratio] > literal[int] :
identifier[failed] = keyword[True]
keyword[yield] identifier[WARN] , identifier[Message] ( literal[string] ,
( literal[string]
literal[string]
literal[string]
literal[string] ). identifier[format] (
identifier[len] ( identifier[unusually_spaced_glyphs] ),
literal[int] * identifier[outliers_ratio] , identifier[unusually_spaced_glyphs] ))
keyword[if] keyword[not] identifier[failed] :
keyword[yield] identifier[PASS] , identifier[Message] ( literal[string] ,( literal[string]
literal[string] ))
keyword[else] :
keyword[if] identifier[ttFont] [ literal[string] ]. identifier[isFixedPitch] != identifier[IsFixedWidth] . identifier[NOT_MONOSPACED] :
identifier[failed] = keyword[True]
keyword[yield] identifier[FAIL] , identifier[Message] ( literal[string] ,
( literal[string]
literal[string]
literal[string]
literal[string] ). identifier[format] ( identifier[IsFixedWidth] . identifier[NOT_MONOSPACED] ,
identifier[ttFont] [ literal[string] ]. identifier[isFixedPitch] ))
keyword[if] identifier[ttFont] [ literal[string] ]. identifier[panose] . identifier[bProportion] == identifier[PANOSE_Proportion] . identifier[MONOSPACED] :
identifier[failed] = keyword[True]
keyword[yield] identifier[FAIL] , identifier[Message] ( literal[string] ,
( literal[string]
literal[string]
literal[string]
literal[string] ))
keyword[if] keyword[not] identifier[failed] :
keyword[yield] identifier[PASS] , identifier[Message] ( literal[string] ,( literal[string]
literal[string] ))
|
def com_google_fonts_check_monospace(ttFont, glyph_metrics_stats):
"""Checking correctness of monospaced metadata.
There are various metadata in the OpenType spec to specify if
a font is monospaced or not. If the font is not trully monospaced,
then no monospaced metadata should be set (as sometimes
they mistakenly are...)
Monospace fonts must:
* post.isFixedWidth "Set to 0 if the font is proportionally spaced,
non-zero if the font is not proportionally spaced (monospaced)"
www.microsoft.com/typography/otspec/post.htm
* hhea.advanceWidthMax must be correct, meaning no glyph's
width value is greater.
www.microsoft.com/typography/otspec/hhea.htm
* OS/2.panose.bProportion must be set to 9 (monospace). Spec says:
"The PANOSE definition contains ten digits each of which currently
describes up to sixteen variations. Windows uses bFamilyType,
bSerifStyle and bProportion in the font mapper to determine
family type. It also uses bProportion to determine if the font
is monospaced."
www.microsoft.com/typography/otspec/os2.htm#pan
monotypecom-test.monotype.de/services/pan2
* OS/2.xAverageWidth must be set accurately.
"OS/2.xAverageWidth IS used when rendering monospaced fonts,
at least by Windows GDI"
http://typedrawers.com/discussion/comment/15397/#Comment_15397
Also we should report an error for glyphs not of average width
"""
from fontbakery.constants import IsFixedWidth, PANOSE_Proportion
failed = False
# Note: These values are read from the dict here only to
# reduce the max line length in the check implementation below:
seems_monospaced = glyph_metrics_stats['seems_monospaced']
most_common_width = glyph_metrics_stats['most_common_width']
width_max = glyph_metrics_stats['width_max']
if ttFont['hhea'].advanceWidthMax != width_max:
failed = True
yield (FAIL, Message('bad-advanceWidthMax', 'Value of hhea.advanceWidthMax should be set to {} but got {} instead.'.format(width_max, ttFont['hhea'].advanceWidthMax))) # depends on [control=['if'], data=['width_max']]
if seems_monospaced:
if ttFont['post'].isFixedPitch == IsFixedWidth.NOT_MONOSPACED:
failed = True
yield (FAIL, Message('mono-bad-post-isFixedPitch', "On monospaced fonts, the value of post.isFixedPitch must be set to a non-zero value (meaning 'fixed width monospaced'), but got {} instead.".format(ttFont['post'].isFixedPitch))) # depends on [control=['if'], data=[]]
if ttFont['OS/2'].panose.bProportion != PANOSE_Proportion.MONOSPACED:
failed = True
yield (FAIL, Message('mono-bad-panose-proportion', 'On monospaced fonts, the value of OS/2.panose.bProportion must be set to {} (proportion: monospaced), but got {} instead.'.format(PANOSE_Proportion.MONOSPACED, ttFont['OS/2'].panose.bProportion))) # depends on [control=['if'], data=[]]
num_glyphs = len(ttFont['glyf'].glyphs)
unusually_spaced_glyphs = [g for g in ttFont['glyf'].glyphs if g not in ['.notdef', '.null', 'NULL'] and ttFont['hmtx'].metrics[g][0] != most_common_width]
outliers_ratio = float(len(unusually_spaced_glyphs)) / num_glyphs
if outliers_ratio > 0:
failed = True
yield (WARN, Message('mono-outliers', 'Font is monospaced but {} glyphs ({}%) have a different width. You should check the widths of: {}'.format(len(unusually_spaced_glyphs), 100.0 * outliers_ratio, unusually_spaced_glyphs))) # depends on [control=['if'], data=['outliers_ratio']]
if not failed:
yield (PASS, Message('mono-good', 'Font is monospaced and all related metadata look good.')) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# it is a non-monospaced font, so lets make sure
# that all monospace-related metadata is properly unset.
if ttFont['post'].isFixedPitch != IsFixedWidth.NOT_MONOSPACED:
failed = True
yield (FAIL, Message('bad-post-isFixedPitch', 'On non-monospaced fonts, the post.isFixedPitch value must be set to {} (not monospaced), but got {} instead.'.format(IsFixedWidth.NOT_MONOSPACED, ttFont['post'].isFixedPitch))) # depends on [control=['if'], data=[]]
if ttFont['OS/2'].panose.bProportion == PANOSE_Proportion.MONOSPACED:
failed = True
yield (FAIL, Message('bad-panose-proportion', 'On non-monospaced fonts, the OS/2.panose.bProportion value can be set to any value except 9 (proportion: monospaced) which is the bad value we got in this font.')) # depends on [control=['if'], data=[]]
if not failed:
yield (PASS, Message('good', 'Font is not monospaced and all related metadata look good.')) # depends on [control=['if'], data=[]]
|
def load_vi_open_in_editor_bindings():
"""
Pressing 'v' in navigation mode will open the buffer in an external editor.
"""
registry = Registry()
navigation_mode = ViNavigationMode()
registry.add_binding('v', filter=navigation_mode)(
get_by_name('edit-and-execute-command'))
return registry
|
def function[load_vi_open_in_editor_bindings, parameter[]]:
constant[
Pressing 'v' in navigation mode will open the buffer in an external editor.
]
variable[registry] assign[=] call[name[Registry], parameter[]]
variable[navigation_mode] assign[=] call[name[ViNavigationMode], parameter[]]
call[call[name[registry].add_binding, parameter[constant[v]]], parameter[call[name[get_by_name], parameter[constant[edit-and-execute-command]]]]]
return[name[registry]]
|
keyword[def] identifier[load_vi_open_in_editor_bindings] ():
literal[string]
identifier[registry] = identifier[Registry] ()
identifier[navigation_mode] = identifier[ViNavigationMode] ()
identifier[registry] . identifier[add_binding] ( literal[string] , identifier[filter] = identifier[navigation_mode] )(
identifier[get_by_name] ( literal[string] ))
keyword[return] identifier[registry]
|
def load_vi_open_in_editor_bindings():
"""
Pressing 'v' in navigation mode will open the buffer in an external editor.
"""
registry = Registry()
navigation_mode = ViNavigationMode()
registry.add_binding('v', filter=navigation_mode)(get_by_name('edit-and-execute-command'))
return registry
|
def classview_for(self, action='view'):
"""
Return the classview that contains the viewhandler for the specified action
"""
app = current_app._get_current_object()
return self.view_for_endpoints[app][action][0](self)
|
def function[classview_for, parameter[self, action]]:
constant[
Return the classview that contains the viewhandler for the specified action
]
variable[app] assign[=] call[name[current_app]._get_current_object, parameter[]]
return[call[call[call[call[name[self].view_for_endpoints][name[app]]][name[action]]][constant[0]], parameter[name[self]]]]
|
keyword[def] identifier[classview_for] ( identifier[self] , identifier[action] = literal[string] ):
literal[string]
identifier[app] = identifier[current_app] . identifier[_get_current_object] ()
keyword[return] identifier[self] . identifier[view_for_endpoints] [ identifier[app] ][ identifier[action] ][ literal[int] ]( identifier[self] )
|
def classview_for(self, action='view'):
"""
Return the classview that contains the viewhandler for the specified action
"""
app = current_app._get_current_object()
return self.view_for_endpoints[app][action][0](self)
|
def make_regression(func, n_samples=100, n_features=1, bias=0.0, noise=0.0,
random_state=None):
"""
Make dataset for a regression problem.
Examples
--------
>>> f = lambda x: 0.5*x + np.sin(2*x)
>>> X, y = make_regression(f, bias=.5, noise=1., random_state=1)
>>> X.shape
(100, 1)
>>> y.shape
(100,)
>>> X[:5].round(2)
array([[ 1.62],
[-0.61],
[-0.53],
[-1.07],
[ 0.87]])
>>> y[:5].round(2)
array([ 0.76, 0.48, -0.23, -0.28, 0.83])
"""
generator = check_random_state(random_state)
X = generator.randn(n_samples, n_features)
# unpack the columns of X
y = func(*X.T) + bias
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
return X, y
|
def function[make_regression, parameter[func, n_samples, n_features, bias, noise, random_state]]:
constant[
Make dataset for a regression problem.
Examples
--------
>>> f = lambda x: 0.5*x + np.sin(2*x)
>>> X, y = make_regression(f, bias=.5, noise=1., random_state=1)
>>> X.shape
(100, 1)
>>> y.shape
(100,)
>>> X[:5].round(2)
array([[ 1.62],
[-0.61],
[-0.53],
[-1.07],
[ 0.87]])
>>> y[:5].round(2)
array([ 0.76, 0.48, -0.23, -0.28, 0.83])
]
variable[generator] assign[=] call[name[check_random_state], parameter[name[random_state]]]
variable[X] assign[=] call[name[generator].randn, parameter[name[n_samples], name[n_features]]]
variable[y] assign[=] binary_operation[call[name[func], parameter[<ast.Starred object at 0x7da1b255c130>]] + name[bias]]
if compare[name[noise] greater[>] constant[0.0]] begin[:]
<ast.AugAssign object at 0x7da1b255ddb0>
return[tuple[[<ast.Name object at 0x7da1b255d6f0>, <ast.Name object at 0x7da1b255d1e0>]]]
|
keyword[def] identifier[make_regression] ( identifier[func] , identifier[n_samples] = literal[int] , identifier[n_features] = literal[int] , identifier[bias] = literal[int] , identifier[noise] = literal[int] ,
identifier[random_state] = keyword[None] ):
literal[string]
identifier[generator] = identifier[check_random_state] ( identifier[random_state] )
identifier[X] = identifier[generator] . identifier[randn] ( identifier[n_samples] , identifier[n_features] )
identifier[y] = identifier[func] (* identifier[X] . identifier[T] )+ identifier[bias]
keyword[if] identifier[noise] > literal[int] :
identifier[y] += identifier[generator] . identifier[normal] ( identifier[scale] = identifier[noise] , identifier[size] = identifier[y] . identifier[shape] )
keyword[return] identifier[X] , identifier[y]
|
def make_regression(func, n_samples=100, n_features=1, bias=0.0, noise=0.0, random_state=None):
"""
Make dataset for a regression problem.
Examples
--------
>>> f = lambda x: 0.5*x + np.sin(2*x)
>>> X, y = make_regression(f, bias=.5, noise=1., random_state=1)
>>> X.shape
(100, 1)
>>> y.shape
(100,)
>>> X[:5].round(2)
array([[ 1.62],
[-0.61],
[-0.53],
[-1.07],
[ 0.87]])
>>> y[:5].round(2)
array([ 0.76, 0.48, -0.23, -0.28, 0.83])
"""
generator = check_random_state(random_state)
X = generator.randn(n_samples, n_features)
# unpack the columns of X
y = func(*X.T) + bias
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape) # depends on [control=['if'], data=['noise']]
return (X, y)
|
def newDoc(version):
"""Creates a new XML document """
ret = libxml2mod.xmlNewDoc(version)
if ret is None:raise treeError('xmlNewDoc() failed')
return xmlDoc(_obj=ret)
|
def function[newDoc, parameter[version]]:
constant[Creates a new XML document ]
variable[ret] assign[=] call[name[libxml2mod].xmlNewDoc, parameter[name[version]]]
if compare[name[ret] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b1f61f60>
return[call[name[xmlDoc], parameter[]]]
|
keyword[def] identifier[newDoc] ( identifier[version] ):
literal[string]
identifier[ret] = identifier[libxml2mod] . identifier[xmlNewDoc] ( identifier[version] )
keyword[if] identifier[ret] keyword[is] keyword[None] : keyword[raise] identifier[treeError] ( literal[string] )
keyword[return] identifier[xmlDoc] ( identifier[_obj] = identifier[ret] )
|
def newDoc(version):
"""Creates a new XML document """
ret = libxml2mod.xmlNewDoc(version)
if ret is None:
raise treeError('xmlNewDoc() failed') # depends on [control=['if'], data=[]]
return xmlDoc(_obj=ret)
|
def build_xlsx_response(wb, title="report"):
""" Take a workbook and return a xlsx file response """
title = generate_filename(title, '.xlsx')
myfile = BytesIO()
myfile.write(save_virtual_workbook(wb))
response = HttpResponse(
myfile.getvalue(),
content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=%s' % title
response['Content-Length'] = myfile.tell()
return response
|
def function[build_xlsx_response, parameter[wb, title]]:
constant[ Take a workbook and return a xlsx file response ]
variable[title] assign[=] call[name[generate_filename], parameter[name[title], constant[.xlsx]]]
variable[myfile] assign[=] call[name[BytesIO], parameter[]]
call[name[myfile].write, parameter[call[name[save_virtual_workbook], parameter[name[wb]]]]]
variable[response] assign[=] call[name[HttpResponse], parameter[call[name[myfile].getvalue, parameter[]]]]
call[name[response]][constant[Content-Disposition]] assign[=] binary_operation[constant[attachment; filename=%s] <ast.Mod object at 0x7da2590d6920> name[title]]
call[name[response]][constant[Content-Length]] assign[=] call[name[myfile].tell, parameter[]]
return[name[response]]
|
keyword[def] identifier[build_xlsx_response] ( identifier[wb] , identifier[title] = literal[string] ):
literal[string]
identifier[title] = identifier[generate_filename] ( identifier[title] , literal[string] )
identifier[myfile] = identifier[BytesIO] ()
identifier[myfile] . identifier[write] ( identifier[save_virtual_workbook] ( identifier[wb] ))
identifier[response] = identifier[HttpResponse] (
identifier[myfile] . identifier[getvalue] (),
identifier[content_type] = literal[string] )
identifier[response] [ literal[string] ]= literal[string] % identifier[title]
identifier[response] [ literal[string] ]= identifier[myfile] . identifier[tell] ()
keyword[return] identifier[response]
|
def build_xlsx_response(wb, title='report'):
""" Take a workbook and return a xlsx file response """
title = generate_filename(title, '.xlsx')
myfile = BytesIO()
myfile.write(save_virtual_workbook(wb))
response = HttpResponse(myfile.getvalue(), content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=%s' % title
response['Content-Length'] = myfile.tell()
return response
|
def _process_image_msg(self, msg):
""" Process an image message and return a numpy array with the image data
Returns
-------
:obj:`numpy.ndarray` containing the image in the image message
Raises
------
CvBridgeError
If the bridge is not able to convert the image
"""
encoding = msg.encoding
try:
image = self._bridge.imgmsg_to_cv2(msg, encoding)
except CvBridgeError as e:
rospy.logerr(e)
return image
|
def function[_process_image_msg, parameter[self, msg]]:
constant[ Process an image message and return a numpy array with the image data
Returns
-------
:obj:`numpy.ndarray` containing the image in the image message
Raises
------
CvBridgeError
If the bridge is not able to convert the image
]
variable[encoding] assign[=] name[msg].encoding
<ast.Try object at 0x7da1b05769b0>
return[name[image]]
|
keyword[def] identifier[_process_image_msg] ( identifier[self] , identifier[msg] ):
literal[string]
identifier[encoding] = identifier[msg] . identifier[encoding]
keyword[try] :
identifier[image] = identifier[self] . identifier[_bridge] . identifier[imgmsg_to_cv2] ( identifier[msg] , identifier[encoding] )
keyword[except] identifier[CvBridgeError] keyword[as] identifier[e] :
identifier[rospy] . identifier[logerr] ( identifier[e] )
keyword[return] identifier[image]
|
def _process_image_msg(self, msg):
""" Process an image message and return a numpy array with the image data
Returns
-------
:obj:`numpy.ndarray` containing the image in the image message
Raises
------
CvBridgeError
If the bridge is not able to convert the image
"""
encoding = msg.encoding
try:
image = self._bridge.imgmsg_to_cv2(msg, encoding) # depends on [control=['try'], data=[]]
except CvBridgeError as e:
rospy.logerr(e) # depends on [control=['except'], data=['e']]
return image
|
def vector_norm(data, axis=None, out=None):
"""Return length, i.e. Euclidean norm, of ndarray along axis.
>>> v = np.random.random(3)
>>> n = vector_norm(v)
>>> np.allclose(n, np.linalg.norm(v))
True
>>> v = np.random.rand(6, 5, 3)
>>> n = vector_norm(v, axis=-1)
>>> np.allclose(n, np.sqrt(np.sum(v*v, axis=2)))
True
>>> n = vector_norm(v, axis=1)
>>> np.allclose(n, np.sqrt(np.sum(v*v, axis=1)))
True
>>> v = np.random.rand(5, 4, 3)
>>> n = np.empty((5, 3))
>>> vector_norm(v, axis=1, out=n)
>>> np.allclose(n, np.sqrt(np.sum(v*v, axis=1)))
True
>>> vector_norm([])
0.0
>>> vector_norm([1])
1.0
"""
data = np.array(data, dtype=np.float64, copy=True)
if out is None:
if data.ndim == 1:
return math.sqrt(np.dot(data, data))
data *= data
out = np.atleast_1d(np.sum(data, axis=axis))
np.sqrt(out, out)
return out
else:
data *= data
np.sum(data, axis=axis, out=out)
np.sqrt(out, out)
|
def function[vector_norm, parameter[data, axis, out]]:
constant[Return length, i.e. Euclidean norm, of ndarray along axis.
>>> v = np.random.random(3)
>>> n = vector_norm(v)
>>> np.allclose(n, np.linalg.norm(v))
True
>>> v = np.random.rand(6, 5, 3)
>>> n = vector_norm(v, axis=-1)
>>> np.allclose(n, np.sqrt(np.sum(v*v, axis=2)))
True
>>> n = vector_norm(v, axis=1)
>>> np.allclose(n, np.sqrt(np.sum(v*v, axis=1)))
True
>>> v = np.random.rand(5, 4, 3)
>>> n = np.empty((5, 3))
>>> vector_norm(v, axis=1, out=n)
>>> np.allclose(n, np.sqrt(np.sum(v*v, axis=1)))
True
>>> vector_norm([])
0.0
>>> vector_norm([1])
1.0
]
variable[data] assign[=] call[name[np].array, parameter[name[data]]]
if compare[name[out] is constant[None]] begin[:]
if compare[name[data].ndim equal[==] constant[1]] begin[:]
return[call[name[math].sqrt, parameter[call[name[np].dot, parameter[name[data], name[data]]]]]]
<ast.AugAssign object at 0x7da18bc71600>
variable[out] assign[=] call[name[np].atleast_1d, parameter[call[name[np].sum, parameter[name[data]]]]]
call[name[np].sqrt, parameter[name[out], name[out]]]
return[name[out]]
|
keyword[def] identifier[vector_norm] ( identifier[data] , identifier[axis] = keyword[None] , identifier[out] = keyword[None] ):
literal[string]
identifier[data] = identifier[np] . identifier[array] ( identifier[data] , identifier[dtype] = identifier[np] . identifier[float64] , identifier[copy] = keyword[True] )
keyword[if] identifier[out] keyword[is] keyword[None] :
keyword[if] identifier[data] . identifier[ndim] == literal[int] :
keyword[return] identifier[math] . identifier[sqrt] ( identifier[np] . identifier[dot] ( identifier[data] , identifier[data] ))
identifier[data] *= identifier[data]
identifier[out] = identifier[np] . identifier[atleast_1d] ( identifier[np] . identifier[sum] ( identifier[data] , identifier[axis] = identifier[axis] ))
identifier[np] . identifier[sqrt] ( identifier[out] , identifier[out] )
keyword[return] identifier[out]
keyword[else] :
identifier[data] *= identifier[data]
identifier[np] . identifier[sum] ( identifier[data] , identifier[axis] = identifier[axis] , identifier[out] = identifier[out] )
identifier[np] . identifier[sqrt] ( identifier[out] , identifier[out] )
|
def vector_norm(data, axis=None, out=None):
"""Return length, i.e. Euclidean norm, of ndarray along axis.
>>> v = np.random.random(3)
>>> n = vector_norm(v)
>>> np.allclose(n, np.linalg.norm(v))
True
>>> v = np.random.rand(6, 5, 3)
>>> n = vector_norm(v, axis=-1)
>>> np.allclose(n, np.sqrt(np.sum(v*v, axis=2)))
True
>>> n = vector_norm(v, axis=1)
>>> np.allclose(n, np.sqrt(np.sum(v*v, axis=1)))
True
>>> v = np.random.rand(5, 4, 3)
>>> n = np.empty((5, 3))
>>> vector_norm(v, axis=1, out=n)
>>> np.allclose(n, np.sqrt(np.sum(v*v, axis=1)))
True
>>> vector_norm([])
0.0
>>> vector_norm([1])
1.0
"""
data = np.array(data, dtype=np.float64, copy=True)
if out is None:
if data.ndim == 1:
return math.sqrt(np.dot(data, data)) # depends on [control=['if'], data=[]]
data *= data
out = np.atleast_1d(np.sum(data, axis=axis))
np.sqrt(out, out)
return out # depends on [control=['if'], data=['out']]
else:
data *= data
np.sum(data, axis=axis, out=out)
np.sqrt(out, out)
|
def repeat(self, n=2, oscillate=False, callback=None):
"""
Returns a list that is a repetition of the given list.
When oscillate is True,
moves from the end back to the beginning,
and then from the beginning to the end, and so on.
"""
colorlist = ColorList()
colors = ColorList.copy(self)
for i in _range(n):
colorlist.extend(colors)
if oscillate: colors = colors.reverse()
if callback: colors = callback(colors)
return colorlist
|
def function[repeat, parameter[self, n, oscillate, callback]]:
constant[
Returns a list that is a repetition of the given list.
When oscillate is True,
moves from the end back to the beginning,
and then from the beginning to the end, and so on.
]
variable[colorlist] assign[=] call[name[ColorList], parameter[]]
variable[colors] assign[=] call[name[ColorList].copy, parameter[name[self]]]
for taget[name[i]] in starred[call[name[_range], parameter[name[n]]]] begin[:]
call[name[colorlist].extend, parameter[name[colors]]]
if name[oscillate] begin[:]
variable[colors] assign[=] call[name[colors].reverse, parameter[]]
if name[callback] begin[:]
variable[colors] assign[=] call[name[callback], parameter[name[colors]]]
return[name[colorlist]]
|
keyword[def] identifier[repeat] ( identifier[self] , identifier[n] = literal[int] , identifier[oscillate] = keyword[False] , identifier[callback] = keyword[None] ):
literal[string]
identifier[colorlist] = identifier[ColorList] ()
identifier[colors] = identifier[ColorList] . identifier[copy] ( identifier[self] )
keyword[for] identifier[i] keyword[in] identifier[_range] ( identifier[n] ):
identifier[colorlist] . identifier[extend] ( identifier[colors] )
keyword[if] identifier[oscillate] : identifier[colors] = identifier[colors] . identifier[reverse] ()
keyword[if] identifier[callback] : identifier[colors] = identifier[callback] ( identifier[colors] )
keyword[return] identifier[colorlist]
|
def repeat(self, n=2, oscillate=False, callback=None):
"""
Returns a list that is a repetition of the given list.
When oscillate is True,
moves from the end back to the beginning,
and then from the beginning to the end, and so on.
"""
colorlist = ColorList()
colors = ColorList.copy(self)
for i in _range(n):
colorlist.extend(colors)
if oscillate:
colors = colors.reverse() # depends on [control=['if'], data=[]]
if callback:
colors = callback(colors) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return colorlist
|
def write(self, attr_name, prefix=None):
'''Write attribute's value to a file.
:param str attr_name:
Attribute's name to be logged
:param str prefix:
Optional. Attribute's name that is prefixed to logging message,
defaults to ``None``.
:returns: message written to file
:rtype: str
'''
if self._folder is None:
return
separator = "\t"
attr = getattr(self.obj, attr_name)
if hasattr(attr, '__iter__'):
msg = separator.join([str(e) for e in attr])
else:
msg = str(attr)
if prefix is not None:
msg = "{}\t{}".format(getattr(self.obj, prefix), msg)
path = self.get_file(attr_name)
with open(path, 'a') as f:
f.write("{}\n".format(msg))
return msg
|
def function[write, parameter[self, attr_name, prefix]]:
constant[Write attribute's value to a file.
:param str attr_name:
Attribute's name to be logged
:param str prefix:
Optional. Attribute's name that is prefixed to logging message,
defaults to ``None``.
:returns: message written to file
:rtype: str
]
if compare[name[self]._folder is constant[None]] begin[:]
return[None]
variable[separator] assign[=] constant[ ]
variable[attr] assign[=] call[name[getattr], parameter[name[self].obj, name[attr_name]]]
if call[name[hasattr], parameter[name[attr], constant[__iter__]]] begin[:]
variable[msg] assign[=] call[name[separator].join, parameter[<ast.ListComp object at 0x7da204564970>]]
if compare[name[prefix] is_not constant[None]] begin[:]
variable[msg] assign[=] call[constant[{} {}].format, parameter[call[name[getattr], parameter[name[self].obj, name[prefix]]], name[msg]]]
variable[path] assign[=] call[name[self].get_file, parameter[name[attr_name]]]
with call[name[open], parameter[name[path], constant[a]]] begin[:]
call[name[f].write, parameter[call[constant[{}
].format, parameter[name[msg]]]]]
return[name[msg]]
|
keyword[def] identifier[write] ( identifier[self] , identifier[attr_name] , identifier[prefix] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[_folder] keyword[is] keyword[None] :
keyword[return]
identifier[separator] = literal[string]
identifier[attr] = identifier[getattr] ( identifier[self] . identifier[obj] , identifier[attr_name] )
keyword[if] identifier[hasattr] ( identifier[attr] , literal[string] ):
identifier[msg] = identifier[separator] . identifier[join] ([ identifier[str] ( identifier[e] ) keyword[for] identifier[e] keyword[in] identifier[attr] ])
keyword[else] :
identifier[msg] = identifier[str] ( identifier[attr] )
keyword[if] identifier[prefix] keyword[is] keyword[not] keyword[None] :
identifier[msg] = literal[string] . identifier[format] ( identifier[getattr] ( identifier[self] . identifier[obj] , identifier[prefix] ), identifier[msg] )
identifier[path] = identifier[self] . identifier[get_file] ( identifier[attr_name] )
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( literal[string] . identifier[format] ( identifier[msg] ))
keyword[return] identifier[msg]
|
def write(self, attr_name, prefix=None):
"""Write attribute's value to a file.
:param str attr_name:
Attribute's name to be logged
:param str prefix:
Optional. Attribute's name that is prefixed to logging message,
defaults to ``None``.
:returns: message written to file
:rtype: str
"""
if self._folder is None:
return # depends on [control=['if'], data=[]]
separator = '\t'
attr = getattr(self.obj, attr_name)
if hasattr(attr, '__iter__'):
msg = separator.join([str(e) for e in attr]) # depends on [control=['if'], data=[]]
else:
msg = str(attr)
if prefix is not None:
msg = '{}\t{}'.format(getattr(self.obj, prefix), msg) # depends on [control=['if'], data=['prefix']]
path = self.get_file(attr_name)
with open(path, 'a') as f:
f.write('{}\n'.format(msg)) # depends on [control=['with'], data=['f']]
return msg
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.