code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def update(self, d):
    """
    Adapt the network weights from the target of the most recent input.

    **Args:**

    * `d` : target (float or 1-dimensional array).
      Size depends on number of MLP outputs.

    **Returns:**

    * `e` : error used for update (float or 1-dimensional array).
      Size corresponds to the size of input `d`.
    """
    # output-layer error; keep a pristine copy to hand back to the caller
    e = d - self.y
    error = np.copy(e)

    # weight correction for the output layer; the bias column is dropped
    # from the copy of the weights that is propagated backwards
    if self.outputs == 1:
        delta = self.mu * e * self.x
        back_w = np.copy(self.w)[1:]
    else:
        delta = self.mu * np.outer(e, self.x)
        back_w = np.copy(self.w)[:, 1:]
    self.w += delta

    # walk the hidden layers from last to first, letting each one
    # transform the weights/error pair for the layer before it
    for layer in reversed(self.layers):
        back_w, e = layer.update(back_w, e)
    return error
constant[
This function make update according provided target
and the last used input vector.
**Args:**
* `d` : target (float or 1-dimensional array).
Size depends on number of MLP outputs.
**Returns:**
* `e` : error used for update (float or 1-diemnsional array).
Size correspond to size of input `d`.
]
variable[e] assign[=] binary_operation[name[d] - name[self].y]
variable[error] assign[=] call[name[np].copy, parameter[name[e]]]
if compare[name[self].outputs equal[==] constant[1]] begin[:]
variable[dw] assign[=] binary_operation[binary_operation[name[self].mu * name[e]] * name[self].x]
variable[w] assign[=] call[call[name[np].copy, parameter[name[self].w]]][<ast.Slice object at 0x7da1b0ebf970>]
<ast.AugAssign object at 0x7da1b0efc100>
for taget[name[l]] in starred[call[name[reversed], parameter[name[self].layers]]] begin[:]
<ast.Tuple object at 0x7da1b0efc370> assign[=] call[name[l].update, parameter[name[w], name[e]]]
return[name[error]] | keyword[def] identifier[update] ( identifier[self] , identifier[d] ):
literal[string]
identifier[e] = identifier[d] - identifier[self] . identifier[y]
identifier[error] = identifier[np] . identifier[copy] ( identifier[e] )
keyword[if] identifier[self] . identifier[outputs] == literal[int] :
identifier[dw] = identifier[self] . identifier[mu] * identifier[e] * identifier[self] . identifier[x]
identifier[w] = identifier[np] . identifier[copy] ( identifier[self] . identifier[w] )[ literal[int] :]
keyword[else] :
identifier[dw] = identifier[self] . identifier[mu] * identifier[np] . identifier[outer] ( identifier[e] , identifier[self] . identifier[x] )
identifier[w] = identifier[np] . identifier[copy] ( identifier[self] . identifier[w] )[:, literal[int] :]
identifier[self] . identifier[w] += identifier[dw]
keyword[for] identifier[l] keyword[in] identifier[reversed] ( identifier[self] . identifier[layers] ):
identifier[w] , identifier[e] = identifier[l] . identifier[update] ( identifier[w] , identifier[e] )
keyword[return] identifier[error] | def update(self, d):
"""
This function make update according provided target
and the last used input vector.
**Args:**
* `d` : target (float or 1-dimensional array).
Size depends on number of MLP outputs.
**Returns:**
* `e` : error used for update (float or 1-diemnsional array).
Size correspond to size of input `d`.
"""
# update output layer
e = d - self.y
error = np.copy(e)
if self.outputs == 1:
dw = self.mu * e * self.x
w = np.copy(self.w)[1:] # depends on [control=['if'], data=[]]
else:
dw = self.mu * np.outer(e, self.x)
w = np.copy(self.w)[:, 1:]
self.w += dw
# update hidden layers
for l in reversed(self.layers):
(w, e) = l.update(w, e) # depends on [control=['for'], data=['l']]
return error |
def delete_selected(self, request, queryset):
    '''
    The real delete function always evaluated either from the action, or from the instance delete link.

    On a confirmed POST it logs and deletes every object in ``queryset``,
    reports success to the user and returns ``None`` (so the change list is
    redisplayed).  Otherwise it renders the delete-confirmation page as a
    ``TemplateResponse``.
    '''
    opts = self.model._meta
    app_label = opts.app_label
    # Populate deletable_objects, a data structure of all related objects that
    # will also be deleted.
    deletable_objects, model_count, perms_needed, protected = self.get_deleted_objects(request, queryset)
    # The user has already confirmed the deletion.
    # Do the deletion and return a None to display the change list view again.
    if request.POST.get('post') and not protected:
        if perms_needed or protected:
            raise PermissionDenied
        n = queryset.count()
        if n:
            for obj in queryset:
                obj_display = force_text(obj)
                # record each deletion in the admin log before it happens
                self.log_deletion(request, obj, obj_display)
            queryset.delete()
            self.message_user(request, _("Successfully deleted %(count)d %(items)s.") % {
                "count": n, "items": model_ngettext(self.opts, n)
            }, messages.SUCCESS)
        # Return None to display the change list page again.
        return None
    # Build a human-readable description of what is about to be deleted,
    # singular or plural depending on the queryset size.
    sz = queryset.count()
    if sz == 1:
        objects_name = _('%(verbose_name)s "%(object)s"') % {
            'verbose_name': force_text(opts.verbose_name),
            'object': queryset[0]
        }
    else:
        objects_name = _('%(count)s %(verbose_name_plural)s') % {
            'verbose_name_plural': force_text(opts.verbose_name_plural),
            'count': sz
        }
    if perms_needed or protected:
        title = _("Cannot delete %(name)s") % {"name": objects_name}
    else:
        title = _("Are you sure?")
    # Template context for the confirmation page.
    context = dict(
        self.admin_site.each_context(request),
        title=title,
        objects_name=objects_name,
        deletable_objects=[deletable_objects],
        model_count=dict(model_count).items(),
        queryset=queryset,
        perms_lacking=perms_needed,
        protected=protected,
        opts=opts,
        action_checkbox_name=helpers.ACTION_CHECKBOX_NAME,
        media=self.media,
    )
    request.current_app = self.admin_site.name
    # Display the confirmation page
    return TemplateResponse(request, self.delete_selected_confirmation_template or [
        "admin/%s/%s/delete_selected_confirmation.html" % (app_label, opts.model_name),
        "admin/%s/delete_selected_confirmation.html" % app_label,
        "admin/delete_selected_confirmation.html"
], context) | def function[delete_selected, parameter[self, request, queryset]]:
constant[
The real delete function always evaluated either from the action, or from the instance delete link
]
variable[opts] assign[=] name[self].model._meta
variable[app_label] assign[=] name[opts].app_label
<ast.Tuple object at 0x7da20c7caa70> assign[=] call[name[self].get_deleted_objects, parameter[name[request], name[queryset]]]
if <ast.BoolOp object at 0x7da20c7c8790> begin[:]
if <ast.BoolOp object at 0x7da18f7238e0> begin[:]
<ast.Raise object at 0x7da18f813fd0>
variable[n] assign[=] call[name[queryset].count, parameter[]]
if name[n] begin[:]
for taget[name[obj]] in starred[name[queryset]] begin[:]
variable[obj_display] assign[=] call[name[force_text], parameter[name[obj]]]
call[name[self].log_deletion, parameter[name[request], name[obj], name[obj_display]]]
call[name[queryset].delete, parameter[]]
call[name[self].message_user, parameter[name[request], binary_operation[call[name[_], parameter[constant[Successfully deleted %(count)d %(items)s.]]] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da18dc05420>, <ast.Constant object at 0x7da18dc079d0>], [<ast.Name object at 0x7da20c7c85e0>, <ast.Call object at 0x7da20c7c9f00>]]], name[messages].SUCCESS]]
return[constant[None]]
variable[sz] assign[=] call[name[queryset].count, parameter[]]
if compare[name[sz] equal[==] constant[1]] begin[:]
variable[objects_name] assign[=] binary_operation[call[name[_], parameter[constant[%(verbose_name)s "%(object)s"]]] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da20c7cbe50>, <ast.Constant object at 0x7da20c7ca7d0>], [<ast.Call object at 0x7da20c7c9db0>, <ast.Subscript object at 0x7da20c7c8040>]]]
if <ast.BoolOp object at 0x7da20c7cbbb0> begin[:]
variable[title] assign[=] binary_operation[call[name[_], parameter[constant[Cannot delete %(name)s]]] <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da20c7cacb0>], [<ast.Name object at 0x7da20c7c8bb0>]]]
variable[context] assign[=] call[name[dict], parameter[call[name[self].admin_site.each_context, parameter[name[request]]]]]
name[request].current_app assign[=] name[self].admin_site.name
return[call[name[TemplateResponse], parameter[name[request], <ast.BoolOp object at 0x7da20c7c9a80>, name[context]]]] | keyword[def] identifier[delete_selected] ( identifier[self] , identifier[request] , identifier[queryset] ):
literal[string]
identifier[opts] = identifier[self] . identifier[model] . identifier[_meta]
identifier[app_label] = identifier[opts] . identifier[app_label]
identifier[deletable_objects] , identifier[model_count] , identifier[perms_needed] , identifier[protected] = identifier[self] . identifier[get_deleted_objects] ( identifier[request] , identifier[queryset] )
keyword[if] identifier[request] . identifier[POST] . identifier[get] ( literal[string] ) keyword[and] keyword[not] identifier[protected] :
keyword[if] identifier[perms_needed] keyword[or] identifier[protected] :
keyword[raise] identifier[PermissionDenied]
identifier[n] = identifier[queryset] . identifier[count] ()
keyword[if] identifier[n] :
keyword[for] identifier[obj] keyword[in] identifier[queryset] :
identifier[obj_display] = identifier[force_text] ( identifier[obj] )
identifier[self] . identifier[log_deletion] ( identifier[request] , identifier[obj] , identifier[obj_display] )
identifier[queryset] . identifier[delete] ()
identifier[self] . identifier[message_user] ( identifier[request] , identifier[_] ( literal[string] )%{
literal[string] : identifier[n] , literal[string] : identifier[model_ngettext] ( identifier[self] . identifier[opts] , identifier[n] )
}, identifier[messages] . identifier[SUCCESS] )
keyword[return] keyword[None]
identifier[sz] = identifier[queryset] . identifier[count] ()
keyword[if] identifier[sz] == literal[int] :
identifier[objects_name] = identifier[_] ( literal[string] )%{
literal[string] : identifier[force_text] ( identifier[opts] . identifier[verbose_name] ),
literal[string] : identifier[queryset] [ literal[int] ]
}
keyword[else] :
identifier[objects_name] = identifier[_] ( literal[string] )%{
literal[string] : identifier[force_text] ( identifier[opts] . identifier[verbose_name_plural] ),
literal[string] : identifier[sz]
}
keyword[if] identifier[perms_needed] keyword[or] identifier[protected] :
identifier[title] = identifier[_] ( literal[string] )%{ literal[string] : identifier[objects_name] }
keyword[else] :
identifier[title] = identifier[_] ( literal[string] )
identifier[context] = identifier[dict] (
identifier[self] . identifier[admin_site] . identifier[each_context] ( identifier[request] ),
identifier[title] = identifier[title] ,
identifier[objects_name] = identifier[objects_name] ,
identifier[deletable_objects] =[ identifier[deletable_objects] ],
identifier[model_count] = identifier[dict] ( identifier[model_count] ). identifier[items] (),
identifier[queryset] = identifier[queryset] ,
identifier[perms_lacking] = identifier[perms_needed] ,
identifier[protected] = identifier[protected] ,
identifier[opts] = identifier[opts] ,
identifier[action_checkbox_name] = identifier[helpers] . identifier[ACTION_CHECKBOX_NAME] ,
identifier[media] = identifier[self] . identifier[media] ,
)
identifier[request] . identifier[current_app] = identifier[self] . identifier[admin_site] . identifier[name]
keyword[return] identifier[TemplateResponse] ( identifier[request] , identifier[self] . identifier[delete_selected_confirmation_template] keyword[or] [
literal[string] %( identifier[app_label] , identifier[opts] . identifier[model_name] ),
literal[string] % identifier[app_label] ,
literal[string]
], identifier[context] ) | def delete_selected(self, request, queryset):
"""
The real delete function always evaluated either from the action, or from the instance delete link
"""
opts = self.model._meta
app_label = opts.app_label
# Populate deletable_objects, a data structure of all related objects that
# will also be deleted.
(deletable_objects, model_count, perms_needed, protected) = self.get_deleted_objects(request, queryset)
# The user has already confirmed the deletion.
# Do the deletion and return a None to display the change list view again.
if request.POST.get('post') and (not protected):
if perms_needed or protected:
raise PermissionDenied # depends on [control=['if'], data=[]]
n = queryset.count()
if n:
for obj in queryset:
obj_display = force_text(obj)
self.log_deletion(request, obj, obj_display) # depends on [control=['for'], data=['obj']]
queryset.delete()
self.message_user(request, _('Successfully deleted %(count)d %(items)s.') % {'count': n, 'items': model_ngettext(self.opts, n)}, messages.SUCCESS) # depends on [control=['if'], data=[]]
# Return None to display the change list page again.
return None # depends on [control=['if'], data=[]]
sz = queryset.count()
if sz == 1:
objects_name = _('%(verbose_name)s "%(object)s"') % {'verbose_name': force_text(opts.verbose_name), 'object': queryset[0]} # depends on [control=['if'], data=[]]
else:
objects_name = _('%(count)s %(verbose_name_plural)s') % {'verbose_name_plural': force_text(opts.verbose_name_plural), 'count': sz}
if perms_needed or protected:
title = _('Cannot delete %(name)s') % {'name': objects_name} # depends on [control=['if'], data=[]]
else:
title = _('Are you sure?')
context = dict(self.admin_site.each_context(request), title=title, objects_name=objects_name, deletable_objects=[deletable_objects], model_count=dict(model_count).items(), queryset=queryset, perms_lacking=perms_needed, protected=protected, opts=opts, action_checkbox_name=helpers.ACTION_CHECKBOX_NAME, media=self.media)
request.current_app = self.admin_site.name
# Display the confirmation page
return TemplateResponse(request, self.delete_selected_confirmation_template or ['admin/%s/%s/delete_selected_confirmation.html' % (app_label, opts.model_name), 'admin/%s/delete_selected_confirmation.html' % app_label, 'admin/delete_selected_confirmation.html'], context) |
def impact_check_range(func):
    """Decorator to check the range of interpolated kicks"""
    @wraps(func)
    def impact_wrapper(*args, **kwargs):
        instance, angle = args[0], args[1]
        if isinstance(angle, numpy.ndarray):
            # evaluate only the entries that fall inside the valid range,
            # everything else stays at zero
            result = numpy.zeros(len(angle))
            inside = (angle < instance._deltaAngleTrackImpact) * (angle > 0.)
            result[inside] = func(instance, angle[inside])
            return result
        # scalar input: out-of-range values map to a zero kick
        if angle >= instance._deltaAngleTrackImpact or angle <= 0.:
            return 0.
        return func(*args, **kwargs)
    return impact_wrapper
constant[Decorator to check the range of interpolated kicks]
def function[impact_wrapper, parameter[]]:
if call[name[isinstance], parameter[call[name[args]][constant[1]], name[numpy].ndarray]] begin[:]
variable[out] assign[=] call[name[numpy].zeros, parameter[call[name[len], parameter[call[name[args]][constant[1]]]]]]
variable[goodIndx] assign[=] binary_operation[compare[call[name[args]][constant[1]] less[<] call[name[args]][constant[0]]._deltaAngleTrackImpact] * compare[call[name[args]][constant[1]] greater[>] constant[0.0]]]
call[name[out]][name[goodIndx]] assign[=] call[name[func], parameter[call[name[args]][constant[0]], call[call[name[args]][constant[1]]][name[goodIndx]]]]
return[name[out]]
return[name[impact_wrapper]] | keyword[def] identifier[impact_check_range] ( identifier[func] ):
literal[string]
@ identifier[wraps] ( identifier[func] )
keyword[def] identifier[impact_wrapper] (* identifier[args] ,** identifier[kwargs] ):
keyword[if] identifier[isinstance] ( identifier[args] [ literal[int] ], identifier[numpy] . identifier[ndarray] ):
identifier[out] = identifier[numpy] . identifier[zeros] ( identifier[len] ( identifier[args] [ literal[int] ]))
identifier[goodIndx] =( identifier[args] [ literal[int] ]< identifier[args] [ literal[int] ]. identifier[_deltaAngleTrackImpact] )*( identifier[args] [ literal[int] ]> literal[int] )
identifier[out] [ identifier[goodIndx] ]= identifier[func] ( identifier[args] [ literal[int] ], identifier[args] [ literal[int] ][ identifier[goodIndx] ])
keyword[return] identifier[out]
keyword[elif] identifier[args] [ literal[int] ]>= identifier[args] [ literal[int] ]. identifier[_deltaAngleTrackImpact] keyword[or] identifier[args] [ literal[int] ]<= literal[int] :
keyword[return] literal[int]
keyword[else] :
keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[impact_wrapper] | def impact_check_range(func):
"""Decorator to check the range of interpolated kicks"""
@wraps(func)
def impact_wrapper(*args, **kwargs):
if isinstance(args[1], numpy.ndarray):
out = numpy.zeros(len(args[1]))
goodIndx = (args[1] < args[0]._deltaAngleTrackImpact) * (args[1] > 0.0)
out[goodIndx] = func(args[0], args[1][goodIndx])
return out # depends on [control=['if'], data=[]]
elif args[1] >= args[0]._deltaAngleTrackImpact or args[1] <= 0.0:
return 0.0 # depends on [control=['if'], data=[]]
else:
return func(*args, **kwargs)
return impact_wrapper |
def update_lists(self):
    """Update packages list and ChangeLog.txt file after
    upgrade distribution
    """
    # ask for confirmation before touching the repositories
    prompt = "{0}Update the package lists ?{1}".format(
        self.meta.color["GREEN"], self.meta.color["ENDC"])
    print(prompt)
    print("=" * 79)
    if self.msg.answer() in ("y", "Y"):
        # refresh the 'slack' repository metadata
        Update().repository(["slack"])
constant[Update packages list and ChangeLog.txt file after
upgrade distribution
]
call[name[print], parameter[call[constant[{0}Update the package lists ?{1}].format, parameter[call[name[self].meta.color][constant[GREEN]], call[name[self].meta.color][constant[ENDC]]]]]]
call[name[print], parameter[binary_operation[constant[=] * constant[79]]]]
if compare[call[name[self].msg.answer, parameter[]] in list[[<ast.Constant object at 0x7da20c6c7040>, <ast.Constant object at 0x7da20c6c4df0>]]] begin[:]
call[call[name[Update], parameter[]].repository, parameter[list[[<ast.Constant object at 0x7da20c6c5bd0>]]]] | keyword[def] identifier[update_lists] ( identifier[self] ):
literal[string]
identifier[print] ( literal[string] . identifier[format] (
identifier[self] . identifier[meta] . identifier[color] [ literal[string] ], identifier[self] . identifier[meta] . identifier[color] [ literal[string] ]))
identifier[print] ( literal[string] * literal[int] )
keyword[if] identifier[self] . identifier[msg] . identifier[answer] () keyword[in] [ literal[string] , literal[string] ]:
identifier[Update] (). identifier[repository] ([ literal[string] ]) | def update_lists(self):
"""Update packages list and ChangeLog.txt file after
upgrade distribution
"""
print('{0}Update the package lists ?{1}'.format(self.meta.color['GREEN'], self.meta.color['ENDC']))
print('=' * 79)
if self.msg.answer() in ['y', 'Y']:
Update().repository(['slack']) # depends on [control=['if'], data=[]] |
def set_autoreload_params(self, scan_interval=None, ignore_modules=None):
    """Sets autoreload related parameters.

    :param int scan_interval: Seconds. Monitor Python modules' modification
        times to trigger reload.

        .. warning:: Use only in development.

    :param list|st|unicode ignore_modules: Ignore the specified module during
        auto-reload scan.
    """
    # route both options through the common setter, preserving call order
    options = (
        ('py-auto-reload', scan_interval, {}),
        ('py-auto-reload-ignore', ignore_modules, {'multi': True}),
    )
    for key, value, extra in options:
        self._set(key, value, **extra)
    return self._section
constant[Sets autoreload related parameters.
:param int scan_interval: Seconds. Monitor Python modules' modification times to trigger reload.
.. warning:: Use only in development.
:param list|st|unicode ignore_modules: Ignore the specified module during auto-reload scan.
]
call[name[self]._set, parameter[constant[py-auto-reload], name[scan_interval]]]
call[name[self]._set, parameter[constant[py-auto-reload-ignore], name[ignore_modules]]]
return[name[self]._section] | keyword[def] identifier[set_autoreload_params] ( identifier[self] , identifier[scan_interval] = keyword[None] , identifier[ignore_modules] = keyword[None] ):
literal[string]
identifier[self] . identifier[_set] ( literal[string] , identifier[scan_interval] )
identifier[self] . identifier[_set] ( literal[string] , identifier[ignore_modules] , identifier[multi] = keyword[True] )
keyword[return] identifier[self] . identifier[_section] | def set_autoreload_params(self, scan_interval=None, ignore_modules=None):
"""Sets autoreload related parameters.
:param int scan_interval: Seconds. Monitor Python modules' modification times to trigger reload.
.. warning:: Use only in development.
:param list|st|unicode ignore_modules: Ignore the specified module during auto-reload scan.
"""
self._set('py-auto-reload', scan_interval)
self._set('py-auto-reload-ignore', ignore_modules, multi=True)
return self._section |
def get_results_generator(future_list, timeout=None, sort_by_completed=False):
    """Return as a generator of tasks order by completed sequence."""
    try:
        # python2 not support yield from
        if sort_by_completed:
            # yield results as futures finish, honouring the timeout
            futures_iter = as_completed(future_list, timeout=timeout)
        else:
            # preserve the submission order
            futures_iter = iter(future_list)
        for future in futures_iter:
            yield future.x
    except TimeoutError:
        # stop yielding once the deadline passes
        return
constant[Return as a generator of tasks order by completed sequence.]
<ast.Try object at 0x7da20e956650> | keyword[def] identifier[get_results_generator] ( identifier[future_list] , identifier[timeout] = keyword[None] , identifier[sort_by_completed] = keyword[False] ):
literal[string]
keyword[try] :
keyword[if] identifier[sort_by_completed] :
keyword[for] identifier[future] keyword[in] identifier[as_completed] ( identifier[future_list] , identifier[timeout] = identifier[timeout] ):
keyword[yield] identifier[future] . identifier[x]
keyword[else] :
keyword[for] identifier[future] keyword[in] identifier[future_list] :
keyword[yield] identifier[future] . identifier[x]
keyword[except] identifier[TimeoutError] :
keyword[return] | def get_results_generator(future_list, timeout=None, sort_by_completed=False):
"""Return as a generator of tasks order by completed sequence."""
try:
# python2 not support yield from
if sort_by_completed:
for future in as_completed(future_list, timeout=timeout):
yield future.x # depends on [control=['for'], data=['future']] # depends on [control=['if'], data=[]]
else:
for future in future_list:
yield future.x # depends on [control=['for'], data=['future']] # depends on [control=['try'], data=[]]
except TimeoutError:
return # depends on [control=['except'], data=[]] |
def method2png(output, mx, raw=False):
    """
    Export a method to a png file.

    :param output: output filename
    :type output: string
    :param mx: specify the MethodAnalysis object
    :type mx: :class:`MethodAnalysis` object
    :param raw: use directly a dot raw buffer
    :type raw: string
    """
    # when no raw dot buffer is supplied, render one from the method first
    buff = raw if raw else method2dot(mx)
    method2format(output, "png", mx, buff)
constant[
Export method to a png file format
:param output: output filename
:type output: string
:param mx: specify the MethodAnalysis object
:type mx: :class:`MethodAnalysis` object
:param raw: use directly a dot raw buffer
:type raw: string
]
variable[buff] assign[=] name[raw]
if <ast.UnaryOp object at 0x7da204345f00> begin[:]
variable[buff] assign[=] call[name[method2dot], parameter[name[mx]]]
call[name[method2format], parameter[name[output], constant[png], name[mx], name[buff]]] | keyword[def] identifier[method2png] ( identifier[output] , identifier[mx] , identifier[raw] = keyword[False] ):
literal[string]
identifier[buff] = identifier[raw]
keyword[if] keyword[not] identifier[raw] :
identifier[buff] = identifier[method2dot] ( identifier[mx] )
identifier[method2format] ( identifier[output] , literal[string] , identifier[mx] , identifier[buff] ) | def method2png(output, mx, raw=False):
"""
Export method to a png file format
:param output: output filename
:type output: string
:param mx: specify the MethodAnalysis object
:type mx: :class:`MethodAnalysis` object
:param raw: use directly a dot raw buffer
:type raw: string
"""
buff = raw
if not raw:
buff = method2dot(mx) # depends on [control=['if'], data=[]]
method2format(output, 'png', mx, buff) |
def _build_backend():
"""Find and load the build backend"""
ep = os.environ['PEP517_BUILD_BACKEND']
mod_path, _, obj_path = ep.partition(':')
try:
obj = import_module(mod_path)
except ImportError:
raise BackendUnavailable
if obj_path:
for path_part in obj_path.split('.'):
obj = getattr(obj, path_part)
return obj | def function[_build_backend, parameter[]]:
constant[Find and load the build backend]
variable[ep] assign[=] call[name[os].environ][constant[PEP517_BUILD_BACKEND]]
<ast.Tuple object at 0x7da2054a6c80> assign[=] call[name[ep].partition, parameter[constant[:]]]
<ast.Try object at 0x7da2054a4c10>
if name[obj_path] begin[:]
for taget[name[path_part]] in starred[call[name[obj_path].split, parameter[constant[.]]]] begin[:]
variable[obj] assign[=] call[name[getattr], parameter[name[obj], name[path_part]]]
return[name[obj]] | keyword[def] identifier[_build_backend] ():
literal[string]
identifier[ep] = identifier[os] . identifier[environ] [ literal[string] ]
identifier[mod_path] , identifier[_] , identifier[obj_path] = identifier[ep] . identifier[partition] ( literal[string] )
keyword[try] :
identifier[obj] = identifier[import_module] ( identifier[mod_path] )
keyword[except] identifier[ImportError] :
keyword[raise] identifier[BackendUnavailable]
keyword[if] identifier[obj_path] :
keyword[for] identifier[path_part] keyword[in] identifier[obj_path] . identifier[split] ( literal[string] ):
identifier[obj] = identifier[getattr] ( identifier[obj] , identifier[path_part] )
keyword[return] identifier[obj] | def _build_backend():
"""Find and load the build backend"""
ep = os.environ['PEP517_BUILD_BACKEND']
(mod_path, _, obj_path) = ep.partition(':')
try:
obj = import_module(mod_path) # depends on [control=['try'], data=[]]
except ImportError:
raise BackendUnavailable # depends on [control=['except'], data=[]]
if obj_path:
for path_part in obj_path.split('.'):
obj = getattr(obj, path_part) # depends on [control=['for'], data=['path_part']] # depends on [control=['if'], data=[]]
return obj |
def connect(self, addr=None, userinfo=None):
    """Initiate a connection request to the device."""
    if _debug: ProxyClientService._debug("connect addr=%r", addr)

    # use the explicit address when given, otherwise fall back to the
    # address already configured on the service
    if addr:
        self.address = addr
    else:
        addr = self.address

    # stash the credentials when they were supplied
    if userinfo:
        self.userinfo = userinfo

    # create the connection state and register it with the multiplexer
    connection = ConnectionState(addr)
    self.multiplexer.connections[addr] = connection
    if _debug: ProxyClientService._debug("    - conn: %r", connection)

    # bind the connection to this service; it only counts as connected
    # once the acknowledgement comes back
    connection.service = self

    # hold outgoing BSLPDUs until the acknowledgement arrives
    connection.pendingBSLPDU = []

    # ask the peer for the proxy service
    request = ServiceRequest(PROXY_SERVICE_ID)
    request.pduDestination = addr
    self.service_request(request)

    return connection
constant[Initiate a connection request to the device.]
if name[_debug] begin[:]
call[name[ProxyClientService]._debug, parameter[constant[connect addr=%r], name[addr]]]
if name[addr] begin[:]
name[self].address assign[=] name[addr]
if name[userinfo] begin[:]
name[self].userinfo assign[=] name[userinfo]
variable[conn] assign[=] call[name[ConnectionState], parameter[name[addr]]]
call[name[self].multiplexer.connections][name[addr]] assign[=] name[conn]
if name[_debug] begin[:]
call[name[ProxyClientService]._debug, parameter[constant[ - conn: %r], name[conn]]]
name[conn].service assign[=] name[self]
name[conn].pendingBSLPDU assign[=] list[[]]
variable[request] assign[=] call[name[ServiceRequest], parameter[name[PROXY_SERVICE_ID]]]
name[request].pduDestination assign[=] name[addr]
call[name[self].service_request, parameter[name[request]]]
return[name[conn]] | keyword[def] identifier[connect] ( identifier[self] , identifier[addr] = keyword[None] , identifier[userinfo] = keyword[None] ):
literal[string]
keyword[if] identifier[_debug] : identifier[ProxyClientService] . identifier[_debug] ( literal[string] , identifier[addr] )
keyword[if] identifier[addr] :
identifier[self] . identifier[address] = identifier[addr]
keyword[else] :
identifier[addr] = identifier[self] . identifier[address]
keyword[if] identifier[userinfo] :
identifier[self] . identifier[userinfo] = identifier[userinfo]
identifier[conn] = identifier[ConnectionState] ( identifier[addr] )
identifier[self] . identifier[multiplexer] . identifier[connections] [ identifier[addr] ]= identifier[conn]
keyword[if] identifier[_debug] : identifier[ProxyClientService] . identifier[_debug] ( literal[string] , identifier[conn] )
identifier[conn] . identifier[service] = identifier[self]
identifier[conn] . identifier[pendingBSLPDU] =[]
identifier[request] = identifier[ServiceRequest] ( identifier[PROXY_SERVICE_ID] )
identifier[request] . identifier[pduDestination] = identifier[addr]
identifier[self] . identifier[service_request] ( identifier[request] )
keyword[return] identifier[conn] | def connect(self, addr=None, userinfo=None):
"""Initiate a connection request to the device."""
if _debug:
ProxyClientService._debug('connect addr=%r', addr) # depends on [control=['if'], data=[]]
# if the address was provided, use it
if addr:
self.address = addr # depends on [control=['if'], data=[]]
else:
addr = self.address
# if the user was provided, save it
if userinfo:
self.userinfo = userinfo # depends on [control=['if'], data=[]]
# make a connection
conn = ConnectionState(addr)
self.multiplexer.connections[addr] = conn
if _debug:
ProxyClientService._debug(' - conn: %r', conn) # depends on [control=['if'], data=[]]
# associate with this service, but it is not connected until the ack comes back
conn.service = self
# keep a list of pending BSLPDU objects until the ack comes back
conn.pendingBSLPDU = []
# build a service request
request = ServiceRequest(PROXY_SERVICE_ID)
request.pduDestination = addr
# send it
self.service_request(request)
# return the connection object
return conn |
def slicer(self):
    """
    Array slicer object for this tile
    >>> Tile((2,3)).slicer
    (slice(0, 2, None), slice(0, 3, None))
    >>> np.arange(10)[Tile((4,)).slicer]
    array([0, 1, 2, 3])
    """
    # pair each lower bound with its upper bound, one slice per axis
    return tuple(slice(lo, hi) for lo, hi in zip(*self.bounds))
constant[
Array slicer object for this tile
>>> Tile((2,3)).slicer
(slice(0, 2, None), slice(0, 3, None))
>>> np.arange(10)[Tile((4,)).slicer]
array([0, 1, 2, 3])
]
return[call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da18ede4040>]]] | keyword[def] identifier[slicer] ( identifier[self] ):
literal[string]
keyword[return] identifier[tuple] ( identifier[np] . identifier[s_] [ identifier[l] : identifier[r] ] keyword[for] identifier[l] , identifier[r] keyword[in] identifier[zip] (* identifier[self] . identifier[bounds] )) | def slicer(self):
"""
Array slicer object for this tile
>>> Tile((2,3)).slicer
(slice(0, 2, None), slice(0, 3, None))
>>> np.arange(10)[Tile((4,)).slicer]
array([0, 1, 2, 3])
"""
return tuple((np.s_[l:r] for (l, r) in zip(*self.bounds))) |
def ajIrreguliers(self):
    """Load the irregular forms from the data/irregs.la file.

    Each parsed irregular form is indexed in the lemmatiser's ``_irregs``
    map under its de-ramised written form, then registered on its lemma.

    Raises whatever exception ``parse_irreg`` produced, after emitting a
    warning that names the offending line.
    """
    lignes = lignesFichier(self.path("irregs.la"))
    for lin in lignes:
        try:
            irr = self.parse_irreg(lin)
            self.lemmatiseur._irregs[deramise(irr.gr())].append(irr)
        except Exception as E:
            warnings.warn("Erreur au chargement de l'irrégulier\n" + lin + "\n" + str(E))
            # bare `raise` (instead of `raise E`) keeps the original
            # traceback intact for the caller
            raise
    # attach every loaded irregular to its lemma
    for irr in flatten(self.lemmatiseur._irregs.values()):
        irr.lemme().ajIrreg(irr)
constant[ Chargement des formes irrégulières du fichier data/irregs.la
]
variable[lignes] assign[=] call[name[lignesFichier], parameter[call[name[self].path, parameter[constant[irregs.la]]]]]
for taget[name[lin]] in starred[name[lignes]] begin[:]
<ast.Try object at 0x7da204622830>
for taget[name[irr]] in starred[call[name[flatten], parameter[call[name[self].lemmatiseur._irregs.values, parameter[]]]]] begin[:]
call[call[name[irr].lemme, parameter[]].ajIrreg, parameter[name[irr]]] | keyword[def] identifier[ajIrreguliers] ( identifier[self] ):
literal[string]
identifier[lignes] = identifier[lignesFichier] ( identifier[self] . identifier[path] ( literal[string] ))
keyword[for] identifier[lin] keyword[in] identifier[lignes] :
keyword[try] :
identifier[irr] = identifier[self] . identifier[parse_irreg] ( identifier[lin] )
identifier[self] . identifier[lemmatiseur] . identifier[_irregs] [ identifier[deramise] ( identifier[irr] . identifier[gr] ())]. identifier[append] ( identifier[irr] )
keyword[except] identifier[Exception] keyword[as] identifier[E] :
identifier[warnings] . identifier[warn] ( literal[string] + identifier[lin] + literal[string] + identifier[str] ( identifier[E] ))
keyword[raise] identifier[E]
keyword[for] identifier[irr] keyword[in] identifier[flatten] ( identifier[self] . identifier[lemmatiseur] . identifier[_irregs] . identifier[values] ()):
identifier[irr] . identifier[lemme] (). identifier[ajIrreg] ( identifier[irr] ) | def ajIrreguliers(self):
""" Chargement des formes irrégulières du fichier data/irregs.la
"""
lignes = lignesFichier(self.path('irregs.la'))
for lin in lignes:
try:
irr = self.parse_irreg(lin)
self.lemmatiseur._irregs[deramise(irr.gr())].append(irr) # depends on [control=['try'], data=[]]
except Exception as E:
warnings.warn("Erreur au chargement de l'irrégulier\n" + lin + '\n' + str(E))
raise E # depends on [control=['except'], data=['E']] # depends on [control=['for'], data=['lin']]
for irr in flatten(self.lemmatiseur._irregs.values()):
irr.lemme().ajIrreg(irr) # depends on [control=['for'], data=['irr']] |
def generate_classes(outf, msgs):
"""
Generate the implementations of the classes representing MAVLink messages.
"""
print("Generating class definitions")
wrapper = textwrap.TextWrapper(initial_indent="", subsequent_indent="")
outf.write("\nmavlink.messages = {};\n\n");
def field_descriptions(fields):
ret = ""
for f in fields:
ret += " %-18s : %s (%s)\n" % (f.name, f.description.strip(), f.type)
return ret
for m in msgs:
comment = "%s\n\n%s" % (wrapper.fill(m.description.strip()), field_descriptions(m.fields))
selffieldnames = 'self, '
for f in m.fields:
# if f.omit_arg:
# selffieldnames += '%s=%s, ' % (f.name, f.const_value)
#else:
# -- Omitting the code above because it is rarely used (only once?) and would need some special handling
# in javascript. Specifically, inside the method definition, it needs to check for a value then assign
# a default.
selffieldnames += '%s, ' % f.name
selffieldnames = selffieldnames[:-2]
sub = {'NAMELOWER' : m.name.lower(),
'SELFFIELDNAMES' : selffieldnames,
'COMMENT' : comment,
'FIELDNAMES' : ", ".join(m.fieldnames)}
t.write(outf, """
/*
${COMMENT}
*/
""", sub)
# function signature + declaration
outf.write("mavlink.messages.%s = function(" % (m.name.lower()))
if len(m.fields) != 0:
outf.write(", ".join(m.fieldnames))
outf.write(") {")
# body: set message type properties
outf.write("""
this.format = '%s';
this.id = mavlink.MAVLINK_MSG_ID_%s;
this.order_map = %s;
this.crc_extra = %u;
this.name = '%s';
""" % (m.fmtstr, m.name.upper(), m.order_map, m.crc_extra, m.name.upper()))
# body: set own properties
if len(m.fieldnames) != 0:
outf.write(" this.fieldnames = ['%s'];\n" % "', '".join(m.fieldnames))
outf.write("""
this.set(arguments);
}
""")
# inherit methods from the base message class
outf.write("""
mavlink.messages.%s.prototype = new mavlink.message;
""" % m.name.lower())
# Implement the pack() function for this message
outf.write("""
mavlink.messages.%s.prototype.pack = function(mav) {
return mavlink.message.prototype.pack.call(this, mav, this.crc_extra, jspack.Pack(this.format""" % m.name.lower())
if len(m.fields) != 0:
outf.write(", [ this." + ", this.".join(m.ordered_fieldnames) + ']')
outf.write("));\n}\n\n") | def function[generate_classes, parameter[outf, msgs]]:
constant[
Generate the implementations of the classes representing MAVLink messages.
]
call[name[print], parameter[constant[Generating class definitions]]]
variable[wrapper] assign[=] call[name[textwrap].TextWrapper, parameter[]]
call[name[outf].write, parameter[constant[
mavlink.messages = {};
]]]
def function[field_descriptions, parameter[fields]]:
variable[ret] assign[=] constant[]
for taget[name[f]] in starred[name[fields]] begin[:]
<ast.AugAssign object at 0x7da1b16791e0>
return[name[ret]]
for taget[name[m]] in starred[name[msgs]] begin[:]
variable[comment] assign[=] binary_operation[constant[%s
%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b1678100>, <ast.Call object at 0x7da1b1679ed0>]]]
variable[selffieldnames] assign[=] constant[self, ]
for taget[name[f]] in starred[name[m].fields] begin[:]
<ast.AugAssign object at 0x7da1b167b250>
variable[selffieldnames] assign[=] call[name[selffieldnames]][<ast.Slice object at 0x7da1b1678340>]
variable[sub] assign[=] dictionary[[<ast.Constant object at 0x7da1b167aef0>, <ast.Constant object at 0x7da1b16786d0>, <ast.Constant object at 0x7da1b167b130>, <ast.Constant object at 0x7da1b1678790>], [<ast.Call object at 0x7da1b1679ea0>, <ast.Name object at 0x7da1b167b970>, <ast.Name object at 0x7da1b167ad10>, <ast.Call object at 0x7da1b16785b0>]]
call[name[t].write, parameter[name[outf], constant[
/*
${COMMENT}
*/
], name[sub]]]
call[name[outf].write, parameter[binary_operation[constant[mavlink.messages.%s = function(] <ast.Mod object at 0x7da2590d6920> call[name[m].name.lower, parameter[]]]]]
if compare[call[name[len], parameter[name[m].fields]] not_equal[!=] constant[0]] begin[:]
call[name[outf].write, parameter[call[constant[, ].join, parameter[name[m].fieldnames]]]]
call[name[outf].write, parameter[constant[) {]]]
call[name[outf].write, parameter[binary_operation[constant[
this.format = '%s';
this.id = mavlink.MAVLINK_MSG_ID_%s;
this.order_map = %s;
this.crc_extra = %u;
this.name = '%s';
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b167baf0>, <ast.Call object at 0x7da1b167a620>, <ast.Attribute object at 0x7da1b167b430>, <ast.Attribute object at 0x7da1b167ae90>, <ast.Call object at 0x7da1b167b5b0>]]]]]
if compare[call[name[len], parameter[name[m].fieldnames]] not_equal[!=] constant[0]] begin[:]
call[name[outf].write, parameter[binary_operation[constant[ this.fieldnames = ['%s'];
] <ast.Mod object at 0x7da2590d6920> call[constant[', '].join, parameter[name[m].fieldnames]]]]]
call[name[outf].write, parameter[constant[
this.set(arguments);
}
]]]
call[name[outf].write, parameter[binary_operation[constant[
mavlink.messages.%s.prototype = new mavlink.message;
] <ast.Mod object at 0x7da2590d6920> call[name[m].name.lower, parameter[]]]]]
call[name[outf].write, parameter[binary_operation[constant[
mavlink.messages.%s.prototype.pack = function(mav) {
return mavlink.message.prototype.pack.call(this, mav, this.crc_extra, jspack.Pack(this.format] <ast.Mod object at 0x7da2590d6920> call[name[m].name.lower, parameter[]]]]]
if compare[call[name[len], parameter[name[m].fields]] not_equal[!=] constant[0]] begin[:]
call[name[outf].write, parameter[binary_operation[binary_operation[constant[, [ this.] + call[constant[, this.].join, parameter[name[m].ordered_fieldnames]]] + constant[]]]]]
call[name[outf].write, parameter[constant[));
}
]]] | keyword[def] identifier[generate_classes] ( identifier[outf] , identifier[msgs] ):
literal[string]
identifier[print] ( literal[string] )
identifier[wrapper] = identifier[textwrap] . identifier[TextWrapper] ( identifier[initial_indent] = literal[string] , identifier[subsequent_indent] = literal[string] )
identifier[outf] . identifier[write] ( literal[string] );
keyword[def] identifier[field_descriptions] ( identifier[fields] ):
identifier[ret] = literal[string]
keyword[for] identifier[f] keyword[in] identifier[fields] :
identifier[ret] += literal[string] %( identifier[f] . identifier[name] , identifier[f] . identifier[description] . identifier[strip] (), identifier[f] . identifier[type] )
keyword[return] identifier[ret]
keyword[for] identifier[m] keyword[in] identifier[msgs] :
identifier[comment] = literal[string] %( identifier[wrapper] . identifier[fill] ( identifier[m] . identifier[description] . identifier[strip] ()), identifier[field_descriptions] ( identifier[m] . identifier[fields] ))
identifier[selffieldnames] = literal[string]
keyword[for] identifier[f] keyword[in] identifier[m] . identifier[fields] :
identifier[selffieldnames] += literal[string] % identifier[f] . identifier[name]
identifier[selffieldnames] = identifier[selffieldnames] [:- literal[int] ]
identifier[sub] ={ literal[string] : identifier[m] . identifier[name] . identifier[lower] (),
literal[string] : identifier[selffieldnames] ,
literal[string] : identifier[comment] ,
literal[string] : literal[string] . identifier[join] ( identifier[m] . identifier[fieldnames] )}
identifier[t] . identifier[write] ( identifier[outf] , literal[string] , identifier[sub] )
identifier[outf] . identifier[write] ( literal[string] %( identifier[m] . identifier[name] . identifier[lower] ()))
keyword[if] identifier[len] ( identifier[m] . identifier[fields] )!= literal[int] :
identifier[outf] . identifier[write] ( literal[string] . identifier[join] ( identifier[m] . identifier[fieldnames] ))
identifier[outf] . identifier[write] ( literal[string] )
identifier[outf] . identifier[write] ( literal[string] %( identifier[m] . identifier[fmtstr] , identifier[m] . identifier[name] . identifier[upper] (), identifier[m] . identifier[order_map] , identifier[m] . identifier[crc_extra] , identifier[m] . identifier[name] . identifier[upper] ()))
keyword[if] identifier[len] ( identifier[m] . identifier[fieldnames] )!= literal[int] :
identifier[outf] . identifier[write] ( literal[string] % literal[string] . identifier[join] ( identifier[m] . identifier[fieldnames] ))
identifier[outf] . identifier[write] ( literal[string] )
identifier[outf] . identifier[write] ( literal[string] % identifier[m] . identifier[name] . identifier[lower] ())
identifier[outf] . identifier[write] ( literal[string] % identifier[m] . identifier[name] . identifier[lower] ())
keyword[if] identifier[len] ( identifier[m] . identifier[fields] )!= literal[int] :
identifier[outf] . identifier[write] ( literal[string] + literal[string] . identifier[join] ( identifier[m] . identifier[ordered_fieldnames] )+ literal[string] )
identifier[outf] . identifier[write] ( literal[string] ) | def generate_classes(outf, msgs):
"""
Generate the implementations of the classes representing MAVLink messages.
"""
print('Generating class definitions')
wrapper = textwrap.TextWrapper(initial_indent='', subsequent_indent='')
outf.write('\nmavlink.messages = {};\n\n')
def field_descriptions(fields):
ret = ''
for f in fields:
ret += ' %-18s : %s (%s)\n' % (f.name, f.description.strip(), f.type) # depends on [control=['for'], data=['f']]
return ret
for m in msgs:
comment = '%s\n\n%s' % (wrapper.fill(m.description.strip()), field_descriptions(m.fields))
selffieldnames = 'self, '
for f in m.fields:
# if f.omit_arg:
# selffieldnames += '%s=%s, ' % (f.name, f.const_value)
#else:
# -- Omitting the code above because it is rarely used (only once?) and would need some special handling
# in javascript. Specifically, inside the method definition, it needs to check for a value then assign
# a default.
selffieldnames += '%s, ' % f.name # depends on [control=['for'], data=['f']]
selffieldnames = selffieldnames[:-2]
sub = {'NAMELOWER': m.name.lower(), 'SELFFIELDNAMES': selffieldnames, 'COMMENT': comment, 'FIELDNAMES': ', '.join(m.fieldnames)}
t.write(outf, '\n/* \n${COMMENT}\n*/\n', sub)
# function signature + declaration
outf.write('mavlink.messages.%s = function(' % m.name.lower())
if len(m.fields) != 0:
outf.write(', '.join(m.fieldnames)) # depends on [control=['if'], data=[]]
outf.write(') {') # body: set message type properties
outf.write("\n\n this.format = '%s';\n this.id = mavlink.MAVLINK_MSG_ID_%s;\n this.order_map = %s;\n this.crc_extra = %u;\n this.name = '%s';\n\n" % (m.fmtstr, m.name.upper(), m.order_map, m.crc_extra, m.name.upper()))
# body: set own properties
if len(m.fieldnames) != 0:
outf.write(" this.fieldnames = ['%s'];\n" % "', '".join(m.fieldnames)) # depends on [control=['if'], data=[]]
outf.write('\n\n this.set(arguments);\n\n}\n ')
# inherit methods from the base message class
outf.write('\nmavlink.messages.%s.prototype = new mavlink.message;\n' % m.name.lower())
# Implement the pack() function for this message
outf.write('\nmavlink.messages.%s.prototype.pack = function(mav) {\n return mavlink.message.prototype.pack.call(this, mav, this.crc_extra, jspack.Pack(this.format' % m.name.lower())
if len(m.fields) != 0:
outf.write(', [ this.' + ', this.'.join(m.ordered_fieldnames) + ']') # depends on [control=['if'], data=[]]
outf.write('));\n}\n\n') # depends on [control=['for'], data=['m']] |
def add_virtual_columns_polar_velocities_to_cartesian(self, x='x', y='y', azimuth=None, vr='vr_polar', vazimuth='vphi_polar', vx_out='vx', vy_out='vy', propagate_uncertainties=False):
""" Convert cylindrical polar velocities to Cartesian.
:param x:
:param y:
:param azimuth: Optional expression for the azimuth in degrees , may lead to a better performance when given.
:param vr:
:param vazimuth:
:param vx_out:
:param vy_out:
:param propagate_uncertainties: {propagate_uncertainties}
"""
x = self._expr(x)
y = self._expr(y)
vr = self._expr(vr)
vazimuth = self._expr(vazimuth)
if azimuth is not None:
azimuth = self._expr(azimuth)
azimuth = np.deg2rad(azimuth)
else:
azimuth = np.arctan2(y, x)
azimuth = self._expr(azimuth)
self[vx_out] = vr * np.cos(azimuth) - vazimuth * np.sin(azimuth)
self[vy_out] = vr * np.sin(azimuth) + vazimuth * np.cos(azimuth)
if propagate_uncertainties:
self.propagate_uncertainties([self[vx_out], self[vy_out]]) | def function[add_virtual_columns_polar_velocities_to_cartesian, parameter[self, x, y, azimuth, vr, vazimuth, vx_out, vy_out, propagate_uncertainties]]:
constant[ Convert cylindrical polar velocities to Cartesian.
:param x:
:param y:
:param azimuth: Optional expression for the azimuth in degrees , may lead to a better performance when given.
:param vr:
:param vazimuth:
:param vx_out:
:param vy_out:
:param propagate_uncertainties: {propagate_uncertainties}
]
variable[x] assign[=] call[name[self]._expr, parameter[name[x]]]
variable[y] assign[=] call[name[self]._expr, parameter[name[y]]]
variable[vr] assign[=] call[name[self]._expr, parameter[name[vr]]]
variable[vazimuth] assign[=] call[name[self]._expr, parameter[name[vazimuth]]]
if compare[name[azimuth] is_not constant[None]] begin[:]
variable[azimuth] assign[=] call[name[self]._expr, parameter[name[azimuth]]]
variable[azimuth] assign[=] call[name[np].deg2rad, parameter[name[azimuth]]]
variable[azimuth] assign[=] call[name[self]._expr, parameter[name[azimuth]]]
call[name[self]][name[vx_out]] assign[=] binary_operation[binary_operation[name[vr] * call[name[np].cos, parameter[name[azimuth]]]] - binary_operation[name[vazimuth] * call[name[np].sin, parameter[name[azimuth]]]]]
call[name[self]][name[vy_out]] assign[=] binary_operation[binary_operation[name[vr] * call[name[np].sin, parameter[name[azimuth]]]] + binary_operation[name[vazimuth] * call[name[np].cos, parameter[name[azimuth]]]]]
if name[propagate_uncertainties] begin[:]
call[name[self].propagate_uncertainties, parameter[list[[<ast.Subscript object at 0x7da2047ea920>, <ast.Subscript object at 0x7da2047e84c0>]]]] | keyword[def] identifier[add_virtual_columns_polar_velocities_to_cartesian] ( identifier[self] , identifier[x] = literal[string] , identifier[y] = literal[string] , identifier[azimuth] = keyword[None] , identifier[vr] = literal[string] , identifier[vazimuth] = literal[string] , identifier[vx_out] = literal[string] , identifier[vy_out] = literal[string] , identifier[propagate_uncertainties] = keyword[False] ):
literal[string]
identifier[x] = identifier[self] . identifier[_expr] ( identifier[x] )
identifier[y] = identifier[self] . identifier[_expr] ( identifier[y] )
identifier[vr] = identifier[self] . identifier[_expr] ( identifier[vr] )
identifier[vazimuth] = identifier[self] . identifier[_expr] ( identifier[vazimuth] )
keyword[if] identifier[azimuth] keyword[is] keyword[not] keyword[None] :
identifier[azimuth] = identifier[self] . identifier[_expr] ( identifier[azimuth] )
identifier[azimuth] = identifier[np] . identifier[deg2rad] ( identifier[azimuth] )
keyword[else] :
identifier[azimuth] = identifier[np] . identifier[arctan2] ( identifier[y] , identifier[x] )
identifier[azimuth] = identifier[self] . identifier[_expr] ( identifier[azimuth] )
identifier[self] [ identifier[vx_out] ]= identifier[vr] * identifier[np] . identifier[cos] ( identifier[azimuth] )- identifier[vazimuth] * identifier[np] . identifier[sin] ( identifier[azimuth] )
identifier[self] [ identifier[vy_out] ]= identifier[vr] * identifier[np] . identifier[sin] ( identifier[azimuth] )+ identifier[vazimuth] * identifier[np] . identifier[cos] ( identifier[azimuth] )
keyword[if] identifier[propagate_uncertainties] :
identifier[self] . identifier[propagate_uncertainties] ([ identifier[self] [ identifier[vx_out] ], identifier[self] [ identifier[vy_out] ]]) | def add_virtual_columns_polar_velocities_to_cartesian(self, x='x', y='y', azimuth=None, vr='vr_polar', vazimuth='vphi_polar', vx_out='vx', vy_out='vy', propagate_uncertainties=False):
""" Convert cylindrical polar velocities to Cartesian.
:param x:
:param y:
:param azimuth: Optional expression for the azimuth in degrees , may lead to a better performance when given.
:param vr:
:param vazimuth:
:param vx_out:
:param vy_out:
:param propagate_uncertainties: {propagate_uncertainties}
"""
x = self._expr(x)
y = self._expr(y)
vr = self._expr(vr)
vazimuth = self._expr(vazimuth)
if azimuth is not None:
azimuth = self._expr(azimuth)
azimuth = np.deg2rad(azimuth) # depends on [control=['if'], data=['azimuth']]
else:
azimuth = np.arctan2(y, x)
azimuth = self._expr(azimuth)
self[vx_out] = vr * np.cos(azimuth) - vazimuth * np.sin(azimuth)
self[vy_out] = vr * np.sin(azimuth) + vazimuth * np.cos(azimuth)
if propagate_uncertainties:
self.propagate_uncertainties([self[vx_out], self[vy_out]]) # depends on [control=['if'], data=[]] |
def to_unicode(value):
"""
Converts a string argument to a unicode string.
If the argument is already a unicode string or None, it is returned
unchanged. Otherwise it must be a byte string and is decoded as utf8.
"""
if isinstance(value, _TO_UNICODE_TYPES):
return value
if not isinstance(value, bytes):
raise TypeError(
"Expected bytes, unicode, or None; got %r" % type(value))
return value.decode("utf-8") | def function[to_unicode, parameter[value]]:
constant[
Converts a string argument to a unicode string.
If the argument is already a unicode string or None, it is returned
unchanged. Otherwise it must be a byte string and is decoded as utf8.
]
if call[name[isinstance], parameter[name[value], name[_TO_UNICODE_TYPES]]] begin[:]
return[name[value]]
if <ast.UnaryOp object at 0x7da18bc72b00> begin[:]
<ast.Raise object at 0x7da18bc70c10>
return[call[name[value].decode, parameter[constant[utf-8]]]] | keyword[def] identifier[to_unicode] ( identifier[value] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[value] , identifier[_TO_UNICODE_TYPES] ):
keyword[return] identifier[value]
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[bytes] ):
keyword[raise] identifier[TypeError] (
literal[string] % identifier[type] ( identifier[value] ))
keyword[return] identifier[value] . identifier[decode] ( literal[string] ) | def to_unicode(value):
"""
Converts a string argument to a unicode string.
If the argument is already a unicode string or None, it is returned
unchanged. Otherwise it must be a byte string and is decoded as utf8.
"""
if isinstance(value, _TO_UNICODE_TYPES):
return value # depends on [control=['if'], data=[]]
if not isinstance(value, bytes):
raise TypeError('Expected bytes, unicode, or None; got %r' % type(value)) # depends on [control=['if'], data=[]]
return value.decode('utf-8') |
def rebalance_brokers(self):
"""Rebalance partition-count across brokers within each replication-group."""
for rg in six.itervalues(self.cluster_topology.rgs):
rg.rebalance_brokers() | def function[rebalance_brokers, parameter[self]]:
constant[Rebalance partition-count across brokers within each replication-group.]
for taget[name[rg]] in starred[call[name[six].itervalues, parameter[name[self].cluster_topology.rgs]]] begin[:]
call[name[rg].rebalance_brokers, parameter[]] | keyword[def] identifier[rebalance_brokers] ( identifier[self] ):
literal[string]
keyword[for] identifier[rg] keyword[in] identifier[six] . identifier[itervalues] ( identifier[self] . identifier[cluster_topology] . identifier[rgs] ):
identifier[rg] . identifier[rebalance_brokers] () | def rebalance_brokers(self):
"""Rebalance partition-count across brokers within each replication-group."""
for rg in six.itervalues(self.cluster_topology.rgs):
rg.rebalance_brokers() # depends on [control=['for'], data=['rg']] |
def django_admin(request):
'''
Adds additional information to the context:
``django_admin`` - boolean variable indicating whether the current
page is part of the django admin or not.
``ADMIN_URL`` - normalized version of settings.ADMIN_URL; starts with a slash, ends without a slash
NOTE: do not set ADMIN_URL='/' in case your application provides functionality
outside of django admin as all incoming urls are interpreted as admin urls.
'''
# ensure that adminurl always starts with a '/' but never ends with a '/'
if settings.ADMIN_URL.endswith('/'):
admin_url = settings.ADMIN_URL[:-1]
if not settings.ADMIN_URL.startswith('/'):
admin_url = '/' + settings.ADMIN_URL
# add ADMIN_URL and django_admin to context
if request.META['PATH_INFO'].startswith(admin_url):
return {
'ADMIN_URL': admin_url,
'django_admin': True
}
else:
return {
'django_admin': False
} | def function[django_admin, parameter[request]]:
constant[
Adds additional information to the context:
``django_admin`` - boolean variable indicating whether the current
page is part of the django admin or not.
``ADMIN_URL`` - normalized version of settings.ADMIN_URL; starts with a slash, ends without a slash
NOTE: do not set ADMIN_URL='/' in case your application provides functionality
outside of django admin as all incoming urls are interpreted as admin urls.
]
if call[name[settings].ADMIN_URL.endswith, parameter[constant[/]]] begin[:]
variable[admin_url] assign[=] call[name[settings].ADMIN_URL][<ast.Slice object at 0x7da204622f50>]
if <ast.UnaryOp object at 0x7da204623f40> begin[:]
variable[admin_url] assign[=] binary_operation[constant[/] + name[settings].ADMIN_URL]
if call[call[name[request].META][constant[PATH_INFO]].startswith, parameter[name[admin_url]]] begin[:]
return[dictionary[[<ast.Constant object at 0x7da204620f70>, <ast.Constant object at 0x7da2046228f0>], [<ast.Name object at 0x7da2046218d0>, <ast.Constant object at 0x7da204623910>]]] | keyword[def] identifier[django_admin] ( identifier[request] ):
literal[string]
keyword[if] identifier[settings] . identifier[ADMIN_URL] . identifier[endswith] ( literal[string] ):
identifier[admin_url] = identifier[settings] . identifier[ADMIN_URL] [:- literal[int] ]
keyword[if] keyword[not] identifier[settings] . identifier[ADMIN_URL] . identifier[startswith] ( literal[string] ):
identifier[admin_url] = literal[string] + identifier[settings] . identifier[ADMIN_URL]
keyword[if] identifier[request] . identifier[META] [ literal[string] ]. identifier[startswith] ( identifier[admin_url] ):
keyword[return] {
literal[string] : identifier[admin_url] ,
literal[string] : keyword[True]
}
keyword[else] :
keyword[return] {
literal[string] : keyword[False]
} | def django_admin(request):
"""
Adds additional information to the context:
``django_admin`` - boolean variable indicating whether the current
page is part of the django admin or not.
``ADMIN_URL`` - normalized version of settings.ADMIN_URL; starts with a slash, ends without a slash
NOTE: do not set ADMIN_URL='/' in case your application provides functionality
outside of django admin as all incoming urls are interpreted as admin urls.
"""
# ensure that adminurl always starts with a '/' but never ends with a '/'
if settings.ADMIN_URL.endswith('/'):
admin_url = settings.ADMIN_URL[:-1] # depends on [control=['if'], data=[]]
if not settings.ADMIN_URL.startswith('/'):
admin_url = '/' + settings.ADMIN_URL # depends on [control=['if'], data=[]]
# add ADMIN_URL and django_admin to context
if request.META['PATH_INFO'].startswith(admin_url):
return {'ADMIN_URL': admin_url, 'django_admin': True} # depends on [control=['if'], data=[]]
else:
return {'django_admin': False} |
def newick(self):
'''Output this ``Tree`` as a Newick string
Returns:
``str``: Newick string of this ``Tree``
'''
if self.root.edge_length is None:
suffix = ';'
elif isinstance(self.root.edge_length,int):
suffix = ':%d;' % self.root.edge_length
elif isinstance(self.root.edge_length,float) and self.root.edge_length.is_integer():
suffix = ':%d;' % int(self.root.edge_length)
else:
suffix = ':%s;' % str(self.root.edge_length)
if self.is_rooted:
return '[&R] %s%s' % (self.root.newick(),suffix)
else:
return '%s%s' % (self.root.newick(),suffix) | def function[newick, parameter[self]]:
constant[Output this ``Tree`` as a Newick string
Returns:
``str``: Newick string of this ``Tree``
]
if compare[name[self].root.edge_length is constant[None]] begin[:]
variable[suffix] assign[=] constant[;]
if name[self].is_rooted begin[:]
return[binary_operation[constant[[&R] %s%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b0b7cd30>, <ast.Name object at 0x7da1b0b7f5e0>]]]] | keyword[def] identifier[newick] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[root] . identifier[edge_length] keyword[is] keyword[None] :
identifier[suffix] = literal[string]
keyword[elif] identifier[isinstance] ( identifier[self] . identifier[root] . identifier[edge_length] , identifier[int] ):
identifier[suffix] = literal[string] % identifier[self] . identifier[root] . identifier[edge_length]
keyword[elif] identifier[isinstance] ( identifier[self] . identifier[root] . identifier[edge_length] , identifier[float] ) keyword[and] identifier[self] . identifier[root] . identifier[edge_length] . identifier[is_integer] ():
identifier[suffix] = literal[string] % identifier[int] ( identifier[self] . identifier[root] . identifier[edge_length] )
keyword[else] :
identifier[suffix] = literal[string] % identifier[str] ( identifier[self] . identifier[root] . identifier[edge_length] )
keyword[if] identifier[self] . identifier[is_rooted] :
keyword[return] literal[string] %( identifier[self] . identifier[root] . identifier[newick] (), identifier[suffix] )
keyword[else] :
keyword[return] literal[string] %( identifier[self] . identifier[root] . identifier[newick] (), identifier[suffix] ) | def newick(self):
"""Output this ``Tree`` as a Newick string
Returns:
``str``: Newick string of this ``Tree``
"""
if self.root.edge_length is None:
suffix = ';' # depends on [control=['if'], data=[]]
elif isinstance(self.root.edge_length, int):
suffix = ':%d;' % self.root.edge_length # depends on [control=['if'], data=[]]
elif isinstance(self.root.edge_length, float) and self.root.edge_length.is_integer():
suffix = ':%d;' % int(self.root.edge_length) # depends on [control=['if'], data=[]]
else:
suffix = ':%s;' % str(self.root.edge_length)
if self.is_rooted:
return '[&R] %s%s' % (self.root.newick(), suffix) # depends on [control=['if'], data=[]]
else:
return '%s%s' % (self.root.newick(), suffix) |
def section(self, ctx, optional=False):
"""
Return section of the config for a specific context (sub-command).
Parameters:
ctx (Context): The Click context object.
optional (bool): If ``True``, return an empty config object when section is missing.
Returns:
Section: The configuration section belonging to
the active (sub-)command (based on ``ctx.info_name``).
"""
values = self.load()
try:
return values[ctx.info_name]
except KeyError:
if optional:
return configobj.ConfigObj({}, **self.DEFAULT_CONFIG_OPTS)
raise LoggedFailure("Configuration section '{}' not found!".format(ctx.info_name)) | def function[section, parameter[self, ctx, optional]]:
constant[
Return section of the config for a specific context (sub-command).
Parameters:
ctx (Context): The Click context object.
optional (bool): If ``True``, return an empty config object when section is missing.
Returns:
Section: The configuration section belonging to
the active (sub-)command (based on ``ctx.info_name``).
]
variable[values] assign[=] call[name[self].load, parameter[]]
<ast.Try object at 0x7da20c6aaec0> | keyword[def] identifier[section] ( identifier[self] , identifier[ctx] , identifier[optional] = keyword[False] ):
literal[string]
identifier[values] = identifier[self] . identifier[load] ()
keyword[try] :
keyword[return] identifier[values] [ identifier[ctx] . identifier[info_name] ]
keyword[except] identifier[KeyError] :
keyword[if] identifier[optional] :
keyword[return] identifier[configobj] . identifier[ConfigObj] ({},** identifier[self] . identifier[DEFAULT_CONFIG_OPTS] )
keyword[raise] identifier[LoggedFailure] ( literal[string] . identifier[format] ( identifier[ctx] . identifier[info_name] )) | def section(self, ctx, optional=False):
"""
Return section of the config for a specific context (sub-command).
Parameters:
ctx (Context): The Click context object.
optional (bool): If ``True``, return an empty config object when section is missing.
Returns:
Section: The configuration section belonging to
the active (sub-)command (based on ``ctx.info_name``).
"""
values = self.load()
try:
return values[ctx.info_name] # depends on [control=['try'], data=[]]
except KeyError:
if optional:
return configobj.ConfigObj({}, **self.DEFAULT_CONFIG_OPTS) # depends on [control=['if'], data=[]]
raise LoggedFailure("Configuration section '{}' not found!".format(ctx.info_name)) # depends on [control=['except'], data=[]] |
def has_role(self, role_or_handle):
""" Checks if user has role """
if not isinstance(role_or_handle, str):
return role_or_handle in self.roles
has_role = False
for role in self.roles:
if role.handle == role_or_handle:
has_role = True
break
return has_role | def function[has_role, parameter[self, role_or_handle]]:
constant[ Checks if user has role ]
if <ast.UnaryOp object at 0x7da18bc73730> begin[:]
return[compare[name[role_or_handle] in name[self].roles]]
variable[has_role] assign[=] constant[False]
for taget[name[role]] in starred[name[self].roles] begin[:]
if compare[name[role].handle equal[==] name[role_or_handle]] begin[:]
variable[has_role] assign[=] constant[True]
break
return[name[has_role]] | keyword[def] identifier[has_role] ( identifier[self] , identifier[role_or_handle] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[role_or_handle] , identifier[str] ):
keyword[return] identifier[role_or_handle] keyword[in] identifier[self] . identifier[roles]
identifier[has_role] = keyword[False]
keyword[for] identifier[role] keyword[in] identifier[self] . identifier[roles] :
keyword[if] identifier[role] . identifier[handle] == identifier[role_or_handle] :
identifier[has_role] = keyword[True]
keyword[break]
keyword[return] identifier[has_role] | def has_role(self, role_or_handle):
""" Checks if user has role """
if not isinstance(role_or_handle, str):
return role_or_handle in self.roles # depends on [control=['if'], data=[]]
has_role = False
for role in self.roles:
if role.handle == role_or_handle:
has_role = True
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['role']]
return has_role |
def is_host_target_supported(host_target, msvc_version):
"""Return True if the given (host, target) tuple is supported given the
msvc version.
Parameters
----------
host_target: tuple
tuple of (canonalized) host-target, e.g. ("x86", "amd64") for cross
compilation from 32 bits windows to 64 bits.
msvc_version: str
msvc version (major.minor, e.g. 10.0)
Note
----
This only check whether a given version *may* support the given (host,
target), not that the toolchain is actually present on the machine.
"""
# We assume that any Visual Studio version supports x86 as a target
if host_target[1] != "x86":
maj, min = msvc_version_to_maj_min(msvc_version)
if maj < 8:
return False
return True | def function[is_host_target_supported, parameter[host_target, msvc_version]]:
constant[Return True if the given (host, target) tuple is supported given the
msvc version.
Parameters
----------
host_target: tuple
tuple of (canonalized) host-target, e.g. ("x86", "amd64") for cross
compilation from 32 bits windows to 64 bits.
msvc_version: str
msvc version (major.minor, e.g. 10.0)
Note
----
This only check whether a given version *may* support the given (host,
target), not that the toolchain is actually present on the machine.
]
if compare[call[name[host_target]][constant[1]] not_equal[!=] constant[x86]] begin[:]
<ast.Tuple object at 0x7da20c9923e0> assign[=] call[name[msvc_version_to_maj_min], parameter[name[msvc_version]]]
if compare[name[maj] less[<] constant[8]] begin[:]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[is_host_target_supported] ( identifier[host_target] , identifier[msvc_version] ):
literal[string]
keyword[if] identifier[host_target] [ literal[int] ]!= literal[string] :
identifier[maj] , identifier[min] = identifier[msvc_version_to_maj_min] ( identifier[msvc_version] )
keyword[if] identifier[maj] < literal[int] :
keyword[return] keyword[False]
keyword[return] keyword[True] | def is_host_target_supported(host_target, msvc_version):
"""Return True if the given (host, target) tuple is supported given the
msvc version.
Parameters
----------
host_target: tuple
tuple of (canonalized) host-target, e.g. ("x86", "amd64") for cross
compilation from 32 bits windows to 64 bits.
msvc_version: str
msvc version (major.minor, e.g. 10.0)
Note
----
This only check whether a given version *may* support the given (host,
target), not that the toolchain is actually present on the machine.
"""
# We assume that any Visual Studio version supports x86 as a target
if host_target[1] != 'x86':
(maj, min) = msvc_version_to_maj_min(msvc_version)
if maj < 8:
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return True |
def _set_lsp_commit(self, v, load=False):
"""
Setter method for lsp_commit, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/lsp_commit (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_commit is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_commit() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="lsp-commit", rest_name="commit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Commit the changes to adaptive LSP', u'alt-name': u'commit', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """lsp_commit must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-commit", rest_name="commit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Commit the changes to adaptive LSP', u'alt-name': u'commit', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='empty', is_config=True)""",
})
self.__lsp_commit = t
if hasattr(self, '_set'):
self._set() | def function[_set_lsp_commit, parameter[self, v, load]]:
constant[
Setter method for lsp_commit, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/lsp_commit (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_commit is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_commit() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da2054a7d90>
name[self].__lsp_commit assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_lsp_commit] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[YANGBool] , identifier[is_leaf] = keyword[True] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : keyword[None] , literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : keyword[None] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__lsp_commit] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_lsp_commit(self, v, load=False):
"""
Setter method for lsp_commit, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/lsp/secondary_path/lsp_commit (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_lsp_commit is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_lsp_commit() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=YANGBool, is_leaf=True, yang_name='lsp-commit', rest_name='commit', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Commit the changes to adaptive LSP', u'alt-name': u'commit', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='empty', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'lsp_commit must be of a type compatible with empty', 'defined-type': 'empty', 'generated-type': 'YANGDynClass(base=YANGBool, is_leaf=True, yang_name="lsp-commit", rest_name="commit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'cli-full-command\': None, u\'info\': u\'Commit the changes to adaptive LSP\', u\'alt-name\': u\'commit\', u\'cli-suppress-no\': None}}, namespace=\'urn:brocade.com:mgmt:brocade-mpls\', defining_module=\'brocade-mpls\', yang_type=\'empty\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__lsp_commit = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def get_x(self, var, coords=None):
"""
Get the centers of the triangles in the x-dimension
Parameters
----------
%(CFDecoder.get_y.parameters)s
Returns
-------
%(CFDecoder.get_y.returns)s"""
if coords is None:
coords = self.ds.coords
# first we try the super class
ret = super(UGridDecoder, self).get_x(var, coords)
# but if that doesn't work because we get the variable name in the
# dimension of `var`, we use the means of the triangles
if ret is None or ret.name in var.dims:
bounds = self.get_cell_node_coord(var, axis='x', coords=coords)
if bounds is not None:
centers = bounds.mean(axis=-1)
x = self.get_nodes(self.get_mesh(var, coords), coords)[0]
try:
cls = xr.IndexVariable
except AttributeError: # xarray < 0.9
cls = xr.Coordinate
return cls(x.name, centers, attrs=x.attrs.copy()) | def function[get_x, parameter[self, var, coords]]:
constant[
Get the centers of the triangles in the x-dimension
Parameters
----------
%(CFDecoder.get_y.parameters)s
Returns
-------
%(CFDecoder.get_y.returns)s]
if compare[name[coords] is constant[None]] begin[:]
variable[coords] assign[=] name[self].ds.coords
variable[ret] assign[=] call[call[name[super], parameter[name[UGridDecoder], name[self]]].get_x, parameter[name[var], name[coords]]]
if <ast.BoolOp object at 0x7da20c7cbcd0> begin[:]
variable[bounds] assign[=] call[name[self].get_cell_node_coord, parameter[name[var]]]
if compare[name[bounds] is_not constant[None]] begin[:]
variable[centers] assign[=] call[name[bounds].mean, parameter[]]
variable[x] assign[=] call[call[name[self].get_nodes, parameter[call[name[self].get_mesh, parameter[name[var], name[coords]]], name[coords]]]][constant[0]]
<ast.Try object at 0x7da18eb56e30>
return[call[name[cls], parameter[name[x].name, name[centers]]]] | keyword[def] identifier[get_x] ( identifier[self] , identifier[var] , identifier[coords] = keyword[None] ):
literal[string]
keyword[if] identifier[coords] keyword[is] keyword[None] :
identifier[coords] = identifier[self] . identifier[ds] . identifier[coords]
identifier[ret] = identifier[super] ( identifier[UGridDecoder] , identifier[self] ). identifier[get_x] ( identifier[var] , identifier[coords] )
keyword[if] identifier[ret] keyword[is] keyword[None] keyword[or] identifier[ret] . identifier[name] keyword[in] identifier[var] . identifier[dims] :
identifier[bounds] = identifier[self] . identifier[get_cell_node_coord] ( identifier[var] , identifier[axis] = literal[string] , identifier[coords] = identifier[coords] )
keyword[if] identifier[bounds] keyword[is] keyword[not] keyword[None] :
identifier[centers] = identifier[bounds] . identifier[mean] ( identifier[axis] =- literal[int] )
identifier[x] = identifier[self] . identifier[get_nodes] ( identifier[self] . identifier[get_mesh] ( identifier[var] , identifier[coords] ), identifier[coords] )[ literal[int] ]
keyword[try] :
identifier[cls] = identifier[xr] . identifier[IndexVariable]
keyword[except] identifier[AttributeError] :
identifier[cls] = identifier[xr] . identifier[Coordinate]
keyword[return] identifier[cls] ( identifier[x] . identifier[name] , identifier[centers] , identifier[attrs] = identifier[x] . identifier[attrs] . identifier[copy] ()) | def get_x(self, var, coords=None):
"""
Get the centers of the triangles in the x-dimension
Parameters
----------
%(CFDecoder.get_y.parameters)s
Returns
-------
%(CFDecoder.get_y.returns)s"""
if coords is None:
coords = self.ds.coords # depends on [control=['if'], data=['coords']]
# first we try the super class
ret = super(UGridDecoder, self).get_x(var, coords)
# but if that doesn't work because we get the variable name in the
# dimension of `var`, we use the means of the triangles
if ret is None or ret.name in var.dims:
bounds = self.get_cell_node_coord(var, axis='x', coords=coords)
if bounds is not None:
centers = bounds.mean(axis=-1)
x = self.get_nodes(self.get_mesh(var, coords), coords)[0]
try:
cls = xr.IndexVariable # depends on [control=['try'], data=[]]
except AttributeError: # xarray < 0.9
cls = xr.Coordinate # depends on [control=['except'], data=[]]
return cls(x.name, centers, attrs=x.attrs.copy()) # depends on [control=['if'], data=['bounds']] # depends on [control=['if'], data=[]] |
def operations_contain_expected_statuses(operations, expected_statuses):
"""
Checks whether the operation list has an operation with the
expected status, then returns true
If it encounters operations in FAILED or ABORTED state
throw :class:`airflow.exceptions.AirflowException`.
:param operations: (Required) List of transfer operations to check.
:type operations: list[dict]
:param expected_statuses: (Required) status that is expected
See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferOperations#Status
:type expected_statuses: set[str]
:return: If there is an operation with the expected state
in the operation list, returns true,
:raises: airflow.exceptions.AirflowException If it encounters operations
with a state in the list,
:rtype: bool
"""
expected_statuses = (
{expected_statuses} if isinstance(expected_statuses, six.string_types) else set(expected_statuses)
)
if len(operations) == 0:
return False
current_statuses = {operation[METADATA][STATUS] for operation in operations}
if len(current_statuses - set(expected_statuses)) != len(current_statuses):
return True
if len(NEGATIVE_STATUSES - current_statuses) != len(NEGATIVE_STATUSES):
raise AirflowException(
'An unexpected operation status was encountered. Expected: {}'.format(
", ".join(expected_statuses)
)
)
return False | def function[operations_contain_expected_statuses, parameter[operations, expected_statuses]]:
constant[
Checks whether the operation list has an operation with the
expected status, then returns true
If it encounters operations in FAILED or ABORTED state
throw :class:`airflow.exceptions.AirflowException`.
:param operations: (Required) List of transfer operations to check.
:type operations: list[dict]
:param expected_statuses: (Required) status that is expected
See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferOperations#Status
:type expected_statuses: set[str]
:return: If there is an operation with the expected state
in the operation list, returns true,
:raises: airflow.exceptions.AirflowException If it encounters operations
with a state in the list,
:rtype: bool
]
variable[expected_statuses] assign[=] <ast.IfExp object at 0x7da1b0528700>
if compare[call[name[len], parameter[name[operations]]] equal[==] constant[0]] begin[:]
return[constant[False]]
variable[current_statuses] assign[=] <ast.SetComp object at 0x7da1b052bd60>
if compare[call[name[len], parameter[binary_operation[name[current_statuses] - call[name[set], parameter[name[expected_statuses]]]]]] not_equal[!=] call[name[len], parameter[name[current_statuses]]]] begin[:]
return[constant[True]]
if compare[call[name[len], parameter[binary_operation[name[NEGATIVE_STATUSES] - name[current_statuses]]]] not_equal[!=] call[name[len], parameter[name[NEGATIVE_STATUSES]]]] begin[:]
<ast.Raise object at 0x7da1b0529780>
return[constant[False]] | keyword[def] identifier[operations_contain_expected_statuses] ( identifier[operations] , identifier[expected_statuses] ):
literal[string]
identifier[expected_statuses] =(
{ identifier[expected_statuses] } keyword[if] identifier[isinstance] ( identifier[expected_statuses] , identifier[six] . identifier[string_types] ) keyword[else] identifier[set] ( identifier[expected_statuses] )
)
keyword[if] identifier[len] ( identifier[operations] )== literal[int] :
keyword[return] keyword[False]
identifier[current_statuses] ={ identifier[operation] [ identifier[METADATA] ][ identifier[STATUS] ] keyword[for] identifier[operation] keyword[in] identifier[operations] }
keyword[if] identifier[len] ( identifier[current_statuses] - identifier[set] ( identifier[expected_statuses] ))!= identifier[len] ( identifier[current_statuses] ):
keyword[return] keyword[True]
keyword[if] identifier[len] ( identifier[NEGATIVE_STATUSES] - identifier[current_statuses] )!= identifier[len] ( identifier[NEGATIVE_STATUSES] ):
keyword[raise] identifier[AirflowException] (
literal[string] . identifier[format] (
literal[string] . identifier[join] ( identifier[expected_statuses] )
)
)
keyword[return] keyword[False] | def operations_contain_expected_statuses(operations, expected_statuses):
"""
Checks whether the operation list has an operation with the
expected status, then returns true
If it encounters operations in FAILED or ABORTED state
throw :class:`airflow.exceptions.AirflowException`.
:param operations: (Required) List of transfer operations to check.
:type operations: list[dict]
:param expected_statuses: (Required) status that is expected
See:
https://cloud.google.com/storage-transfer/docs/reference/rest/v1/transferOperations#Status
:type expected_statuses: set[str]
:return: If there is an operation with the expected state
in the operation list, returns true,
:raises: airflow.exceptions.AirflowException If it encounters operations
with a state in the list,
:rtype: bool
"""
expected_statuses = {expected_statuses} if isinstance(expected_statuses, six.string_types) else set(expected_statuses)
if len(operations) == 0:
return False # depends on [control=['if'], data=[]]
current_statuses = {operation[METADATA][STATUS] for operation in operations}
if len(current_statuses - set(expected_statuses)) != len(current_statuses):
return True # depends on [control=['if'], data=[]]
if len(NEGATIVE_STATUSES - current_statuses) != len(NEGATIVE_STATUSES):
raise AirflowException('An unexpected operation status was encountered. Expected: {}'.format(', '.join(expected_statuses))) # depends on [control=['if'], data=[]]
return False |
def scaledBy(self, scale):
""" Return a new Selector with scale denominators scaled by a number.
"""
scaled = deepcopy(self)
for test in scaled.elements[0].tests:
if type(test.value) in (int, float):
if test.property == 'scale-denominator':
test.value /= scale
elif test.property == 'zoom':
test.value += log(scale)/log(2)
return scaled | def function[scaledBy, parameter[self, scale]]:
constant[ Return a new Selector with scale denominators scaled by a number.
]
variable[scaled] assign[=] call[name[deepcopy], parameter[name[self]]]
for taget[name[test]] in starred[call[name[scaled].elements][constant[0]].tests] begin[:]
if compare[call[name[type], parameter[name[test].value]] in tuple[[<ast.Name object at 0x7da2101f5060>, <ast.Name object at 0x7da2101f50f0>]]] begin[:]
if compare[name[test].property equal[==] constant[scale-denominator]] begin[:]
<ast.AugAssign object at 0x7da20e797eb0>
return[name[scaled]] | keyword[def] identifier[scaledBy] ( identifier[self] , identifier[scale] ):
literal[string]
identifier[scaled] = identifier[deepcopy] ( identifier[self] )
keyword[for] identifier[test] keyword[in] identifier[scaled] . identifier[elements] [ literal[int] ]. identifier[tests] :
keyword[if] identifier[type] ( identifier[test] . identifier[value] ) keyword[in] ( identifier[int] , identifier[float] ):
keyword[if] identifier[test] . identifier[property] == literal[string] :
identifier[test] . identifier[value] /= identifier[scale]
keyword[elif] identifier[test] . identifier[property] == literal[string] :
identifier[test] . identifier[value] += identifier[log] ( identifier[scale] )/ identifier[log] ( literal[int] )
keyword[return] identifier[scaled] | def scaledBy(self, scale):
""" Return a new Selector with scale denominators scaled by a number.
"""
scaled = deepcopy(self)
for test in scaled.elements[0].tests:
if type(test.value) in (int, float):
if test.property == 'scale-denominator':
test.value /= scale # depends on [control=['if'], data=[]]
elif test.property == 'zoom':
test.value += log(scale) / log(2) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['test']]
return scaled |
def get_hosts_from_csv(filename,
default_protocol='telnet',
default_domain='',
encoding='utf-8'):
"""
Reads a list of hostnames and variables from the tab-separated .csv file
with the given name. The first line of the file must contain the column
names, e.g.::
address testvar1 testvar2
10.0.0.1 value1 othervalue
10.0.0.1 value2 othervalue2
10.0.0.2 foo bar
For the above example, the function returns *two* host objects, where
the 'testvar1' variable of the first host holds a list containing two
entries ('value1' and 'value2'), and the 'testvar1' variable of the
second host contains a list with a single entry ('foo').
Both, the address and the hostname of each host are set to the address
given in the first column. If you want the hostname set to another value,
you may add a second column containing the hostname::
address hostname testvar
10.0.0.1 myhost value
10.0.0.2 otherhost othervalue
:type filename: string
:param filename: A full filename.
:type default_protocol: str
:param default_protocol: Passed to the Host constructor.
:type default_domain: str
:param default_domain: Appended to each hostname that has no domain.
:type encoding: str
:param encoding: The encoding of the file.
:rtype: list[Host]
:return: The newly created host instances.
"""
# Open the file.
if not os.path.exists(filename):
raise IOError('No such file: %s' % filename)
with codecs.open(filename, 'r', encoding) as file_handle:
# Read and check the header.
header = file_handle.readline().rstrip()
if re.search(r'^(?:hostname|address)\b', header) is None:
msg = 'Syntax error in CSV file header:'
msg += ' File does not start with "hostname" or "address".'
raise Exception(msg)
if re.search(r'^(?:hostname|address)(?:\t[^\t]+)*$', header) is None:
msg = 'Syntax error in CSV file header:'
msg += ' Make sure to separate columns by tabs.'
raise Exception(msg)
varnames = [str(v) for v in header.split('\t')]
varnames.pop(0)
# Walk through all lines and create a map that maps hostname to
# definitions.
last_uri = ''
line_re = re.compile(r'[\r\n]*$')
hosts = []
for line in file_handle:
if line.strip() == '':
continue
line = line_re.sub('', line)
values = line.split('\t')
uri = values.pop(0).strip()
# Add the hostname to our list.
if uri != last_uri:
# print "Reading hostname", hostname_url, "from csv."
host = to_host(uri, default_protocol, default_domain)
last_uri = uri
hosts.append(host)
# Define variables according to the definition.
for i, varname in enumerate(varnames):
try:
value = values[i]
except IndexError:
value = ''
if varname == 'hostname':
host.set_name(value)
else:
host.append(varname, value)
return hosts | def function[get_hosts_from_csv, parameter[filename, default_protocol, default_domain, encoding]]:
constant[
Reads a list of hostnames and variables from the tab-separated .csv file
with the given name. The first line of the file must contain the column
names, e.g.::
address testvar1 testvar2
10.0.0.1 value1 othervalue
10.0.0.1 value2 othervalue2
10.0.0.2 foo bar
For the above example, the function returns *two* host objects, where
the 'testvar1' variable of the first host holds a list containing two
entries ('value1' and 'value2'), and the 'testvar1' variable of the
second host contains a list with a single entry ('foo').
Both, the address and the hostname of each host are set to the address
given in the first column. If you want the hostname set to another value,
you may add a second column containing the hostname::
address hostname testvar
10.0.0.1 myhost value
10.0.0.2 otherhost othervalue
:type filename: string
:param filename: A full filename.
:type default_protocol: str
:param default_protocol: Passed to the Host constructor.
:type default_domain: str
:param default_domain: Appended to each hostname that has no domain.
:type encoding: str
:param encoding: The encoding of the file.
:rtype: list[Host]
:return: The newly created host instances.
]
if <ast.UnaryOp object at 0x7da1b0717cd0> begin[:]
<ast.Raise object at 0x7da1b07172e0>
with call[name[codecs].open, parameter[name[filename], constant[r], name[encoding]]] begin[:]
variable[header] assign[=] call[call[name[file_handle].readline, parameter[]].rstrip, parameter[]]
if compare[call[name[re].search, parameter[constant[^(?:hostname|address)\b], name[header]]] is constant[None]] begin[:]
variable[msg] assign[=] constant[Syntax error in CSV file header:]
<ast.AugAssign object at 0x7da1b0717af0>
<ast.Raise object at 0x7da1b07150f0>
if compare[call[name[re].search, parameter[constant[^(?:hostname|address)(?:\t[^\t]+)*$], name[header]]] is constant[None]] begin[:]
variable[msg] assign[=] constant[Syntax error in CSV file header:]
<ast.AugAssign object at 0x7da1b0716320>
<ast.Raise object at 0x7da1b0714250>
variable[varnames] assign[=] <ast.ListComp object at 0x7da1b0715720>
call[name[varnames].pop, parameter[constant[0]]]
variable[last_uri] assign[=] constant[]
variable[line_re] assign[=] call[name[re].compile, parameter[constant[[\r\n]*$]]]
variable[hosts] assign[=] list[[]]
for taget[name[line]] in starred[name[file_handle]] begin[:]
if compare[call[name[line].strip, parameter[]] equal[==] constant[]] begin[:]
continue
variable[line] assign[=] call[name[line_re].sub, parameter[constant[], name[line]]]
variable[values] assign[=] call[name[line].split, parameter[constant[ ]]]
variable[uri] assign[=] call[call[name[values].pop, parameter[constant[0]]].strip, parameter[]]
if compare[name[uri] not_equal[!=] name[last_uri]] begin[:]
variable[host] assign[=] call[name[to_host], parameter[name[uri], name[default_protocol], name[default_domain]]]
variable[last_uri] assign[=] name[uri]
call[name[hosts].append, parameter[name[host]]]
for taget[tuple[[<ast.Name object at 0x7da1b0653220>, <ast.Name object at 0x7da1b06526b0>]]] in starred[call[name[enumerate], parameter[name[varnames]]]] begin[:]
<ast.Try object at 0x7da1b0652740>
if compare[name[varname] equal[==] constant[hostname]] begin[:]
call[name[host].set_name, parameter[name[value]]]
return[name[hosts]] | keyword[def] identifier[get_hosts_from_csv] ( identifier[filename] ,
identifier[default_protocol] = literal[string] ,
identifier[default_domain] = literal[string] ,
identifier[encoding] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[filename] ):
keyword[raise] identifier[IOError] ( literal[string] % identifier[filename] )
keyword[with] identifier[codecs] . identifier[open] ( identifier[filename] , literal[string] , identifier[encoding] ) keyword[as] identifier[file_handle] :
identifier[header] = identifier[file_handle] . identifier[readline] (). identifier[rstrip] ()
keyword[if] identifier[re] . identifier[search] ( literal[string] , identifier[header] ) keyword[is] keyword[None] :
identifier[msg] = literal[string]
identifier[msg] += literal[string]
keyword[raise] identifier[Exception] ( identifier[msg] )
keyword[if] identifier[re] . identifier[search] ( literal[string] , identifier[header] ) keyword[is] keyword[None] :
identifier[msg] = literal[string]
identifier[msg] += literal[string]
keyword[raise] identifier[Exception] ( identifier[msg] )
identifier[varnames] =[ identifier[str] ( identifier[v] ) keyword[for] identifier[v] keyword[in] identifier[header] . identifier[split] ( literal[string] )]
identifier[varnames] . identifier[pop] ( literal[int] )
identifier[last_uri] = literal[string]
identifier[line_re] = identifier[re] . identifier[compile] ( literal[string] )
identifier[hosts] =[]
keyword[for] identifier[line] keyword[in] identifier[file_handle] :
keyword[if] identifier[line] . identifier[strip] ()== literal[string] :
keyword[continue]
identifier[line] = identifier[line_re] . identifier[sub] ( literal[string] , identifier[line] )
identifier[values] = identifier[line] . identifier[split] ( literal[string] )
identifier[uri] = identifier[values] . identifier[pop] ( literal[int] ). identifier[strip] ()
keyword[if] identifier[uri] != identifier[last_uri] :
identifier[host] = identifier[to_host] ( identifier[uri] , identifier[default_protocol] , identifier[default_domain] )
identifier[last_uri] = identifier[uri]
identifier[hosts] . identifier[append] ( identifier[host] )
keyword[for] identifier[i] , identifier[varname] keyword[in] identifier[enumerate] ( identifier[varnames] ):
keyword[try] :
identifier[value] = identifier[values] [ identifier[i] ]
keyword[except] identifier[IndexError] :
identifier[value] = literal[string]
keyword[if] identifier[varname] == literal[string] :
identifier[host] . identifier[set_name] ( identifier[value] )
keyword[else] :
identifier[host] . identifier[append] ( identifier[varname] , identifier[value] )
keyword[return] identifier[hosts] | def get_hosts_from_csv(filename, default_protocol='telnet', default_domain='', encoding='utf-8'):
"""
Reads a list of hostnames and variables from the tab-separated .csv file
with the given name. The first line of the file must contain the column
names, e.g.::
address testvar1 testvar2
10.0.0.1 value1 othervalue
10.0.0.1 value2 othervalue2
10.0.0.2 foo bar
For the above example, the function returns *two* host objects, where
the 'testvar1' variable of the first host holds a list containing two
entries ('value1' and 'value2'), and the 'testvar1' variable of the
second host contains a list with a single entry ('foo').
Both, the address and the hostname of each host are set to the address
given in the first column. If you want the hostname set to another value,
you may add a second column containing the hostname::
address hostname testvar
10.0.0.1 myhost value
10.0.0.2 otherhost othervalue
:type filename: string
:param filename: A full filename.
:type default_protocol: str
:param default_protocol: Passed to the Host constructor.
:type default_domain: str
:param default_domain: Appended to each hostname that has no domain.
:type encoding: str
:param encoding: The encoding of the file.
:rtype: list[Host]
:return: The newly created host instances.
"""
# Open the file.
if not os.path.exists(filename):
raise IOError('No such file: %s' % filename) # depends on [control=['if'], data=[]]
with codecs.open(filename, 'r', encoding) as file_handle:
# Read and check the header.
header = file_handle.readline().rstrip()
if re.search('^(?:hostname|address)\\b', header) is None:
msg = 'Syntax error in CSV file header:'
msg += ' File does not start with "hostname" or "address".'
raise Exception(msg) # depends on [control=['if'], data=[]]
if re.search('^(?:hostname|address)(?:\\t[^\\t]+)*$', header) is None:
msg = 'Syntax error in CSV file header:'
msg += ' Make sure to separate columns by tabs.'
raise Exception(msg) # depends on [control=['if'], data=[]]
varnames = [str(v) for v in header.split('\t')]
varnames.pop(0)
# Walk through all lines and create a map that maps hostname to
# definitions.
last_uri = ''
line_re = re.compile('[\\r\\n]*$')
hosts = []
for line in file_handle:
if line.strip() == '':
continue # depends on [control=['if'], data=[]]
line = line_re.sub('', line)
values = line.split('\t')
uri = values.pop(0).strip()
# Add the hostname to our list.
if uri != last_uri:
# print "Reading hostname", hostname_url, "from csv."
host = to_host(uri, default_protocol, default_domain)
last_uri = uri
hosts.append(host) # depends on [control=['if'], data=['uri', 'last_uri']]
# Define variables according to the definition.
for (i, varname) in enumerate(varnames):
try:
value = values[i] # depends on [control=['try'], data=[]]
except IndexError:
value = '' # depends on [control=['except'], data=[]]
if varname == 'hostname':
host.set_name(value) # depends on [control=['if'], data=[]]
else:
host.append(varname, value) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['file_handle']]
return hosts |
def block_code(self, code, lang=None):
"""Rendering block level code. ``pre > code``.
:param code: text content of the code block.
:param lang: language of the given code.
"""
code = code.rstrip('\n')
if not lang:
code = escape(code, smart_amp=False)
return '<pre><code>%s\n</code></pre>\n' % code
code = escape(code, quote=True, smart_amp=False)
return '<pre><code class="lang-%s">%s\n</code></pre>\n' % (lang, code) | def function[block_code, parameter[self, code, lang]]:
constant[Rendering block level code. ``pre > code``.
:param code: text content of the code block.
:param lang: language of the given code.
]
variable[code] assign[=] call[name[code].rstrip, parameter[constant[
]]]
if <ast.UnaryOp object at 0x7da18dc043d0> begin[:]
variable[code] assign[=] call[name[escape], parameter[name[code]]]
return[binary_operation[constant[<pre><code>%s
</code></pre>
] <ast.Mod object at 0x7da2590d6920> name[code]]]
variable[code] assign[=] call[name[escape], parameter[name[code]]]
return[binary_operation[constant[<pre><code class="lang-%s">%s
</code></pre>
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18ede6920>, <ast.Name object at 0x7da18ede5fc0>]]]] | keyword[def] identifier[block_code] ( identifier[self] , identifier[code] , identifier[lang] = keyword[None] ):
literal[string]
identifier[code] = identifier[code] . identifier[rstrip] ( literal[string] )
keyword[if] keyword[not] identifier[lang] :
identifier[code] = identifier[escape] ( identifier[code] , identifier[smart_amp] = keyword[False] )
keyword[return] literal[string] % identifier[code]
identifier[code] = identifier[escape] ( identifier[code] , identifier[quote] = keyword[True] , identifier[smart_amp] = keyword[False] )
keyword[return] literal[string] %( identifier[lang] , identifier[code] ) | def block_code(self, code, lang=None):
"""Rendering block level code. ``pre > code``.
:param code: text content of the code block.
:param lang: language of the given code.
"""
code = code.rstrip('\n')
if not lang:
code = escape(code, smart_amp=False)
return '<pre><code>%s\n</code></pre>\n' % code # depends on [control=['if'], data=[]]
code = escape(code, quote=True, smart_amp=False)
return '<pre><code class="lang-%s">%s\n</code></pre>\n' % (lang, code) |
def _do_link_patterns(self, text):
"""Caveat emptor: there isn't much guarding against link
patterns being formed inside other standard Markdown links, e.g.
inside a [link def][like this].
Dev Notes: *Could* consider prefixing regexes with a negative
lookbehind assertion to attempt to guard against this.
"""
link_from_hash = {}
for regex, repl in self.link_patterns:
replacements = []
for match in regex.finditer(text):
if hasattr(repl, "__call__"):
href = repl(match)
else:
href = match.expand(repl)
replacements.append((match.span(), href))
for (start, end), href in reversed(replacements):
escaped_href = (
href.replace('"', '"') # b/c of attr quote
# To avoid markdown <em> and <strong>:
.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
link = '<a href="%s">%s</a>' % (escaped_href, text[start:end])
hash = _hash_text(link)
link_from_hash[hash] = link
text = text[:start] + hash + text[end:]
for hash, link in list(link_from_hash.items()):
text = text.replace(hash, link)
return text | def function[_do_link_patterns, parameter[self, text]]:
constant[Caveat emptor: there isn't much guarding against link
patterns being formed inside other standard Markdown links, e.g.
inside a [link def][like this].
Dev Notes: *Could* consider prefixing regexes with a negative
lookbehind assertion to attempt to guard against this.
]
variable[link_from_hash] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b1d5d540>, <ast.Name object at 0x7da1b1d5ff40>]]] in starred[name[self].link_patterns] begin[:]
variable[replacements] assign[=] list[[]]
for taget[name[match]] in starred[call[name[regex].finditer, parameter[name[text]]]] begin[:]
if call[name[hasattr], parameter[name[repl], constant[__call__]]] begin[:]
variable[href] assign[=] call[name[repl], parameter[name[match]]]
call[name[replacements].append, parameter[tuple[[<ast.Call object at 0x7da1b1d5ce50>, <ast.Name object at 0x7da1b1d5fd00>]]]]
for taget[tuple[[<ast.Tuple object at 0x7da1b1d5fc70>, <ast.Name object at 0x7da1b1d5ccd0>]]] in starred[call[name[reversed], parameter[name[replacements]]]] begin[:]
variable[escaped_href] assign[=] call[call[call[name[href].replace, parameter[constant["], constant["]]].replace, parameter[constant[*], call[name[self]._escape_table][constant[*]]]].replace, parameter[constant[_], call[name[self]._escape_table][constant[_]]]]
variable[link] assign[=] binary_operation[constant[<a href="%s">%s</a>] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1d8c2e0>, <ast.Subscript object at 0x7da1b1d8c2b0>]]]
variable[hash] assign[=] call[name[_hash_text], parameter[name[link]]]
call[name[link_from_hash]][name[hash]] assign[=] name[link]
variable[text] assign[=] binary_operation[binary_operation[call[name[text]][<ast.Slice object at 0x7da1b1d8d180>] + name[hash]] + call[name[text]][<ast.Slice object at 0x7da1b1d8d270>]]
for taget[tuple[[<ast.Name object at 0x7da1b1d8d330>, <ast.Name object at 0x7da1b1d8d360>]]] in starred[call[name[list], parameter[call[name[link_from_hash].items, parameter[]]]]] begin[:]
variable[text] assign[=] call[name[text].replace, parameter[name[hash], name[link]]]
return[name[text]] | keyword[def] identifier[_do_link_patterns] ( identifier[self] , identifier[text] ):
literal[string]
identifier[link_from_hash] ={}
keyword[for] identifier[regex] , identifier[repl] keyword[in] identifier[self] . identifier[link_patterns] :
identifier[replacements] =[]
keyword[for] identifier[match] keyword[in] identifier[regex] . identifier[finditer] ( identifier[text] ):
keyword[if] identifier[hasattr] ( identifier[repl] , literal[string] ):
identifier[href] = identifier[repl] ( identifier[match] )
keyword[else] :
identifier[href] = identifier[match] . identifier[expand] ( identifier[repl] )
identifier[replacements] . identifier[append] (( identifier[match] . identifier[span] (), identifier[href] ))
keyword[for] ( identifier[start] , identifier[end] ), identifier[href] keyword[in] identifier[reversed] ( identifier[replacements] ):
identifier[escaped_href] =(
identifier[href] . identifier[replace] ( literal[string] , literal[string] )
. identifier[replace] ( literal[string] , identifier[self] . identifier[_escape_table] [ literal[string] ])
. identifier[replace] ( literal[string] , identifier[self] . identifier[_escape_table] [ literal[string] ]))
identifier[link] = literal[string] %( identifier[escaped_href] , identifier[text] [ identifier[start] : identifier[end] ])
identifier[hash] = identifier[_hash_text] ( identifier[link] )
identifier[link_from_hash] [ identifier[hash] ]= identifier[link]
identifier[text] = identifier[text] [: identifier[start] ]+ identifier[hash] + identifier[text] [ identifier[end] :]
keyword[for] identifier[hash] , identifier[link] keyword[in] identifier[list] ( identifier[link_from_hash] . identifier[items] ()):
identifier[text] = identifier[text] . identifier[replace] ( identifier[hash] , identifier[link] )
keyword[return] identifier[text] | def _do_link_patterns(self, text):
"""Caveat emptor: there isn't much guarding against link
patterns being formed inside other standard Markdown links, e.g.
inside a [link def][like this].
Dev Notes: *Could* consider prefixing regexes with a negative
lookbehind assertion to attempt to guard against this.
"""
link_from_hash = {}
for (regex, repl) in self.link_patterns:
replacements = []
for match in regex.finditer(text):
if hasattr(repl, '__call__'):
href = repl(match) # depends on [control=['if'], data=[]]
else:
href = match.expand(repl)
replacements.append((match.span(), href)) # depends on [control=['for'], data=['match']]
for ((start, end), href) in reversed(replacements): # b/c of attr quote
# To avoid markdown <em> and <strong>:
escaped_href = href.replace('"', '"').replace('*', self._escape_table['*']).replace('_', self._escape_table['_'])
link = '<a href="%s">%s</a>' % (escaped_href, text[start:end])
hash = _hash_text(link)
link_from_hash[hash] = link
text = text[:start] + hash + text[end:] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
for (hash, link) in list(link_from_hash.items()):
text = text.replace(hash, link) # depends on [control=['for'], data=[]]
return text |
def convert_to_english_phonetic_alphabet(self, arpabet):
'''
转换成英音
:param arpabet:
:return:
'''
word = self._convert_to_word(arpabet=arpabet)
if not word:
return None
return word.translate_to_english_phonetic_alphabet() | def function[convert_to_english_phonetic_alphabet, parameter[self, arpabet]]:
constant[
转换成英音
:param arpabet:
:return:
]
variable[word] assign[=] call[name[self]._convert_to_word, parameter[]]
if <ast.UnaryOp object at 0x7da1b11f0c10> begin[:]
return[constant[None]]
return[call[name[word].translate_to_english_phonetic_alphabet, parameter[]]] | keyword[def] identifier[convert_to_english_phonetic_alphabet] ( identifier[self] , identifier[arpabet] ):
literal[string]
identifier[word] = identifier[self] . identifier[_convert_to_word] ( identifier[arpabet] = identifier[arpabet] )
keyword[if] keyword[not] identifier[word] :
keyword[return] keyword[None]
keyword[return] identifier[word] . identifier[translate_to_english_phonetic_alphabet] () | def convert_to_english_phonetic_alphabet(self, arpabet):
"""
转换成英音
:param arpabet:
:return:
"""
word = self._convert_to_word(arpabet=arpabet)
if not word:
return None # depends on [control=['if'], data=[]]
return word.translate_to_english_phonetic_alphabet() |
def rollforward(self, dt):
"""
Roll provided date forward to next offset only if not on offset.
"""
dt = as_timestamp(dt)
if not self.onOffset(dt):
dt = dt + self.__class__(1, normalize=self.normalize, **self.kwds)
return dt | def function[rollforward, parameter[self, dt]]:
constant[
Roll provided date forward to next offset only if not on offset.
]
variable[dt] assign[=] call[name[as_timestamp], parameter[name[dt]]]
if <ast.UnaryOp object at 0x7da20c6e6860> begin[:]
variable[dt] assign[=] binary_operation[name[dt] + call[name[self].__class__, parameter[constant[1]]]]
return[name[dt]] | keyword[def] identifier[rollforward] ( identifier[self] , identifier[dt] ):
literal[string]
identifier[dt] = identifier[as_timestamp] ( identifier[dt] )
keyword[if] keyword[not] identifier[self] . identifier[onOffset] ( identifier[dt] ):
identifier[dt] = identifier[dt] + identifier[self] . identifier[__class__] ( literal[int] , identifier[normalize] = identifier[self] . identifier[normalize] ,** identifier[self] . identifier[kwds] )
keyword[return] identifier[dt] | def rollforward(self, dt):
"""
Roll provided date forward to next offset only if not on offset.
"""
dt = as_timestamp(dt)
if not self.onOffset(dt):
dt = dt + self.__class__(1, normalize=self.normalize, **self.kwds) # depends on [control=['if'], data=[]]
return dt |
def username(self, default=None):
'''
Return the full, given, or family name if set.
'''
if self.lis_person_name_given:
return self.lis_person_name_given
elif self.lis_person_name_family:
return self.lis_person_name_family
elif self.lis_person_name_full:
return self.lis_person_name_full
else:
return default | def function[username, parameter[self, default]]:
constant[
Return the full, given, or family name if set.
]
if name[self].lis_person_name_given begin[:]
return[name[self].lis_person_name_given] | keyword[def] identifier[username] ( identifier[self] , identifier[default] = keyword[None] ):
literal[string]
keyword[if] identifier[self] . identifier[lis_person_name_given] :
keyword[return] identifier[self] . identifier[lis_person_name_given]
keyword[elif] identifier[self] . identifier[lis_person_name_family] :
keyword[return] identifier[self] . identifier[lis_person_name_family]
keyword[elif] identifier[self] . identifier[lis_person_name_full] :
keyword[return] identifier[self] . identifier[lis_person_name_full]
keyword[else] :
keyword[return] identifier[default] | def username(self, default=None):
"""
Return the full, given, or family name if set.
"""
if self.lis_person_name_given:
return self.lis_person_name_given # depends on [control=['if'], data=[]]
elif self.lis_person_name_family:
return self.lis_person_name_family # depends on [control=['if'], data=[]]
elif self.lis_person_name_full:
return self.lis_person_name_full # depends on [control=['if'], data=[]]
else:
return default |
def split_number_unit(string, strip_list=None):
'''
takes a string and grabs the leading number and the following unit
both the number and unit returned are simple strings
returns a triple tuple of (successFlag, number, unit)
successFlag is False if the number is invalid
'''
successFlag = True
state = 0
number_so_far = ""
unit_so_far = ""
decimal_found = False
negate_flag = False
if string is None:
return (False, "", "")
string = string.strip()
if not strip_list:
strip_list = []
if len(string):
if string[0]=="-":
negate_flag = True
string = string[1:]
if len(string):
if string[0]==".":
string = "0"+string # a string of ".12" is actually a legit number, but the lack of a preceding 0 will confuse python, so we tack on a zero
if len(string):
for char in string:
if state==0: # number state
if char in ['0','1','2','3','4','5','6','7','8','9']:
number_so_far += char
elif char in strip_list:
pass
elif char==".":
if decimal_found:
successFlag = False # units do not begin with a period. ex: 234.2.anything
state=2
else:
number_so_far += char
decimal_found = True
elif char=="\n":
state=2
else:
unit_so_far += char
state=1
elif state==1: # unit state
if char=="\n":
state=2
elif char in strip_list:
pass
else:
unit_so_far += char
else: # discard state
pass
# clean up
unit_so_far = unit_so_far.strip()
if len(number_so_far)==0:
successFlag = False
if negate_flag:
number_so_far = "-"+number_so_far
try:
number_so_far = decimal.Decimal(number_so_far)
except:
successFlag = False
else:
successFlag = False
if not successFlag:
return(successFlag, "", unit_so_far)
return (successFlag, number_so_far, unit_so_far) | def function[split_number_unit, parameter[string, strip_list]]:
constant[
takes a string and grabs the leading number and the following unit
both the number and unit returned are simple strings
returns a triple tuple of (successFlag, number, unit)
successFlag is False if the number is invalid
]
variable[successFlag] assign[=] constant[True]
variable[state] assign[=] constant[0]
variable[number_so_far] assign[=] constant[]
variable[unit_so_far] assign[=] constant[]
variable[decimal_found] assign[=] constant[False]
variable[negate_flag] assign[=] constant[False]
if compare[name[string] is constant[None]] begin[:]
return[tuple[[<ast.Constant object at 0x7da2041db6d0>, <ast.Constant object at 0x7da2041d8760>, <ast.Constant object at 0x7da2041d86d0>]]]
variable[string] assign[=] call[name[string].strip, parameter[]]
if <ast.UnaryOp object at 0x7da2041d9cc0> begin[:]
variable[strip_list] assign[=] list[[]]
if call[name[len], parameter[name[string]]] begin[:]
if compare[call[name[string]][constant[0]] equal[==] constant[-]] begin[:]
variable[negate_flag] assign[=] constant[True]
variable[string] assign[=] call[name[string]][<ast.Slice object at 0x7da2041daa40>]
if call[name[len], parameter[name[string]]] begin[:]
if compare[call[name[string]][constant[0]] equal[==] constant[.]] begin[:]
variable[string] assign[=] binary_operation[constant[0] + name[string]]
if call[name[len], parameter[name[string]]] begin[:]
for taget[name[char]] in starred[name[string]] begin[:]
if compare[name[state] equal[==] constant[0]] begin[:]
if compare[name[char] in list[[<ast.Constant object at 0x7da2041dae00>, <ast.Constant object at 0x7da2041d8d00>, <ast.Constant object at 0x7da2041d8b50>, <ast.Constant object at 0x7da2041db8e0>, <ast.Constant object at 0x7da2041dbe50>, <ast.Constant object at 0x7da2041db280>, <ast.Constant object at 0x7da2041db970>, <ast.Constant object at 0x7da2041dbdc0>, <ast.Constant object at 0x7da2041d94e0>, <ast.Constant object at 0x7da2041da860>]]] begin[:]
<ast.AugAssign object at 0x7da2041d81f0>
variable[unit_so_far] assign[=] call[name[unit_so_far].strip, parameter[]]
if compare[call[name[len], parameter[name[number_so_far]]] equal[==] constant[0]] begin[:]
variable[successFlag] assign[=] constant[False]
if name[negate_flag] begin[:]
variable[number_so_far] assign[=] binary_operation[constant[-] + name[number_so_far]]
<ast.Try object at 0x7da1b1502b60>
if <ast.UnaryOp object at 0x7da1b1500a60> begin[:]
return[tuple[[<ast.Name object at 0x7da1b15007f0>, <ast.Constant object at 0x7da1b15001f0>, <ast.Name object at 0x7da1b1502dd0>]]]
return[tuple[[<ast.Name object at 0x7da1b1501720>, <ast.Name object at 0x7da1b1501de0>, <ast.Name object at 0x7da1b15005b0>]]] | keyword[def] identifier[split_number_unit] ( identifier[string] , identifier[strip_list] = keyword[None] ):
literal[string]
identifier[successFlag] = keyword[True]
identifier[state] = literal[int]
identifier[number_so_far] = literal[string]
identifier[unit_so_far] = literal[string]
identifier[decimal_found] = keyword[False]
identifier[negate_flag] = keyword[False]
keyword[if] identifier[string] keyword[is] keyword[None] :
keyword[return] ( keyword[False] , literal[string] , literal[string] )
identifier[string] = identifier[string] . identifier[strip] ()
keyword[if] keyword[not] identifier[strip_list] :
identifier[strip_list] =[]
keyword[if] identifier[len] ( identifier[string] ):
keyword[if] identifier[string] [ literal[int] ]== literal[string] :
identifier[negate_flag] = keyword[True]
identifier[string] = identifier[string] [ literal[int] :]
keyword[if] identifier[len] ( identifier[string] ):
keyword[if] identifier[string] [ literal[int] ]== literal[string] :
identifier[string] = literal[string] + identifier[string]
keyword[if] identifier[len] ( identifier[string] ):
keyword[for] identifier[char] keyword[in] identifier[string] :
keyword[if] identifier[state] == literal[int] :
keyword[if] identifier[char] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[number_so_far] += identifier[char]
keyword[elif] identifier[char] keyword[in] identifier[strip_list] :
keyword[pass]
keyword[elif] identifier[char] == literal[string] :
keyword[if] identifier[decimal_found] :
identifier[successFlag] = keyword[False]
identifier[state] = literal[int]
keyword[else] :
identifier[number_so_far] += identifier[char]
identifier[decimal_found] = keyword[True]
keyword[elif] identifier[char] == literal[string] :
identifier[state] = literal[int]
keyword[else] :
identifier[unit_so_far] += identifier[char]
identifier[state] = literal[int]
keyword[elif] identifier[state] == literal[int] :
keyword[if] identifier[char] == literal[string] :
identifier[state] = literal[int]
keyword[elif] identifier[char] keyword[in] identifier[strip_list] :
keyword[pass]
keyword[else] :
identifier[unit_so_far] += identifier[char]
keyword[else] :
keyword[pass]
identifier[unit_so_far] = identifier[unit_so_far] . identifier[strip] ()
keyword[if] identifier[len] ( identifier[number_so_far] )== literal[int] :
identifier[successFlag] = keyword[False]
keyword[if] identifier[negate_flag] :
identifier[number_so_far] = literal[string] + identifier[number_so_far]
keyword[try] :
identifier[number_so_far] = identifier[decimal] . identifier[Decimal] ( identifier[number_so_far] )
keyword[except] :
identifier[successFlag] = keyword[False]
keyword[else] :
identifier[successFlag] = keyword[False]
keyword[if] keyword[not] identifier[successFlag] :
keyword[return] ( identifier[successFlag] , literal[string] , identifier[unit_so_far] )
keyword[return] ( identifier[successFlag] , identifier[number_so_far] , identifier[unit_so_far] ) | def split_number_unit(string, strip_list=None):
"""
takes a string and grabs the leading number and the following unit
both the number and unit returned are simple strings
returns a triple tuple of (successFlag, number, unit)
successFlag is False if the number is invalid
"""
successFlag = True
state = 0
number_so_far = ''
unit_so_far = ''
decimal_found = False
negate_flag = False
if string is None:
return (False, '', '') # depends on [control=['if'], data=[]]
string = string.strip()
if not strip_list:
strip_list = [] # depends on [control=['if'], data=[]]
if len(string):
if string[0] == '-':
negate_flag = True
string = string[1:] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if len(string):
if string[0] == '.':
string = '0' + string # a string of ".12" is actually a legit number, but the lack of a preceding 0 will confuse python, so we tack on a zero # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if len(string):
for char in string:
if state == 0: # number state
if char in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
number_so_far += char # depends on [control=['if'], data=['char']]
elif char in strip_list:
pass # depends on [control=['if'], data=[]]
elif char == '.':
if decimal_found:
successFlag = False # units do not begin with a period. ex: 234.2.anything
state = 2 # depends on [control=['if'], data=[]]
else:
number_so_far += char
decimal_found = True # depends on [control=['if'], data=['char']]
elif char == '\n':
state = 2 # depends on [control=['if'], data=[]]
else:
unit_so_far += char
state = 1 # depends on [control=['if'], data=['state']]
elif state == 1: # unit state
if char == '\n':
state = 2 # depends on [control=['if'], data=[]]
elif char in strip_list:
pass # depends on [control=['if'], data=[]]
else:
unit_so_far += char # depends on [control=['if'], data=['state']]
else: # discard state
pass # depends on [control=['for'], data=['char']] # clean up
unit_so_far = unit_so_far.strip()
if len(number_so_far) == 0:
successFlag = False # depends on [control=['if'], data=[]]
if negate_flag:
number_so_far = '-' + number_so_far # depends on [control=['if'], data=[]]
try:
number_so_far = decimal.Decimal(number_so_far) # depends on [control=['try'], data=[]]
except:
successFlag = False # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
successFlag = False
if not successFlag:
return (successFlag, '', unit_so_far) # depends on [control=['if'], data=[]]
return (successFlag, number_so_far, unit_so_far) |
def outgoing_caller_ids(self):
"""
Access the outgoing_caller_ids
:returns: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdList
:rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdList
"""
if self._outgoing_caller_ids is None:
self._outgoing_caller_ids = OutgoingCallerIdList(self._version, account_sid=self._solution['sid'], )
return self._outgoing_caller_ids | def function[outgoing_caller_ids, parameter[self]]:
constant[
Access the outgoing_caller_ids
:returns: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdList
:rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdList
]
if compare[name[self]._outgoing_caller_ids is constant[None]] begin[:]
name[self]._outgoing_caller_ids assign[=] call[name[OutgoingCallerIdList], parameter[name[self]._version]]
return[name[self]._outgoing_caller_ids] | keyword[def] identifier[outgoing_caller_ids] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_outgoing_caller_ids] keyword[is] keyword[None] :
identifier[self] . identifier[_outgoing_caller_ids] = identifier[OutgoingCallerIdList] ( identifier[self] . identifier[_version] , identifier[account_sid] = identifier[self] . identifier[_solution] [ literal[string] ],)
keyword[return] identifier[self] . identifier[_outgoing_caller_ids] | def outgoing_caller_ids(self):
"""
Access the outgoing_caller_ids
:returns: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdList
:rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdList
"""
if self._outgoing_caller_ids is None:
self._outgoing_caller_ids = OutgoingCallerIdList(self._version, account_sid=self._solution['sid']) # depends on [control=['if'], data=[]]
return self._outgoing_caller_ids |
def _get_destination_paths(self):
# type: (Uploader) ->
# Tuple[blobxfer.operations.azure.StorageAccount, str, str, str]
"""Get destination paths
:param Uploader self: this
:rtype: tuple
:return: (storage account, container, name, dpath)
"""
for dst in self._spec.destinations:
for dpath in dst.paths:
sdpath = str(dpath)
cont, dir = blobxfer.util.explode_azure_path(sdpath)
sa = self._creds.get_storage_account(
dst.lookup_storage_account(sdpath))
yield sa, cont, dir, dpath | def function[_get_destination_paths, parameter[self]]:
constant[Get destination paths
:param Uploader self: this
:rtype: tuple
:return: (storage account, container, name, dpath)
]
for taget[name[dst]] in starred[name[self]._spec.destinations] begin[:]
for taget[name[dpath]] in starred[name[dst].paths] begin[:]
variable[sdpath] assign[=] call[name[str], parameter[name[dpath]]]
<ast.Tuple object at 0x7da18dc99d80> assign[=] call[name[blobxfer].util.explode_azure_path, parameter[name[sdpath]]]
variable[sa] assign[=] call[name[self]._creds.get_storage_account, parameter[call[name[dst].lookup_storage_account, parameter[name[sdpath]]]]]
<ast.Yield object at 0x7da18dc9be80> | keyword[def] identifier[_get_destination_paths] ( identifier[self] ):
literal[string]
keyword[for] identifier[dst] keyword[in] identifier[self] . identifier[_spec] . identifier[destinations] :
keyword[for] identifier[dpath] keyword[in] identifier[dst] . identifier[paths] :
identifier[sdpath] = identifier[str] ( identifier[dpath] )
identifier[cont] , identifier[dir] = identifier[blobxfer] . identifier[util] . identifier[explode_azure_path] ( identifier[sdpath] )
identifier[sa] = identifier[self] . identifier[_creds] . identifier[get_storage_account] (
identifier[dst] . identifier[lookup_storage_account] ( identifier[sdpath] ))
keyword[yield] identifier[sa] , identifier[cont] , identifier[dir] , identifier[dpath] | def _get_destination_paths(self):
# type: (Uploader) ->
# Tuple[blobxfer.operations.azure.StorageAccount, str, str, str]
'Get destination paths\n :param Uploader self: this\n :rtype: tuple\n :return: (storage account, container, name, dpath)\n '
for dst in self._spec.destinations:
for dpath in dst.paths:
sdpath = str(dpath)
(cont, dir) = blobxfer.util.explode_azure_path(sdpath)
sa = self._creds.get_storage_account(dst.lookup_storage_account(sdpath))
yield (sa, cont, dir, dpath) # depends on [control=['for'], data=['dpath']] # depends on [control=['for'], data=['dst']] |
def add_element_list(self, elt_list, **kwargs):
"""Helper to add a list of similar elements to the current section.
Element names will be used as an identifier."""
for e in elt_list:
self.add_element(Element(e, **kwargs)) | def function[add_element_list, parameter[self, elt_list]]:
constant[Helper to add a list of similar elements to the current section.
Element names will be used as an identifier.]
for taget[name[e]] in starred[name[elt_list]] begin[:]
call[name[self].add_element, parameter[call[name[Element], parameter[name[e]]]]] | keyword[def] identifier[add_element_list] ( identifier[self] , identifier[elt_list] ,** identifier[kwargs] ):
literal[string]
keyword[for] identifier[e] keyword[in] identifier[elt_list] :
identifier[self] . identifier[add_element] ( identifier[Element] ( identifier[e] ,** identifier[kwargs] )) | def add_element_list(self, elt_list, **kwargs):
"""Helper to add a list of similar elements to the current section.
Element names will be used as an identifier."""
for e in elt_list:
self.add_element(Element(e, **kwargs)) # depends on [control=['for'], data=['e']] |
def findNext(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._findOne(self.findAllNext, name, attrs, text, **kwargs) | def function[findNext, parameter[self, name, attrs, text]]:
constant[Returns the first item that matches the given criteria and
appears after this Tag in the document.]
return[call[name[self]._findOne, parameter[name[self].findAllNext, name[name], name[attrs], name[text]]]] | keyword[def] identifier[findNext] ( identifier[self] , identifier[name] = keyword[None] , identifier[attrs] ={}, identifier[text] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[self] . identifier[_findOne] ( identifier[self] . identifier[findAllNext] , identifier[name] , identifier[attrs] , identifier[text] ,** identifier[kwargs] ) | def findNext(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._findOne(self.findAllNext, name, attrs, text, **kwargs) |
def get_memory_percent(self):
"""Compare physical system memory to process resident memory and
calculate process memory utilization as a percentage.
"""
rss = self._platform_impl.get_memory_info()[0]
try:
return (rss / float(TOTAL_PHYMEM)) * 100
except ZeroDivisionError:
return 0.0 | def function[get_memory_percent, parameter[self]]:
constant[Compare physical system memory to process resident memory and
calculate process memory utilization as a percentage.
]
variable[rss] assign[=] call[call[name[self]._platform_impl.get_memory_info, parameter[]]][constant[0]]
<ast.Try object at 0x7da20e9b2bf0> | keyword[def] identifier[get_memory_percent] ( identifier[self] ):
literal[string]
identifier[rss] = identifier[self] . identifier[_platform_impl] . identifier[get_memory_info] ()[ literal[int] ]
keyword[try] :
keyword[return] ( identifier[rss] / identifier[float] ( identifier[TOTAL_PHYMEM] ))* literal[int]
keyword[except] identifier[ZeroDivisionError] :
keyword[return] literal[int] | def get_memory_percent(self):
"""Compare physical system memory to process resident memory and
calculate process memory utilization as a percentage.
"""
rss = self._platform_impl.get_memory_info()[0]
try:
return rss / float(TOTAL_PHYMEM) * 100 # depends on [control=['try'], data=[]]
except ZeroDivisionError:
return 0.0 # depends on [control=['except'], data=[]] |
def deleteMessage(self, date):
"""
消息历史记录删除方法(删除 APP 内指定某天某小时内的所有会话消息记录。调用该接口返回成功后,date参数指定的某小时的消息记录文件将在随后的5-10分钟内被永久删除。) 方法
@param date:指定北京时间某天某小时,格式为2014010101,表示:2014年1月1日凌晨1点。(必传)
@return code:返回码,200 为正常。
@return errorMessage:错误信息。
"""
desc = {
"name": "CodeSuccessReslut",
"desc": " http 成功返回结果",
"fields": [{
"name": "code",
"type": "Integer",
"desc": "返回码,200 为正常。"
}, {
"name": "errorMessage",
"type": "String",
"desc": "错误信息。"
}]
}
r = self.call_api(
method=('API', 'POST', 'application/x-www-form-urlencoded'),
action='/message/history/delete.json',
params={"date": date})
return Response(r, desc) | def function[deleteMessage, parameter[self, date]]:
constant[
消息历史记录删除方法(删除 APP 内指定某天某小时内的所有会话消息记录。调用该接口返回成功后,date参数指定的某小时的消息记录文件将在随后的5-10分钟内被永久删除。) 方法
@param date:指定北京时间某天某小时,格式为2014010101,表示:2014年1月1日凌晨1点。(必传)
@return code:返回码,200 为正常。
@return errorMessage:错误信息。
]
variable[desc] assign[=] dictionary[[<ast.Constant object at 0x7da20c6e4ee0>, <ast.Constant object at 0x7da20c6e5ed0>, <ast.Constant object at 0x7da20c6e5600>], [<ast.Constant object at 0x7da20c6e54e0>, <ast.Constant object at 0x7da20c6e71c0>, <ast.List object at 0x7da20c6e5630>]]
variable[r] assign[=] call[name[self].call_api, parameter[]]
return[call[name[Response], parameter[name[r], name[desc]]]] | keyword[def] identifier[deleteMessage] ( identifier[self] , identifier[date] ):
literal[string]
identifier[desc] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] :[{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
},{
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string]
}]
}
identifier[r] = identifier[self] . identifier[call_api] (
identifier[method] =( literal[string] , literal[string] , literal[string] ),
identifier[action] = literal[string] ,
identifier[params] ={ literal[string] : identifier[date] })
keyword[return] identifier[Response] ( identifier[r] , identifier[desc] ) | def deleteMessage(self, date):
"""
消息历史记录删除方法(删除 APP 内指定某天某小时内的所有会话消息记录。调用该接口返回成功后,date参数指定的某小时的消息记录文件将在随后的5-10分钟内被永久删除。) 方法
@param date:指定北京时间某天某小时,格式为2014010101,表示:2014年1月1日凌晨1点。(必传)
@return code:返回码,200 为正常。
@return errorMessage:错误信息。
"""
desc = {'name': 'CodeSuccessReslut', 'desc': ' http 成功返回结果', 'fields': [{'name': 'code', 'type': 'Integer', 'desc': '返回码,200 为正常。'}, {'name': 'errorMessage', 'type': 'String', 'desc': '错误信息。'}]}
r = self.call_api(method=('API', 'POST', 'application/x-www-form-urlencoded'), action='/message/history/delete.json', params={'date': date})
return Response(r, desc) |
def extents(self):
"""list[VolumeExtent]: volume extents."""
if not self._is_parsed:
self._Parse()
self._is_parsed = True
return self._extents | def function[extents, parameter[self]]:
constant[list[VolumeExtent]: volume extents.]
if <ast.UnaryOp object at 0x7da1b07a1f00> begin[:]
call[name[self]._Parse, parameter[]]
name[self]._is_parsed assign[=] constant[True]
return[name[self]._extents] | keyword[def] identifier[extents] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_is_parsed] :
identifier[self] . identifier[_Parse] ()
identifier[self] . identifier[_is_parsed] = keyword[True]
keyword[return] identifier[self] . identifier[_extents] | def extents(self):
"""list[VolumeExtent]: volume extents."""
if not self._is_parsed:
self._Parse()
self._is_parsed = True # depends on [control=['if'], data=[]]
return self._extents |
def _compile_state(sls_opts, mods=None):
    '''
    Generates the chunks of lowdata from the list of modules
    '''
    highstate = HighState(sls_opts)
    # No explicit modules: compile the whole lowstate directly.
    if not mods:
        return highstate.compile_low_chunks()
    matches = {sls_opts['saltenv']: mods}
    high, errors = highstate.render_highstate(matches)
    high, extend_errors = highstate.state.reconcile_extend(high)
    errors += extend_errors
    errors += highstate.state.verify_high(high)
    # Verify that the high data is structurally sound before requisites.
    if errors:
        return errors
    high, requisite_errors = highstate.state.requisite_in(high)
    errors += requisite_errors
    high = highstate.state.apply_exclude(high)
    # Requisite injection may have produced new structural errors.
    if errors:
        return errors
    # Compile and verify the raw chunks.
    return highstate.state.compile_high_data(high)
constant[
Generates the chunks of lowdata from the list of modules
]
variable[st_] assign[=] call[name[HighState], parameter[name[sls_opts]]]
if <ast.UnaryOp object at 0x7da1b1f957e0> begin[:]
return[call[name[st_].compile_low_chunks, parameter[]]]
<ast.Tuple object at 0x7da1b1f964d0> assign[=] call[name[st_].render_highstate, parameter[dictionary[[<ast.Subscript object at 0x7da1b1f940d0>], [<ast.Name object at 0x7da1b1f97e80>]]]]
<ast.Tuple object at 0x7da1b1f28490> assign[=] call[name[st_].state.reconcile_extend, parameter[name[high_data]]]
<ast.AugAssign object at 0x7da1b1f28e20>
<ast.AugAssign object at 0x7da1b1f299f0>
if name[errors] begin[:]
return[name[errors]]
<ast.Tuple object at 0x7da1b1f29ab0> assign[=] call[name[st_].state.requisite_in, parameter[name[high_data]]]
<ast.AugAssign object at 0x7da1b1f29330>
variable[high_data] assign[=] call[name[st_].state.apply_exclude, parameter[name[high_data]]]
if name[errors] begin[:]
return[name[errors]]
return[call[name[st_].state.compile_high_data, parameter[name[high_data]]]] | keyword[def] identifier[_compile_state] ( identifier[sls_opts] , identifier[mods] = keyword[None] ):
literal[string]
identifier[st_] = identifier[HighState] ( identifier[sls_opts] )
keyword[if] keyword[not] identifier[mods] :
keyword[return] identifier[st_] . identifier[compile_low_chunks] ()
identifier[high_data] , identifier[errors] = identifier[st_] . identifier[render_highstate] ({ identifier[sls_opts] [ literal[string] ]: identifier[mods] })
identifier[high_data] , identifier[ext_errors] = identifier[st_] . identifier[state] . identifier[reconcile_extend] ( identifier[high_data] )
identifier[errors] += identifier[ext_errors]
identifier[errors] += identifier[st_] . identifier[state] . identifier[verify_high] ( identifier[high_data] )
keyword[if] identifier[errors] :
keyword[return] identifier[errors]
identifier[high_data] , identifier[req_in_errors] = identifier[st_] . identifier[state] . identifier[requisite_in] ( identifier[high_data] )
identifier[errors] += identifier[req_in_errors]
identifier[high_data] = identifier[st_] . identifier[state] . identifier[apply_exclude] ( identifier[high_data] )
keyword[if] identifier[errors] :
keyword[return] identifier[errors]
keyword[return] identifier[st_] . identifier[state] . identifier[compile_high_data] ( identifier[high_data] ) | def _compile_state(sls_opts, mods=None):
"""
Generates the chunks of lowdata from the list of modules
"""
st_ = HighState(sls_opts)
if not mods:
return st_.compile_low_chunks() # depends on [control=['if'], data=[]]
(high_data, errors) = st_.render_highstate({sls_opts['saltenv']: mods})
(high_data, ext_errors) = st_.state.reconcile_extend(high_data)
errors += ext_errors
errors += st_.state.verify_high(high_data)
if errors:
return errors # depends on [control=['if'], data=[]]
(high_data, req_in_errors) = st_.state.requisite_in(high_data)
errors += req_in_errors
high_data = st_.state.apply_exclude(high_data)
# Verify that the high data is structurally sound
if errors:
return errors # depends on [control=['if'], data=[]]
# Compile and verify the raw chunks
return st_.state.compile_high_data(high_data) |
def _Tension(T):
"""Equation for the surface tension
Parameters
----------
T : float
Temperature, [K]
Returns
-------
σ : float
Surface tension, [N/m]
Notes
------
Raise :class:`NotImplementedError` if input isn't in limit:
* 248.15 ≤ T ≤ 647
* Estrapolate to -25ºC in supercooled liquid metastable state
Examples
--------
>>> _Tension(300)
0.0716859625
>>> _Tension(450)
0.0428914992
References
----------
IAPWS, Revised Release on Surface Tension of Ordinary Water Substance
June 2014, http://www.iapws.org/relguide/Surf-H2O.html
"""
if 248.15 <= T <= Tc:
Tr = T/Tc
return 1e-3*(235.8*(1-Tr)**1.256*(1-0.625*(1-Tr)))
else:
raise NotImplementedError("Incoming out of bound") | def function[_Tension, parameter[T]]:
constant[Equation for the surface tension
Parameters
----------
T : float
Temperature, [K]
Returns
-------
σ : float
Surface tension, [N/m]
Notes
------
Raise :class:`NotImplementedError` if input isn't in limit:
* 248.15 ≤ T ≤ 647
* Estrapolate to -25ºC in supercooled liquid metastable state
Examples
--------
>>> _Tension(300)
0.0716859625
>>> _Tension(450)
0.0428914992
References
----------
IAPWS, Revised Release on Surface Tension of Ordinary Water Substance
June 2014, http://www.iapws.org/relguide/Surf-H2O.html
]
if compare[constant[248.15] less_or_equal[<=] name[T]] begin[:]
variable[Tr] assign[=] binary_operation[name[T] / name[Tc]]
return[binary_operation[constant[0.001] * binary_operation[binary_operation[constant[235.8] * binary_operation[binary_operation[constant[1] - name[Tr]] ** constant[1.256]]] * binary_operation[constant[1] - binary_operation[constant[0.625] * binary_operation[constant[1] - name[Tr]]]]]]] | keyword[def] identifier[_Tension] ( identifier[T] ):
literal[string]
keyword[if] literal[int] <= identifier[T] <= identifier[Tc] :
identifier[Tr] = identifier[T] / identifier[Tc]
keyword[return] literal[int] *( literal[int] *( literal[int] - identifier[Tr] )** literal[int] *( literal[int] - literal[int] *( literal[int] - identifier[Tr] )))
keyword[else] :
keyword[raise] identifier[NotImplementedError] ( literal[string] ) | def _Tension(T):
"""Equation for the surface tension
Parameters
----------
T : float
Temperature, [K]
Returns
-------
σ : float
Surface tension, [N/m]
Notes
------
Raise :class:`NotImplementedError` if input isn't in limit:
* 248.15 ≤ T ≤ 647
* Estrapolate to -25ºC in supercooled liquid metastable state
Examples
--------
>>> _Tension(300)
0.0716859625
>>> _Tension(450)
0.0428914992
References
----------
IAPWS, Revised Release on Surface Tension of Ordinary Water Substance
June 2014, http://www.iapws.org/relguide/Surf-H2O.html
"""
if 248.15 <= T <= Tc:
Tr = T / Tc
return 0.001 * (235.8 * (1 - Tr) ** 1.256 * (1 - 0.625 * (1 - Tr))) # depends on [control=['if'], data=['T']]
else:
raise NotImplementedError('Incoming out of bound') |
def align(self, out_path=None):
    """Align this FASTA file's sequences with MUSCLE.

    *out_path* defaults to the prefix path plus an ``.aln`` extension.
    Returns the aligned result wrapped in an AlignedFASTA.
    """
    target = out_path if out_path is not None else self.prefix_path + '.aln'
    sh.muscle38("-in", self.path, "-out", target)
    return AlignedFASTA(target)
constant[We align the sequences in the fasta file with muscle.]
if compare[name[out_path] is constant[None]] begin[:]
variable[out_path] assign[=] binary_operation[name[self].prefix_path + constant[.aln]]
call[name[sh].muscle38, parameter[constant[-in], name[self].path, constant[-out], name[out_path]]]
return[call[name[AlignedFASTA], parameter[name[out_path]]]] | keyword[def] identifier[align] ( identifier[self] , identifier[out_path] = keyword[None] ):
literal[string]
keyword[if] identifier[out_path] keyword[is] keyword[None] : identifier[out_path] = identifier[self] . identifier[prefix_path] + literal[string]
identifier[sh] . identifier[muscle38] ( literal[string] , identifier[self] . identifier[path] , literal[string] , identifier[out_path] )
keyword[return] identifier[AlignedFASTA] ( identifier[out_path] ) | def align(self, out_path=None):
"""We align the sequences in the fasta file with muscle."""
if out_path is None:
out_path = self.prefix_path + '.aln' # depends on [control=['if'], data=['out_path']]
sh.muscle38('-in', self.path, '-out', out_path)
return AlignedFASTA(out_path) |
def get_aux_files(basename):
    """
    Find the auxiliary files associated with the given image filename.

    Looks for:
        background (_bkg.fits)
        rms        (_rms.fits)
        mask       (.mim)
        catalogue  (_comp.fits)
        psf map    (_psf.fits)

    Parameters
    ----------
    basename : str
        The name/path of the input image.

    Returns
    -------
    aux : dict
        Dict with keys (bkg, rms, mask, cat, psf) mapping to the aux
        filename when it exists on disk, or None when it does not.
    """
    root = os.path.splitext(basename)[0]
    candidates = {"bkg": root + "_bkg.fits",
                  "rms": root + "_rms.fits",
                  "mask": root + ".mim",
                  "cat": root + "_comp.fits",
                  "psf": root + "_psf.fits"}
    # Keep a path only when the corresponding file actually exists.
    return {kind: (path if os.path.exists(path) else None)
            for kind, path in candidates.items()}
constant[
Look for and return all the aux files that are associated witht this filename.
Will look for:
background (_bkg.fits)
rms (_rms.fits)
mask (.mim)
catalogue (_comp.fits)
psf map (_psf.fits)
will return filenames if they exist, or None where they do not.
Parameters
----------
basename : str
The name/path of the input image.
Returns
-------
aux : dict
Dict of filenames or None with keys (bkg, rms, mask, cat, psf)
]
variable[base] assign[=] call[call[name[os].path.splitext, parameter[name[basename]]]][constant[0]]
variable[files] assign[=] dictionary[[<ast.Constant object at 0x7da2041d8760>, <ast.Constant object at 0x7da2041d9330>, <ast.Constant object at 0x7da2041d9f60>, <ast.Constant object at 0x7da2041da0e0>, <ast.Constant object at 0x7da2041db5b0>], [<ast.BinOp object at 0x7da2041d9b10>, <ast.BinOp object at 0x7da2041d8c70>, <ast.BinOp object at 0x7da2041daad0>, <ast.BinOp object at 0x7da2041d80d0>, <ast.BinOp object at 0x7da2041d8e80>]]
for taget[name[k]] in starred[call[name[files].keys, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da2041d8280> begin[:]
call[name[files]][name[k]] assign[=] constant[None]
return[name[files]] | keyword[def] identifier[get_aux_files] ( identifier[basename] ):
literal[string]
identifier[base] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[basename] )[ literal[int] ]
identifier[files] ={ literal[string] : identifier[base] + literal[string] ,
literal[string] : identifier[base] + literal[string] ,
literal[string] : identifier[base] + literal[string] ,
literal[string] : identifier[base] + literal[string] ,
literal[string] : identifier[base] + literal[string] }
keyword[for] identifier[k] keyword[in] identifier[files] . identifier[keys] ():
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[files] [ identifier[k] ]):
identifier[files] [ identifier[k] ]= keyword[None]
keyword[return] identifier[files] | def get_aux_files(basename):
"""
Look for and return all the aux files that are associated witht this filename.
Will look for:
background (_bkg.fits)
rms (_rms.fits)
mask (.mim)
catalogue (_comp.fits)
psf map (_psf.fits)
will return filenames if they exist, or None where they do not.
Parameters
----------
basename : str
The name/path of the input image.
Returns
-------
aux : dict
Dict of filenames or None with keys (bkg, rms, mask, cat, psf)
"""
base = os.path.splitext(basename)[0]
files = {'bkg': base + '_bkg.fits', 'rms': base + '_rms.fits', 'mask': base + '.mim', 'cat': base + '_comp.fits', 'psf': base + '_psf.fits'}
for k in files.keys():
if not os.path.exists(files[k]):
files[k] = None # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['k']]
return files |
def show_periodical_tree_by_issn(issn):
    """
    Render the periodical tree identified by its ISSN.
    """
    matching_trees = tree_handler().trees_by_issn(issn)
    if not matching_trees:
        # No such document -> HTTP 404 (message is user-facing Czech text).
        abort(404, "Dokument s ISSN '%s' není dostupný." % issn)
    path_composer = partial(web_tools.compose_tree_path, issn=True)
    return render_trees(matching_trees, path_composer)
constant[
Render tree using ISSN.
]
variable[trees] assign[=] call[call[name[tree_handler], parameter[]].trees_by_issn, parameter[name[issn]]]
if <ast.UnaryOp object at 0x7da1b14d2050> begin[:]
call[name[abort], parameter[constant[404], binary_operation[constant[Dokument s ISSN '%s' není dostupný.] <ast.Mod object at 0x7da2590d6920> name[issn]]]]
return[call[name[render_trees], parameter[name[trees], call[name[partial], parameter[name[web_tools].compose_tree_path]]]]] | keyword[def] identifier[show_periodical_tree_by_issn] ( identifier[issn] ):
literal[string]
identifier[trees] = identifier[tree_handler] (). identifier[trees_by_issn] ( identifier[issn] )
keyword[if] keyword[not] identifier[trees] :
identifier[abort] ( literal[int] , literal[string] % identifier[issn] )
keyword[return] identifier[render_trees] (
identifier[trees] ,
identifier[partial] ( identifier[web_tools] . identifier[compose_tree_path] , identifier[issn] = keyword[True] )
) | def show_periodical_tree_by_issn(issn):
"""
Render tree using ISSN.
"""
trees = tree_handler().trees_by_issn(issn)
if not trees:
abort(404, "Dokument s ISSN '%s' není dostupný." % issn) # depends on [control=['if'], data=[]]
return render_trees(trees, partial(web_tools.compose_tree_path, issn=True)) |
def one_or_more(
    schema: dict, unique_items: bool = True, min: int = 1, max: int = None
) -> dict:
    """
    Helper function to construct a schema that validates items matching
    `schema` or an array containing items matching `schema`.

    :param schema: The schema to use
    :param unique_items: Flag if array items should be unique
    :param min: Correlates to the ``minItems`` attribute of a JSON Schema array
    :param max: Correlates to the ``maxItems`` attribute of a JSON Schema
        array; omitted from the schema when ``None``
    """
    multi_schema = {
        "type": "array",
        "items": schema,
        "minItems": min,
        "uniqueItems": unique_items,
    }
    # Explicit None check: the old truthiness test silently dropped a
    # caller-supplied max of 0.
    if max is not None:
        multi_schema["maxItems"] = max
    return {"oneOf": [multi_schema, schema]}
constant[
Helper function to construct a schema that validates items matching
`schema` or an array containing items matching `schema`.
:param schema: The schema to use
:param unique_items: Flag if array items should be unique
:param min: Correlates to ``minLength`` attribute of JSON Schema array
:param max: Correlates to ``maxLength`` attribute of JSON Schema array
]
variable[multi_schema] assign[=] dictionary[[<ast.Constant object at 0x7da1b1e94700>, <ast.Constant object at 0x7da1b1e969e0>, <ast.Constant object at 0x7da1b1e940a0>, <ast.Constant object at 0x7da1b1e96fb0>], [<ast.Constant object at 0x7da1b1e95db0>, <ast.Name object at 0x7da1b1e96020>, <ast.Name object at 0x7da1b1e95720>, <ast.Name object at 0x7da1b1e97dc0>]]
if name[max] begin[:]
call[name[multi_schema]][constant[maxItems]] assign[=] name[max]
return[dictionary[[<ast.Constant object at 0x7da1b1e97970>], [<ast.List object at 0x7da1b1e95ed0>]]] | keyword[def] identifier[one_or_more] (
identifier[schema] : identifier[dict] , identifier[unique_items] : identifier[bool] = keyword[True] , identifier[min] : identifier[int] = literal[int] , identifier[max] : identifier[int] = keyword[None]
)-> identifier[dict] :
literal[string]
identifier[multi_schema] ={
literal[string] : literal[string] ,
literal[string] : identifier[schema] ,
literal[string] : identifier[min] ,
literal[string] : identifier[unique_items] ,
}
keyword[if] identifier[max] :
identifier[multi_schema] [ literal[string] ]= identifier[max]
keyword[return] { literal[string] :[ identifier[multi_schema] , identifier[schema] ]} | def one_or_more(schema: dict, unique_items: bool=True, min: int=1, max: int=None) -> dict:
"""
Helper function to construct a schema that validates items matching
`schema` or an array containing items matching `schema`.
:param schema: The schema to use
:param unique_items: Flag if array items should be unique
:param min: Correlates to ``minLength`` attribute of JSON Schema array
:param max: Correlates to ``maxLength`` attribute of JSON Schema array
"""
multi_schema = {'type': 'array', 'items': schema, 'minItems': min, 'uniqueItems': unique_items}
if max:
multi_schema['maxItems'] = max # depends on [control=['if'], data=[]]
return {'oneOf': [multi_schema, schema]} |
def contents(self, from_date=DEFAULT_DATETIME,
             offset=None, max_contents=MAX_CONTENTS):
    """Fetch the contents of a repository updated since *from_date*.

    Returns a generator that transparently follows the API's
    pagination.  The seconds of *from_date* are dropped because the
    underlying API only resolves timestamps to hours and minutes.

    :param from_date: fetch the contents updated since this date
    :param offset: fetch the contents starting from this offset
    :param max_contents: maximum number of contents to fetch per request
    """
    resource = self.RCONTENTS + '/' + self.MSEARCH
    # Confluence query (cql) built from the minute-resolution timestamp.
    date = from_date.strftime("%Y-%m-%d %H:%M")
    params = {
        self.PCQL: self.VCQL % {'date': date},
        self.PLIMIT: max_contents,
        self.PEXPAND: self.PANCESTORS,
    }
    if offset:
        params[self.PSTART] = offset
    yield from self._call(resource, params)
constant[Get the contents of a repository.
This method returns an iterator that manages the pagination
over contents. Take into account that the seconds of `from_date`
parameter will be ignored because the API only works with
hours and minutes.
:param from_date: fetch the contents updated since this date
:param offset: fetch the contents starting from this offset
:param limit: maximum number of contents to fetch per request
]
variable[resource] assign[=] binary_operation[binary_operation[name[self].RCONTENTS + constant[/]] + name[self].MSEARCH]
variable[date] assign[=] call[name[from_date].strftime, parameter[constant[%Y-%m-%d %H:%M]]]
variable[cql] assign[=] binary_operation[name[self].VCQL <ast.Mod object at 0x7da2590d6920> dictionary[[<ast.Constant object at 0x7da1b0350790>], [<ast.Name object at 0x7da1b0353610>]]]
variable[params] assign[=] dictionary[[<ast.Attribute object at 0x7da1b03506a0>, <ast.Attribute object at 0x7da1b020c4c0>, <ast.Attribute object at 0x7da1b020fe80>], [<ast.Name object at 0x7da1b020e230>, <ast.Name object at 0x7da1b020ce20>, <ast.Attribute object at 0x7da1b020dd80>]]
if name[offset] begin[:]
call[name[params]][name[self].PSTART] assign[=] name[offset]
for taget[name[response]] in starred[call[name[self]._call, parameter[name[resource], name[params]]]] begin[:]
<ast.Yield object at 0x7da1b020cf70> | keyword[def] identifier[contents] ( identifier[self] , identifier[from_date] = identifier[DEFAULT_DATETIME] ,
identifier[offset] = keyword[None] , identifier[max_contents] = identifier[MAX_CONTENTS] ):
literal[string]
identifier[resource] = identifier[self] . identifier[RCONTENTS] + literal[string] + identifier[self] . identifier[MSEARCH]
identifier[date] = identifier[from_date] . identifier[strftime] ( literal[string] )
identifier[cql] = identifier[self] . identifier[VCQL] %{ literal[string] : identifier[date] }
identifier[params] ={
identifier[self] . identifier[PCQL] : identifier[cql] ,
identifier[self] . identifier[PLIMIT] : identifier[max_contents] ,
identifier[self] . identifier[PEXPAND] : identifier[self] . identifier[PANCESTORS]
}
keyword[if] identifier[offset] :
identifier[params] [ identifier[self] . identifier[PSTART] ]= identifier[offset]
keyword[for] identifier[response] keyword[in] identifier[self] . identifier[_call] ( identifier[resource] , identifier[params] ):
keyword[yield] identifier[response] | def contents(self, from_date=DEFAULT_DATETIME, offset=None, max_contents=MAX_CONTENTS):
"""Get the contents of a repository.
This method returns an iterator that manages the pagination
over contents. Take into account that the seconds of `from_date`
parameter will be ignored because the API only works with
hours and minutes.
:param from_date: fetch the contents updated since this date
:param offset: fetch the contents starting from this offset
:param limit: maximum number of contents to fetch per request
"""
resource = self.RCONTENTS + '/' + self.MSEARCH
# Set confluence query parameter (cql)
date = from_date.strftime('%Y-%m-%d %H:%M')
cql = self.VCQL % {'date': date}
# Set parameters
params = {self.PCQL: cql, self.PLIMIT: max_contents, self.PEXPAND: self.PANCESTORS}
if offset:
params[self.PSTART] = offset # depends on [control=['if'], data=[]]
for response in self._call(resource, params):
yield response # depends on [control=['for'], data=['response']] |
def _strip_prefix(self, message):
"""
Checks if the bot was called by a user.
Returns the suffix if so.
Prefixes include the bot's nick as well as a set symbol.
"""
if not hasattr(self, "name_regex"):
"""
regex example:
^(((BotA|BotB)[,:]?\s+)|%)(.+)$
names = [BotA, BotB]
prefix = %
"""
names = self.config['names']
prefix = self.config['prefix']
name_regex_str = r'^(?:(?:(%s)[,:]?\s+)|%s)(.+)$' % (re.escape("|".join(names)), prefix)
self.name_regex = re.compile(name_regex_str, re.IGNORECASE)
search = self.name_regex.search(message)
if search:
return search.groups()[1]
return None | def function[_strip_prefix, parameter[self, message]]:
constant[
Checks if the bot was called by a user.
Returns the suffix if so.
Prefixes include the bot's nick as well as a set symbol.
]
if <ast.UnaryOp object at 0x7da1b131bfd0> begin[:]
constant[
regex example:
^(((BotA|BotB)[,:]?\s+)|%)(.+)$
names = [BotA, BotB]
prefix = %
]
variable[names] assign[=] call[name[self].config][constant[names]]
variable[prefix] assign[=] call[name[self].config][constant[prefix]]
variable[name_regex_str] assign[=] binary_operation[constant[^(?:(?:(%s)[,:]?\s+)|%s)(.+)$] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b131b610>, <ast.Name object at 0x7da1b131b6a0>]]]
name[self].name_regex assign[=] call[name[re].compile, parameter[name[name_regex_str], name[re].IGNORECASE]]
variable[search] assign[=] call[name[self].name_regex.search, parameter[name[message]]]
if name[search] begin[:]
return[call[call[name[search].groups, parameter[]]][constant[1]]]
return[constant[None]] | keyword[def] identifier[_strip_prefix] ( identifier[self] , identifier[message] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
literal[string]
identifier[names] = identifier[self] . identifier[config] [ literal[string] ]
identifier[prefix] = identifier[self] . identifier[config] [ literal[string] ]
identifier[name_regex_str] = literal[string] %( identifier[re] . identifier[escape] ( literal[string] . identifier[join] ( identifier[names] )), identifier[prefix] )
identifier[self] . identifier[name_regex] = identifier[re] . identifier[compile] ( identifier[name_regex_str] , identifier[re] . identifier[IGNORECASE] )
identifier[search] = identifier[self] . identifier[name_regex] . identifier[search] ( identifier[message] )
keyword[if] identifier[search] :
keyword[return] identifier[search] . identifier[groups] ()[ literal[int] ]
keyword[return] keyword[None] | def _strip_prefix(self, message):
"""
Checks if the bot was called by a user.
Returns the suffix if so.
Prefixes include the bot's nick as well as a set symbol.
"""
if not hasattr(self, 'name_regex'):
'\n regex example:\n ^(((BotA|BotB)[,:]?\\s+)|%)(.+)$\n \n names = [BotA, BotB]\n prefix = %\n '
names = self.config['names']
prefix = self.config['prefix']
name_regex_str = '^(?:(?:(%s)[,:]?\\s+)|%s)(.+)$' % (re.escape('|'.join(names)), prefix)
self.name_regex = re.compile(name_regex_str, re.IGNORECASE) # depends on [control=['if'], data=[]]
search = self.name_regex.search(message)
if search:
return search.groups()[1] # depends on [control=['if'], data=[]]
return None |
def get_url(cls, world, category=Category.EXPERIENCE, vocation=VocationFilter.ALL, page=1):
    """Build the Tibia.com URL of the highscores for the given parameters.

    Parameters
    ----------
    world: :class:`str`
        The game world of the desired highscores.
    category: :class:`Category`
        The desired highscores category.
    vocation: :class:`VocationFilter`
        The vocation filter to apply. By default all vocations will be shown.
    page: :class:`int`
        The page of highscores to show.

    Returns
    -------
    The URL to the Tibia.com highscores.
    """
    url_fields = (world, category.value, vocation.value, page)
    return HIGHSCORES_URL % url_fields
constant[Gets the Tibia.com URL of the highscores for the given parameters.
Parameters
----------
world: :class:`str`
The game world of the desired highscores.
category: :class:`Category`
The desired highscores category.
vocation: :class:`VocationFiler`
The vocation filter to apply. By default all vocations will be shown.
page: :class:`int`
The page of highscores to show.
Returns
-------
The URL to the Tibia.com highscores.
]
return[binary_operation[name[HIGHSCORES_URL] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0bda5f0>, <ast.Attribute object at 0x7da1b0bdb700>, <ast.Attribute object at 0x7da1b0bd97e0>, <ast.Name object at 0x7da1b0bd8460>]]]] | keyword[def] identifier[get_url] ( identifier[cls] , identifier[world] , identifier[category] = identifier[Category] . identifier[EXPERIENCE] , identifier[vocation] = identifier[VocationFilter] . identifier[ALL] , identifier[page] = literal[int] ):
literal[string]
keyword[return] identifier[HIGHSCORES_URL] %( identifier[world] , identifier[category] . identifier[value] , identifier[vocation] . identifier[value] , identifier[page] ) | def get_url(cls, world, category=Category.EXPERIENCE, vocation=VocationFilter.ALL, page=1):
"""Gets the Tibia.com URL of the highscores for the given parameters.
Parameters
----------
world: :class:`str`
The game world of the desired highscores.
category: :class:`Category`
The desired highscores category.
vocation: :class:`VocationFiler`
The vocation filter to apply. By default all vocations will be shown.
page: :class:`int`
The page of highscores to show.
Returns
-------
The URL to the Tibia.com highscores.
"""
return HIGHSCORES_URL % (world, category.value, vocation.value, page) |
def glob(dirs, patterns, exclude_patterns=None):
    """Return the list of files under ``dirs`` matching ``patterns``.

    Both directories and patterns are supplied as portable paths.  Each
    pattern should be a non-absolute path and can't contain '.' or '..'
    elements.  Each slash-separated element of a pattern can contain the
    following special characters:
    -  '?', which matches any character
    -  '*', which matches an arbitrary number of characters.
    A file $(d)/e1/e2/e3 (where 'd' is in $(dirs)) matches pattern p1/p2/p3
    if and only if e1 matches p1, e2 matches p2 and so on.
    For example:
        [ glob . : *.cpp ]
        [ glob . : */build/Jamfile ]

    :param dirs: list of directories to search
    :param patterns: list of glob patterns to include
    :param exclude_patterns: optional list of glob patterns whose matches
        are removed from the result
    """
    assert isinstance(patterns, list)
    assert isinstance(dirs, list)
    if not exclude_patterns:
        exclude_patterns = []
    else:
        assert isinstance(exclude_patterns, list)
    real_patterns = [os.path.join(d, p) for p in patterns for d in dirs]
    real_exclude_patterns = [os.path.join(d, p) for p in exclude_patterns
                             for d in dirs]
    inc = [os.path.normpath(name) for p in real_patterns
           for name in builtin_glob(p)]
    # Use a set for the exclusion test: the original list membership made
    # the final filter O(len(inc) * len(exc)).
    exc = {os.path.normpath(name) for p in real_exclude_patterns
           for name in builtin_glob(p)}
    return [x for x in inc if x not in exc]
constant[Returns the list of files matching the given pattern in the
specified directory. Both directories and patterns are
supplied as portable paths. Each pattern should be non-absolute
path, and can't contain '.' or '..' elements. Each slash separated
element of pattern can contain the following special characters:
- '?', which match any character
- '*', which matches arbitrary number of characters.
A file $(d)/e1/e2/e3 (where 'd' is in $(dirs)) matches pattern p1/p2/p3
if and only if e1 matches p1, e2 matches p2 and so on.
For example:
[ glob . : *.cpp ]
[ glob . : */build/Jamfile ]
]
assert[call[name[isinstance], parameter[name[patterns], name[list]]]]
assert[call[name[isinstance], parameter[name[dirs], name[list]]]]
if <ast.UnaryOp object at 0x7da1b1f65780> begin[:]
variable[exclude_patterns] assign[=] list[[]]
variable[real_patterns] assign[=] <ast.ListComp object at 0x7da1b1f65660>
variable[real_exclude_patterns] assign[=] <ast.ListComp object at 0x7da1b1f67c40>
variable[inc] assign[=] <ast.ListComp object at 0x7da1b1ff0df0>
variable[exc] assign[=] <ast.ListComp object at 0x7da1b1ff21a0>
return[<ast.ListComp object at 0x7da1b1ff1c30>] | keyword[def] identifier[glob] ( identifier[dirs] , identifier[patterns] , identifier[exclude_patterns] = keyword[None] ):
literal[string]
keyword[assert] ( identifier[isinstance] ( identifier[patterns] , identifier[list] ))
keyword[assert] ( identifier[isinstance] ( identifier[dirs] , identifier[list] ))
keyword[if] keyword[not] identifier[exclude_patterns] :
identifier[exclude_patterns] =[]
keyword[else] :
keyword[assert] ( identifier[isinstance] ( identifier[exclude_patterns] , identifier[list] ))
identifier[real_patterns] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[d] , identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[patterns] keyword[for] identifier[d] keyword[in] identifier[dirs] ]
identifier[real_exclude_patterns] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[d] , identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[exclude_patterns]
keyword[for] identifier[d] keyword[in] identifier[dirs] ]
identifier[inc] =[ identifier[os] . identifier[path] . identifier[normpath] ( identifier[name] ) keyword[for] identifier[p] keyword[in] identifier[real_patterns]
keyword[for] identifier[name] keyword[in] identifier[builtin_glob] ( identifier[p] )]
identifier[exc] =[ identifier[os] . identifier[path] . identifier[normpath] ( identifier[name] ) keyword[for] identifier[p] keyword[in] identifier[real_exclude_patterns]
keyword[for] identifier[name] keyword[in] identifier[builtin_glob] ( identifier[p] )]
keyword[return] [ identifier[x] keyword[for] identifier[x] keyword[in] identifier[inc] keyword[if] identifier[x] keyword[not] keyword[in] identifier[exc] ] | def glob(dirs, patterns, exclude_patterns=None):
"""Returns the list of files matching the given pattern in the
specified directory. Both directories and patterns are
supplied as portable paths. Each pattern should be non-absolute
path, and can't contain '.' or '..' elements. Each slash separated
element of pattern can contain the following special characters:
- '?', which match any character
- '*', which matches arbitrary number of characters.
A file $(d)/e1/e2/e3 (where 'd' is in $(dirs)) matches pattern p1/p2/p3
if and only if e1 matches p1, e2 matches p2 and so on.
For example:
[ glob . : *.cpp ]
[ glob . : */build/Jamfile ]
"""
assert isinstance(patterns, list)
assert isinstance(dirs, list)
if not exclude_patterns:
exclude_patterns = [] # depends on [control=['if'], data=[]]
else:
assert isinstance(exclude_patterns, list)
real_patterns = [os.path.join(d, p) for p in patterns for d in dirs]
real_exclude_patterns = [os.path.join(d, p) for p in exclude_patterns for d in dirs]
inc = [os.path.normpath(name) for p in real_patterns for name in builtin_glob(p)]
exc = [os.path.normpath(name) for p in real_exclude_patterns for name in builtin_glob(p)]
return [x for x in inc if x not in exc] |
def DTF(self):
    """Directed transfer function.

    .. math:: \\mathrm{DTF}_{ij}(f) = \\frac{H_{ij}(f)}
              {\\sqrt{H_{i:}(f) H_{i:}'(f)}}

    References
    ----------
    M. J. Kaminski, K. J. Blinowska. A new method of the description of the
    information flow in the brain structures. Biol. Cybernetics 65(3):
    203-210, 1991.
    """
    transfer = self.H()
    # Normalise each row i of H by sqrt(sum_k H_ik * conj(H_ik)).
    row_power = np.sum(transfer * transfer.conj(), axis=1, keepdims=True)
    return np.abs(transfer / np.sqrt(row_power))
constant[Directed transfer function.
.. math:: \mathrm{DTF}_{ij}(f) = \frac{H_{ij}(f)}
{\sqrt{H_{i:}(f) H_{i:}'(f)}}
References
----------
M. J. Kaminski, K. J. Blinowska. A new method of the description of the
information flow in the brain structures. Biol. Cybernetics 65(3):
203-210, 1991.
]
variable[H] assign[=] call[name[self].H, parameter[]]
return[call[name[np].abs, parameter[binary_operation[name[H] / call[name[np].sqrt, parameter[call[name[np].sum, parameter[binary_operation[name[H] * call[name[H].conj, parameter[]]]]]]]]]]] | keyword[def] identifier[DTF] ( identifier[self] ):
literal[string]
identifier[H] = identifier[self] . identifier[H] ()
keyword[return] identifier[np] . identifier[abs] ( identifier[H] / identifier[np] . identifier[sqrt] ( identifier[np] . identifier[sum] ( identifier[H] * identifier[H] . identifier[conj] (), identifier[axis] = literal[int] , identifier[keepdims] = keyword[True] ))) | def DTF(self):
"""Directed transfer function.
.. math:: \\mathrm{DTF}_{ij}(f) = \\frac{H_{ij}(f)}
{\\sqrt{H_{i:}(f) H_{i:}'(f)}}
References
----------
M. J. Kaminski, K. J. Blinowska. A new method of the description of the
information flow in the brain structures. Biol. Cybernetics 65(3):
203-210, 1991.
"""
H = self.H()
return np.abs(H / np.sqrt(np.sum(H * H.conj(), axis=1, keepdims=True))) |
def make_chains_with_names(sentences):
    '''
    Assemble in-doc coref chains by mapping equiv_id to tokens and
    their cleansed name strings.

    :param sentences: mapping of tagger_id to lists of sentences, each
        sentence carrying a ``tokens`` sequence
    :returns dict:
        keys are equiv_ids,
        values are tuple(set of cleansed name strings, set of tokens)
    '''
    # Tokens with equiv_id == -1 are typed entities with no chain of
    # their own.  Lumping them all under -1 would merge unrelated
    # mentions, so each gets a unique synthetic id, counting downward
    # from -2 to avoid colliding with real ids (and with -1 itself).
    next_fake_id = -2
    chains = collections.defaultdict(lambda: (set(), set()))
    for sents in sentences.values():
        for sent in sents:
            for tok in sent.tokens:
                if tok.entity_type is None:
                    continue
                if tok.equiv_id == -1:
                    chain_id = next_fake_id
                    next_fake_id -= 1
                else:
                    chain_id = tok.equiv_id
                names, toks = chains[chain_id]
                # Name parts are kept as a set of cleansed strings.
                names.add(cleanse(tok.token.decode('utf8')))
                # Keep a *reference* to the whole Token object.
                toks.add(tok)
    return chains
constant[
assemble in-doc coref chains by mapping equiv_id to tokens and
their cleansed name strings
:param sentences: iterator over token generators
:returns dict:
keys are equiv_ids,
values are tuple(concatentated name string, list of tokens)
]
variable[fake_equiv_ids] assign[=] <ast.UnaryOp object at 0x7da20c76eef0>
variable[equiv_ids] assign[=] call[name[collections].defaultdict, parameter[<ast.Lambda object at 0x7da20c76dcc0>]]
for taget[tuple[[<ast.Name object at 0x7da20c76cd90>, <ast.Name object at 0x7da20c76e2f0>]]] in starred[call[name[sentences].items, parameter[]]] begin[:]
for taget[name[sent]] in starred[name[sents]] begin[:]
for taget[name[tok]] in starred[name[sent].tokens] begin[:]
if compare[name[tok].entity_type is_not constant[None]] begin[:]
if compare[name[tok].equiv_id equal[==] <ast.UnaryOp object at 0x7da20c76dae0>] begin[:]
variable[eqid] assign[=] name[fake_equiv_ids]
<ast.AugAssign object at 0x7da20c76c6d0>
call[call[call[name[equiv_ids]][name[eqid]]][constant[0]].add, parameter[call[name[cleanse], parameter[call[name[tok].token.decode, parameter[constant[utf8]]]]]]]
call[call[call[name[equiv_ids]][name[eqid]]][constant[1]].add, parameter[name[tok]]]
return[name[equiv_ids]] | keyword[def] identifier[make_chains_with_names] ( identifier[sentences] ):
literal[string]
identifier[fake_equiv_ids] =- literal[int]
identifier[equiv_ids] = identifier[collections] . identifier[defaultdict] ( keyword[lambda] :( identifier[set] (), identifier[set] ()))
keyword[for] identifier[tagger_id] , identifier[sents] keyword[in] identifier[sentences] . identifier[items] ():
keyword[for] identifier[sent] keyword[in] identifier[sents] :
keyword[for] identifier[tok] keyword[in] identifier[sent] . identifier[tokens] :
keyword[if] identifier[tok] . identifier[entity_type] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[tok] . identifier[equiv_id] ==- literal[int] :
identifier[eqid] = identifier[fake_equiv_ids]
identifier[fake_equiv_ids] -= literal[int]
keyword[else] :
identifier[eqid] = identifier[tok] . identifier[equiv_id]
identifier[equiv_ids] [ identifier[eqid] ][ literal[int] ]. identifier[add] ( identifier[cleanse] ( identifier[tok] . identifier[token] . identifier[decode] ( literal[string] )))
identifier[equiv_ids] [ identifier[eqid] ][ literal[int] ]. identifier[add] ( identifier[tok] )
keyword[return] identifier[equiv_ids] | def make_chains_with_names(sentences):
"""
assemble in-doc coref chains by mapping equiv_id to tokens and
their cleansed name strings
:param sentences: iterator over token generators
:returns dict:
keys are equiv_ids,
values are tuple(concatentated name string, list of tokens)
"""
## if an equiv_id is -1, then the token is classified into some
## entity_type but has not other tokens in its chain. We don't
## want these all lumped together, so we give them distinct "fake"
## equiv_id other than -1 -- counting negatively to avoid
## collisions with "real" equiv_ids
fake_equiv_ids = -2
## use a default dictionary
equiv_ids = collections.defaultdict(lambda : (set(), set()))
for (tagger_id, sents) in sentences.items():
for sent in sents:
for tok in sent.tokens:
if tok.entity_type is not None:
## get an appropriate equiv_id
if tok.equiv_id == -1:
eqid = fake_equiv_ids
fake_equiv_ids -= 1 # depends on [control=['if'], data=[]]
else:
eqid = tok.equiv_id
## store the name parts initially as a set
equiv_ids[eqid][0].add(cleanse(tok.token.decode('utf8')))
## carry a *reference* to the entire Token object
equiv_ids[eqid][1].add(tok) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['tok']] # depends on [control=['for'], data=['sent']] # depends on [control=['for'], data=[]]
return equiv_ids |
def confd_state_internal_cdb_datastore_pending_subscription_sync_priority(self, **kwargs):
    """Auto Generated Code

    Build the ``confd-state/internal/cdb/datastore[name]/
    pending-subscription-sync/priority`` configuration tree and pass it
    to the callback (``kwargs['callback']`` or ``self._callback``).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "confd-state",
                         xmlns="http://tail-f.com/yang/confd-monitoring")
    # Descend through the fixed container hierarchy.
    for tag in ("internal", "cdb", "datastore"):
        node = ET.SubElement(node, tag)
    datastore = node
    ET.SubElement(datastore, "name").text = kwargs.pop('name')
    sync = ET.SubElement(datastore, "pending-subscription-sync")
    ET.SubElement(sync, "priority").text = kwargs.pop('priority')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
constant[Auto Generated Code
]
variable[config] assign[=] call[name[ET].Element, parameter[constant[config]]]
variable[confd_state] assign[=] call[name[ET].SubElement, parameter[name[config], constant[confd-state]]]
variable[internal] assign[=] call[name[ET].SubElement, parameter[name[confd_state], constant[internal]]]
variable[cdb] assign[=] call[name[ET].SubElement, parameter[name[internal], constant[cdb]]]
variable[datastore] assign[=] call[name[ET].SubElement, parameter[name[cdb], constant[datastore]]]
variable[name_key] assign[=] call[name[ET].SubElement, parameter[name[datastore], constant[name]]]
name[name_key].text assign[=] call[name[kwargs].pop, parameter[constant[name]]]
variable[pending_subscription_sync] assign[=] call[name[ET].SubElement, parameter[name[datastore], constant[pending-subscription-sync]]]
variable[priority] assign[=] call[name[ET].SubElement, parameter[name[pending_subscription_sync], constant[priority]]]
name[priority].text assign[=] call[name[kwargs].pop, parameter[constant[priority]]]
variable[callback] assign[=] call[name[kwargs].pop, parameter[constant[callback], name[self]._callback]]
return[call[name[callback], parameter[name[config]]]] | keyword[def] identifier[confd_state_internal_cdb_datastore_pending_subscription_sync_priority] ( identifier[self] ,** identifier[kwargs] ):
literal[string]
identifier[config] = identifier[ET] . identifier[Element] ( literal[string] )
identifier[confd_state] = identifier[ET] . identifier[SubElement] ( identifier[config] , literal[string] , identifier[xmlns] = literal[string] )
identifier[internal] = identifier[ET] . identifier[SubElement] ( identifier[confd_state] , literal[string] )
identifier[cdb] = identifier[ET] . identifier[SubElement] ( identifier[internal] , literal[string] )
identifier[datastore] = identifier[ET] . identifier[SubElement] ( identifier[cdb] , literal[string] )
identifier[name_key] = identifier[ET] . identifier[SubElement] ( identifier[datastore] , literal[string] )
identifier[name_key] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[pending_subscription_sync] = identifier[ET] . identifier[SubElement] ( identifier[datastore] , literal[string] )
identifier[priority] = identifier[ET] . identifier[SubElement] ( identifier[pending_subscription_sync] , literal[string] )
identifier[priority] . identifier[text] = identifier[kwargs] . identifier[pop] ( literal[string] )
identifier[callback] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[self] . identifier[_callback] )
keyword[return] identifier[callback] ( identifier[config] ) | def confd_state_internal_cdb_datastore_pending_subscription_sync_priority(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element('config')
confd_state = ET.SubElement(config, 'confd-state', xmlns='http://tail-f.com/yang/confd-monitoring')
internal = ET.SubElement(confd_state, 'internal')
cdb = ET.SubElement(internal, 'cdb')
datastore = ET.SubElement(cdb, 'datastore')
name_key = ET.SubElement(datastore, 'name')
name_key.text = kwargs.pop('name')
pending_subscription_sync = ET.SubElement(datastore, 'pending-subscription-sync')
priority = ET.SubElement(pending_subscription_sync, 'priority')
priority.text = kwargs.pop('priority')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def hard_limit_remote(self,
                      yidx,
                      ridx,
                      rtype='y',
                      rmin=None,
                      rmax=None,
                      min_yset=0,
                      max_yset=0):
    """Limit the output variables ``yidx`` when the remote variables
    ``ridx`` fall outside ``[rmin, rmax]``.

    Parameters
    ----------
    yidx :
        Indices of the output (algebraic) variables to clamp.
    ridx :
        Indices of the remote variables checked against the limits.
        Must have the same length as ``yidx``.
    rtype : str
        Which vector the remote indices refer to: ``'y'`` (algebraic)
        or ``'x'`` (state).
    rmin, rmax :
        Lower / upper limit for the remote variables; a falsy value
        disables the corresponding check.
    min_yset, max_yset :
        Values assigned to the limited outputs on a lower / upper
        violation.  Scalars are broadcast to the length of ``yidx``.

    NOTE(review): this function needs to be modernized.
    """
    ny = len(yidx)
    assert ny == len(
        ridx), "Length of output vars and remote vars does not match"
    assert rtype in ('x',
                     'y'), "ridx must be either y (algeb) or x (state)"
    # Broadcast scalar set-points into (ny, 1) double column vectors so
    # they can be fancy-indexed alongside yidx below.
    if isinstance(min_yset, (int, float)):
        min_yset = matrix(min_yset, (ny, 1), 'd')
    if isinstance(max_yset, (int, float)):
        max_yset = matrix(max_yset, (ny, 1), 'd')
    above_idx, below_idx = list(), list()
    yidx = matrix(yidx)
    if rmax:
        # find the over-limit remote idx
        above = ageb(self.__dict__[rtype][ridx], rmax)
        above_idx = index(above, 1.0)
        # reset the y values based on the remote limit violations
        self.y[yidx[above_idx]] = max_yset[above_idx]
        self.zymax[yidx[above_idx]] = 0
    if rmin:
        # Same for under-limit violations: pin the outputs to the lower
        # set-point and clear the corresponding zymin flags.
        below = aleb(self.__dict__[rtype][ridx], rmin)
        below_idx = index(below, 1.0)
        self.y[yidx[below_idx]] = min_yset[below_idx]
        self.zymin[yidx[below_idx]] = 0
    # Zero the residuals of every clamped output and, if anything was
    # limited, request a Jacobian refactorization.
    idx = above_idx + below_idx
    self.g[yidx[idx]] = 0
    if len(idx) > 0:
        self.factorize = True
constant[Limit the output of yidx if the remote y is not within the limits
This function needs to be modernized.
]
variable[ny] assign[=] call[name[len], parameter[name[yidx]]]
assert[compare[name[ny] equal[==] call[name[len], parameter[name[ridx]]]]]
assert[compare[name[rtype] in tuple[[<ast.Constant object at 0x7da2044c3220>, <ast.Constant object at 0x7da2044c0f70>]]]]
if call[name[isinstance], parameter[name[min_yset], tuple[[<ast.Name object at 0x7da2044c1090>, <ast.Name object at 0x7da2044c0c40>]]]] begin[:]
variable[min_yset] assign[=] call[name[matrix], parameter[name[min_yset], tuple[[<ast.Name object at 0x7da2044c2dd0>, <ast.Constant object at 0x7da2044c3250>]], constant[d]]]
if call[name[isinstance], parameter[name[max_yset], tuple[[<ast.Name object at 0x7da2044c0640>, <ast.Name object at 0x7da2044c3880>]]]] begin[:]
variable[max_yset] assign[=] call[name[matrix], parameter[name[max_yset], tuple[[<ast.Name object at 0x7da2044c36a0>, <ast.Constant object at 0x7da2044c3a00>]], constant[d]]]
<ast.Tuple object at 0x7da2044c05b0> assign[=] tuple[[<ast.Call object at 0x7da2044c2770>, <ast.Call object at 0x7da2044c1060>]]
variable[yidx] assign[=] call[name[matrix], parameter[name[yidx]]]
if name[rmax] begin[:]
variable[above] assign[=] call[name[ageb], parameter[call[call[name[self].__dict__][name[rtype]]][name[ridx]], name[rmax]]]
variable[above_idx] assign[=] call[name[index], parameter[name[above], constant[1.0]]]
call[name[self].y][call[name[yidx]][name[above_idx]]] assign[=] call[name[max_yset]][name[above_idx]]
call[name[self].zymax][call[name[yidx]][name[above_idx]]] assign[=] constant[0]
if name[rmin] begin[:]
variable[below] assign[=] call[name[aleb], parameter[call[call[name[self].__dict__][name[rtype]]][name[ridx]], name[rmin]]]
variable[below_idx] assign[=] call[name[index], parameter[name[below], constant[1.0]]]
call[name[self].y][call[name[yidx]][name[below_idx]]] assign[=] call[name[min_yset]][name[below_idx]]
call[name[self].zymin][call[name[yidx]][name[below_idx]]] assign[=] constant[0]
variable[idx] assign[=] binary_operation[name[above_idx] + name[below_idx]]
call[name[self].g][call[name[yidx]][name[idx]]] assign[=] constant[0]
if compare[call[name[len], parameter[name[idx]]] greater[>] constant[0]] begin[:]
name[self].factorize assign[=] constant[True] | keyword[def] identifier[hard_limit_remote] ( identifier[self] ,
identifier[yidx] ,
identifier[ridx] ,
identifier[rtype] = literal[string] ,
identifier[rmin] = keyword[None] ,
identifier[rmax] = keyword[None] ,
identifier[min_yset] = literal[int] ,
identifier[max_yset] = literal[int] ):
literal[string]
identifier[ny] = identifier[len] ( identifier[yidx] )
keyword[assert] identifier[ny] == identifier[len] (
identifier[ridx] ), literal[string]
keyword[assert] identifier[rtype] keyword[in] ( literal[string] ,
literal[string] ), literal[string]
keyword[if] identifier[isinstance] ( identifier[min_yset] ,( identifier[int] , identifier[float] )):
identifier[min_yset] = identifier[matrix] ( identifier[min_yset] ,( identifier[ny] , literal[int] ), literal[string] )
keyword[if] identifier[isinstance] ( identifier[max_yset] ,( identifier[int] , identifier[float] )):
identifier[max_yset] = identifier[matrix] ( identifier[max_yset] ,( identifier[ny] , literal[int] ), literal[string] )
identifier[above_idx] , identifier[below_idx] = identifier[list] (), identifier[list] ()
identifier[yidx] = identifier[matrix] ( identifier[yidx] )
keyword[if] identifier[rmax] :
identifier[above] = identifier[ageb] ( identifier[self] . identifier[__dict__] [ identifier[rtype] ][ identifier[ridx] ], identifier[rmax] )
identifier[above_idx] = identifier[index] ( identifier[above] , literal[int] )
identifier[self] . identifier[y] [ identifier[yidx] [ identifier[above_idx] ]]= identifier[max_yset] [ identifier[above_idx] ]
identifier[self] . identifier[zymax] [ identifier[yidx] [ identifier[above_idx] ]]= literal[int]
keyword[if] identifier[rmin] :
identifier[below] = identifier[aleb] ( identifier[self] . identifier[__dict__] [ identifier[rtype] ][ identifier[ridx] ], identifier[rmin] )
identifier[below_idx] = identifier[index] ( identifier[below] , literal[int] )
identifier[self] . identifier[y] [ identifier[yidx] [ identifier[below_idx] ]]= identifier[min_yset] [ identifier[below_idx] ]
identifier[self] . identifier[zymin] [ identifier[yidx] [ identifier[below_idx] ]]= literal[int]
identifier[idx] = identifier[above_idx] + identifier[below_idx]
identifier[self] . identifier[g] [ identifier[yidx] [ identifier[idx] ]]= literal[int]
keyword[if] identifier[len] ( identifier[idx] )> literal[int] :
identifier[self] . identifier[factorize] = keyword[True] | def hard_limit_remote(self, yidx, ridx, rtype='y', rmin=None, rmax=None, min_yset=0, max_yset=0):
"""Limit the output of yidx if the remote y is not within the limits
This function needs to be modernized.
"""
ny = len(yidx)
assert ny == len(ridx), 'Length of output vars and remote vars does not match'
assert rtype in ('x', 'y'), 'ridx must be either y (algeb) or x (state)'
if isinstance(min_yset, (int, float)):
min_yset = matrix(min_yset, (ny, 1), 'd') # depends on [control=['if'], data=[]]
if isinstance(max_yset, (int, float)):
max_yset = matrix(max_yset, (ny, 1), 'd') # depends on [control=['if'], data=[]]
(above_idx, below_idx) = (list(), list())
yidx = matrix(yidx)
if rmax:
# find the over-limit remote idx
above = ageb(self.__dict__[rtype][ridx], rmax)
above_idx = index(above, 1.0)
# reset the y values based on the remote limit violations
self.y[yidx[above_idx]] = max_yset[above_idx]
self.zymax[yidx[above_idx]] = 0 # depends on [control=['if'], data=[]]
if rmin:
below = aleb(self.__dict__[rtype][ridx], rmin)
below_idx = index(below, 1.0)
self.y[yidx[below_idx]] = min_yset[below_idx]
self.zymin[yidx[below_idx]] = 0 # depends on [control=['if'], data=[]]
idx = above_idx + below_idx
self.g[yidx[idx]] = 0
if len(idx) > 0:
self.factorize = True # depends on [control=['if'], data=[]] |
def createUser(self, localpart, domain, password=None):
    """
    Create a new, blank user account with the given name and domain and, if
    specified, with the given password.

    @type localpart: C{unicode}
    @param localpart: The local portion of the username. ie, the
    C{'alice'} in C{'alice@example.com'}.

    @type domain: C{unicode}
    @param domain: The domain portion of the username. ie, the
    C{'example.com'} in C{'alice@example.com'}.

    @type password: C{unicode} or C{None}
    @param password: The password to associate with the new account. If
    C{None}, generate a new password automatically.
    """
    loginSystem = self.browser.store.parent.findUnique(userbase.LoginSystem)
    if password is None:
        # Use the OS-backed CSPRNG for generated credentials: the
        # default random module is predictable and unsuitable for
        # passwords.  (Also replaces Python-2-only xrange with range.)
        rng = random.SystemRandom()
        alphabet = string.ascii_letters + string.digits
        password = u''.join(rng.choice(alphabet) for _ in range(8))
    loginSystem.addAccount(localpart, domain, password)
loginSystem.addAccount(localpart, domain, password) | def function[createUser, parameter[self, localpart, domain, password]]:
constant[
Create a new, blank user account with the given name and domain and, if
specified, with the given password.
@type localpart: C{unicode}
@param localpart: The local portion of the username. ie, the
C{'alice'} in C{'alice@example.com'}.
@type domain: C{unicode}
@param domain: The domain portion of the username. ie, the
C{'example.com'} in C{'alice@example.com'}.
@type password: C{unicode} or C{None}
@param password: The password to associate with the new account. If
C{None}, generate a new password automatically.
]
variable[loginSystem] assign[=] call[name[self].browser.store.parent.findUnique, parameter[name[userbase].LoginSystem]]
if compare[name[password] is constant[None]] begin[:]
variable[password] assign[=] call[constant[].join, parameter[<ast.ListComp object at 0x7da1b0a346d0>]]
call[name[loginSystem].addAccount, parameter[name[localpart], name[domain], name[password]]] | keyword[def] identifier[createUser] ( identifier[self] , identifier[localpart] , identifier[domain] , identifier[password] = keyword[None] ):
literal[string]
identifier[loginSystem] = identifier[self] . identifier[browser] . identifier[store] . identifier[parent] . identifier[findUnique] ( identifier[userbase] . identifier[LoginSystem] )
keyword[if] identifier[password] keyword[is] keyword[None] :
identifier[password] = literal[string] . identifier[join] ([ identifier[random] . identifier[choice] ( identifier[string] . identifier[ascii_letters] + identifier[string] . identifier[digits] ) keyword[for] identifier[i] keyword[in] identifier[xrange] ( literal[int] )])
identifier[loginSystem] . identifier[addAccount] ( identifier[localpart] , identifier[domain] , identifier[password] ) | def createUser(self, localpart, domain, password=None):
"""
Create a new, blank user account with the given name and domain and, if
specified, with the given password.
@type localpart: C{unicode}
@param localpart: The local portion of the username. ie, the
C{'alice'} in C{'alice@example.com'}.
@type domain: C{unicode}
@param domain: The domain portion of the username. ie, the
C{'example.com'} in C{'alice@example.com'}.
@type password: C{unicode} or C{None}
@param password: The password to associate with the new account. If
C{None}, generate a new password automatically.
"""
loginSystem = self.browser.store.parent.findUnique(userbase.LoginSystem)
if password is None:
password = u''.join([random.choice(string.ascii_letters + string.digits) for i in xrange(8)]) # depends on [control=['if'], data=['password']]
loginSystem.addAccount(localpart, domain, password) |
def invalidate_caches(cls):
    """Invoke invalidate_caches() on every path entry finder stored in
    sys.path_importer_cache that implements it."""
    for entry_finder in sys.path_importer_cache.values():
        invalidate = getattr(entry_finder, 'invalidate_caches', None)
        if invalidate is not None:
            invalidate()
constant[Call the invalidate_caches() method on all path entry finders
stored in sys.path_importer_caches (where implemented).]
for taget[name[finder]] in starred[call[name[sys].path_importer_cache.values, parameter[]]] begin[:]
if call[name[hasattr], parameter[name[finder], constant[invalidate_caches]]] begin[:]
call[name[finder].invalidate_caches, parameter[]] | keyword[def] identifier[invalidate_caches] ( identifier[cls] ):
literal[string]
keyword[for] identifier[finder] keyword[in] identifier[sys] . identifier[path_importer_cache] . identifier[values] ():
keyword[if] identifier[hasattr] ( identifier[finder] , literal[string] ):
identifier[finder] . identifier[invalidate_caches] () | def invalidate_caches(cls):
"""Call the invalidate_caches() method on all path entry finders
stored in sys.path_importer_caches (where implemented)."""
for finder in sys.path_importer_cache.values():
if hasattr(finder, 'invalidate_caches'):
finder.invalidate_caches() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['finder']] |
def get_popular_players(self, **params: keys):
    r"""Get a list of most queried players.

    The docstring is a raw string so the literal ``\*\*`` markers are
    not parsed as (invalid) escape sequences.

    \*\*keys: Optional[list] = None
        Filter which keys should be included in the
        response
    \*\*exclude: Optional[list] = None
        Filter which keys should be excluded from the
        response
    \*\*max: Optional[int] = None
        Limit the number of items returned in the response
    \*\*page: Optional[int] = None
        Works with max, the zero-based page of the
        items
    \*\*timeout: Optional[int] = None
        Custom timeout that overwrites Client.timeout
    """
    url = self.api.POPULAR + '/players'
    return self._get_model(url, PartialPlayerClan, **params)
constant[Get a list of most queried players
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*max: Optional[int] = None
Limit the number of items returned in the response
\*\*page: Optional[int] = None
Works with max, the zero-based page of the
items
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
]
variable[url] assign[=] binary_operation[name[self].api.POPULAR + constant[/players]]
return[call[name[self]._get_model, parameter[name[url], name[PartialPlayerClan]]]] | keyword[def] identifier[get_popular_players] ( identifier[self] ,** identifier[params] : identifier[keys] ):
literal[string]
identifier[url] = identifier[self] . identifier[api] . identifier[POPULAR] + literal[string]
keyword[return] identifier[self] . identifier[_get_model] ( identifier[url] , identifier[PartialPlayerClan] ,** identifier[params] ) | def get_popular_players(self, **params: keys):
"""Get a list of most queried players
\\*\\*keys: Optional[list] = None
Filter which keys should be included in the
response
\\*\\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\\*\\*max: Optional[int] = None
Limit the number of items returned in the response
\\*\\*page: Optional[int] = None
Works with max, the zero-based page of the
items
\\*\\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.POPULAR + '/players'
return self._get_model(url, PartialPlayerClan, **params) |
def rowmapmany(table, rowgenerator, header, failonerror=False):
    """
    Map each input row to any number of output rows via an arbitrary
    function.

    The `rowgenerator` callable receives a single row and yields zero or
    more output rows (lists or tuples); the yielded rows appear in the
    result table under the given `header`.  When `failonerror` is true,
    exceptions raised by `rowgenerator` propagate instead of the row
    being skipped.  Typical use is unpivoting one wide record into many
    (id, variable, value) rows, e.g. yielding one
    ``[row.id, 'age_months', row.age * 12]`` row per derived variable.

    See also the :func:`petl.transform.reshape.melt` function.
    """
    view = RowMapManyView(table, rowgenerator, header,
                          failonerror=failonerror)
    return view
constant[
Map each input row to any number of output rows via an arbitrary
function. E.g.::
>>> import petl as etl
>>> table1 = [['id', 'sex', 'age', 'height', 'weight'],
... [1, 'male', 16, 1.45, 62.0],
... [2, 'female', 19, 1.34, 55.4],
... [3, '-', 17, 1.78, 74.4],
... [4, 'male', 21, 1.33]]
>>> def rowgenerator(row):
... transmf = {'male': 'M', 'female': 'F'}
... yield [row[0], 'gender',
... transmf[row['sex']] if row['sex'] in transmf else None]
... yield [row[0], 'age_months', row.age * 12]
... yield [row[0], 'bmi', row.height / row.weight ** 2]
...
>>> table2 = etl.rowmapmany(table1, rowgenerator,
... header=['subject_id', 'variable', 'value'])
>>> table2.lookall()
+------------+--------------+-----------------------+
| subject_id | variable | value |
+============+==============+=======================+
| 1 | 'gender' | 'M' |
+------------+--------------+-----------------------+
| 1 | 'age_months' | 192 |
+------------+--------------+-----------------------+
| 1 | 'bmi' | 0.0003772112382934443 |
+------------+--------------+-----------------------+
| 2 | 'gender' | 'F' |
+------------+--------------+-----------------------+
| 2 | 'age_months' | 228 |
+------------+--------------+-----------------------+
| 2 | 'bmi' | 0.0004366015456998006 |
+------------+--------------+-----------------------+
| 3 | 'gender' | None |
+------------+--------------+-----------------------+
| 3 | 'age_months' | 204 |
+------------+--------------+-----------------------+
| 3 | 'bmi' | 0.0003215689675106949 |
+------------+--------------+-----------------------+
| 4 | 'gender' | 'M' |
+------------+--------------+-----------------------+
| 4 | 'age_months' | 252 |
+------------+--------------+-----------------------+
The `rowgenerator` function should accept a single row and yield zero or
more rows (lists or tuples).
See also the :func:`petl.transform.reshape.melt` function.
]
return[call[name[RowMapManyView], parameter[name[table], name[rowgenerator], name[header]]]] | keyword[def] identifier[rowmapmany] ( identifier[table] , identifier[rowgenerator] , identifier[header] , identifier[failonerror] = keyword[False] ):
literal[string]
keyword[return] identifier[RowMapManyView] ( identifier[table] , identifier[rowgenerator] , identifier[header] , identifier[failonerror] = identifier[failonerror] ) | def rowmapmany(table, rowgenerator, header, failonerror=False):
"""
Map each input row to any number of output rows via an arbitrary
function. E.g.::
>>> import petl as etl
>>> table1 = [['id', 'sex', 'age', 'height', 'weight'],
... [1, 'male', 16, 1.45, 62.0],
... [2, 'female', 19, 1.34, 55.4],
... [3, '-', 17, 1.78, 74.4],
... [4, 'male', 21, 1.33]]
>>> def rowgenerator(row):
... transmf = {'male': 'M', 'female': 'F'}
... yield [row[0], 'gender',
... transmf[row['sex']] if row['sex'] in transmf else None]
... yield [row[0], 'age_months', row.age * 12]
... yield [row[0], 'bmi', row.height / row.weight ** 2]
...
>>> table2 = etl.rowmapmany(table1, rowgenerator,
... header=['subject_id', 'variable', 'value'])
>>> table2.lookall()
+------------+--------------+-----------------------+
| subject_id | variable | value |
+============+==============+=======================+
| 1 | 'gender' | 'M' |
+------------+--------------+-----------------------+
| 1 | 'age_months' | 192 |
+------------+--------------+-----------------------+
| 1 | 'bmi' | 0.0003772112382934443 |
+------------+--------------+-----------------------+
| 2 | 'gender' | 'F' |
+------------+--------------+-----------------------+
| 2 | 'age_months' | 228 |
+------------+--------------+-----------------------+
| 2 | 'bmi' | 0.0004366015456998006 |
+------------+--------------+-----------------------+
| 3 | 'gender' | None |
+------------+--------------+-----------------------+
| 3 | 'age_months' | 204 |
+------------+--------------+-----------------------+
| 3 | 'bmi' | 0.0003215689675106949 |
+------------+--------------+-----------------------+
| 4 | 'gender' | 'M' |
+------------+--------------+-----------------------+
| 4 | 'age_months' | 252 |
+------------+--------------+-----------------------+
The `rowgenerator` function should accept a single row and yield zero or
more rows (lists or tuples).
See also the :func:`petl.transform.reshape.melt` function.
"""
return RowMapManyView(table, rowgenerator, header, failonerror=failonerror) |
def is_following(user, obj, flag=''):
    """
    Checks if a "follow" relationship exists.

    Returns True if exists, False otherwise.

    Pass a string value to ``flag`` to determine which type of "follow"
    relationship you want to check.

    Example::

        is_following(request.user, group)
        is_following(request.user, group, flag='liking')
    """
    check(obj)
    follow_model = apps.get_model('actstream', 'follow')
    criteria = {
        'user': user,
        'object_id': obj.pk,
        'content_type': ContentType.objects.get_for_model(obj),
    }
    matches = follow_model.objects.filter(**criteria)
    if flag:
        matches = matches.filter(flag=flag)
    return matches.exists()
constant[
Checks if a "follow" relationship exists.
Returns True if exists, False otherwise.
Pass a string value to ``flag`` to determine which type of "follow" relationship you want to check.
Example::
is_following(request.user, group)
is_following(request.user, group, flag='liking')
]
call[name[check], parameter[name[obj]]]
variable[qs] assign[=] call[call[name[apps].get_model, parameter[constant[actstream], constant[follow]]].objects.filter, parameter[]]
if name[flag] begin[:]
variable[qs] assign[=] call[name[qs].filter, parameter[]]
return[call[name[qs].exists, parameter[]]] | keyword[def] identifier[is_following] ( identifier[user] , identifier[obj] , identifier[flag] = literal[string] ):
literal[string]
identifier[check] ( identifier[obj] )
identifier[qs] = identifier[apps] . identifier[get_model] ( literal[string] , literal[string] ). identifier[objects] . identifier[filter] (
identifier[user] = identifier[user] , identifier[object_id] = identifier[obj] . identifier[pk] ,
identifier[content_type] = identifier[ContentType] . identifier[objects] . identifier[get_for_model] ( identifier[obj] )
)
keyword[if] identifier[flag] :
identifier[qs] = identifier[qs] . identifier[filter] ( identifier[flag] = identifier[flag] )
keyword[return] identifier[qs] . identifier[exists] () | def is_following(user, obj, flag=''):
"""
Checks if a "follow" relationship exists.
Returns True if exists, False otherwise.
Pass a string value to ``flag`` to determine which type of "follow" relationship you want to check.
Example::
is_following(request.user, group)
is_following(request.user, group, flag='liking')
"""
check(obj)
qs = apps.get_model('actstream', 'follow').objects.filter(user=user, object_id=obj.pk, content_type=ContentType.objects.get_for_model(obj))
if flag:
qs = qs.filter(flag=flag) # depends on [control=['if'], data=[]]
return qs.exists() |
def is_all_Ns(self, start=0, end=None):
    '''Returns true if the sequence is all Ns (upper or lower case)'''
    if end is None:
        end = len(self)
    else:
        if start > end:
            raise Error('Error in is_all_Ns. Start coord must be <= end coord')
        # caller-supplied end is inclusive; slicing below is exclusive
        end += 1
    if len(self) == 0:
        # an empty sequence is never considered all Ns
        return False
    # all Ns <=> no character other than N/n appears in the window
    return re.search('[^Nn]', self.seq[start:end]) is None
constant[Returns true if the sequence is all Ns (upper or lower case)]
if compare[name[end] is_not constant[None]] begin[:]
if compare[name[start] greater[>] name[end]] begin[:]
<ast.Raise object at 0x7da1aff74040>
<ast.AugAssign object at 0x7da1aff77790>
if compare[call[name[len], parameter[name[self]]] equal[==] constant[0]] begin[:]
return[constant[False]] | keyword[def] identifier[is_all_Ns] ( identifier[self] , identifier[start] = literal[int] , identifier[end] = keyword[None] ):
literal[string]
keyword[if] identifier[end] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[start] > identifier[end] :
keyword[raise] identifier[Error] ( literal[string] )
identifier[end] += literal[int]
keyword[else] :
identifier[end] = identifier[len] ( identifier[self] )
keyword[if] identifier[len] ( identifier[self] )== literal[int] :
keyword[return] keyword[False]
keyword[else] :
keyword[return] identifier[re] . identifier[search] ( literal[string] , identifier[self] . identifier[seq] [ identifier[start] : identifier[end] ]) keyword[is] keyword[None] | def is_all_Ns(self, start=0, end=None):
"""Returns true if the sequence is all Ns (upper or lower case)"""
if end is not None:
if start > end:
raise Error('Error in is_all_Ns. Start coord must be <= end coord') # depends on [control=['if'], data=[]]
end += 1 # depends on [control=['if'], data=['end']]
else:
end = len(self)
if len(self) == 0:
return False # depends on [control=['if'], data=[]]
else:
return re.search('[^Nn]', self.seq[start:end]) is None |
def prune_feed_map(meta_graph, feed_map):
    """Remove entries from *feed_map* whose nodes no longer exist.

    Args:
        meta_graph: object exposing a ``graph_def.node`` sequence; each
            node has a ``name`` attribute.
        feed_map: dict keyed by tensor names (``"<node_name>:0"``).
            Mutated in place so callers observe the pruned mapping.
    """
    # Build the valid-name set once: O(1) membership per key, instead of
    # the original O(nodes) list scan for every feed_map entry.
    valid_names = {node.name + ":0" for node in meta_graph.graph_def.node}
    # Snapshot the keys so we can delete from the dict while iterating.
    for key in list(feed_map):
        if key not in valid_names:
            del feed_map[key]
constant[Function to prune the feedmap of nodes which no longer exist.]
variable[node_names] assign[=] <ast.ListComp object at 0x7da20c6aae90>
variable[keys_to_delete] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c6a89d0>, <ast.Name object at 0x7da20c6aa440>]]] in starred[call[name[feed_map].items, parameter[]]] begin[:]
if compare[name[k] <ast.NotIn object at 0x7da2590d7190> name[node_names]] begin[:]
call[name[keys_to_delete].append, parameter[name[k]]]
for taget[name[k]] in starred[name[keys_to_delete]] begin[:]
<ast.Delete object at 0x7da20c6a9180> | keyword[def] identifier[prune_feed_map] ( identifier[meta_graph] , identifier[feed_map] ):
literal[string]
identifier[node_names] =[ identifier[x] . identifier[name] + literal[string] keyword[for] identifier[x] keyword[in] identifier[meta_graph] . identifier[graph_def] . identifier[node] ]
identifier[keys_to_delete] =[]
keyword[for] identifier[k] , identifier[_] keyword[in] identifier[feed_map] . identifier[items] ():
keyword[if] identifier[k] keyword[not] keyword[in] identifier[node_names] :
identifier[keys_to_delete] . identifier[append] ( identifier[k] )
keyword[for] identifier[k] keyword[in] identifier[keys_to_delete] :
keyword[del] identifier[feed_map] [ identifier[k] ] | def prune_feed_map(meta_graph, feed_map):
"""Function to prune the feedmap of nodes which no longer exist."""
node_names = [x.name + ':0' for x in meta_graph.graph_def.node]
keys_to_delete = []
for (k, _) in feed_map.items():
if k not in node_names:
keys_to_delete.append(k) # depends on [control=['if'], data=['k']] # depends on [control=['for'], data=[]]
for k in keys_to_delete:
del feed_map[k] # depends on [control=['for'], data=['k']] |
def to_string(self):
    """Return this edge rendered as a dot-language statement.

    Endpoints may be plain names, numeric ids, or subgraph definitions;
    the connector is '->' for digraphs and '--' otherwise.
    """

    def format_endpoint(ref):
        # A frozendict endpoint is an embedded subgraph definition.
        if isinstance(ref, frozendict):
            return Subgraph(obj_dict=ref).to_string()
        if isinstance(ref, (int, long)):
            return str(ref)
        return ref

    parts = [format_endpoint(self.parse_node_ref(self.get_source()))]

    parent = self.get_parent_graph()
    if parent and parent.get_top_graph_type() and parent.get_top_graph_type() == 'digraph':
        parts.append('->')
    else:
        parts.append('--')

    parts.append(format_endpoint(self.parse_node_ref(self.get_destination())))

    attr_strings = []
    for attr_name, attr_value in self.obj_dict['attributes'].iteritems():
        if attr_value is None:
            # bare attribute, rendered without a value
            attr_strings.append(attr_name)
        else:
            attr_strings.append('%s=%s' % (attr_name, quote_if_necessary(attr_value)))
    rendered_attrs = ', '.join(attr_strings)
    if rendered_attrs:
        parts.append(' [' + rendered_attrs + ']')

    return ' '.join(parts) + ';'
constant[Returns a string representation of the edge in dot language.
]
variable[src] assign[=] call[name[self].parse_node_ref, parameter[call[name[self].get_source, parameter[]]]]
variable[dst] assign[=] call[name[self].parse_node_ref, parameter[call[name[self].get_destination, parameter[]]]]
if call[name[isinstance], parameter[name[src], name[frozendict]]] begin[:]
variable[edge] assign[=] list[[<ast.Call object at 0x7da1b16dd990>]]
if <ast.BoolOp object at 0x7da207f01060> begin[:]
call[name[edge].append, parameter[constant[->]]]
if call[name[isinstance], parameter[name[dst], name[frozendict]]] begin[:]
call[name[edge].append, parameter[call[call[name[Subgraph], parameter[]].to_string, parameter[]]]]
variable[edge_attr] assign[=] call[name[list], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da207f03d60>, <ast.Name object at 0x7da207f02800>]]] in starred[call[call[name[self].obj_dict][constant[attributes]].iteritems, parameter[]]] begin[:]
if compare[name[value] is_not constant[None]] begin[:]
call[name[edge_attr].append, parameter[binary_operation[constant[%s=%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da207f018d0>, <ast.Call object at 0x7da207f024a0>]]]]]
variable[edge_attr] assign[=] call[constant[, ].join, parameter[name[edge_attr]]]
if name[edge_attr] begin[:]
call[name[edge].append, parameter[binary_operation[binary_operation[constant[ [] + name[edge_attr]] + constant[]]]]]
return[binary_operation[call[constant[ ].join, parameter[name[edge]]] + constant[;]]] | keyword[def] identifier[to_string] ( identifier[self] ):
literal[string]
identifier[src] = identifier[self] . identifier[parse_node_ref] ( identifier[self] . identifier[get_source] ())
identifier[dst] = identifier[self] . identifier[parse_node_ref] ( identifier[self] . identifier[get_destination] ())
keyword[if] identifier[isinstance] ( identifier[src] , identifier[frozendict] ):
identifier[edge] =[ identifier[Subgraph] ( identifier[obj_dict] = identifier[src] ). identifier[to_string] ()]
keyword[elif] identifier[isinstance] ( identifier[src] ,( identifier[int] , identifier[long] )):
identifier[edge] =[ identifier[str] ( identifier[src] )]
keyword[else] :
identifier[edge] =[ identifier[src] ]
keyword[if] ( identifier[self] . identifier[get_parent_graph] () keyword[and]
identifier[self] . identifier[get_parent_graph] (). identifier[get_top_graph_type] () keyword[and]
identifier[self] . identifier[get_parent_graph] (). identifier[get_top_graph_type] ()== literal[string] ):
identifier[edge] . identifier[append] ( literal[string] )
keyword[else] :
identifier[edge] . identifier[append] ( literal[string] )
keyword[if] identifier[isinstance] ( identifier[dst] , identifier[frozendict] ):
identifier[edge] . identifier[append] ( identifier[Subgraph] ( identifier[obj_dict] = identifier[dst] ). identifier[to_string] ())
keyword[elif] identifier[isinstance] ( identifier[dst] ,( identifier[int] , identifier[long] )):
identifier[edge] . identifier[append] ( identifier[str] ( identifier[dst] ))
keyword[else] :
identifier[edge] . identifier[append] ( identifier[dst] )
identifier[edge_attr] = identifier[list] ()
keyword[for] identifier[attr] , identifier[value] keyword[in] identifier[self] . identifier[obj_dict] [ literal[string] ]. identifier[iteritems] ():
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
identifier[edge_attr] . identifier[append] ( literal[string] %( identifier[attr] , identifier[quote_if_necessary] ( identifier[value] )))
keyword[else] :
identifier[edge_attr] . identifier[append] ( identifier[attr] )
identifier[edge_attr] = literal[string] . identifier[join] ( identifier[edge_attr] )
keyword[if] identifier[edge_attr] :
identifier[edge] . identifier[append] ( literal[string] + identifier[edge_attr] + literal[string] )
keyword[return] literal[string] . identifier[join] ( identifier[edge] )+ literal[string] | def to_string(self):
"""Returns a string representation of the edge in dot language.
"""
src = self.parse_node_ref(self.get_source())
dst = self.parse_node_ref(self.get_destination())
if isinstance(src, frozendict):
edge = [Subgraph(obj_dict=src).to_string()] # depends on [control=['if'], data=[]]
elif isinstance(src, (int, long)):
edge = [str(src)] # depends on [control=['if'], data=[]]
else:
edge = [src]
if self.get_parent_graph() and self.get_parent_graph().get_top_graph_type() and (self.get_parent_graph().get_top_graph_type() == 'digraph'):
edge.append('->') # depends on [control=['if'], data=[]]
else:
edge.append('--')
if isinstance(dst, frozendict):
edge.append(Subgraph(obj_dict=dst).to_string()) # depends on [control=['if'], data=[]]
elif isinstance(dst, (int, long)):
edge.append(str(dst)) # depends on [control=['if'], data=[]]
else:
edge.append(dst)
edge_attr = list()
for (attr, value) in self.obj_dict['attributes'].iteritems():
if value is not None:
edge_attr.append('%s=%s' % (attr, quote_if_necessary(value))) # depends on [control=['if'], data=['value']]
else:
edge_attr.append(attr) # depends on [control=['for'], data=[]]
edge_attr = ', '.join(edge_attr)
if edge_attr:
edge.append(' [' + edge_attr + ']') # depends on [control=['if'], data=[]]
return ' '.join(edge) + ';' |
def restart():
    """Restarts scapy"""
    if not (conf.interactive and os.path.isfile(sys.argv[0])):
        raise OSError("Scapy was not started from console")
    argv = [sys.executable] + sys.argv
    if WINDOWS:
        try:
            res_code = subprocess.call(argv)
        except KeyboardInterrupt:
            res_code = 1
        finally:
            # Terminate immediately so the child becomes the "restarted" scapy.
            os._exit(res_code)
    # POSIX: replace the current process image in place.
    os.execv(sys.executable, argv)
constant[Restarts scapy]
if <ast.BoolOp object at 0x7da1b21a0820> begin[:]
<ast.Raise object at 0x7da1b21a3730>
if name[WINDOWS] begin[:]
<ast.Try object at 0x7da1b21a3d30>
call[name[os].execv, parameter[name[sys].executable, binary_operation[list[[<ast.Attribute object at 0x7da1b21a3250>]] + name[sys].argv]]] | keyword[def] identifier[restart] ():
literal[string]
keyword[if] keyword[not] identifier[conf] . identifier[interactive] keyword[or] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[sys] . identifier[argv] [ literal[int] ]):
keyword[raise] identifier[OSError] ( literal[string] )
keyword[if] identifier[WINDOWS] :
keyword[try] :
identifier[res_code] = identifier[subprocess] . identifier[call] ([ identifier[sys] . identifier[executable] ]+ identifier[sys] . identifier[argv] )
keyword[except] identifier[KeyboardInterrupt] :
identifier[res_code] = literal[int]
keyword[finally] :
identifier[os] . identifier[_exit] ( identifier[res_code] )
identifier[os] . identifier[execv] ( identifier[sys] . identifier[executable] ,[ identifier[sys] . identifier[executable] ]+ identifier[sys] . identifier[argv] ) | def restart():
"""Restarts scapy"""
if not conf.interactive or not os.path.isfile(sys.argv[0]):
raise OSError('Scapy was not started from console') # depends on [control=['if'], data=[]]
if WINDOWS:
try:
res_code = subprocess.call([sys.executable] + sys.argv) # depends on [control=['try'], data=[]]
except KeyboardInterrupt:
res_code = 1 # depends on [control=['except'], data=[]]
finally:
os._exit(res_code) # depends on [control=['if'], data=[]]
os.execv(sys.executable, [sys.executable] + sys.argv) |
def getElementsCustomFilter(self, filterFunc):
    '''
    getElementsCustomFilter - Searches children of this tag for those matching a provided user function
    @param filterFunc <function> - A function or lambda expression that should return "True" if the passed node matches criteria.
    @return - TagCollection of matching results
    @see getFirstElementCustomFilter
    '''
    matched = []
    for node in self.children:
        # A node is kept only when the filter returns exactly True.
        if filterFunc(node) is True:
            matched.append(node)
        # Recurse so matches anywhere in the subtree are collected,
        # in pre-order (a node precedes its descendants).
        matched += node.getElementsCustomFilter(filterFunc)
    return TagCollection(matched)
constant[
getElementsCustomFilter - Searches children of this tag for those matching a provided user function
@param filterFunc <function> - A function or lambda expression that should return "True" if the passed node matches criteria.
@return - TagCollection of matching results
@see getFirstElementCustomFilter
]
variable[elements] assign[=] list[[]]
for taget[name[child]] in starred[name[self].children] begin[:]
if compare[call[name[filterFunc], parameter[name[child]]] is constant[True]] begin[:]
call[name[elements].append, parameter[name[child]]]
<ast.AugAssign object at 0x7da1b11bf790>
return[call[name[TagCollection], parameter[name[elements]]]] | keyword[def] identifier[getElementsCustomFilter] ( identifier[self] , identifier[filterFunc] ):
literal[string]
identifier[elements] =[]
keyword[for] identifier[child] keyword[in] identifier[self] . identifier[children] :
keyword[if] identifier[filterFunc] ( identifier[child] ) keyword[is] keyword[True] :
identifier[elements] . identifier[append] ( identifier[child] )
identifier[elements] += identifier[child] . identifier[getElementsCustomFilter] ( identifier[filterFunc] )
keyword[return] identifier[TagCollection] ( identifier[elements] ) | def getElementsCustomFilter(self, filterFunc):
"""
getElementsCustomFilter - Searches children of this tag for those matching a provided user function
@param filterFunc <function> - A function or lambda expression that should return "True" if the passed node matches criteria.
@return - TagCollection of matching results
@see getFirstElementCustomFilter
"""
elements = []
for child in self.children:
if filterFunc(child) is True:
elements.append(child) # depends on [control=['if'], data=[]]
elements += child.getElementsCustomFilter(filterFunc) # depends on [control=['for'], data=['child']]
return TagCollection(elements) |
def get_decoders_by_path(query):
    """
    RETURN MAP FROM QUERY PATH TO LIST OF DECODER ARRAYS
    :param query: jx query; its edges (or groupby) are turned into AggsDecoders
    :return: Data mapping a nested-document path (as a literal field) to the
        list of AggsDecoder instances that operate at that depth
    """
    schema = query.frum.schema
    output = Data()
    if query.edges:
        if query.sort and query.format != "cube":
            # REORDER EDGES/GROUPBY TO MATCH THE SORT
            query.edges = sort_edges(query, "edges")
    elif query.groupby:
        if query.sort and query.format != "cube":
            # same reordering applies when grouping instead of edging
            query.groupby = sort_edges(query, "groupby")
    # edges and groupby are alternatives; iterate whichever is present
    for edge in wrap(coalesce(query.edges, query.groupby, [])):
        limit = coalesce(edge.domain.limit, query.limit, DEFAULT_LIMIT)
        if edge.value != None and not edge.value is NULL:
            # expression-valued edge: every variable it mentions must resolve in the schema
            edge = edge.copy()
            vars_ = edge.value.vars()
            for v in vars_:
                if not schema.leaves(v.var):
                    Log.error("{{var}} does not exist in schema", var=v)
        elif edge.range:
            # range edge: validate variables from both the min and max expressions
            vars_ = edge.range.min.vars() | edge.range.max.vars()
            for v in vars_:
                if not schema[v.var]:
                    Log.error("{{var}} does not exist in schema", var=v)
        elif edge.domain.dimension:
            # dimension edge: rewrite field names to their es_column equivalents
            # (dimension is copied first so the original query is not mutated)
            vars_ = edge.domain.dimension.fields
            edge.domain.dimension = edge.domain.dimension.copy()
            edge.domain.dimension.fields = [schema[v].es_column for v in vars_]
        elif all(edge.domain.partitions.where):
            # partitioned domain: collect variables from every partition's where clause
            vars_ = set()
            for p in edge.domain.partitions:
                vars_ |= p.where.vars()
        # NOTE(review): this runs for every edge, not just the last branch above;
        # it unions in the edge's own value variables before depth resolution
        vars_ |= edge.value.vars()
        # resolve each variable to the nested path(s) of its leaf columns
        depths = set(c.nested_path[0] for v in vars_ for c in schema.leaves(v.var))
        if not depths:
            Log.error(
                "Do not know of column {{column}}",
                column=unwraplist([v for v in vars_ if schema[v] == None])
            )
        if len(depths) > 1:
            # a single edge may not mix columns from different nesting depths
            Log.error("expression {{expr|quote}} spans tables, can not handle", expr=edge.value)
        decoder = AggsDecoder(edge, query, limit)
        # accumulate the decoder under the (single) nested path it operates on
        output[literal_field(first(depths))] += [decoder]
    return output
constant[
RETURN MAP FROM QUERY PATH TO LIST OF DECODER ARRAYS
:param query:
:return:
]
variable[schema] assign[=] name[query].frum.schema
variable[output] assign[=] call[name[Data], parameter[]]
if name[query].edges begin[:]
if <ast.BoolOp object at 0x7da18f00db70> begin[:]
name[query].edges assign[=] call[name[sort_edges], parameter[name[query], constant[edges]]]
for taget[name[edge]] in starred[call[name[wrap], parameter[call[name[coalesce], parameter[name[query].edges, name[query].groupby, list[[]]]]]]] begin[:]
variable[limit] assign[=] call[name[coalesce], parameter[name[edge].domain.limit, name[query].limit, name[DEFAULT_LIMIT]]]
if <ast.BoolOp object at 0x7da1b0b6fe20> begin[:]
variable[edge] assign[=] call[name[edge].copy, parameter[]]
variable[vars_] assign[=] call[name[edge].value.vars, parameter[]]
for taget[name[v]] in starred[name[vars_]] begin[:]
if <ast.UnaryOp object at 0x7da1b0b6fc40> begin[:]
call[name[Log].error, parameter[constant[{{var}} does not exist in schema]]]
<ast.AugAssign object at 0x7da1b0bd32e0>
variable[depths] assign[=] call[name[set], parameter[<ast.GeneratorExp object at 0x7da1b0bd11e0>]]
if <ast.UnaryOp object at 0x7da1b0bd3be0> begin[:]
call[name[Log].error, parameter[constant[Do not know of column {{column}}]]]
if compare[call[name[len], parameter[name[depths]]] greater[>] constant[1]] begin[:]
call[name[Log].error, parameter[constant[expression {{expr|quote}} spans tables, can not handle]]]
variable[decoder] assign[=] call[name[AggsDecoder], parameter[name[edge], name[query], name[limit]]]
<ast.AugAssign object at 0x7da1b0bafd90>
return[name[output]] | keyword[def] identifier[get_decoders_by_path] ( identifier[query] ):
literal[string]
identifier[schema] = identifier[query] . identifier[frum] . identifier[schema]
identifier[output] = identifier[Data] ()
keyword[if] identifier[query] . identifier[edges] :
keyword[if] identifier[query] . identifier[sort] keyword[and] identifier[query] . identifier[format] != literal[string] :
identifier[query] . identifier[edges] = identifier[sort_edges] ( identifier[query] , literal[string] )
keyword[elif] identifier[query] . identifier[groupby] :
keyword[if] identifier[query] . identifier[sort] keyword[and] identifier[query] . identifier[format] != literal[string] :
identifier[query] . identifier[groupby] = identifier[sort_edges] ( identifier[query] , literal[string] )
keyword[for] identifier[edge] keyword[in] identifier[wrap] ( identifier[coalesce] ( identifier[query] . identifier[edges] , identifier[query] . identifier[groupby] ,[])):
identifier[limit] = identifier[coalesce] ( identifier[edge] . identifier[domain] . identifier[limit] , identifier[query] . identifier[limit] , identifier[DEFAULT_LIMIT] )
keyword[if] identifier[edge] . identifier[value] != keyword[None] keyword[and] keyword[not] identifier[edge] . identifier[value] keyword[is] identifier[NULL] :
identifier[edge] = identifier[edge] . identifier[copy] ()
identifier[vars_] = identifier[edge] . identifier[value] . identifier[vars] ()
keyword[for] identifier[v] keyword[in] identifier[vars_] :
keyword[if] keyword[not] identifier[schema] . identifier[leaves] ( identifier[v] . identifier[var] ):
identifier[Log] . identifier[error] ( literal[string] , identifier[var] = identifier[v] )
keyword[elif] identifier[edge] . identifier[range] :
identifier[vars_] = identifier[edge] . identifier[range] . identifier[min] . identifier[vars] ()| identifier[edge] . identifier[range] . identifier[max] . identifier[vars] ()
keyword[for] identifier[v] keyword[in] identifier[vars_] :
keyword[if] keyword[not] identifier[schema] [ identifier[v] . identifier[var] ]:
identifier[Log] . identifier[error] ( literal[string] , identifier[var] = identifier[v] )
keyword[elif] identifier[edge] . identifier[domain] . identifier[dimension] :
identifier[vars_] = identifier[edge] . identifier[domain] . identifier[dimension] . identifier[fields]
identifier[edge] . identifier[domain] . identifier[dimension] = identifier[edge] . identifier[domain] . identifier[dimension] . identifier[copy] ()
identifier[edge] . identifier[domain] . identifier[dimension] . identifier[fields] =[ identifier[schema] [ identifier[v] ]. identifier[es_column] keyword[for] identifier[v] keyword[in] identifier[vars_] ]
keyword[elif] identifier[all] ( identifier[edge] . identifier[domain] . identifier[partitions] . identifier[where] ):
identifier[vars_] = identifier[set] ()
keyword[for] identifier[p] keyword[in] identifier[edge] . identifier[domain] . identifier[partitions] :
identifier[vars_] |= identifier[p] . identifier[where] . identifier[vars] ()
identifier[vars_] |= identifier[edge] . identifier[value] . identifier[vars] ()
identifier[depths] = identifier[set] ( identifier[c] . identifier[nested_path] [ literal[int] ] keyword[for] identifier[v] keyword[in] identifier[vars_] keyword[for] identifier[c] keyword[in] identifier[schema] . identifier[leaves] ( identifier[v] . identifier[var] ))
keyword[if] keyword[not] identifier[depths] :
identifier[Log] . identifier[error] (
literal[string] ,
identifier[column] = identifier[unwraplist] ([ identifier[v] keyword[for] identifier[v] keyword[in] identifier[vars_] keyword[if] identifier[schema] [ identifier[v] ]== keyword[None] ])
)
keyword[if] identifier[len] ( identifier[depths] )> literal[int] :
identifier[Log] . identifier[error] ( literal[string] , identifier[expr] = identifier[edge] . identifier[value] )
identifier[decoder] = identifier[AggsDecoder] ( identifier[edge] , identifier[query] , identifier[limit] )
identifier[output] [ identifier[literal_field] ( identifier[first] ( identifier[depths] ))]+=[ identifier[decoder] ]
keyword[return] identifier[output] | def get_decoders_by_path(query):
"""
RETURN MAP FROM QUERY PATH TO LIST OF DECODER ARRAYS
:param query:
:return:
"""
schema = query.frum.schema
output = Data()
if query.edges:
if query.sort and query.format != 'cube':
# REORDER EDGES/GROUPBY TO MATCH THE SORT
query.edges = sort_edges(query, 'edges') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif query.groupby:
if query.sort and query.format != 'cube':
query.groupby = sort_edges(query, 'groupby') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
for edge in wrap(coalesce(query.edges, query.groupby, [])):
limit = coalesce(edge.domain.limit, query.limit, DEFAULT_LIMIT)
if edge.value != None and (not edge.value is NULL):
edge = edge.copy()
vars_ = edge.value.vars()
for v in vars_:
if not schema.leaves(v.var):
Log.error('{{var}} does not exist in schema', var=v) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['v']] # depends on [control=['if'], data=[]]
elif edge.range:
vars_ = edge.range.min.vars() | edge.range.max.vars()
for v in vars_:
if not schema[v.var]:
Log.error('{{var}} does not exist in schema', var=v) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['v']] # depends on [control=['if'], data=[]]
elif edge.domain.dimension:
vars_ = edge.domain.dimension.fields
edge.domain.dimension = edge.domain.dimension.copy()
edge.domain.dimension.fields = [schema[v].es_column for v in vars_] # depends on [control=['if'], data=[]]
elif all(edge.domain.partitions.where):
vars_ = set()
for p in edge.domain.partitions:
vars_ |= p.where.vars() # depends on [control=['for'], data=['p']] # depends on [control=['if'], data=[]]
vars_ |= edge.value.vars()
depths = set((c.nested_path[0] for v in vars_ for c in schema.leaves(v.var)))
if not depths:
Log.error('Do not know of column {{column}}', column=unwraplist([v for v in vars_ if schema[v] == None])) # depends on [control=['if'], data=[]]
if len(depths) > 1:
Log.error('expression {{expr|quote}} spans tables, can not handle', expr=edge.value) # depends on [control=['if'], data=[]]
decoder = AggsDecoder(edge, query, limit)
output[literal_field(first(depths))] += [decoder] # depends on [control=['for'], data=['edge']]
return output |
def wrap_spark_sql_udf(self, name, package_name=None, object_name=None, java_class_instance=None, doc=""):
    """Wraps a scala/java spark user defined function """
    def wrapper(*cols):
        # Resolve the JVM container lazily, at call time.
        container = self.get_java_container(package_name=package_name, object_name=object_name, java_class_instance=java_class_instance)
        # The named attribute is a factory; calling it yields the Java UDF.
        judf = getattr(container, name)()
        # Ensure every argument is a Java column before applying the UDF.
        scala_cols = self.to_scala_seq([_to_java_column(col) for col in cols])
        return Column(judf.apply(scala_cols))
    wrapper.__name__ = name
    wrapper.__doc__ = doc
    return wrapper
constant[Wraps a scala/java spark user defined function ]
def function[_, parameter[]]:
variable[jcontainer] assign[=] call[name[self].get_java_container, parameter[]]
variable[function] assign[=] call[name[getattr], parameter[name[jcontainer], name[name]]]
variable[judf] assign[=] call[name[function], parameter[]]
variable[jc] assign[=] call[name[judf].apply, parameter[call[name[self].to_scala_seq, parameter[<ast.ListComp object at 0x7da1b2346530>]]]]
return[call[name[Column], parameter[name[jc]]]]
name[_].__name__ assign[=] name[name]
name[_].__doc__ assign[=] name[doc]
return[name[_]] | keyword[def] identifier[wrap_spark_sql_udf] ( identifier[self] , identifier[name] , identifier[package_name] = keyword[None] , identifier[object_name] = keyword[None] , identifier[java_class_instance] = keyword[None] , identifier[doc] = literal[string] ):
literal[string]
keyword[def] identifier[_] (* identifier[cols] ):
identifier[jcontainer] = identifier[self] . identifier[get_java_container] ( identifier[package_name] = identifier[package_name] , identifier[object_name] = identifier[object_name] , identifier[java_class_instance] = identifier[java_class_instance] )
identifier[function] = identifier[getattr] ( identifier[jcontainer] , identifier[name] )
identifier[judf] = identifier[function] ()
identifier[jc] = identifier[judf] . identifier[apply] ( identifier[self] . identifier[to_scala_seq] ([ identifier[_to_java_column] ( identifier[c] ) keyword[for] identifier[c] keyword[in] identifier[cols] ]))
keyword[return] identifier[Column] ( identifier[jc] )
identifier[_] . identifier[__name__] = identifier[name]
identifier[_] . identifier[__doc__] = identifier[doc]
keyword[return] identifier[_] | def wrap_spark_sql_udf(self, name, package_name=None, object_name=None, java_class_instance=None, doc=''):
"""Wraps a scala/java spark user defined function """
def _(*cols):
jcontainer = self.get_java_container(package_name=package_name, object_name=object_name, java_class_instance=java_class_instance)
# Ensure that your argument is a column
function = getattr(jcontainer, name)
judf = function()
jc = judf.apply(self.to_scala_seq([_to_java_column(c) for c in cols]))
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _ |
def isword(somestr):
    """
    Checks that some string is a word.

    :param str somestr:
        Some string that will be checked for being a word.
        A word is a string that contains only alphabetic characters,
        is at least 2 characters long, and
        no character repeats more than 2 times in a row.
        A word can not be multiline.
    :except TypeError:
        If the checked value is not a string
    :return bool:
    """
    # reject invalid data types (raises for non-strings)
    OnlyStringsCanBeChecked(somestr)
    if somestr.isalpha() and len(somestr) > 1:
        # reject any character repeated 3+ times in a row
        # (was `(.)(\1)(\1)` with an `== None` comparison; `is None` is the
        # correct identity test and the backreference form is equivalent)
        if re.search(r'(.)\1\1', somestr, flags=re.IGNORECASE) is None:
            # russian: a word can not start with a hard or soft sign
            if somestr.casefold()[:1] not in ('ъ', 'ь'):
                return True
    # in other cases
    return False
return False | def function[isword, parameter[somestr]]:
constant[
Checks that some string is a word
:param str somestr:
It is some string that will be checked for word.
The word is a string that contains only alphabetic characters,
its length not less 2 characters and
noone characters does not reeats more than 2 times in a row.
The word can not be multiline
:except TypeError:
If the checked word is not a string
:return bool:
]
call[name[OnlyStringsCanBeChecked], parameter[name[somestr]]]
if <ast.BoolOp object at 0x7da1b1309630> begin[:]
variable[matches] assign[=] call[name[re].search, parameter[constant[(.)(\1)(\1)], name[somestr]]]
if compare[name[matches] equal[==] constant[None]] begin[:]
if compare[call[call[name[somestr].casefold, parameter[]]][<ast.Slice object at 0x7da1b130b460>] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da1b130bfa0>, <ast.Constant object at 0x7da1b130aa40>]]] begin[:]
return[constant[True]]
return[constant[False]] | keyword[def] identifier[isword] ( identifier[somestr] ):
literal[string]
identifier[OnlyStringsCanBeChecked] ( identifier[somestr] )
keyword[if] identifier[somestr] . identifier[isalpha] () keyword[and] identifier[len] ( identifier[somestr] )> literal[int] :
identifier[matches] = identifier[re] . identifier[search] ( literal[string] , identifier[somestr] , identifier[flags] = identifier[re] . identifier[IGNORECASE] )
keyword[if] identifier[matches] == keyword[None] :
keyword[if] identifier[somestr] . identifier[casefold] ()[ literal[int] : literal[int] ] keyword[not] keyword[in] [ literal[string] , literal[string] ]:
keyword[return] keyword[True]
keyword[return] keyword[False] | def isword(somestr):
"""
Checks that some string is a word
:param str somestr:
It is some string that will be checked for word.
The word is a string that contains only alphabetic characters,
its length not less 2 characters and
noone characters does not reeats more than 2 times in a row.
The word can not be multiline
:except TypeError:
If the checked word is not a string
:return bool:
""" # check invalid data types
OnlyStringsCanBeChecked(somestr)
if somestr.isalpha() and len(somestr) > 1: # find characters that repeats more then 2 times in a row
matches = re.search('(.)(\\1)(\\1)', somestr, flags=re.IGNORECASE)
if matches == None: # russian
# check first character
if somestr.casefold()[0:1] not in ['ъ', 'ь']:
return True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # in other cases
return False |
def on_entry_click(self, event):
    """
    function that gets called whenever entry is clicked
    """
    widget = event.widget
    # Grey foreground marks placeholder text; index 4 of the config tuple
    # holds the current value of the 'fg' option.
    showing_placeholder = widget.config('fg')[4] == 'grey'
    if showing_placeholder:
        widget.delete(0, "end")   # clear the placeholder text
        widget.insert(0, '')      # insert blank for user input
        widget.config(fg='black')
constant[
function that gets called whenever entry is clicked
]
if compare[call[call[name[event].widget.config, parameter[constant[fg]]]][constant[4]] equal[==] constant[grey]] begin[:]
call[name[event].widget.delete, parameter[constant[0], constant[end]]]
call[name[event].widget.insert, parameter[constant[0], constant[]]]
call[name[event].widget.config, parameter[]] | keyword[def] identifier[on_entry_click] ( identifier[self] , identifier[event] ):
literal[string]
keyword[if] identifier[event] . identifier[widget] . identifier[config] ( literal[string] )[ literal[int] ]== literal[string] :
identifier[event] . identifier[widget] . identifier[delete] ( literal[int] , literal[string] )
identifier[event] . identifier[widget] . identifier[insert] ( literal[int] , literal[string] )
identifier[event] . identifier[widget] . identifier[config] ( identifier[fg] = literal[string] ) | def on_entry_click(self, event):
"""
function that gets called whenever entry is clicked
"""
if event.widget.config('fg')[4] == 'grey':
event.widget.delete(0, 'end') # delete all the text in the entry
event.widget.insert(0, '') #Insert blank for user input
event.widget.config(fg='black') # depends on [control=['if'], data=[]] |
def exit_frames(self):
    '''
    Returns a list of frames whose children include a frame outside of the group.

    The result is computed lazily on first access and memoised on
    ``self._exit_frames``; subsequent calls return the cached list.
    '''
    if self._exit_frames is None:
        # A frame "exits" the group when any of its children belongs
        # to a different group than this one.
        self._exit_frames = [
            frame for frame in self.frames
            if any(child.group != self for child in frame.children)
        ]
    return self._exit_frames
constant[
Returns a list of frames whose children include a frame outside of the group
]
if compare[name[self]._exit_frames is constant[None]] begin[:]
variable[exit_frames] assign[=] list[[]]
for taget[name[frame]] in starred[name[self].frames] begin[:]
if call[name[any], parameter[<ast.GeneratorExp object at 0x7da204564e50>]] begin[:]
call[name[exit_frames].append, parameter[name[frame]]]
name[self]._exit_frames assign[=] name[exit_frames]
return[name[self]._exit_frames] | keyword[def] identifier[exit_frames] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_exit_frames] keyword[is] keyword[None] :
identifier[exit_frames] =[]
keyword[for] identifier[frame] keyword[in] identifier[self] . identifier[frames] :
keyword[if] identifier[any] ( identifier[c] . identifier[group] != identifier[self] keyword[for] identifier[c] keyword[in] identifier[frame] . identifier[children] ):
identifier[exit_frames] . identifier[append] ( identifier[frame] )
identifier[self] . identifier[_exit_frames] = identifier[exit_frames]
keyword[return] identifier[self] . identifier[_exit_frames] | def exit_frames(self):
"""
Returns a list of frames whose children include a frame outside of the group
"""
if self._exit_frames is None:
exit_frames = []
for frame in self.frames:
if any((c.group != self for c in frame.children)):
exit_frames.append(frame) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['frame']]
self._exit_frames = exit_frames # depends on [control=['if'], data=[]]
return self._exit_frames |
def reboot(self, timeout=None, wait_polling_interval=None):
    """Reboot the device, waiting for the adb connection to become stable.
    :param timeout: Maximum time to wait for reboot. Falls back to
        ``self._timeout`` when not given.
    :param wait_polling_interval: Interval at which to poll for device readiness.
        Falls back to ``self._wait_polling_interval`` when not given.
    """
    # Resolve unspecified arguments to the instance-level defaults.
    if timeout is None:
        timeout = self._timeout
    if wait_polling_interval is None:
        wait_polling_interval = self._wait_polling_interval
    # NOTE(review): wait_polling_interval is resolved above but never passed
    # to wait_for_device_ready() below — confirm whether it should be
    # forwarded or whether the resolution is dead code.
    self._logger.info("Rebooting device")
    # The "reboot" command is handed over via after_first — presumably
    # invoked by wait_for_device_ready() once the wait is armed, so the
    # readiness polling is already in place when the device goes down.
    # TODO confirm against wait_for_device_ready()'s contract.
    self.wait_for_device_ready(timeout,
                               after_first=lambda:self.command_output(["reboot"]))
constant[Reboot the device, waiting for the adb connection to become stable.
:param timeout: Maximum time to wait for reboot.
:param wait_polling_interval: Interval at which to poll for device readiness.
]
if compare[name[timeout] is constant[None]] begin[:]
variable[timeout] assign[=] name[self]._timeout
if compare[name[wait_polling_interval] is constant[None]] begin[:]
variable[wait_polling_interval] assign[=] name[self]._wait_polling_interval
call[name[self]._logger.info, parameter[constant[Rebooting device]]]
call[name[self].wait_for_device_ready, parameter[name[timeout]]] | keyword[def] identifier[reboot] ( identifier[self] , identifier[timeout] = keyword[None] , identifier[wait_polling_interval] = keyword[None] ):
literal[string]
keyword[if] identifier[timeout] keyword[is] keyword[None] :
identifier[timeout] = identifier[self] . identifier[_timeout]
keyword[if] identifier[wait_polling_interval] keyword[is] keyword[None] :
identifier[wait_polling_interval] = identifier[self] . identifier[_wait_polling_interval]
identifier[self] . identifier[_logger] . identifier[info] ( literal[string] )
identifier[self] . identifier[wait_for_device_ready] ( identifier[timeout] ,
identifier[after_first] = keyword[lambda] : identifier[self] . identifier[command_output] ([ literal[string] ])) | def reboot(self, timeout=None, wait_polling_interval=None):
"""Reboot the device, waiting for the adb connection to become stable.
:param timeout: Maximum time to wait for reboot.
:param wait_polling_interval: Interval at which to poll for device readiness.
"""
if timeout is None:
timeout = self._timeout # depends on [control=['if'], data=['timeout']]
if wait_polling_interval is None:
wait_polling_interval = self._wait_polling_interval # depends on [control=['if'], data=['wait_polling_interval']]
self._logger.info('Rebooting device')
self.wait_for_device_ready(timeout, after_first=lambda : self.command_output(['reboot'])) |
def load_region(adapter, case_id, hgnc_id=None, chrom=None, start=None, end=None):
    """Load all variants in a region defined by a HGNC id or coordinates.

    Args:
        adapter (MongoAdapter)
        case_id (str): Case id
        hgnc_id (int): If all variants from a gene should be uploaded.
            When given, the gene's coordinates override chrom/start/end.
        chrom (str): If variants from coordinates should be uploaded
        start (int): Start position for region
        end (int): Stop position for region

    Raises:
        ValueError: If the gene (when ``hgnc_id`` is given) or the case
            does not exist in the database.
    """
    if hgnc_id:
        gene_obj = adapter.hgnc_gene(hgnc_id)
        if not gene_obj:
            # Bug fix: the ValueError was previously constructed but never
            # raised, so execution fell through and crashed on the None
            # gene_obj subscription below.
            raise ValueError("Gene {} does not exist in database".format(hgnc_id))
        chrom = gene_obj['chromosome']
        start = gene_obj['start']
        end = gene_obj['end']
    case_obj = adapter.case(case_id=case_id)
    if not case_obj:
        raise ValueError("Case {} does not exist in database".format(case_id))
    # Clinical SNVs are always loaded for the region.
    log.info("Load clinical SNV variants for case: {0} region: chr {1}, start"
             " {2}, end {3}".format(case_obj['_id'], chrom, start, end))
    adapter.load_variants(case_obj=case_obj, variant_type='clinical',
                          category='snv', chrom=chrom, start=start, end=end)
    # Clinical SV / STR variants are only loaded when the case has the
    # corresponding VCF on record.
    if case_obj['vcf_files'].get('vcf_sv'):
        log.info("Load clinical SV variants for case: {0} region: chr {1}, "
                 "start {2}, end {3}".format(case_obj['_id'], chrom, start, end))
        adapter.load_variants(case_obj=case_obj, variant_type='clinical',
                              category='sv', chrom=chrom, start=start, end=end)
    if case_obj['vcf_files'].get('vcf_str'):
        log.info("Load clinical STR variants for case: {0} region: chr {1}, "
                 "start {2}, end {3}".format(case_obj['_id'], chrom, start, end))
        adapter.load_variants(case_obj=case_obj, variant_type='clinical',
                              category='str', chrom=chrom, start=start, end=end)
    # Research variants are loaded only once the case is opened for research.
    if case_obj['is_research']:
        log.info("Load research SNV variants for case: {0} region: chr {1}, "
                 "start {2}, end {3}".format(case_obj['_id'], chrom, start, end))
        adapter.load_variants(case_obj=case_obj, variant_type='research',
                              category='snv', chrom=chrom, start=start, end=end)
        if case_obj['vcf_files'].get('vcf_sv_research'):
            log.info("Load research SV variants for case: {0} region: chr {1},"
                     " start {2}, end {3}".format(case_obj['_id'], chrom, start, end))
            adapter.load_variants(case_obj=case_obj, variant_type='research',
                                  category='sv', chrom=chrom, start=start, end=end)
constant[Load all variants in a region defined by a HGNC id
Args:
adapter (MongoAdapter)
case_id (str): Case id
hgnc_id (int): If all variants from a gene should be uploaded
chrom (str): If variants from coordinates should be uploaded
start (int): Start position for region
end (int): Stop position for region
]
if name[hgnc_id] begin[:]
variable[gene_obj] assign[=] call[name[adapter].hgnc_gene, parameter[name[hgnc_id]]]
if <ast.UnaryOp object at 0x7da204620280> begin[:]
call[name[ValueError], parameter[call[constant[Gene {} does not exist in database].format, parameter[name[hgnc_id]]]]]
variable[chrom] assign[=] call[name[gene_obj]][constant[chromosome]]
variable[start] assign[=] call[name[gene_obj]][constant[start]]
variable[end] assign[=] call[name[gene_obj]][constant[end]]
variable[case_obj] assign[=] call[name[adapter].case, parameter[]]
if <ast.UnaryOp object at 0x7da204621ed0> begin[:]
<ast.Raise object at 0x7da204622cb0>
call[name[log].info, parameter[call[constant[Load clinical SNV variants for case: {0} region: chr {1}, start {2}, end {3}].format, parameter[call[name[case_obj]][constant[_id]], name[chrom], name[start], name[end]]]]]
call[name[adapter].load_variants, parameter[]]
variable[vcf_sv_file] assign[=] call[call[name[case_obj]][constant[vcf_files]].get, parameter[constant[vcf_sv]]]
if name[vcf_sv_file] begin[:]
call[name[log].info, parameter[call[constant[Load clinical SV variants for case: {0} region: chr {1}, start {2}, end {3}].format, parameter[call[name[case_obj]][constant[_id]], name[chrom], name[start], name[end]]]]]
call[name[adapter].load_variants, parameter[]]
variable[vcf_str_file] assign[=] call[call[name[case_obj]][constant[vcf_files]].get, parameter[constant[vcf_str]]]
if name[vcf_str_file] begin[:]
call[name[log].info, parameter[call[constant[Load clinical STR variants for case: {0} region: chr {1}, start {2}, end {3}].format, parameter[call[name[case_obj]][constant[_id]], name[chrom], name[start], name[end]]]]]
call[name[adapter].load_variants, parameter[]]
if call[name[case_obj]][constant[is_research]] begin[:]
call[name[log].info, parameter[call[constant[Load research SNV variants for case: {0} region: chr {1}, start {2}, end {3}].format, parameter[call[name[case_obj]][constant[_id]], name[chrom], name[start], name[end]]]]]
call[name[adapter].load_variants, parameter[]]
variable[vcf_sv_research] assign[=] call[call[name[case_obj]][constant[vcf_files]].get, parameter[constant[vcf_sv_research]]]
if name[vcf_sv_research] begin[:]
call[name[log].info, parameter[call[constant[Load research SV variants for case: {0} region: chr {1}, start {2}, end {3}].format, parameter[call[name[case_obj]][constant[_id]], name[chrom], name[start], name[end]]]]]
call[name[adapter].load_variants, parameter[]] | keyword[def] identifier[load_region] ( identifier[adapter] , identifier[case_id] , identifier[hgnc_id] = keyword[None] , identifier[chrom] = keyword[None] , identifier[start] = keyword[None] , identifier[end] = keyword[None] ):
literal[string]
keyword[if] identifier[hgnc_id] :
identifier[gene_obj] = identifier[adapter] . identifier[hgnc_gene] ( identifier[hgnc_id] )
keyword[if] keyword[not] identifier[gene_obj] :
identifier[ValueError] ( literal[string] . identifier[format] ( identifier[hgnc_id] ))
identifier[chrom] = identifier[gene_obj] [ literal[string] ]
identifier[start] = identifier[gene_obj] [ literal[string] ]
identifier[end] = identifier[gene_obj] [ literal[string] ]
identifier[case_obj] = identifier[adapter] . identifier[case] ( identifier[case_id] = identifier[case_id] )
keyword[if] keyword[not] identifier[case_obj] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[case_id] ))
identifier[log] . identifier[info] ( literal[string]
literal[string] . identifier[format] ( identifier[case_obj] [ literal[string] ], identifier[chrom] , identifier[start] , identifier[end] ))
identifier[adapter] . identifier[load_variants] ( identifier[case_obj] = identifier[case_obj] , identifier[variant_type] = literal[string] ,
identifier[category] = literal[string] , identifier[chrom] = identifier[chrom] , identifier[start] = identifier[start] , identifier[end] = identifier[end] )
identifier[vcf_sv_file] = identifier[case_obj] [ literal[string] ]. identifier[get] ( literal[string] )
keyword[if] identifier[vcf_sv_file] :
identifier[log] . identifier[info] ( literal[string]
literal[string] . identifier[format] ( identifier[case_obj] [ literal[string] ], identifier[chrom] , identifier[start] , identifier[end] ))
identifier[adapter] . identifier[load_variants] ( identifier[case_obj] = identifier[case_obj] , identifier[variant_type] = literal[string] ,
identifier[category] = literal[string] , identifier[chrom] = identifier[chrom] , identifier[start] = identifier[start] , identifier[end] = identifier[end] )
identifier[vcf_str_file] = identifier[case_obj] [ literal[string] ]. identifier[get] ( literal[string] )
keyword[if] identifier[vcf_str_file] :
identifier[log] . identifier[info] ( literal[string]
literal[string] . identifier[format] ( identifier[case_obj] [ literal[string] ], identifier[chrom] , identifier[start] , identifier[end] ))
identifier[adapter] . identifier[load_variants] ( identifier[case_obj] = identifier[case_obj] , identifier[variant_type] = literal[string] ,
identifier[category] = literal[string] , identifier[chrom] = identifier[chrom] , identifier[start] = identifier[start] , identifier[end] = identifier[end] )
keyword[if] identifier[case_obj] [ literal[string] ]:
identifier[log] . identifier[info] ( literal[string]
literal[string] . identifier[format] ( identifier[case_obj] [ literal[string] ], identifier[chrom] , identifier[start] , identifier[end] ))
identifier[adapter] . identifier[load_variants] ( identifier[case_obj] = identifier[case_obj] , identifier[variant_type] = literal[string] ,
identifier[category] = literal[string] , identifier[chrom] = identifier[chrom] , identifier[start] = identifier[start] , identifier[end] = identifier[end] )
identifier[vcf_sv_research] = identifier[case_obj] [ literal[string] ]. identifier[get] ( literal[string] )
keyword[if] identifier[vcf_sv_research] :
identifier[log] . identifier[info] ( literal[string]
literal[string] . identifier[format] ( identifier[case_obj] [ literal[string] ], identifier[chrom] , identifier[start] , identifier[end] ))
identifier[adapter] . identifier[load_variants] ( identifier[case_obj] = identifier[case_obj] , identifier[variant_type] = literal[string] ,
identifier[category] = literal[string] , identifier[chrom] = identifier[chrom] , identifier[start] = identifier[start] , identifier[end] = identifier[end] ) | def load_region(adapter, case_id, hgnc_id=None, chrom=None, start=None, end=None):
"""Load all variants in a region defined by a HGNC id
Args:
adapter (MongoAdapter)
case_id (str): Case id
hgnc_id (int): If all variants from a gene should be uploaded
chrom (str): If variants from coordinates should be uploaded
start (int): Start position for region
end (int): Stop position for region
"""
if hgnc_id:
gene_obj = adapter.hgnc_gene(hgnc_id)
if not gene_obj:
ValueError('Gene {} does not exist in database'.format(hgnc_id)) # depends on [control=['if'], data=[]]
chrom = gene_obj['chromosome']
start = gene_obj['start']
end = gene_obj['end'] # depends on [control=['if'], data=[]]
case_obj = adapter.case(case_id=case_id)
if not case_obj:
raise ValueError('Case {} does not exist in database'.format(case_id)) # depends on [control=['if'], data=[]]
log.info('Load clinical SNV variants for case: {0} region: chr {1}, start {2}, end {3}'.format(case_obj['_id'], chrom, start, end))
adapter.load_variants(case_obj=case_obj, variant_type='clinical', category='snv', chrom=chrom, start=start, end=end)
vcf_sv_file = case_obj['vcf_files'].get('vcf_sv')
if vcf_sv_file:
log.info('Load clinical SV variants for case: {0} region: chr {1}, start {2}, end {3}'.format(case_obj['_id'], chrom, start, end))
adapter.load_variants(case_obj=case_obj, variant_type='clinical', category='sv', chrom=chrom, start=start, end=end) # depends on [control=['if'], data=[]]
vcf_str_file = case_obj['vcf_files'].get('vcf_str')
if vcf_str_file:
log.info('Load clinical STR variants for case: {0} region: chr {1}, start {2}, end {3}'.format(case_obj['_id'], chrom, start, end))
adapter.load_variants(case_obj=case_obj, variant_type='clinical', category='str', chrom=chrom, start=start, end=end) # depends on [control=['if'], data=[]]
if case_obj['is_research']:
log.info('Load research SNV variants for case: {0} region: chr {1}, start {2}, end {3}'.format(case_obj['_id'], chrom, start, end))
adapter.load_variants(case_obj=case_obj, variant_type='research', category='snv', chrom=chrom, start=start, end=end)
vcf_sv_research = case_obj['vcf_files'].get('vcf_sv_research')
if vcf_sv_research:
log.info('Load research SV variants for case: {0} region: chr {1}, start {2}, end {3}'.format(case_obj['_id'], chrom, start, end))
adapter.load_variants(case_obj=case_obj, variant_type='research', category='sv', chrom=chrom, start=start, end=end) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def ethinca_order_from_string(order):
    """
    Returns the integer giving twice the post-Newtonian order
    used by the ethinca calculation. Currently valid only for TaylorF2 metric

    Parameters
    ----------
    order : string

    Returns
    -------
    int

    Raises
    ------
    ValueError
        If ``order`` is not a recognised ethinca order.
    """
    # Look the order table up once instead of twice.
    orders = get_ethinca_orders()
    if order in orders:
        return orders[order]
    # Bug fix: the message previously read "ethincacalculation" because a
    # space was missing between the two concatenated string literals.
    raise ValueError("Order " + str(order) + " is not valid for ethinca "
                     "calculation! Valid orders: " + str(orders.keys()))
constant[
Returns the integer giving twice the post-Newtonian order
used by the ethinca calculation. Currently valid only for TaylorF2 metric
Parameters
----------
order : string
Returns
-------
int
]
if compare[name[order] in call[call[name[get_ethinca_orders], parameter[]].keys, parameter[]]] begin[:]
return[call[call[name[get_ethinca_orders], parameter[]]][name[order]]] | keyword[def] identifier[ethinca_order_from_string] ( identifier[order] ):
literal[string]
keyword[if] identifier[order] keyword[in] identifier[get_ethinca_orders] (). identifier[keys] ():
keyword[return] identifier[get_ethinca_orders] ()[ identifier[order] ]
keyword[else] : keyword[raise] identifier[ValueError] ( literal[string] + identifier[str] ( identifier[order] )+ literal[string]
literal[string] +
identifier[str] ( identifier[get_ethinca_orders] (). identifier[keys] ())) | def ethinca_order_from_string(order):
"""
Returns the integer giving twice the post-Newtonian order
used by the ethinca calculation. Currently valid only for TaylorF2 metric
Parameters
----------
order : string
Returns
-------
int
"""
if order in get_ethinca_orders().keys():
return get_ethinca_orders()[order] # depends on [control=['if'], data=['order']]
else:
raise ValueError('Order ' + str(order) + ' is not valid for ethincacalculation! Valid orders: ' + str(get_ethinca_orders().keys())) |
def Fierz_to_JMS_lep(C, ddll):
    """From Fierz to semileptonic JMS basis for Class V.

    `ddll` should be of the form 'sbl_enu_tau', 'dbl_munu_e' etc.

    :param C: dict of Fierz-basis Wilson coefficients, keyed by names such
        as ``'F<ind>9'``, ``'F<ind>10p'``, ``'F<ind>T5'``.
    :param ddll: sector label encoding two quark flavours (first two
        characters) and two lepton flavours (after ``l_`` and ``nu_``).
    :return: symmetrized dict of JMS-basis coefficients
        (via ``symmetrize_JMS_dict``).
    """
    # Map the two quark letters to 1-based generation indices; a leading
    # 'uc' selects the up-type flavour table, anything else the down-type.
    if ddll[:2] == 'uc':
        s = str(uflav[ddll[0]] + 1)
        b = str(uflav[ddll[1]] + 1)
        q = 'u'
    else:
        s = str(dflav[ddll[0]] + 1)
        b = str(dflav[ddll[1]] + 1)
        q = 'd'
    # First lepton flavour sits between position 4 and the 'n' of 'nu';
    # the second follows the underscore after 'nu'.
    l = str(lflav[ddll[4:ddll.find('n')]] + 1)
    lp = str(lflav[ddll[ddll.find('_',5)+1:len(ddll)]] + 1)
    # Drop the 'l_'/'nu_' markers to form the Fierz coefficient name stem.
    ind = ddll.replace('l_','').replace('nu_','')
    # Linear combinations translating the Fierz (S, P, T, 9, 10, primed)
    # coefficients into JMS vector/scalar/tensor operators; conjugated
    # entries carry flavour indices in swapped order.
    d = {
        "Ve" + q + "LL" + '_' + l + lp + s + b : -C['F' + ind + '10'] + C['F' + ind + '9'],
        "V" + q + "eLR" + '_' + s + b + l + lp : C['F' + ind + '10'] + C['F' + ind + '9'],
        "Se" + q + "RR" + '_' + l + lp + s + b : C['F' + ind + 'P'] + C['F' + ind + 'S'],
        "Se" + q + "RL" + '_' + lp + l + b + s : -C['F' + ind + 'P'].conjugate() + C['F' + ind + 'S'].conjugate(),
        "Te" + q + "RR" + '_' + lp + l + b + s : C['F' + ind + 'T'].conjugate() - C['F' + ind + 'T5'].conjugate(),
        "Te" + q + "RR" + '_' + l + lp + s + b : C['F' + ind + 'T'] + C['F' + ind + 'T5'],
        "Ve" + q + "LR" + '_' + l + lp + s + b : -C['F' + ind + '10p'] + C['F' + ind + '9p'],
        "Ve" + q + "RR" + '_' + l + lp + s + b : C['F' + ind + '10p'] + C['F' + ind + '9p'],
        "Se" + q + "RL" + '_' + l + lp + s + b : C['F' + ind + 'Pp'] + C['F' + ind + 'Sp'],
        "Se" + q + "RR" + '_' + lp + l + b + s : -C['F' + ind + 'Pp'].conjugate() + C['F' + ind + 'Sp'].conjugate(),
        }
    return symmetrize_JMS_dict(d)
constant[From Fierz to semileptonic JMS basis for Class V.
`ddll` should be of the form 'sbl_enu_tau', 'dbl_munu_e' etc.]
if compare[call[name[ddll]][<ast.Slice object at 0x7da1b190bd90>] equal[==] constant[uc]] begin[:]
variable[s] assign[=] call[name[str], parameter[binary_operation[call[name[uflav]][call[name[ddll]][constant[0]]] + constant[1]]]]
variable[b] assign[=] call[name[str], parameter[binary_operation[call[name[uflav]][call[name[ddll]][constant[1]]] + constant[1]]]]
variable[q] assign[=] constant[u]
variable[l] assign[=] call[name[str], parameter[binary_operation[call[name[lflav]][call[name[ddll]][<ast.Slice object at 0x7da1b1943fd0>]] + constant[1]]]]
variable[lp] assign[=] call[name[str], parameter[binary_operation[call[name[lflav]][call[name[ddll]][<ast.Slice object at 0x7da1b19410c0>]] + constant[1]]]]
variable[ind] assign[=] call[call[name[ddll].replace, parameter[constant[l_], constant[]]].replace, parameter[constant[nu_], constant[]]]
variable[d] assign[=] dictionary[[<ast.BinOp object at 0x7da1b19426b0>, <ast.BinOp object at 0x7da1b190b370>, <ast.BinOp object at 0x7da1b190b0a0>, <ast.BinOp object at 0x7da1b190add0>, <ast.BinOp object at 0x7da1b190ab00>, <ast.BinOp object at 0x7da1b1909a50>, <ast.BinOp object at 0x7da1b1909780>, <ast.BinOp object at 0x7da1b19094b0>, <ast.BinOp object at 0x7da1b19091e0>, <ast.BinOp object at 0x7da1b1908f10>], [<ast.BinOp object at 0x7da1b1908c40>, <ast.BinOp object at 0x7da1b1908940>, <ast.BinOp object at 0x7da1b1908670>, <ast.BinOp object at 0x7da1b19083a0>, <ast.BinOp object at 0x7da1b1af8520>, <ast.BinOp object at 0x7da1b19f25c0>, <ast.BinOp object at 0x7da1b19f29e0>, <ast.BinOp object at 0x7da1b19f26b0>, <ast.BinOp object at 0x7da1b1a9c820>, <ast.BinOp object at 0x7da1b1a9fca0>]]
return[call[name[symmetrize_JMS_dict], parameter[name[d]]]] | keyword[def] identifier[Fierz_to_JMS_lep] ( identifier[C] , identifier[ddll] ):
literal[string]
keyword[if] identifier[ddll] [: literal[int] ]== literal[string] :
identifier[s] = identifier[str] ( identifier[uflav] [ identifier[ddll] [ literal[int] ]]+ literal[int] )
identifier[b] = identifier[str] ( identifier[uflav] [ identifier[ddll] [ literal[int] ]]+ literal[int] )
identifier[q] = literal[string]
keyword[else] :
identifier[s] = identifier[str] ( identifier[dflav] [ identifier[ddll] [ literal[int] ]]+ literal[int] )
identifier[b] = identifier[str] ( identifier[dflav] [ identifier[ddll] [ literal[int] ]]+ literal[int] )
identifier[q] = literal[string]
identifier[l] = identifier[str] ( identifier[lflav] [ identifier[ddll] [ literal[int] : identifier[ddll] . identifier[find] ( literal[string] )]]+ literal[int] )
identifier[lp] = identifier[str] ( identifier[lflav] [ identifier[ddll] [ identifier[ddll] . identifier[find] ( literal[string] , literal[int] )+ literal[int] : identifier[len] ( identifier[ddll] )]]+ literal[int] )
identifier[ind] = identifier[ddll] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )
identifier[d] ={
literal[string] + identifier[q] + literal[string] + literal[string] + identifier[l] + identifier[lp] + identifier[s] + identifier[b] :- identifier[C] [ literal[string] + identifier[ind] + literal[string] ]+ identifier[C] [ literal[string] + identifier[ind] + literal[string] ],
literal[string] + identifier[q] + literal[string] + literal[string] + identifier[s] + identifier[b] + identifier[l] + identifier[lp] : identifier[C] [ literal[string] + identifier[ind] + literal[string] ]+ identifier[C] [ literal[string] + identifier[ind] + literal[string] ],
literal[string] + identifier[q] + literal[string] + literal[string] + identifier[l] + identifier[lp] + identifier[s] + identifier[b] : identifier[C] [ literal[string] + identifier[ind] + literal[string] ]+ identifier[C] [ literal[string] + identifier[ind] + literal[string] ],
literal[string] + identifier[q] + literal[string] + literal[string] + identifier[lp] + identifier[l] + identifier[b] + identifier[s] :- identifier[C] [ literal[string] + identifier[ind] + literal[string] ]. identifier[conjugate] ()+ identifier[C] [ literal[string] + identifier[ind] + literal[string] ]. identifier[conjugate] (),
literal[string] + identifier[q] + literal[string] + literal[string] + identifier[lp] + identifier[l] + identifier[b] + identifier[s] : identifier[C] [ literal[string] + identifier[ind] + literal[string] ]. identifier[conjugate] ()- identifier[C] [ literal[string] + identifier[ind] + literal[string] ]. identifier[conjugate] (),
literal[string] + identifier[q] + literal[string] + literal[string] + identifier[l] + identifier[lp] + identifier[s] + identifier[b] : identifier[C] [ literal[string] + identifier[ind] + literal[string] ]+ identifier[C] [ literal[string] + identifier[ind] + literal[string] ],
literal[string] + identifier[q] + literal[string] + literal[string] + identifier[l] + identifier[lp] + identifier[s] + identifier[b] :- identifier[C] [ literal[string] + identifier[ind] + literal[string] ]+ identifier[C] [ literal[string] + identifier[ind] + literal[string] ],
literal[string] + identifier[q] + literal[string] + literal[string] + identifier[l] + identifier[lp] + identifier[s] + identifier[b] : identifier[C] [ literal[string] + identifier[ind] + literal[string] ]+ identifier[C] [ literal[string] + identifier[ind] + literal[string] ],
literal[string] + identifier[q] + literal[string] + literal[string] + identifier[l] + identifier[lp] + identifier[s] + identifier[b] : identifier[C] [ literal[string] + identifier[ind] + literal[string] ]+ identifier[C] [ literal[string] + identifier[ind] + literal[string] ],
literal[string] + identifier[q] + literal[string] + literal[string] + identifier[lp] + identifier[l] + identifier[b] + identifier[s] :- identifier[C] [ literal[string] + identifier[ind] + literal[string] ]. identifier[conjugate] ()+ identifier[C] [ literal[string] + identifier[ind] + literal[string] ]. identifier[conjugate] (),
}
keyword[return] identifier[symmetrize_JMS_dict] ( identifier[d] ) | def Fierz_to_JMS_lep(C, ddll):
"""From Fierz to semileptonic JMS basis for Class V.
`ddll` should be of the form 'sbl_enu_tau', 'dbl_munu_e' etc."""
if ddll[:2] == 'uc':
s = str(uflav[ddll[0]] + 1)
b = str(uflav[ddll[1]] + 1)
q = 'u' # depends on [control=['if'], data=[]]
else:
s = str(dflav[ddll[0]] + 1)
b = str(dflav[ddll[1]] + 1)
q = 'd'
l = str(lflav[ddll[4:ddll.find('n')]] + 1)
lp = str(lflav[ddll[ddll.find('_', 5) + 1:len(ddll)]] + 1)
ind = ddll.replace('l_', '').replace('nu_', '')
d = {'Ve' + q + 'LL' + '_' + l + lp + s + b: -C['F' + ind + '10'] + C['F' + ind + '9'], 'V' + q + 'eLR' + '_' + s + b + l + lp: C['F' + ind + '10'] + C['F' + ind + '9'], 'Se' + q + 'RR' + '_' + l + lp + s + b: C['F' + ind + 'P'] + C['F' + ind + 'S'], 'Se' + q + 'RL' + '_' + lp + l + b + s: -C['F' + ind + 'P'].conjugate() + C['F' + ind + 'S'].conjugate(), 'Te' + q + 'RR' + '_' + lp + l + b + s: C['F' + ind + 'T'].conjugate() - C['F' + ind + 'T5'].conjugate(), 'Te' + q + 'RR' + '_' + l + lp + s + b: C['F' + ind + 'T'] + C['F' + ind + 'T5'], 'Ve' + q + 'LR' + '_' + l + lp + s + b: -C['F' + ind + '10p'] + C['F' + ind + '9p'], 'Ve' + q + 'RR' + '_' + l + lp + s + b: C['F' + ind + '10p'] + C['F' + ind + '9p'], 'Se' + q + 'RL' + '_' + l + lp + s + b: C['F' + ind + 'Pp'] + C['F' + ind + 'Sp'], 'Se' + q + 'RR' + '_' + lp + l + b + s: -C['F' + ind + 'Pp'].conjugate() + C['F' + ind + 'Sp'].conjugate()}
return symmetrize_JMS_dict(d) |
def get_value(record, key, default=None):
    """Return item as `dict.__getitem__` but using 'smart queries'.

    Keys may use a dotted/indexed path such as ``'a[0].b'`` or slice
    notation ``'a[1:3]'``; ``'a[n]'`` addresses the last element.

    .. note::
        Accessing one value in a normal way, meaning d['a'], is almost as
        fast as accessing a regular dictionary. But using the special
        name convention is a bit slower than using the regular access:
        .. code-block:: python
            >>> %timeit x = dd['a[0].b']
            100000 loops, best of 3: 3.94 us per loop
            >>> %timeit x = dd['a'][0]['b']
            1000000 loops, best of 3: 598 ns per loop
    """
    def getitem(k, v, default):
        # Resolve one path component `k` against the current value `v`.
        if isinstance(v, string_types):
            # Strings are leaves: descending further is a lookup failure.
            raise KeyError
        elif isinstance(v, dict):
            return v[k]
        elif ']' in k:
            # `k` is an index/slice component such as "0]", "n]" or "1:3]";
            # 'n' is shorthand for the last element (index -1).
            k = k[:-1].replace('n', '-1')
            # Work around for list indexes and slices
            try:
                return v[int(k)]
            except IndexError:
                return default
            except ValueError:
                # Not a plain integer: interpret "a:b" as a slice, mapping
                # empty parts to None (open-ended slice bound).
                return v[slice(*map(
                    lambda x: int(x.strip()) if x.strip() else None,
                    k.split(':')
                ))]
        else:
            # `v` is a list of containers: apply the key to every element,
            # silently skipping elements where it is missing.
            tmp = []
            for inner_v in v:
                try:
                    tmp.append(getitem(k, inner_v, default))
                except KeyError:
                    continue
            return tmp
    # Wrap a top-level list in a dict
    if isinstance(record, list):
        record = {'record': record}
        key = '.'.join(['record', key])
    # Check if we are using python regular keys
    try:
        return record[key]
    except KeyError:
        pass
    # Fall back to smart-query resolution: split the path into components
    # and walk the record one component at a time.
    keys = SPLIT_KEY_PATTERN.split(key)
    value = record
    for k in keys:
        try:
            value = getitem(k, value, default)
        except KeyError:
            return default
    return value
constant[Return item as `dict.__getitem__` but using 'smart queries'.
.. note::
Accessing one value in a normal way, meaning d['a'], is almost as
fast as accessing a regular dictionary. But using the special
name convention is a bit slower than using the regular access:
.. code-block:: python
>>> %timeit x = dd['a[0].b']
100000 loops, best of 3: 3.94 us per loop
>>> %timeit x = dd['a'][0]['b']
1000000 loops, best of 3: 598 ns per loop
]
def function[getitem, parameter[k, v, default]]:
if call[name[isinstance], parameter[name[v], name[string_types]]] begin[:]
<ast.Raise object at 0x7da1b0a48070>
if call[name[isinstance], parameter[name[record], name[list]]] begin[:]
variable[record] assign[=] dictionary[[<ast.Constant object at 0x7da1b0a48760>], [<ast.Name object at 0x7da1b0a48430>]]
variable[key] assign[=] call[constant[.].join, parameter[list[[<ast.Constant object at 0x7da1b0a48670>, <ast.Name object at 0x7da1b0a483d0>]]]]
<ast.Try object at 0x7da1b0a4bb20>
variable[keys] assign[=] call[name[SPLIT_KEY_PATTERN].split, parameter[name[key]]]
variable[value] assign[=] name[record]
for taget[name[k]] in starred[name[keys]] begin[:]
<ast.Try object at 0x7da1b0a4aef0>
return[name[value]] | keyword[def] identifier[get_value] ( identifier[record] , identifier[key] , identifier[default] = keyword[None] ):
literal[string]
keyword[def] identifier[getitem] ( identifier[k] , identifier[v] , identifier[default] ):
keyword[if] identifier[isinstance] ( identifier[v] , identifier[string_types] ):
keyword[raise] identifier[KeyError]
keyword[elif] identifier[isinstance] ( identifier[v] , identifier[dict] ):
keyword[return] identifier[v] [ identifier[k] ]
keyword[elif] literal[string] keyword[in] identifier[k] :
identifier[k] = identifier[k] [:- literal[int] ]. identifier[replace] ( literal[string] , literal[string] )
keyword[try] :
keyword[return] identifier[v] [ identifier[int] ( identifier[k] )]
keyword[except] identifier[IndexError] :
keyword[return] identifier[default]
keyword[except] identifier[ValueError] :
keyword[return] identifier[v] [ identifier[slice] (* identifier[map] (
keyword[lambda] identifier[x] : identifier[int] ( identifier[x] . identifier[strip] ()) keyword[if] identifier[x] . identifier[strip] () keyword[else] keyword[None] ,
identifier[k] . identifier[split] ( literal[string] )
))]
keyword[else] :
identifier[tmp] =[]
keyword[for] identifier[inner_v] keyword[in] identifier[v] :
keyword[try] :
identifier[tmp] . identifier[append] ( identifier[getitem] ( identifier[k] , identifier[inner_v] , identifier[default] ))
keyword[except] identifier[KeyError] :
keyword[continue]
keyword[return] identifier[tmp]
keyword[if] identifier[isinstance] ( identifier[record] , identifier[list] ):
identifier[record] ={ literal[string] : identifier[record] }
identifier[key] = literal[string] . identifier[join] ([ literal[string] , identifier[key] ])
keyword[try] :
keyword[return] identifier[record] [ identifier[key] ]
keyword[except] identifier[KeyError] :
keyword[pass]
identifier[keys] = identifier[SPLIT_KEY_PATTERN] . identifier[split] ( identifier[key] )
identifier[value] = identifier[record]
keyword[for] identifier[k] keyword[in] identifier[keys] :
keyword[try] :
identifier[value] = identifier[getitem] ( identifier[k] , identifier[value] , identifier[default] )
keyword[except] identifier[KeyError] :
keyword[return] identifier[default]
keyword[return] identifier[value] | def get_value(record, key, default=None):
"""Return item as `dict.__getitem__` but using 'smart queries'.
.. note::
Accessing one value in a normal way, meaning d['a'], is almost as
fast as accessing a regular dictionary. But using the special
name convention is a bit slower than using the regular access:
.. code-block:: python
>>> %timeit x = dd['a[0].b']
100000 loops, best of 3: 3.94 us per loop
>>> %timeit x = dd['a'][0]['b']
1000000 loops, best of 3: 598 ns per loop
"""
def getitem(k, v, default):
if isinstance(v, string_types):
raise KeyError # depends on [control=['if'], data=[]]
elif isinstance(v, dict):
return v[k] # depends on [control=['if'], data=[]]
elif ']' in k:
k = k[:-1].replace('n', '-1')
# Work around for list indexes and slices
try:
return v[int(k)] # depends on [control=['try'], data=[]]
except IndexError:
return default # depends on [control=['except'], data=[]]
except ValueError:
return v[slice(*map(lambda x: int(x.strip()) if x.strip() else None, k.split(':')))] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['k']]
else:
tmp = []
for inner_v in v:
try:
tmp.append(getitem(k, inner_v, default)) # depends on [control=['try'], data=[]]
except KeyError:
continue # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['inner_v']]
return tmp
# Wrap a top-level list in a dict
if isinstance(record, list):
record = {'record': record}
key = '.'.join(['record', key]) # depends on [control=['if'], data=[]]
# Check if we are using python regular keys
try:
return record[key] # depends on [control=['try'], data=[]]
except KeyError:
pass # depends on [control=['except'], data=[]]
keys = SPLIT_KEY_PATTERN.split(key)
value = record
for k in keys:
try:
value = getitem(k, value, default) # depends on [control=['try'], data=[]]
except KeyError:
return default # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['k']]
return value |
def append(self, data):
"""Append a section to the body.
:param data: The data to append.
:type data: str or bytes
"""
if isinstance(data, six.text_type):
self._message.add_body_data(data.encode(self._encoding))
elif isinstance(data, six.binary_type):
self._message.add_body_data(data) | def function[append, parameter[self, data]]:
constant[Append a section to the body.
:param data: The data to append.
:type data: str or bytes
]
if call[name[isinstance], parameter[name[data], name[six].text_type]] begin[:]
call[name[self]._message.add_body_data, parameter[call[name[data].encode, parameter[name[self]._encoding]]]] | keyword[def] identifier[append] ( identifier[self] , identifier[data] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[data] , identifier[six] . identifier[text_type] ):
identifier[self] . identifier[_message] . identifier[add_body_data] ( identifier[data] . identifier[encode] ( identifier[self] . identifier[_encoding] ))
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[six] . identifier[binary_type] ):
identifier[self] . identifier[_message] . identifier[add_body_data] ( identifier[data] ) | def append(self, data):
"""Append a section to the body.
:param data: The data to append.
:type data: str or bytes
"""
if isinstance(data, six.text_type):
self._message.add_body_data(data.encode(self._encoding)) # depends on [control=['if'], data=[]]
elif isinstance(data, six.binary_type):
self._message.add_body_data(data) # depends on [control=['if'], data=[]] |
def pygmentify(value, **kwargs):
"""Return a highlighted code block with Pygments."""
soup = BeautifulSoup(value, 'html.parser')
for pre in soup.find_all('pre'):
# Get code
code = ''.join([to_string(item) for item in pre.contents])
code = code.replace('<', '<')
code = code.replace('>', '>')
code = code.replace(''', "'")
code = code.replace('"', '"')
code = code.replace('&', '&')
# Get lexer by language
class_list = pre.get('class', [])
lexers = []
options = {
'stripall': True
}
# Collect all found lexers
for c in class_list:
try:
lexers.append(get_lexer_by_name(c, **options))
except ClassNotFound:
pass
# Get first lexer match or none
try:
lexer = lexers[0]
except IndexError:
lexer = None
# If no lexer, try guessing
if lexer is None:
try:
lexer = guess_lexer(pre.text, **options)
class_list += [alias for alias in lexer.aliases]
except ClassNotFound:
pass
if lexer is not None:
# Get formatter
formatter = HtmlFormatter(**kwargs)
# Highlight code
highlighted = highlight(code, lexer, formatter)
class_string = ' '.join([c for c in class_list])
highlighted = highlighted.replace(
'<div class="%s"><pre>' % kwargs['cssclass'],
'<div class="%s"><pre class="%s">' % (kwargs['cssclass'], class_string)
)
pre.replace_with(highlighted)
return soup.decode(formatter=None).strip() | def function[pygmentify, parameter[value]]:
constant[Return a highlighted code block with Pygments.]
variable[soup] assign[=] call[name[BeautifulSoup], parameter[name[value], constant[html.parser]]]
for taget[name[pre]] in starred[call[name[soup].find_all, parameter[constant[pre]]]] begin[:]
variable[code] assign[=] call[constant[].join, parameter[<ast.ListComp object at 0x7da18f00f0a0>]]
variable[code] assign[=] call[name[code].replace, parameter[constant[<], constant[<]]]
variable[code] assign[=] call[name[code].replace, parameter[constant[>], constant[>]]]
variable[code] assign[=] call[name[code].replace, parameter[constant['], constant[']]]
variable[code] assign[=] call[name[code].replace, parameter[constant["], constant["]]]
variable[code] assign[=] call[name[code].replace, parameter[constant[&], constant[&]]]
variable[class_list] assign[=] call[name[pre].get, parameter[constant[class], list[[]]]]
variable[lexers] assign[=] list[[]]
variable[options] assign[=] dictionary[[<ast.Constant object at 0x7da1b0a726e0>], [<ast.Constant object at 0x7da1b0a72e00>]]
for taget[name[c]] in starred[name[class_list]] begin[:]
<ast.Try object at 0x7da1b0a73e80>
<ast.Try object at 0x7da1b0a70c40>
if compare[name[lexer] is constant[None]] begin[:]
<ast.Try object at 0x7da1b0a71a80>
if compare[name[lexer] is_not constant[None]] begin[:]
variable[formatter] assign[=] call[name[HtmlFormatter], parameter[]]
variable[highlighted] assign[=] call[name[highlight], parameter[name[code], name[lexer], name[formatter]]]
variable[class_string] assign[=] call[constant[ ].join, parameter[<ast.ListComp object at 0x7da1b0a704c0>]]
variable[highlighted] assign[=] call[name[highlighted].replace, parameter[binary_operation[constant[<div class="%s"><pre>] <ast.Mod object at 0x7da2590d6920> call[name[kwargs]][constant[cssclass]]], binary_operation[constant[<div class="%s"><pre class="%s">] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b0a72c20>, <ast.Name object at 0x7da1b0a72620>]]]]]
call[name[pre].replace_with, parameter[name[highlighted]]]
return[call[call[name[soup].decode, parameter[]].strip, parameter[]]] | keyword[def] identifier[pygmentify] ( identifier[value] ,** identifier[kwargs] ):
literal[string]
identifier[soup] = identifier[BeautifulSoup] ( identifier[value] , literal[string] )
keyword[for] identifier[pre] keyword[in] identifier[soup] . identifier[find_all] ( literal[string] ):
identifier[code] = literal[string] . identifier[join] ([ identifier[to_string] ( identifier[item] ) keyword[for] identifier[item] keyword[in] identifier[pre] . identifier[contents] ])
identifier[code] = identifier[code] . identifier[replace] ( literal[string] , literal[string] )
identifier[code] = identifier[code] . identifier[replace] ( literal[string] , literal[string] )
identifier[code] = identifier[code] . identifier[replace] ( literal[string] , literal[string] )
identifier[code] = identifier[code] . identifier[replace] ( literal[string] , literal[string] )
identifier[code] = identifier[code] . identifier[replace] ( literal[string] , literal[string] )
identifier[class_list] = identifier[pre] . identifier[get] ( literal[string] ,[])
identifier[lexers] =[]
identifier[options] ={
literal[string] : keyword[True]
}
keyword[for] identifier[c] keyword[in] identifier[class_list] :
keyword[try] :
identifier[lexers] . identifier[append] ( identifier[get_lexer_by_name] ( identifier[c] ,** identifier[options] ))
keyword[except] identifier[ClassNotFound] :
keyword[pass]
keyword[try] :
identifier[lexer] = identifier[lexers] [ literal[int] ]
keyword[except] identifier[IndexError] :
identifier[lexer] = keyword[None]
keyword[if] identifier[lexer] keyword[is] keyword[None] :
keyword[try] :
identifier[lexer] = identifier[guess_lexer] ( identifier[pre] . identifier[text] ,** identifier[options] )
identifier[class_list] +=[ identifier[alias] keyword[for] identifier[alias] keyword[in] identifier[lexer] . identifier[aliases] ]
keyword[except] identifier[ClassNotFound] :
keyword[pass]
keyword[if] identifier[lexer] keyword[is] keyword[not] keyword[None] :
identifier[formatter] = identifier[HtmlFormatter] (** identifier[kwargs] )
identifier[highlighted] = identifier[highlight] ( identifier[code] , identifier[lexer] , identifier[formatter] )
identifier[class_string] = literal[string] . identifier[join] ([ identifier[c] keyword[for] identifier[c] keyword[in] identifier[class_list] ])
identifier[highlighted] = identifier[highlighted] . identifier[replace] (
literal[string] % identifier[kwargs] [ literal[string] ],
literal[string] %( identifier[kwargs] [ literal[string] ], identifier[class_string] )
)
identifier[pre] . identifier[replace_with] ( identifier[highlighted] )
keyword[return] identifier[soup] . identifier[decode] ( identifier[formatter] = keyword[None] ). identifier[strip] () | def pygmentify(value, **kwargs):
"""Return a highlighted code block with Pygments."""
soup = BeautifulSoup(value, 'html.parser')
for pre in soup.find_all('pre'):
# Get code
code = ''.join([to_string(item) for item in pre.contents])
code = code.replace('<', '<')
code = code.replace('>', '>')
code = code.replace(''', "'")
code = code.replace('"', '"')
code = code.replace('&', '&')
# Get lexer by language
class_list = pre.get('class', [])
lexers = []
options = {'stripall': True}
# Collect all found lexers
for c in class_list:
try:
lexers.append(get_lexer_by_name(c, **options)) # depends on [control=['try'], data=[]]
except ClassNotFound:
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['c']]
# Get first lexer match or none
try:
lexer = lexers[0] # depends on [control=['try'], data=[]]
except IndexError:
lexer = None # depends on [control=['except'], data=[]]
# If no lexer, try guessing
if lexer is None:
try:
lexer = guess_lexer(pre.text, **options)
class_list += [alias for alias in lexer.aliases] # depends on [control=['try'], data=[]]
except ClassNotFound:
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['lexer']]
if lexer is not None:
# Get formatter
formatter = HtmlFormatter(**kwargs)
# Highlight code
highlighted = highlight(code, lexer, formatter)
class_string = ' '.join([c for c in class_list])
highlighted = highlighted.replace('<div class="%s"><pre>' % kwargs['cssclass'], '<div class="%s"><pre class="%s">' % (kwargs['cssclass'], class_string))
pre.replace_with(highlighted) # depends on [control=['if'], data=['lexer']] # depends on [control=['for'], data=['pre']]
return soup.decode(formatter=None).strip() |
def _insert(self, tree):
""" Run an INSERT statement """
tablename = tree.table
count = 0
kwargs = {}
batch = self.connection.batch_write(tablename, **kwargs)
with batch:
for item in iter_insert_items(tree):
batch.put(item)
count += 1
return count | def function[_insert, parameter[self, tree]]:
constant[ Run an INSERT statement ]
variable[tablename] assign[=] name[tree].table
variable[count] assign[=] constant[0]
variable[kwargs] assign[=] dictionary[[], []]
variable[batch] assign[=] call[name[self].connection.batch_write, parameter[name[tablename]]]
with name[batch] begin[:]
for taget[name[item]] in starred[call[name[iter_insert_items], parameter[name[tree]]]] begin[:]
call[name[batch].put, parameter[name[item]]]
<ast.AugAssign object at 0x7da207f9a050>
return[name[count]] | keyword[def] identifier[_insert] ( identifier[self] , identifier[tree] ):
literal[string]
identifier[tablename] = identifier[tree] . identifier[table]
identifier[count] = literal[int]
identifier[kwargs] ={}
identifier[batch] = identifier[self] . identifier[connection] . identifier[batch_write] ( identifier[tablename] ,** identifier[kwargs] )
keyword[with] identifier[batch] :
keyword[for] identifier[item] keyword[in] identifier[iter_insert_items] ( identifier[tree] ):
identifier[batch] . identifier[put] ( identifier[item] )
identifier[count] += literal[int]
keyword[return] identifier[count] | def _insert(self, tree):
""" Run an INSERT statement """
tablename = tree.table
count = 0
kwargs = {}
batch = self.connection.batch_write(tablename, **kwargs)
with batch:
for item in iter_insert_items(tree):
batch.put(item)
count += 1 # depends on [control=['for'], data=['item']] # depends on [control=['with'], data=[]]
return count |
def copy_file_if_newer(
src_fs, # type: Union[FS, Text]
src_path, # type: Text
dst_fs, # type: Union[FS, Text]
dst_path, # type: Text
):
# type: (...) -> bool
"""Copy a file from one filesystem to another, checking times.
If the destination exists, and is a file, it will be first truncated.
If both source and destination files exist, the copy is executed only
if the source file is newer than the destination file. In case
modification times of source or destination files are not available,
copy is always executed.
Arguments:
src_fs (FS or str): Source filesystem (instance or URL).
src_path (str): Path to a file on the source filesystem.
dst_fs (FS or str): Destination filesystem (instance or URL).
dst_path (str): Path to a file on the destination filesystem.
Returns:
bool: `True` if the file copy was executed, `False` otherwise.
"""
with manage_fs(src_fs, writeable=False) as _src_fs:
with manage_fs(dst_fs, create=True) as _dst_fs:
if _src_fs is _dst_fs:
# Same filesystem, so we can do a potentially optimized
# copy
if _source_is_newer(_src_fs, src_path, _dst_fs, dst_path):
_src_fs.copy(src_path, dst_path, overwrite=True)
return True
else:
return False
else:
# Standard copy
with _src_fs.lock(), _dst_fs.lock():
if _source_is_newer(_src_fs, src_path, _dst_fs, dst_path):
copy_file_internal(_src_fs, src_path, _dst_fs, dst_path)
return True
else:
return False | def function[copy_file_if_newer, parameter[src_fs, src_path, dst_fs, dst_path]]:
constant[Copy a file from one filesystem to another, checking times.
If the destination exists, and is a file, it will be first truncated.
If both source and destination files exist, the copy is executed only
if the source file is newer than the destination file. In case
modification times of source or destination files are not available,
copy is always executed.
Arguments:
src_fs (FS or str): Source filesystem (instance or URL).
src_path (str): Path to a file on the source filesystem.
dst_fs (FS or str): Destination filesystem (instance or URL).
dst_path (str): Path to a file on the destination filesystem.
Returns:
bool: `True` if the file copy was executed, `False` otherwise.
]
with call[name[manage_fs], parameter[name[src_fs]]] begin[:]
with call[name[manage_fs], parameter[name[dst_fs]]] begin[:]
if compare[name[_src_fs] is name[_dst_fs]] begin[:]
if call[name[_source_is_newer], parameter[name[_src_fs], name[src_path], name[_dst_fs], name[dst_path]]] begin[:]
call[name[_src_fs].copy, parameter[name[src_path], name[dst_path]]]
return[constant[True]] | keyword[def] identifier[copy_file_if_newer] (
identifier[src_fs] ,
identifier[src_path] ,
identifier[dst_fs] ,
identifier[dst_path] ,
):
literal[string]
keyword[with] identifier[manage_fs] ( identifier[src_fs] , identifier[writeable] = keyword[False] ) keyword[as] identifier[_src_fs] :
keyword[with] identifier[manage_fs] ( identifier[dst_fs] , identifier[create] = keyword[True] ) keyword[as] identifier[_dst_fs] :
keyword[if] identifier[_src_fs] keyword[is] identifier[_dst_fs] :
keyword[if] identifier[_source_is_newer] ( identifier[_src_fs] , identifier[src_path] , identifier[_dst_fs] , identifier[dst_path] ):
identifier[_src_fs] . identifier[copy] ( identifier[src_path] , identifier[dst_path] , identifier[overwrite] = keyword[True] )
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False]
keyword[else] :
keyword[with] identifier[_src_fs] . identifier[lock] (), identifier[_dst_fs] . identifier[lock] ():
keyword[if] identifier[_source_is_newer] ( identifier[_src_fs] , identifier[src_path] , identifier[_dst_fs] , identifier[dst_path] ):
identifier[copy_file_internal] ( identifier[_src_fs] , identifier[src_path] , identifier[_dst_fs] , identifier[dst_path] )
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False] | def copy_file_if_newer(src_fs, src_path, dst_fs, dst_path): # type: Union[FS, Text]
# type: Text
# type: Union[FS, Text]
# type: Text
# type: (...) -> bool
'Copy a file from one filesystem to another, checking times.\n\n If the destination exists, and is a file, it will be first truncated.\n If both source and destination files exist, the copy is executed only\n if the source file is newer than the destination file. In case\n modification times of source or destination files are not available,\n copy is always executed.\n\n Arguments:\n src_fs (FS or str): Source filesystem (instance or URL).\n src_path (str): Path to a file on the source filesystem.\n dst_fs (FS or str): Destination filesystem (instance or URL).\n dst_path (str): Path to a file on the destination filesystem.\n\n Returns:\n bool: `True` if the file copy was executed, `False` otherwise.\n\n '
with manage_fs(src_fs, writeable=False) as _src_fs:
with manage_fs(dst_fs, create=True) as _dst_fs:
if _src_fs is _dst_fs:
# Same filesystem, so we can do a potentially optimized
# copy
if _source_is_newer(_src_fs, src_path, _dst_fs, dst_path):
_src_fs.copy(src_path, dst_path, overwrite=True)
return True # depends on [control=['if'], data=[]]
else:
return False # depends on [control=['if'], data=['_src_fs', '_dst_fs']]
else:
# Standard copy
with _src_fs.lock(), _dst_fs.lock():
if _source_is_newer(_src_fs, src_path, _dst_fs, dst_path):
copy_file_internal(_src_fs, src_path, _dst_fs, dst_path)
return True # depends on [control=['if'], data=[]]
else:
return False # depends on [control=['with'], data=[]] # depends on [control=['with'], data=['_dst_fs']] # depends on [control=['with'], data=['manage_fs', '_src_fs']] |
def orders(self, dt=None):
"""Retrieve the dict-form of all of the orders in a given bar or for
the whole simulation.
Parameters
----------
dt : pd.Timestamp or None, optional
The particular datetime to look up order for. If not passed, or
None is explicitly passed, all of the orders will be returned.
Returns
-------
orders : list[dict]
The order information.
"""
if dt is None:
# orders by id is already flattened
return [o.to_dict() for o in itervalues(self._orders_by_id)]
return [
o.to_dict()
for o in itervalues(self._orders_by_modified.get(dt, {}))
] | def function[orders, parameter[self, dt]]:
constant[Retrieve the dict-form of all of the orders in a given bar or for
the whole simulation.
Parameters
----------
dt : pd.Timestamp or None, optional
The particular datetime to look up order for. If not passed, or
None is explicitly passed, all of the orders will be returned.
Returns
-------
orders : list[dict]
The order information.
]
if compare[name[dt] is constant[None]] begin[:]
return[<ast.ListComp object at 0x7da1b2041e40>]
return[<ast.ListComp object at 0x7da1b20430d0>] | keyword[def] identifier[orders] ( identifier[self] , identifier[dt] = keyword[None] ):
literal[string]
keyword[if] identifier[dt] keyword[is] keyword[None] :
keyword[return] [ identifier[o] . identifier[to_dict] () keyword[for] identifier[o] keyword[in] identifier[itervalues] ( identifier[self] . identifier[_orders_by_id] )]
keyword[return] [
identifier[o] . identifier[to_dict] ()
keyword[for] identifier[o] keyword[in] identifier[itervalues] ( identifier[self] . identifier[_orders_by_modified] . identifier[get] ( identifier[dt] ,{}))
] | def orders(self, dt=None):
"""Retrieve the dict-form of all of the orders in a given bar or for
the whole simulation.
Parameters
----------
dt : pd.Timestamp or None, optional
The particular datetime to look up order for. If not passed, or
None is explicitly passed, all of the orders will be returned.
Returns
-------
orders : list[dict]
The order information.
"""
if dt is None:
# orders by id is already flattened
return [o.to_dict() for o in itervalues(self._orders_by_id)] # depends on [control=['if'], data=[]]
return [o.to_dict() for o in itervalues(self._orders_by_modified.get(dt, {}))] |
def ensure_loopback_device(path, size):
'''
Ensure a loopback device exists for a given backing file path and size.
If it a loopback device is not mapped to file, a new one will be created.
TODO: Confirm size of found loopback device.
:returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
'''
for d, f in six.iteritems(loopback_devices()):
if f == path:
return d
if not os.path.exists(path):
cmd = ['truncate', '--size', size, path]
check_call(cmd)
return create_loopback(path) | def function[ensure_loopback_device, parameter[path, size]]:
constant[
Ensure a loopback device exists for a given backing file path and size.
If it a loopback device is not mapped to file, a new one will be created.
TODO: Confirm size of found loopback device.
:returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
]
for taget[tuple[[<ast.Name object at 0x7da1b121ac80>, <ast.Name object at 0x7da1b121aa70>]]] in starred[call[name[six].iteritems, parameter[call[name[loopback_devices], parameter[]]]]] begin[:]
if compare[name[f] equal[==] name[path]] begin[:]
return[name[d]]
if <ast.UnaryOp object at 0x7da1b121a740> begin[:]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da1b12196f0>, <ast.Constant object at 0x7da1b121a6e0>, <ast.Name object at 0x7da1b121b460>, <ast.Name object at 0x7da1b121b4c0>]]
call[name[check_call], parameter[name[cmd]]]
return[call[name[create_loopback], parameter[name[path]]]] | keyword[def] identifier[ensure_loopback_device] ( identifier[path] , identifier[size] ):
literal[string]
keyword[for] identifier[d] , identifier[f] keyword[in] identifier[six] . identifier[iteritems] ( identifier[loopback_devices] ()):
keyword[if] identifier[f] == identifier[path] :
keyword[return] identifier[d]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[path] ):
identifier[cmd] =[ literal[string] , literal[string] , identifier[size] , identifier[path] ]
identifier[check_call] ( identifier[cmd] )
keyword[return] identifier[create_loopback] ( identifier[path] ) | def ensure_loopback_device(path, size):
"""
Ensure a loopback device exists for a given backing file path and size.
If it a loopback device is not mapped to file, a new one will be created.
TODO: Confirm size of found loopback device.
:returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
"""
for (d, f) in six.iteritems(loopback_devices()):
if f == path:
return d # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if not os.path.exists(path):
cmd = ['truncate', '--size', size, path]
check_call(cmd) # depends on [control=['if'], data=[]]
return create_loopback(path) |
def get_document(self, doc_url):
""" Retrieve the content for the given document from the cache.
:type doc_url: String or Document
:param doc_url: the URL of the document, or a Document object
:rtype: String
:returns: the document data
:raises: ValueError if the item is not in the cache
"""
c = self.conn.cursor()
c.execute("SELECT * FROM documents WHERE url=?", (str(doc_url),))
row = c.fetchone()
c.close()
if row is None:
raise ValueError("Item not present in cache")
file_path = row[1]
try:
with open(file_path, 'rb') as f:
return f.read()
except IOError as e:
raise IOError("Error reading file " + file_path +
" to retrieve document " + doc_url +
": " + e.message) | def function[get_document, parameter[self, doc_url]]:
constant[ Retrieve the content for the given document from the cache.
:type doc_url: String or Document
:param doc_url: the URL of the document, or a Document object
:rtype: String
:returns: the document data
:raises: ValueError if the item is not in the cache
]
variable[c] assign[=] call[name[self].conn.cursor, parameter[]]
call[name[c].execute, parameter[constant[SELECT * FROM documents WHERE url=?], tuple[[<ast.Call object at 0x7da1b2457f40>]]]]
variable[row] assign[=] call[name[c].fetchone, parameter[]]
call[name[c].close, parameter[]]
if compare[name[row] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b2457be0>
variable[file_path] assign[=] call[name[row]][constant[1]]
<ast.Try object at 0x7da1b24576d0> | keyword[def] identifier[get_document] ( identifier[self] , identifier[doc_url] ):
literal[string]
identifier[c] = identifier[self] . identifier[conn] . identifier[cursor] ()
identifier[c] . identifier[execute] ( literal[string] ,( identifier[str] ( identifier[doc_url] ),))
identifier[row] = identifier[c] . identifier[fetchone] ()
identifier[c] . identifier[close] ()
keyword[if] identifier[row] keyword[is] keyword[None] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[file_path] = identifier[row] [ literal[int] ]
keyword[try] :
keyword[with] identifier[open] ( identifier[file_path] , literal[string] ) keyword[as] identifier[f] :
keyword[return] identifier[f] . identifier[read] ()
keyword[except] identifier[IOError] keyword[as] identifier[e] :
keyword[raise] identifier[IOError] ( literal[string] + identifier[file_path] +
literal[string] + identifier[doc_url] +
literal[string] + identifier[e] . identifier[message] ) | def get_document(self, doc_url):
""" Retrieve the content for the given document from the cache.
:type doc_url: String or Document
:param doc_url: the URL of the document, or a Document object
:rtype: String
:returns: the document data
:raises: ValueError if the item is not in the cache
"""
c = self.conn.cursor()
c.execute('SELECT * FROM documents WHERE url=?', (str(doc_url),))
row = c.fetchone()
c.close()
if row is None:
raise ValueError('Item not present in cache') # depends on [control=['if'], data=[]]
file_path = row[1]
try:
with open(file_path, 'rb') as f:
return f.read() # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]]
except IOError as e:
raise IOError('Error reading file ' + file_path + ' to retrieve document ' + doc_url + ': ' + e.message) # depends on [control=['except'], data=['e']] |
def AddSignatureToRPMs(self, rpm_filenames):
"""Sign RPM with rpmsign."""
# The horrible second argument here is necessary to get a V3 signature to
# support CentOS 5 systems. See:
# http://ilostmynotes.blogspot.com/2016/03/the-horror-of-signing-rpms-that-support.html
args = [
"--define=%%_gpg_name %s" % self.gpg_name,
("--define=__gpg_sign_cmd %%{__gpg} gpg --force-v3-sigs --yes "
"--digest-algo=sha1 --no-verbose --no-armor --pinentry-mode loopback "
"--no-secmem-warning -u '%s' -sbo %%{__signature_filename} "
"%%{__plaintext_filename}" % self.gpg_name), "--addsign"
] + rpm_filenames
try:
output_log = io.StringIO()
rpmsign = pexpect.spawn("rpmsign", args, timeout=1000)
# Use logfile_read so we don't get the password we're injecting.
rpmsign.logfile_read = output_log
rpmsign.expect("phrase:")
rpmsign.sendline(self.password)
rpmsign.wait()
except pexpect.exceptions.EOF:
# This could have worked using a cached passphrase, we check for the
# actual signature below and raise if the package wasn't signed after all.
pass
except pexpect.ExceptionPexpect:
logging.error(output_log.getvalue())
raise
for rpm_filename in rpm_filenames:
try:
# Expected output is: filename.rpm: rsa sha1 (md5) pgp md5 OK
output = subprocess.check_output(["rpm", "--checksig", rpm_filename])
if "pgp" not in output:
raise SigningError("PGP missing checksig %s" % rpm_filename)
except subprocess.CalledProcessError:
logging.exception("Bad signature verification on %s", rpm_filename)
raise SigningError("Bad signature verification on %s" % rpm_filename) | def function[AddSignatureToRPMs, parameter[self, rpm_filenames]]:
constant[Sign RPM with rpmsign.]
variable[args] assign[=] binary_operation[list[[<ast.BinOp object at 0x7da1b1c3e8c0>, <ast.BinOp object at 0x7da1b1c3c550>, <ast.Constant object at 0x7da1b1c3fd30>]] + name[rpm_filenames]]
<ast.Try object at 0x7da1b1c3d270>
for taget[name[rpm_filename]] in starred[name[rpm_filenames]] begin[:]
<ast.Try object at 0x7da1b1b29cf0> | keyword[def] identifier[AddSignatureToRPMs] ( identifier[self] , identifier[rpm_filenames] ):
literal[string]
identifier[args] =[
literal[string] % identifier[self] . identifier[gpg_name] ,
( literal[string]
literal[string]
literal[string]
literal[string] % identifier[self] . identifier[gpg_name] ), literal[string]
]+ identifier[rpm_filenames]
keyword[try] :
identifier[output_log] = identifier[io] . identifier[StringIO] ()
identifier[rpmsign] = identifier[pexpect] . identifier[spawn] ( literal[string] , identifier[args] , identifier[timeout] = literal[int] )
identifier[rpmsign] . identifier[logfile_read] = identifier[output_log]
identifier[rpmsign] . identifier[expect] ( literal[string] )
identifier[rpmsign] . identifier[sendline] ( identifier[self] . identifier[password] )
identifier[rpmsign] . identifier[wait] ()
keyword[except] identifier[pexpect] . identifier[exceptions] . identifier[EOF] :
keyword[pass]
keyword[except] identifier[pexpect] . identifier[ExceptionPexpect] :
identifier[logging] . identifier[error] ( identifier[output_log] . identifier[getvalue] ())
keyword[raise]
keyword[for] identifier[rpm_filename] keyword[in] identifier[rpm_filenames] :
keyword[try] :
identifier[output] = identifier[subprocess] . identifier[check_output] ([ literal[string] , literal[string] , identifier[rpm_filename] ])
keyword[if] literal[string] keyword[not] keyword[in] identifier[output] :
keyword[raise] identifier[SigningError] ( literal[string] % identifier[rpm_filename] )
keyword[except] identifier[subprocess] . identifier[CalledProcessError] :
identifier[logging] . identifier[exception] ( literal[string] , identifier[rpm_filename] )
keyword[raise] identifier[SigningError] ( literal[string] % identifier[rpm_filename] ) | def AddSignatureToRPMs(self, rpm_filenames):
"""Sign RPM with rpmsign."""
# The horrible second argument here is necessary to get a V3 signature to
# support CentOS 5 systems. See:
# http://ilostmynotes.blogspot.com/2016/03/the-horror-of-signing-rpms-that-support.html
args = ['--define=%%_gpg_name %s' % self.gpg_name, "--define=__gpg_sign_cmd %%{__gpg} gpg --force-v3-sigs --yes --digest-algo=sha1 --no-verbose --no-armor --pinentry-mode loopback --no-secmem-warning -u '%s' -sbo %%{__signature_filename} %%{__plaintext_filename}" % self.gpg_name, '--addsign'] + rpm_filenames
try:
output_log = io.StringIO()
rpmsign = pexpect.spawn('rpmsign', args, timeout=1000)
# Use logfile_read so we don't get the password we're injecting.
rpmsign.logfile_read = output_log
rpmsign.expect('phrase:')
rpmsign.sendline(self.password)
rpmsign.wait() # depends on [control=['try'], data=[]]
except pexpect.exceptions.EOF:
# This could have worked using a cached passphrase, we check for the
# actual signature below and raise if the package wasn't signed after all.
pass # depends on [control=['except'], data=[]]
except pexpect.ExceptionPexpect:
logging.error(output_log.getvalue())
raise # depends on [control=['except'], data=[]]
for rpm_filename in rpm_filenames:
try:
# Expected output is: filename.rpm: rsa sha1 (md5) pgp md5 OK
output = subprocess.check_output(['rpm', '--checksig', rpm_filename])
if 'pgp' not in output:
raise SigningError('PGP missing checksig %s' % rpm_filename) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except subprocess.CalledProcessError:
logging.exception('Bad signature verification on %s', rpm_filename)
raise SigningError('Bad signature verification on %s' % rpm_filename) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['rpm_filename']] |
def pause(self):
    """
    Pause a running pipeline.

    Stops retrieving results from the pipeline; parallel parts of the
    pipeline will stop after the ``NuMap`` buffer has been filled. A
    paused pipeline can be run or stopped.
    """
    # Only a started, running, not-already-pausing pipeline may be paused.
    can_pause = (self._started.isSet() and
                 self._running.isSet() and
                 not self._pausing.isSet())
    if not can_pause:
        raise PlumberError
    # Stop the plumbing thread by raising a StopIteration on a stride
    # boundary, then wait for it to terminate before clearing state.
    self._pausing.set()
    self._plunger.join()
    del self._plunger
    self._pausing.clear()
    self._running.clear()
constant[
Pauses a running pipeline. This will stop retrieving results from the
pipeline. Parallel parts of the pipeline will stop after the ``NuMap``
buffer is has been filled. A paused pipeline can be run or stopped.
]
if <ast.BoolOp object at 0x7da1b2564100> begin[:]
call[name[self]._pausing.set, parameter[]]
call[name[self]._plunger.join, parameter[]]
<ast.Delete object at 0x7da1b2519930>
call[name[self]._pausing.clear, parameter[]]
call[name[self]._running.clear, parameter[]] | keyword[def] identifier[pause] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_started] . identifier[isSet] () keyword[and] identifier[self] . identifier[_running] . identifier[isSet] () keyword[and] keyword[not] identifier[self] . identifier[_pausing] . identifier[isSet] ():
identifier[self] . identifier[_pausing] . identifier[set] ()
identifier[self] . identifier[_plunger] . identifier[join] ()
keyword[del] identifier[self] . identifier[_plunger]
identifier[self] . identifier[_pausing] . identifier[clear] ()
identifier[self] . identifier[_running] . identifier[clear] ()
keyword[else] :
keyword[raise] identifier[PlumberError] | def pause(self):
"""
Pauses a running pipeline. This will stop retrieving results from the
pipeline. Parallel parts of the pipeline will stop after the ``NuMap``
buffer is has been filled. A paused pipeline can be run or stopped.
""" # 1. stop the plumbing thread by raising a StopIteration on a stride
# boundary
if self._started.isSet() and self._running.isSet() and (not self._pausing.isSet()):
self._pausing.set()
self._plunger.join()
del self._plunger
self._pausing.clear()
self._running.clear() # depends on [control=['if'], data=[]]
else:
raise PlumberError |
def regexp_prompt(prompt, regexp='.', answer=''):
    '''Ask the user for a text entry that matches a regular expression.

    Keeps re-prompting until the (stripped) entry matches.

    Parameters
    ==========
    prompt: the prompt to ask the user
    regexp: the regular expression to match. defaults to anything.
    answer: initial answer; the user is not prompted if it already matches.
    '''
    # raw_input on Python 2, input on Python 3.
    get_input = getattr(__builtins__, 'raw_input', input)
    while not re.search(regexp, answer):
        # Fix: the old code built a "must match <regexp>" message here but
        # never displayed it (dead assignment); removed.
        answer = get_input(prompt + ': ').strip()
    return answer
constant[Ask the user for a text entry that matches a regular expression
Parameters
==========
prompt: the prompt to ask the user
regexp: the regular expression to match. defaults to anything.
]
variable[get_input] assign[=] call[name[getattr], parameter[name[__builtins__], constant[raw_input], name[input]]]
while <ast.UnaryOp object at 0x7da204347f70> begin[:]
variable[answer] assign[=] call[call[name[get_input], parameter[binary_operation[name[prompt] + constant[: ]]]].strip, parameter[]]
variable[message] assign[=] binary_operation[constant[Your entry must match the regular expression %s] <ast.Mod object at 0x7da2590d6920> name[regexp]]
return[name[answer]] | keyword[def] identifier[regexp_prompt] ( identifier[prompt] , identifier[regexp] = literal[string] , identifier[answer] = literal[string] ):
literal[string]
identifier[get_input] = identifier[getattr] ( identifier[__builtins__] , literal[string] , identifier[input] )
keyword[while] keyword[not] identifier[re] . identifier[search] ( identifier[regexp] , identifier[answer] ):
identifier[answer] = identifier[get_input] ( identifier[prompt] + literal[string] ). identifier[strip] ()
identifier[message] = literal[string] % identifier[regexp]
keyword[return] identifier[answer] | def regexp_prompt(prompt, regexp='.', answer=''):
"""Ask the user for a text entry that matches a regular expression
Parameters
==========
prompt: the prompt to ask the user
regexp: the regular expression to match. defaults to anything.
"""
get_input = getattr(__builtins__, 'raw_input', input)
while not re.search(regexp, answer):
answer = get_input(prompt + ': ').strip()
# If the option isn't valid, this is shown next
message = 'Your entry must match the regular expression %s' % regexp # depends on [control=['while'], data=[]]
return answer |
def add_callbacks(self, future, callback, errback):
    """
    Attach callback and/or errback to *future* and return it.

    callback or errback may be None, but at least one must be non-None.
    """
    assert future is not None
    if callback is not None:
        # Twisted accepts a None errback in addCallbacks.
        future.addCallbacks(callback, errback)
    else:
        assert errback is not None
        future.addErrback(errback)
    return future
constant[
callback or errback may be None, but at least one must be
non-None.
]
assert[compare[name[future] is_not constant[None]]]
if compare[name[callback] is constant[None]] begin[:]
assert[compare[name[errback] is_not constant[None]]]
call[name[future].addErrback, parameter[name[errback]]]
return[name[future]] | keyword[def] identifier[add_callbacks] ( identifier[self] , identifier[future] , identifier[callback] , identifier[errback] ):
literal[string]
keyword[assert] identifier[future] keyword[is] keyword[not] keyword[None]
keyword[if] identifier[callback] keyword[is] keyword[None] :
keyword[assert] identifier[errback] keyword[is] keyword[not] keyword[None]
identifier[future] . identifier[addErrback] ( identifier[errback] )
keyword[else] :
identifier[future] . identifier[addCallbacks] ( identifier[callback] , identifier[errback] )
keyword[return] identifier[future] | def add_callbacks(self, future, callback, errback):
"""
callback or errback may be None, but at least one must be
non-None.
"""
assert future is not None
if callback is None:
assert errback is not None
future.addErrback(errback) # depends on [control=['if'], data=[]]
else:
# Twisted allows errback to be None here
future.addCallbacks(callback, errback)
return future |
def save_record(self, agent_id, t_step, key, value):
    '''
    Save a single record to the database.

    Database writes are cached; the cache is flushed once it grows
    past 100 entries.
    '''
    converted = self.convert(key, value)
    record = Record(agent_id=agent_id,
                    t_step=t_step,
                    key=key,
                    value=converted)
    self._tups.append(record)
    if len(self._tups) > 100:
        self.flush_cache()
constant[
Save a collection of records to the database.
Database writes are cached.
]
variable[value] assign[=] call[name[self].convert, parameter[name[key], name[value]]]
call[name[self]._tups.append, parameter[call[name[Record], parameter[]]]]
if compare[call[name[len], parameter[name[self]._tups]] greater[>] constant[100]] begin[:]
call[name[self].flush_cache, parameter[]] | keyword[def] identifier[save_record] ( identifier[self] , identifier[agent_id] , identifier[t_step] , identifier[key] , identifier[value] ):
literal[string]
identifier[value] = identifier[self] . identifier[convert] ( identifier[key] , identifier[value] )
identifier[self] . identifier[_tups] . identifier[append] ( identifier[Record] ( identifier[agent_id] = identifier[agent_id] ,
identifier[t_step] = identifier[t_step] ,
identifier[key] = identifier[key] ,
identifier[value] = identifier[value] ))
keyword[if] identifier[len] ( identifier[self] . identifier[_tups] )> literal[int] :
identifier[self] . identifier[flush_cache] () | def save_record(self, agent_id, t_step, key, value):
"""
Save a collection of records to the database.
Database writes are cached.
"""
value = self.convert(key, value)
self._tups.append(Record(agent_id=agent_id, t_step=t_step, key=key, value=value))
if len(self._tups) > 100:
self.flush_cache() # depends on [control=['if'], data=[]] |
def ensureiterable(value, iterable=list, exclude=None):
    """Coerce a value into an iterable of the requested type.

    :param object value: object to convert
    :param type iterable: iterable type to apply (default: list)
    :param type/tuple exclude: types to not convert

    :Example:

    >>> ensureiterable([])
    []
    >>> ensureiterable([], iterable=tuple)
    ()
    >>> ensureiterable('test', exclude=str)
    ['test']
    >>> ensureiterable('test')
    ['t', 'e', 's', 't']
    """
    if isiterable(value, exclude=exclude):
        return iterable(value)
    # Non-iterable (or excluded) values are wrapped as a single element.
    return iterable([value])
constant[Convert a value into an iterable if it is not.
:param object value: object to convert
:param type iterable: iterable type to apply (default: list)
:param type/tuple exclude: types to not convert
:Example:
>>> ensureiterable([])
[]
>>> ensureiterable([], iterable=tuple)
()
>>> ensureiterable('test', exclude=str)
['test']
>>> ensureiterable('test')
['t', 'e', 's', 't']
]
variable[result] assign[=] name[value]
if <ast.UnaryOp object at 0x7da1b13bb160> begin[:]
variable[result] assign[=] list[[<ast.Name object at 0x7da1b13b83d0>]]
variable[result] assign[=] call[name[iterable], parameter[name[result]]]
return[name[result]] | keyword[def] identifier[ensureiterable] ( identifier[value] , identifier[iterable] = identifier[list] , identifier[exclude] = keyword[None] ):
literal[string]
identifier[result] = identifier[value]
keyword[if] keyword[not] identifier[isiterable] ( identifier[value] , identifier[exclude] = identifier[exclude] ):
identifier[result] =[ identifier[value] ]
identifier[result] = identifier[iterable] ( identifier[result] )
keyword[else] :
identifier[result] = identifier[iterable] ( identifier[value] )
keyword[return] identifier[result] | def ensureiterable(value, iterable=list, exclude=None):
"""Convert a value into an iterable if it is not.
:param object value: object to convert
:param type iterable: iterable type to apply (default: list)
:param type/tuple exclude: types to not convert
:Example:
>>> ensureiterable([])
[]
>>> ensureiterable([], iterable=tuple)
()
>>> ensureiterable('test', exclude=str)
['test']
>>> ensureiterable('test')
['t', 'e', 's', 't']
"""
result = value
if not isiterable(value, exclude=exclude):
result = [value]
result = iterable(result) # depends on [control=['if'], data=[]]
else:
result = iterable(value)
return result |
def extractArgumentsFromCallStr(callStr):
    """
    Parse the argument string via an AST instead of the overly simple
    callStr.split(','). The latter incorrectly splits any string parameters
    that contain commas therein, like ic(1, 'a,b', 2).

    Returns a list of argument source strings, e.g.
    "ic(a, b)" -> ['a', 'b']. Relies on the module-level helpers
    ``classname`` and ``splitStringAtIndices`` (defined elsewhere in
    this module).
    """
    def isTuple(ele):
        # True when the AST node is a tuple literal.
        return classname(ele) == 'Tuple'
    # Strip the callable name and the outermost parentheses:
    # "ic(a, b)" -> "a, b".
    paramsStr = callStr.split('(', 1)[-1].rsplit(')', 1)[0].strip()
    # Parse just the argument list; a multi-argument call parses as a
    # single Tuple node, a one-argument call as that argument's node.
    root = ast.parse(paramsStr).body[0].value
    eles = root.elts if isTuple(root) else [root]
    # The ast module parses 'a, b' and '(a, b)' identically. Thus, ast.parse()
    # alone can't tell the difference between
    #
    #   ic(a, b)
    #
    # and
    #
    #   ic((a, b))
    #
    # Detect this situation and preserve whole tuples, e.g. '(a, b)', passed to
    # ic() by creating a new, temporary tuple around the original tuple and
    # parsing that.
    if paramsStr[0] == '(' and paramsStr[-1] == ')' and len(eles) > 1:
        # Recurse on "((a, b), 'ignored')" and drop the sentinel argument.
        newTupleStr = '(' + paramsStr + ", 'ignored')"
        argStrs = extractArgumentsFromCallStr(newTupleStr)[:-1]
        return argStrs
    # col_offset values are relative to paramsStr (the exact string given to
    # ast.parse). For tuple elements, step back one column — presumably to
    # include the tuple's opening parenthesis in the split slice.
    indices = [
        max(0, e.col_offset - 1) if isTuple(e) else e.col_offset for e in eles]
    # Each slice may carry a leading/trailing separator; strip it off.
    argStrs = [s.strip(' ,') for s in splitStringAtIndices(paramsStr, indices)]
    return argStrs
constant[
Parse the argument string via an AST instead of the overly simple
callStr.split(','). The latter incorrectly splits any string parameters
that contain commas therein, like ic(1, 'a,b', 2).
]
def function[isTuple, parameter[ele]]:
return[compare[call[name[classname], parameter[name[ele]]] equal[==] constant[Tuple]]]
variable[paramsStr] assign[=] call[call[call[call[call[name[callStr].split, parameter[constant[(], constant[1]]]][<ast.UnaryOp object at 0x7da2054a62c0>].rsplit, parameter[constant[)], constant[1]]]][constant[0]].strip, parameter[]]
variable[root] assign[=] call[call[name[ast].parse, parameter[name[paramsStr]]].body][constant[0]].value
variable[eles] assign[=] <ast.IfExp object at 0x7da1b1263d90>
if <ast.BoolOp object at 0x7da1b1262680> begin[:]
variable[newTupleStr] assign[=] binary_operation[binary_operation[constant[(] + name[paramsStr]] + constant[, 'ignored')]]
variable[argStrs] assign[=] call[call[name[extractArgumentsFromCallStr], parameter[name[newTupleStr]]]][<ast.Slice object at 0x7da1b1260e50>]
return[name[argStrs]]
variable[indices] assign[=] <ast.ListComp object at 0x7da1b1262fb0>
variable[argStrs] assign[=] <ast.ListComp object at 0x7da1b12623b0>
return[name[argStrs]] | keyword[def] identifier[extractArgumentsFromCallStr] ( identifier[callStr] ):
literal[string]
keyword[def] identifier[isTuple] ( identifier[ele] ):
keyword[return] identifier[classname] ( identifier[ele] )== literal[string]
identifier[paramsStr] = identifier[callStr] . identifier[split] ( literal[string] , literal[int] )[- literal[int] ]. identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ]. identifier[strip] ()
identifier[root] = identifier[ast] . identifier[parse] ( identifier[paramsStr] ). identifier[body] [ literal[int] ]. identifier[value]
identifier[eles] = identifier[root] . identifier[elts] keyword[if] identifier[isTuple] ( identifier[root] ) keyword[else] [ identifier[root] ]
keyword[if] identifier[paramsStr] [ literal[int] ]== literal[string] keyword[and] identifier[paramsStr] [- literal[int] ]== literal[string] keyword[and] identifier[len] ( identifier[eles] )> literal[int] :
identifier[newTupleStr] = literal[string] + identifier[paramsStr] + literal[string]
identifier[argStrs] = identifier[extractArgumentsFromCallStr] ( identifier[newTupleStr] )[:- literal[int] ]
keyword[return] identifier[argStrs]
identifier[indices] =[
identifier[max] ( literal[int] , identifier[e] . identifier[col_offset] - literal[int] ) keyword[if] identifier[isTuple] ( identifier[e] ) keyword[else] identifier[e] . identifier[col_offset] keyword[for] identifier[e] keyword[in] identifier[eles] ]
identifier[argStrs] =[ identifier[s] . identifier[strip] ( literal[string] ) keyword[for] identifier[s] keyword[in] identifier[splitStringAtIndices] ( identifier[paramsStr] , identifier[indices] )]
keyword[return] identifier[argStrs] | def extractArgumentsFromCallStr(callStr):
"""
Parse the argument string via an AST instead of the overly simple
callStr.split(','). The latter incorrectly splits any string parameters
that contain commas therein, like ic(1, 'a,b', 2).
"""
def isTuple(ele):
return classname(ele) == 'Tuple'
paramsStr = callStr.split('(', 1)[-1].rsplit(')', 1)[0].strip()
root = ast.parse(paramsStr).body[0].value
eles = root.elts if isTuple(root) else [root]
# The ast module parses 'a, b' and '(a, b)' identically. Thus, ast.parse()
# alone can't tell the difference between
#
# ic(a, b)
#
# and
#
# ic((a, b))
#
# Detect this situation and preserve whole tuples, e.g. '(a, b)', passed to
# ic() by creating a new, temporary tuple around the original tuple and
# parsing that.
if paramsStr[0] == '(' and paramsStr[-1] == ')' and (len(eles) > 1):
newTupleStr = '(' + paramsStr + ", 'ignored')"
argStrs = extractArgumentsFromCallStr(newTupleStr)[:-1]
return argStrs # depends on [control=['if'], data=[]]
indices = [max(0, e.col_offset - 1) if isTuple(e) else e.col_offset for e in eles]
argStrs = [s.strip(' ,') for s in splitStringAtIndices(paramsStr, indices)]
return argStrs |
def ellipse(self, org_x, org_y, radius_x, radius_y, arc_start, arc_end):
    """
    Append a drawable ellipse to the drawing command list.

    :param org_x: origination x axis
    :param org_y: origination y axis
    :param radius_x: radius x axis
    :param radius_y: radius y axis
    :param arc_start: arc start angle
    :param arc_end: arc end angle
    """
    # pgmagick expects floats for every coordinate/angle argument.
    shape = pgmagick.DrawableEllipse(
        float(org_x), float(org_y),
        float(radius_x), float(radius_y),
        float(arc_start), float(arc_end))
    self.drawer.append(shape)
constant[
:param org_x: origination x axis
:param org_y: origination y axis
:param radius_x: radius x axis
:param radius_y: radius y axis
:param arc_start: arc start angle
:param arc_end: arc end angle
]
variable[ellipse] assign[=] call[name[pgmagick].DrawableEllipse, parameter[call[name[float], parameter[name[org_x]]], call[name[float], parameter[name[org_y]]], call[name[float], parameter[name[radius_x]]], call[name[float], parameter[name[radius_y]]], call[name[float], parameter[name[arc_start]]], call[name[float], parameter[name[arc_end]]]]]
call[name[self].drawer.append, parameter[name[ellipse]]] | keyword[def] identifier[ellipse] ( identifier[self] , identifier[org_x] , identifier[org_y] , identifier[radius_x] , identifier[radius_y] , identifier[arc_start] , identifier[arc_end] ):
literal[string]
identifier[ellipse] = identifier[pgmagick] . identifier[DrawableEllipse] ( identifier[float] ( identifier[org_x] ), identifier[float] ( identifier[org_y] ),
identifier[float] ( identifier[radius_x] ), identifier[float] ( identifier[radius_y] ),
identifier[float] ( identifier[arc_start] ), identifier[float] ( identifier[arc_end] ))
identifier[self] . identifier[drawer] . identifier[append] ( identifier[ellipse] ) | def ellipse(self, org_x, org_y, radius_x, radius_y, arc_start, arc_end):
"""
:param org_x: origination x axis
:param org_y: origination y axis
:param radius_x: radius x axis
:param radius_y: radius y axis
:param arc_start: arc start angle
:param arc_end: arc end angle
"""
ellipse = pgmagick.DrawableEllipse(float(org_x), float(org_y), float(radius_x), float(radius_y), float(arc_start), float(arc_end))
self.drawer.append(ellipse) |
def _load(self, filename=None):
    """Read the AATSR rsr data"""
    filename = filename or self.aatsr_path
    book = open_workbook(filename)
    wanted = 'aatsr_' + self.bandname
    for sheet in book.sheets():
        if sheet.name.strip() != wanted:
            continue
        # Rows 3..257 of the first column hold whitespace-separated
        # "wavelength response" pairs — TODO confirm against the workbook.
        raw = sheet.col_values(0, start_rowx=3, end_rowx=258)
        table = np.array([entry.split() for entry in raw]).astype('f')
        self.rsr = {'wavelength': table[:, 0],
                    'response': table[:, 1]}
constant[Read the AATSR rsr data]
if <ast.UnaryOp object at 0x7da20c992e60> begin[:]
variable[filename] assign[=] name[self].aatsr_path
variable[wb_] assign[=] call[name[open_workbook], parameter[name[filename]]]
for taget[name[sheet]] in starred[call[name[wb_].sheets, parameter[]]] begin[:]
variable[ch_name] assign[=] call[name[sheet].name.strip, parameter[]]
if compare[name[ch_name] equal[==] binary_operation[constant[aatsr_] + name[self].bandname]] begin[:]
variable[data] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da20c6a9030>]]
variable[data] assign[=] call[name[data].astype, parameter[constant[f]]]
variable[wvl] assign[=] call[name[data]][tuple[[<ast.Slice object at 0x7da1b26ad8a0>, <ast.Constant object at 0x7da1b26aead0>]]]
variable[resp] assign[=] call[name[data]][tuple[[<ast.Slice object at 0x7da1b26ae4d0>, <ast.Constant object at 0x7da1b26ac670>]]]
name[self].rsr assign[=] dictionary[[<ast.Constant object at 0x7da1b26aed10>, <ast.Constant object at 0x7da1b26afdf0>], [<ast.Name object at 0x7da1b26acc10>, <ast.Name object at 0x7da1b26addb0>]] | keyword[def] identifier[_load] ( identifier[self] , identifier[filename] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[filename] :
identifier[filename] = identifier[self] . identifier[aatsr_path]
identifier[wb_] = identifier[open_workbook] ( identifier[filename] )
keyword[for] identifier[sheet] keyword[in] identifier[wb_] . identifier[sheets] ():
identifier[ch_name] = identifier[sheet] . identifier[name] . identifier[strip] ()
keyword[if] identifier[ch_name] == literal[string] + identifier[self] . identifier[bandname] :
identifier[data] = identifier[np] . identifier[array] ([ identifier[s] . identifier[split] () keyword[for] identifier[s] keyword[in]
identifier[sheet] . identifier[col_values] ( literal[int] ,
identifier[start_rowx] = literal[int] , identifier[end_rowx] = literal[int] )])
identifier[data] = identifier[data] . identifier[astype] ( literal[string] )
identifier[wvl] = identifier[data] [:, literal[int] ]
identifier[resp] = identifier[data] [:, literal[int] ]
identifier[self] . identifier[rsr] ={ literal[string] : identifier[wvl] , literal[string] : identifier[resp] } | def _load(self, filename=None):
"""Read the AATSR rsr data"""
if not filename:
filename = self.aatsr_path # depends on [control=['if'], data=[]]
wb_ = open_workbook(filename)
for sheet in wb_.sheets():
ch_name = sheet.name.strip()
if ch_name == 'aatsr_' + self.bandname:
data = np.array([s.split() for s in sheet.col_values(0, start_rowx=3, end_rowx=258)])
data = data.astype('f')
wvl = data[:, 0]
resp = data[:, 1]
self.rsr = {'wavelength': wvl, 'response': resp} # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['sheet']] |
def define_buckets(max_seq_len: int, step=10) -> List[int]:
    """
    Returns a list of integers defining bucket boundaries.

    Bucket boundaries are created according to the following policy:
    We generate buckets with a step size of step until the final bucket fits max_seq_len.
    We then limit that bucket to max_seq_len (difference between semi-final and final bucket may be less than step).

    :param max_seq_len: Maximum bucket size.
    :param step: Distance between buckets.
    :return: List of bucket sizes.
    """
    # range() already yields the boundaries; a comprehension that merely
    # re-emits its loop variable is redundant.
    buckets = list(range(step, max_seq_len + step, step))
    # Clamp the final bucket so no bucket exceeds max_seq_len.
    buckets[-1] = max_seq_len
    return buckets
constant[
Returns a list of integers defining bucket boundaries.
Bucket boundaries are created according to the following policy:
We generate buckets with a step size of step until the final bucket fits max_seq_len.
We then limit that bucket to max_seq_len (difference between semi-final and final bucket may be less than step).
:param max_seq_len: Maximum bucket size.
:param step: Distance between buckets.
:return: List of bucket sizes.
]
variable[buckets] assign[=] <ast.ListComp object at 0x7da2044c2e30>
call[name[buckets]][<ast.UnaryOp object at 0x7da1b1de7460>] assign[=] name[max_seq_len]
return[name[buckets]] | keyword[def] identifier[define_buckets] ( identifier[max_seq_len] : identifier[int] , identifier[step] = literal[int] )-> identifier[List] [ identifier[int] ]:
literal[string]
identifier[buckets] =[ identifier[bucket_len] keyword[for] identifier[bucket_len] keyword[in] identifier[range] ( identifier[step] , identifier[max_seq_len] + identifier[step] , identifier[step] )]
identifier[buckets] [- literal[int] ]= identifier[max_seq_len]
keyword[return] identifier[buckets] | def define_buckets(max_seq_len: int, step=10) -> List[int]:
"""
Returns a list of integers defining bucket boundaries.
Bucket boundaries are created according to the following policy:
We generate buckets with a step size of step until the final bucket fits max_seq_len.
We then limit that bucket to max_seq_len (difference between semi-final and final bucket may be less than step).
:param max_seq_len: Maximum bucket size.
:param step: Distance between buckets.
:return: List of bucket sizes.
"""
buckets = [bucket_len for bucket_len in range(step, max_seq_len + step, step)]
buckets[-1] = max_seq_len
return buckets |
def group_by(self, key_names=None, key=lambda x: x, result_func=lambda x: x):
    """
    Groups an enumerable on given key selector. Index of key name
    corresponds to index of key lambda function.

    Usage:
        Enumerable([1,2,3]).group_by(key_names=['id'], key=lambda x: x) _
            .to_list() -->
        Enumerable object [
            Grouping object {
                key.id: 1,
                _data: [1]
            },
            Grouping object {
                key.id: 2,
                _data: [2]
            },
            Grouping object {
                key.id: 3,
                _data: [3]
            }
        ]
    Thus the key names for each grouping object can be referenced
    through the key property. Using the above example:

        Enumerable([1,2,3]).group_by(key_names=['id'], key=lambda x: x) _
            .select(lambda g: { 'key': g.key.id, 'count': g.count() }

    :param key_names: list of key names (defaults to an empty list)
    :param key: key selector as lambda expression
    :param result_func: transformation function as lambda expression
    :return: Enumerable of grouping objects
    """
    # Fix: the default was a mutable list ([]); use a None sentinel instead.
    if key_names is None:
        key_names = []
    result = []
    # groupby requires its input pre-sorted by the same key.
    ordered = sorted(self, key=key)
    grouped = itertools.groupby(ordered, key)
    for k, g in grouped:
        # NOTE(review): `and` binds tighter than `or`, so this reads as
        # `isinstance(k, list) or (isinstance(k, tuple) and len(k) > 0)`.
        # Preserved as-is; an empty-list key would only matter if key_names
        # were non-empty, in which case k[i] raises anyway.
        can_enumerate = isinstance(k, list) or isinstance(k, tuple) \
            and len(k) > 0
        key_prop = {}
        for i, prop in enumerate(key_names):
            # Composite keys map one name per element; scalar keys map
            # every name to the key itself.
            key_prop.setdefault(prop, k[i] if can_enumerate else k)
        key_object = Key(key_prop)
        result.append(Grouping(key_object, list(g)))
    return Enumerable(result).select(result_func)
constant[
Groups an enumerable on given key selector. Index of key name
corresponds to index of key lambda function.
Usage:
Enumerable([1,2,3]).group_by(key_names=['id'], key=lambda x: x) _
.to_list() -->
Enumerable object [
Grouping object {
key.id: 1,
_data: [1]
},
Grouping object {
key.id: 2,
_data: [2]
},
Grouping object {
key.id: 3,
_data: [3]
}
]
Thus the key names for each grouping object can be referenced
through the key property. Using the above example:
Enumerable([1,2,3]).group_by(key_names=['id'], key=lambda x: x) _
.select(lambda g: { 'key': g.key.id, 'count': g.count() }
:param key_names: list of key names
:param key: key selector as lambda expression
:param result_func: transformation function as lambda expression
:return: Enumerable of grouping objects
]
variable[result] assign[=] list[[]]
variable[ordered] assign[=] call[name[sorted], parameter[name[self]]]
variable[grouped] assign[=] call[name[itertools].groupby, parameter[name[ordered], name[key]]]
for taget[tuple[[<ast.Name object at 0x7da1b0c525c0>, <ast.Name object at 0x7da1b0c502b0>]]] in starred[name[grouped]] begin[:]
variable[can_enumerate] assign[=] <ast.BoolOp object at 0x7da1b0c51750>
variable[key_prop] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da207f00430>, <ast.Name object at 0x7da207f01990>]]] in starred[call[name[enumerate], parameter[name[key_names]]]] begin[:]
call[name[key_prop].setdefault, parameter[name[prop], <ast.IfExp object at 0x7da207f021a0>]]
variable[key_object] assign[=] call[name[Key], parameter[name[key_prop]]]
call[name[result].append, parameter[call[name[Grouping], parameter[name[key_object], call[name[list], parameter[name[g]]]]]]]
return[call[call[name[Enumerable], parameter[name[result]]].select, parameter[name[result_func]]]] | keyword[def] identifier[group_by] ( identifier[self] , identifier[key_names] =[], identifier[key] = keyword[lambda] identifier[x] : identifier[x] , identifier[result_func] = keyword[lambda] identifier[x] : identifier[x] ):
literal[string]
identifier[result] =[]
identifier[ordered] = identifier[sorted] ( identifier[self] , identifier[key] = identifier[key] )
identifier[grouped] = identifier[itertools] . identifier[groupby] ( identifier[ordered] , identifier[key] )
keyword[for] identifier[k] , identifier[g] keyword[in] identifier[grouped] :
identifier[can_enumerate] = identifier[isinstance] ( identifier[k] , identifier[list] ) keyword[or] identifier[isinstance] ( identifier[k] , identifier[tuple] ) keyword[and] identifier[len] ( identifier[k] )> literal[int]
identifier[key_prop] ={}
keyword[for] identifier[i] , identifier[prop] keyword[in] identifier[enumerate] ( identifier[key_names] ):
identifier[key_prop] . identifier[setdefault] ( identifier[prop] , identifier[k] [ identifier[i] ] keyword[if] identifier[can_enumerate] keyword[else] identifier[k] )
identifier[key_object] = identifier[Key] ( identifier[key_prop] )
identifier[result] . identifier[append] ( identifier[Grouping] ( identifier[key_object] , identifier[list] ( identifier[g] )))
keyword[return] identifier[Enumerable] ( identifier[result] ). identifier[select] ( identifier[result_func] ) | def group_by(self, key_names=[], key=lambda x: x, result_func=lambda x: x):
"""
Groups an enumerable on given key selector. Index of key name
corresponds to index of key lambda function.
Usage:
Enumerable([1,2,3]).group_by(key_names=['id'], key=lambda x: x) _
.to_list() -->
Enumerable object [
Grouping object {
key.id: 1,
_data: [1]
},
Grouping object {
key.id: 2,
_data: [2]
},
Grouping object {
key.id: 3,
_data: [3]
}
]
Thus the key names for each grouping object can be referenced
through the key property. Using the above example:
Enumerable([1,2,3]).group_by(key_names=['id'], key=lambda x: x) _
.select(lambda g: { 'key': g.key.id, 'count': g.count() }
:param key_names: list of key names
:param key: key selector as lambda expression
:param result_func: transformation function as lambda expression
:return: Enumerable of grouping objects
"""
result = []
ordered = sorted(self, key=key)
grouped = itertools.groupby(ordered, key)
for (k, g) in grouped:
can_enumerate = isinstance(k, list) or (isinstance(k, tuple) and len(k) > 0)
key_prop = {}
for (i, prop) in enumerate(key_names):
key_prop.setdefault(prop, k[i] if can_enumerate else k) # depends on [control=['for'], data=[]]
key_object = Key(key_prop)
result.append(Grouping(key_object, list(g))) # depends on [control=['for'], data=[]]
return Enumerable(result).select(result_func) |
def _update_time_stamp(self, hash_value):
""" timestamps are being stored distributed over several lru databases.
The timestamp is a time.time() snapshot (float), which are seconds since epoch."""
db_name = self._database_from_key(hash_value)
if not db_name:
db_name=':memory:'
def _update():
import sqlite3
try:
with sqlite3.connect(db_name, timeout=self.lru_timeout) as conn:
""" last_read is a result of time.time()"""
conn.execute('CREATE TABLE IF NOT EXISTS usage '
'(hash VARCHAR(32), last_read FLOAT)')
conn.commit()
cur = conn.execute('select * from usage where hash=?', (hash_value,))
row = cur.fetchone()
if not row:
conn.execute("insert into usage(hash, last_read) values(?, ?)", (hash_value, time.time()))
else:
conn.execute("update usage set last_read=? where hash=?", (time.time(), hash_value))
conn.commit()
except sqlite3.OperationalError:
# if there are many jobs to write to same database at same time, the timeout could be hit
logger.debug('could not update LRU info for db %s', db_name)
# this could lead to another (rare) race condition during cleaning...
#import threading
#threading.Thread(target=_update).start()
_update() | def function[_update_time_stamp, parameter[self, hash_value]]:
constant[ timestamps are being stored distributed over several lru databases.
The timestamp is a time.time() snapshot (float), which are seconds since epoch.]
variable[db_name] assign[=] call[name[self]._database_from_key, parameter[name[hash_value]]]
if <ast.UnaryOp object at 0x7da18f00d150> begin[:]
variable[db_name] assign[=] constant[:memory:]
def function[_update, parameter[]]:
import module[sqlite3]
<ast.Try object at 0x7da18f00c280>
call[name[_update], parameter[]] | keyword[def] identifier[_update_time_stamp] ( identifier[self] , identifier[hash_value] ):
literal[string]
identifier[db_name] = identifier[self] . identifier[_database_from_key] ( identifier[hash_value] )
keyword[if] keyword[not] identifier[db_name] :
identifier[db_name] = literal[string]
keyword[def] identifier[_update] ():
keyword[import] identifier[sqlite3]
keyword[try] :
keyword[with] identifier[sqlite3] . identifier[connect] ( identifier[db_name] , identifier[timeout] = identifier[self] . identifier[lru_timeout] ) keyword[as] identifier[conn] :
literal[string]
identifier[conn] . identifier[execute] ( literal[string]
literal[string] )
identifier[conn] . identifier[commit] ()
identifier[cur] = identifier[conn] . identifier[execute] ( literal[string] ,( identifier[hash_value] ,))
identifier[row] = identifier[cur] . identifier[fetchone] ()
keyword[if] keyword[not] identifier[row] :
identifier[conn] . identifier[execute] ( literal[string] ,( identifier[hash_value] , identifier[time] . identifier[time] ()))
keyword[else] :
identifier[conn] . identifier[execute] ( literal[string] ,( identifier[time] . identifier[time] (), identifier[hash_value] ))
identifier[conn] . identifier[commit] ()
keyword[except] identifier[sqlite3] . identifier[OperationalError] :
identifier[logger] . identifier[debug] ( literal[string] , identifier[db_name] )
identifier[_update] () | def _update_time_stamp(self, hash_value):
""" timestamps are being stored distributed over several lru databases.
The timestamp is a time.time() snapshot (float), which are seconds since epoch."""
db_name = self._database_from_key(hash_value)
if not db_name:
db_name = ':memory:' # depends on [control=['if'], data=[]]
def _update():
import sqlite3
try:
with sqlite3.connect(db_name, timeout=self.lru_timeout) as conn:
' last_read is a result of time.time()'
conn.execute('CREATE TABLE IF NOT EXISTS usage (hash VARCHAR(32), last_read FLOAT)')
conn.commit()
cur = conn.execute('select * from usage where hash=?', (hash_value,))
row = cur.fetchone()
if not row:
conn.execute('insert into usage(hash, last_read) values(?, ?)', (hash_value, time.time())) # depends on [control=['if'], data=[]]
else:
conn.execute('update usage set last_read=? where hash=?', (time.time(), hash_value))
conn.commit() # depends on [control=['with'], data=['conn']] # depends on [control=['try'], data=[]]
except sqlite3.OperationalError:
# if there are many jobs to write to same database at same time, the timeout could be hit
logger.debug('could not update LRU info for db %s', db_name) # depends on [control=['except'], data=[]]
# this could lead to another (rare) race condition during cleaning...
#import threading
#threading.Thread(target=_update).start()
_update() |
def get_selections(fetchempty=True):
    '''
    Answers to debconf questions for all packages in the following format::
    {'package': [['question', 'type', 'value'], ...]}
    CLI Example:
    .. code-block:: bash
    salt '*' debconf.get_selections
    '''
    # Each unpacked line is a (package, question, type, value) 4-tuple.
    out = __salt__['cmd.run_stdout']('debconf-get-selections')
    selections = {}
    for package, question, type_, value in _unpack_lines(out):
        # With fetchempty=False, answers whose value is empty are dropped.
        if fetchempty or value:
            selections.setdefault(package, []).append([question, type_, value])
    return selections
constant[
Answers to debconf questions for all packages in the following format::
{'package': [['question', 'type', 'value'], ...]}
CLI Example:
.. code-block:: bash
salt '*' debconf.get_selections
]
variable[selections] assign[=] dictionary[[], []]
variable[cmd] assign[=] constant[debconf-get-selections]
variable[out] assign[=] call[call[name[__salt__]][constant[cmd.run_stdout]], parameter[name[cmd]]]
variable[lines] assign[=] call[name[_unpack_lines], parameter[name[out]]]
for taget[name[line]] in starred[name[lines]] begin[:]
<ast.Tuple object at 0x7da1b1f350c0> assign[=] name[line]
if <ast.BoolOp object at 0x7da1b216a170> begin[:]
call[call[name[selections].setdefault, parameter[name[package], list[[]]]].append, parameter[list[[<ast.Name object at 0x7da1b216b670>, <ast.Name object at 0x7da1b2169db0>, <ast.Name object at 0x7da1b2169fc0>]]]]
return[name[selections]] | keyword[def] identifier[get_selections] ( identifier[fetchempty] = keyword[True] ):
literal[string]
identifier[selections] ={}
identifier[cmd] = literal[string]
identifier[out] = identifier[__salt__] [ literal[string] ]( identifier[cmd] )
identifier[lines] = identifier[_unpack_lines] ( identifier[out] )
keyword[for] identifier[line] keyword[in] identifier[lines] :
identifier[package] , identifier[question] , identifier[type_] , identifier[value] = identifier[line]
keyword[if] identifier[fetchempty] keyword[or] identifier[value] :
( identifier[selections]
. identifier[setdefault] ( identifier[package] ,[])
. identifier[append] ([ identifier[question] , identifier[type_] , identifier[value] ]))
keyword[return] identifier[selections] | def get_selections(fetchempty=True):
"""
Answers to debconf questions for all packages in the following format::
{'package': [['question', 'type', 'value'], ...]}
CLI Example:
.. code-block:: bash
salt '*' debconf.get_selections
"""
selections = {}
cmd = 'debconf-get-selections'
out = __salt__['cmd.run_stdout'](cmd)
lines = _unpack_lines(out)
for line in lines:
(package, question, type_, value) = line
if fetchempty or value:
selections.setdefault(package, []).append([question, type_, value]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
return selections |
def _uses_db(func, self, *args, **kwargs):
    """Run *func* inside a database session, handling setup and teardown.

    Intended as a decorator body for methods on objects exposing a
    ``self.session`` attribute: a session is lazily created when missing,
    committed on success, rolled back (with the traceback logged) on any
    exception, and always closed afterwards.
    """
    if not self.session:
        _logger.debug('Creating new db session')
        self._init_db_session()
    try:
        result = func(self, *args, **kwargs)
        self.session.commit()
        # The finally clause below still closes the session before this
        # value is actually handed back to the caller.
        return result
    except:
        self.session.rollback()
        _logger.debug(traceback.format_exc())
        raise
    finally:
        _logger.debug('Closing db session')
        self.session.close()
constant[ Use as a decorator for operations on the database, to ensure connection setup and
teardown. Can only be used on methods on objects with a `self.session` attribute.
]
if <ast.UnaryOp object at 0x7da18f00fd90> begin[:]
call[name[_logger].debug, parameter[constant[Creating new db session]]]
call[name[self]._init_db_session, parameter[]]
<ast.Try object at 0x7da18f00cfd0>
return[name[ret]] | keyword[def] identifier[_uses_db] ( identifier[func] , identifier[self] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[session] :
identifier[_logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_init_db_session] ()
keyword[try] :
identifier[ret] = identifier[func] ( identifier[self] ,* identifier[args] ,** identifier[kwargs] )
identifier[self] . identifier[session] . identifier[commit] ()
keyword[except] :
identifier[self] . identifier[session] . identifier[rollback] ()
identifier[tb] = identifier[traceback] . identifier[format_exc] ()
identifier[_logger] . identifier[debug] ( identifier[tb] )
keyword[raise]
keyword[finally] :
identifier[_logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[session] . identifier[close] ()
keyword[return] identifier[ret] | def _uses_db(func, self, *args, **kwargs):
""" Use as a decorator for operations on the database, to ensure connection setup and
teardown. Can only be used on methods on objects with a `self.session` attribute.
"""
if not self.session:
_logger.debug('Creating new db session')
self._init_db_session() # depends on [control=['if'], data=[]]
try:
ret = func(self, *args, **kwargs)
self.session.commit() # depends on [control=['try'], data=[]]
except:
self.session.rollback()
tb = traceback.format_exc()
_logger.debug(tb)
raise # depends on [control=['except'], data=[]]
finally:
_logger.debug('Closing db session')
self.session.close()
return ret |
def _delete_org(self, org_name):
"""Send organization delete request to DCNM.
:param org_name: name of organization to be deleted
"""
url = self._del_org_url % (org_name)
return self._send_request('DELETE', url, '', 'organization') | def function[_delete_org, parameter[self, org_name]]:
constant[Send organization delete request to DCNM.
:param org_name: name of organization to be deleted
]
variable[url] assign[=] binary_operation[name[self]._del_org_url <ast.Mod object at 0x7da2590d6920> name[org_name]]
return[call[name[self]._send_request, parameter[constant[DELETE], name[url], constant[], constant[organization]]]] | keyword[def] identifier[_delete_org] ( identifier[self] , identifier[org_name] ):
literal[string]
identifier[url] = identifier[self] . identifier[_del_org_url] %( identifier[org_name] )
keyword[return] identifier[self] . identifier[_send_request] ( literal[string] , identifier[url] , literal[string] , literal[string] ) | def _delete_org(self, org_name):
"""Send organization delete request to DCNM.
:param org_name: name of organization to be deleted
"""
url = self._del_org_url % org_name
return self._send_request('DELETE', url, '', 'organization') |
def _process_docstrings(self, doc, members, add=True):
"""Adds the docstrings from the list of DocElements to their
respective members.
Returns true if the doc element belonged to a member."""
if ((doc.doctype == "member" or doc.doctype == "local") and
doc.pointsto is not None and
doc.pointsto in members):
if add:
members[doc.pointsto].docstring.append(doc)
else:
members[doc.pointsto].overwrite_docs(doc)
return True
else:
return False | def function[_process_docstrings, parameter[self, doc, members, add]]:
constant[Adds the docstrings from the list of DocElements to their
respective members.
Returns true if the doc element belonged to a member.]
if <ast.BoolOp object at 0x7da1b26625c0> begin[:]
if name[add] begin[:]
call[call[name[members]][name[doc].pointsto].docstring.append, parameter[name[doc]]]
return[constant[True]] | keyword[def] identifier[_process_docstrings] ( identifier[self] , identifier[doc] , identifier[members] , identifier[add] = keyword[True] ):
literal[string]
keyword[if] (( identifier[doc] . identifier[doctype] == literal[string] keyword[or] identifier[doc] . identifier[doctype] == literal[string] ) keyword[and]
identifier[doc] . identifier[pointsto] keyword[is] keyword[not] keyword[None] keyword[and]
identifier[doc] . identifier[pointsto] keyword[in] identifier[members] ):
keyword[if] identifier[add] :
identifier[members] [ identifier[doc] . identifier[pointsto] ]. identifier[docstring] . identifier[append] ( identifier[doc] )
keyword[else] :
identifier[members] [ identifier[doc] . identifier[pointsto] ]. identifier[overwrite_docs] ( identifier[doc] )
keyword[return] keyword[True]
keyword[else] :
keyword[return] keyword[False] | def _process_docstrings(self, doc, members, add=True):
"""Adds the docstrings from the list of DocElements to their
respective members.
Returns true if the doc element belonged to a member."""
if (doc.doctype == 'member' or doc.doctype == 'local') and doc.pointsto is not None and (doc.pointsto in members):
if add:
members[doc.pointsto].docstring.append(doc) # depends on [control=['if'], data=[]]
else:
members[doc.pointsto].overwrite_docs(doc)
return True # depends on [control=['if'], data=[]]
else:
return False |
def add_tags(DomainName=None, ARN=None,
             region=None, key=None, keyid=None, profile=None, **kwargs):
    '''
    Add tags to a domain
    Returns {tagged: true} if the domain was tagged and returns
    {tagged: False} if the domain was not tagged.
    CLI Example:
    .. code-block:: bash
    salt myminion boto_elasticsearch_domain.add_tags mydomain tag_a=tag_value tag_b=tag_value
    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        # Keyword arguments become Tag entries; dunder-prefixed keys are
        # Salt-internal bookkeeping and are skipped.
        tagslist = [{'Key': six.text_type(k), 'Value': six.text_type(v)}
                    for k, v in six.iteritems(kwargs)
                    if not six.text_type(k).startswith('__')]
        if ARN is None:
            if DomainName is None:
                raise SaltInvocationError('One (but not both) of ARN or '
                                          'domain must be specified.')
            # Resolve the ARN from the domain's status when only the
            # domain name was supplied.
            domaindata = status(DomainName=DomainName, region=region,
                                key=key, keyid=keyid, profile=profile)
            if not domaindata or 'domain' not in domaindata:
                log.warning('Domain tags not updated')
                return {'tagged': False}
            ARN = domaindata.get('domain', {}).get('ARN')
        elif DomainName is not None:
            raise SaltInvocationError('One (but not both) of ARN or '
                                      'domain must be specified.')
        conn.add_tags(ARN=ARN, TagList=tagslist)
        return {'tagged': True}
    except ClientError as err:
        return {'tagged': False, 'error': __utils__['boto3.get_error'](err)}
constant[
Add tags to a domain
Returns {tagged: true} if the domain was tagged and returns
{tagged: False} if the domain was not tagged.
CLI Example:
.. code-block:: bash
salt myminion boto_elasticsearch_domain.add_tags mydomain tag_a=tag_value tag_b=tag_value
]
<ast.Try object at 0x7da1b1c14130> | keyword[def] identifier[add_tags] ( identifier[DomainName] = keyword[None] , identifier[ARN] = keyword[None] ,
identifier[region] = keyword[None] , identifier[key] = keyword[None] , identifier[keyid] = keyword[None] , identifier[profile] = keyword[None] ,** identifier[kwargs] ):
literal[string]
keyword[try] :
identifier[conn] = identifier[_get_conn] ( identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] , identifier[profile] = identifier[profile] )
identifier[tagslist] =[]
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[six] . identifier[iteritems] ( identifier[kwargs] ):
keyword[if] identifier[six] . identifier[text_type] ( identifier[k] ). identifier[startswith] ( literal[string] ):
keyword[continue]
identifier[tagslist] . identifier[append] ({ literal[string] : identifier[six] . identifier[text_type] ( identifier[k] ), literal[string] : identifier[six] . identifier[text_type] ( identifier[v] )})
keyword[if] identifier[ARN] keyword[is] keyword[None] :
keyword[if] identifier[DomainName] keyword[is] keyword[None] :
keyword[raise] identifier[SaltInvocationError] ( literal[string]
literal[string] )
identifier[domaindata] = identifier[status] ( identifier[DomainName] = identifier[DomainName] ,
identifier[region] = identifier[region] , identifier[key] = identifier[key] , identifier[keyid] = identifier[keyid] ,
identifier[profile] = identifier[profile] )
keyword[if] keyword[not] identifier[domaindata] keyword[or] literal[string] keyword[not] keyword[in] identifier[domaindata] :
identifier[log] . identifier[warning] ( literal[string] )
keyword[return] { literal[string] : keyword[False] }
identifier[ARN] = identifier[domaindata] . identifier[get] ( literal[string] ,{}). identifier[get] ( literal[string] )
keyword[elif] identifier[DomainName] keyword[is] keyword[not] keyword[None] :
keyword[raise] identifier[SaltInvocationError] ( literal[string]
literal[string] )
identifier[conn] . identifier[add_tags] ( identifier[ARN] = identifier[ARN] , identifier[TagList] = identifier[tagslist] )
keyword[return] { literal[string] : keyword[True] }
keyword[except] identifier[ClientError] keyword[as] identifier[e] :
keyword[return] { literal[string] : keyword[False] , literal[string] : identifier[__utils__] [ literal[string] ]( identifier[e] )} | def add_tags(DomainName=None, ARN=None, region=None, key=None, keyid=None, profile=None, **kwargs):
"""
Add tags to a domain
Returns {tagged: true} if the domain was tagged and returns
{tagged: False} if the domain was not tagged.
CLI Example:
.. code-block:: bash
salt myminion boto_elasticsearch_domain.add_tags mydomain tag_a=tag_value tag_b=tag_value
"""
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
tagslist = []
for (k, v) in six.iteritems(kwargs):
if six.text_type(k).startswith('__'):
continue # depends on [control=['if'], data=[]]
tagslist.append({'Key': six.text_type(k), 'Value': six.text_type(v)}) # depends on [control=['for'], data=[]]
if ARN is None:
if DomainName is None:
raise SaltInvocationError('One (but not both) of ARN or domain must be specified.') # depends on [control=['if'], data=[]]
domaindata = status(DomainName=DomainName, region=region, key=key, keyid=keyid, profile=profile)
if not domaindata or 'domain' not in domaindata:
log.warning('Domain tags not updated')
return {'tagged': False} # depends on [control=['if'], data=[]]
ARN = domaindata.get('domain', {}).get('ARN') # depends on [control=['if'], data=['ARN']]
elif DomainName is not None:
raise SaltInvocationError('One (but not both) of ARN or domain must be specified.') # depends on [control=['if'], data=[]]
conn.add_tags(ARN=ARN, TagList=tagslist)
return {'tagged': True} # depends on [control=['try'], data=[]]
except ClientError as e:
return {'tagged': False, 'error': __utils__['boto3.get_error'](e)} # depends on [control=['except'], data=['e']] |
def form_valid(self, form):
    """Store the submitted metric slugs on the view.

    The validated form's ``metrics`` field is split into individual
    whitespace-stripped slug strings kept in ``self.metric_slugs``
    before delegating to the parent class's ``form_valid``.
    """
    submitted = form.cleaned_data['metrics']
    self.metric_slugs = [slug.strip() for slug in submitted]
    return super(AggregateFormView, self).form_valid(form)
constant[Pull the metrics from the submitted form, and store them as a
list of strings in ``self.metric_slugs``.
]
name[self].metric_slugs assign[=] <ast.ListComp object at 0x7da1b0fb1330>
return[call[call[name[super], parameter[name[AggregateFormView], name[self]]].form_valid, parameter[name[form]]]] | keyword[def] identifier[form_valid] ( identifier[self] , identifier[form] ):
literal[string]
identifier[self] . identifier[metric_slugs] =[ identifier[k] . identifier[strip] () keyword[for] identifier[k] keyword[in] identifier[form] . identifier[cleaned_data] [ literal[string] ]]
keyword[return] identifier[super] ( identifier[AggregateFormView] , identifier[self] ). identifier[form_valid] ( identifier[form] ) | def form_valid(self, form):
"""Pull the metrics from the submitted form, and store them as a
list of strings in ``self.metric_slugs``.
"""
self.metric_slugs = [k.strip() for k in form.cleaned_data['metrics']]
return super(AggregateFormView, self).form_valid(form) |
def _category(self):
""" Type of the image: LOLA or WAC
Note: Specify the attribute ``grid``, ``img`` and ``lbl`
"""
if self.fname.split('_')[0] == 'WAC':
self.grid = 'WAC'
self.img = os.path.join(self.wacpath, self.fname + '.IMG')
self.lbl = ''
elif self.fname.split('_')[0] == 'LDEM':
self.grid = 'LOLA'
self.img = os.path.join(self.lolapath, self.fname + '.IMG')
self.lbl = os.path.join(self.lolapath, self.fname + '.LBL')
else:
raise ValueError("%s : This type of image is not recognized. Possible\
images are from %s only" % (self.fname, ', '.join(('WAC', 'LOLA')))) | def function[_category, parameter[self]]:
constant[ Type of the image: LOLA or WAC
Note: Specify the attribute ``grid``, ``img`` and ``lbl`
]
if compare[call[call[name[self].fname.split, parameter[constant[_]]]][constant[0]] equal[==] constant[WAC]] begin[:]
name[self].grid assign[=] constant[WAC]
name[self].img assign[=] call[name[os].path.join, parameter[name[self].wacpath, binary_operation[name[self].fname + constant[.IMG]]]]
name[self].lbl assign[=] constant[] | keyword[def] identifier[_category] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[fname] . identifier[split] ( literal[string] )[ literal[int] ]== literal[string] :
identifier[self] . identifier[grid] = literal[string]
identifier[self] . identifier[img] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[wacpath] , identifier[self] . identifier[fname] + literal[string] )
identifier[self] . identifier[lbl] = literal[string]
keyword[elif] identifier[self] . identifier[fname] . identifier[split] ( literal[string] )[ literal[int] ]== literal[string] :
identifier[self] . identifier[grid] = literal[string]
identifier[self] . identifier[img] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[lolapath] , identifier[self] . identifier[fname] + literal[string] )
identifier[self] . identifier[lbl] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[lolapath] , identifier[self] . identifier[fname] + literal[string] )
keyword[else] :
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[self] . identifier[fname] , literal[string] . identifier[join] (( literal[string] , literal[string] )))) | def _category(self):
""" Type of the image: LOLA or WAC
Note: Specify the attribute ``grid``, ``img`` and ``lbl`
"""
if self.fname.split('_')[0] == 'WAC':
self.grid = 'WAC'
self.img = os.path.join(self.wacpath, self.fname + '.IMG')
self.lbl = '' # depends on [control=['if'], data=[]]
elif self.fname.split('_')[0] == 'LDEM':
self.grid = 'LOLA'
self.img = os.path.join(self.lolapath, self.fname + '.IMG')
self.lbl = os.path.join(self.lolapath, self.fname + '.LBL') # depends on [control=['if'], data=[]]
else:
raise ValueError('%s : This type of image is not recognized. Possible images are from %s only' % (self.fname, ', '.join(('WAC', 'LOLA')))) |
def CopyToDateTimeString(self):
    """Copies the APFS timestamp to a date and time string.

    Returns:
      str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss.#########"
          or None if the timestamp is missing or outside the signed 64-bit
          range.
    """
    timestamp = self._timestamp
    if timestamp is None:
        return None
    # Values outside [INT64_MIN, INT64_MAX] cannot be valid APFS timestamps.
    if not self._INT64_MIN <= timestamp <= self._INT64_MAX:
        return None
    return super(APFSTime, self)._CopyToDateTimeString()
constant[Copies the APFS timestamp to a date and time string.
Returns:
str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss.#########" or
None if the timestamp is missing or invalid.
]
if <ast.BoolOp object at 0x7da18f723400> begin[:]
return[constant[None]]
return[call[call[name[super], parameter[name[APFSTime], name[self]]]._CopyToDateTimeString, parameter[]]] | keyword[def] identifier[CopyToDateTimeString] ( identifier[self] ):
literal[string]
keyword[if] ( identifier[self] . identifier[_timestamp] keyword[is] keyword[None] keyword[or] identifier[self] . identifier[_timestamp] < identifier[self] . identifier[_INT64_MIN] keyword[or]
identifier[self] . identifier[_timestamp] > identifier[self] . identifier[_INT64_MAX] ):
keyword[return] keyword[None]
keyword[return] identifier[super] ( identifier[APFSTime] , identifier[self] ). identifier[_CopyToDateTimeString] () | def CopyToDateTimeString(self):
"""Copies the APFS timestamp to a date and time string.
Returns:
str: date and time value formatted as: "YYYY-MM-DD hh:mm:ss.#########" or
None if the timestamp is missing or invalid.
"""
if self._timestamp is None or self._timestamp < self._INT64_MIN or self._timestamp > self._INT64_MAX:
return None # depends on [control=['if'], data=[]]
return super(APFSTime, self)._CopyToDateTimeString() |
def after_epoch_profile(self, epoch_id: int, profile: TimeProfile, train_stream_name: str, extra_streams: Iterable[str]) -> None:
    """
    After epoch profile event.
    This event provides opportunity to process time profile of the finished epoch.
    Default implementation is a no-op; hooks override it as needed.
    :param epoch_id: finished epoch id
    :param profile: dictionary of lists of event timings that were measured during the epoch
    :param train_stream_name: name of the training stream included in the profile
    :param extra_streams: enumeration of additional stream names
    """
    pass
constant[
After epoch profile event.
This event provides opportunity to process time profile of the finished epoch.
:param epoch_id: finished epoch id
:param profile: dictionary of lists of event timings that were measured during the epoch
:param extra_streams: enumeration of additional stream names
]
pass | keyword[def] identifier[after_epoch_profile] ( identifier[self] , identifier[epoch_id] : identifier[int] , identifier[profile] : identifier[TimeProfile] , identifier[train_stream_name] : identifier[str] , identifier[extra_streams] : identifier[Iterable] [ identifier[str] ])-> keyword[None] :
literal[string]
keyword[pass] | def after_epoch_profile(self, epoch_id: int, profile: TimeProfile, train_stream_name: str, extra_streams: Iterable[str]) -> None:
"""
After epoch profile event.
This event provides opportunity to process time profile of the finished epoch.
:param epoch_id: finished epoch id
:param profile: dictionary of lists of event timings that were measured during the epoch
:param extra_streams: enumeration of additional stream names
"""
pass |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.