code
stringlengths 75
104k
| code_sememe
stringlengths 47
309k
| token_type
stringlengths 215
214k
| code_dependency
stringlengths 75
155k
|
|---|---|---|---|
def _as_graph_element(self):
"""Returns the underlying graph element if possible."""
if self.is_sequence():
raise TypeError('A Pretty Tensor that holds a sequence cannot be '
'represented as a graph element.')
else:
# Self might be holding something else that isn't a true tensor, so
# if the 'tensor' can behave like a graph element, look for its
# _AsGraphElement method and call it. Graph elements themselves may not
# have or need this method, so just return other items directly.
obj = self.tensor
conv_fn = getattr(obj, '_as_graph_element', None)
if conv_fn and isinstance(conv_fn, collections.Callable):
obj = conv_fn()
return obj
|
def function[_as_graph_element, parameter[self]]:
constant[Returns the underlying graph element if possible.]
if call[name[self].is_sequence, parameter[]] begin[:]
<ast.Raise object at 0x7da20c7c9c00>
|
keyword[def] identifier[_as_graph_element] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[is_sequence] ():
keyword[raise] identifier[TypeError] ( literal[string]
literal[string] )
keyword[else] :
identifier[obj] = identifier[self] . identifier[tensor]
identifier[conv_fn] = identifier[getattr] ( identifier[obj] , literal[string] , keyword[None] )
keyword[if] identifier[conv_fn] keyword[and] identifier[isinstance] ( identifier[conv_fn] , identifier[collections] . identifier[Callable] ):
identifier[obj] = identifier[conv_fn] ()
keyword[return] identifier[obj]
|
def _as_graph_element(self):
"""Returns the underlying graph element if possible."""
if self.is_sequence():
raise TypeError('A Pretty Tensor that holds a sequence cannot be represented as a graph element.') # depends on [control=['if'], data=[]]
else:
# Self might be holding something else that isn't a true tensor, so
# if the 'tensor' can behave like a graph element, look for its
# _AsGraphElement method and call it. Graph elements themselves may not
# have or need this method, so just return other items directly.
obj = self.tensor
conv_fn = getattr(obj, '_as_graph_element', None)
if conv_fn and isinstance(conv_fn, collections.Callable):
obj = conv_fn() # depends on [control=['if'], data=[]]
return obj
|
def get_name(obj, setting_name='LONG_NAME_FORMAT'):
"""
Returns the correct order of the name according to the current language.
"""
nickname = obj.get_nickname()
romanized_first_name = obj.get_romanized_first_name()
romanized_last_name = obj.get_romanized_last_name()
non_romanized_first_name = obj.get_non_romanized_first_name()
non_romanized_last_name = obj.get_non_romanized_last_name()
non_translated_title = obj.get_title()
non_translated_gender = obj.get_gender()
# when the title is blank, gettext returns weird header text. So if this
# occurs, we will pass it on blank without gettext
if non_translated_title:
title = gettext(non_translated_title)
else:
title = non_translated_title
if non_translated_gender:
gender = gettext(non_translated_gender)
else:
gender = non_translated_gender
format_string = u'{}'.format(get_format(setting_name))
format_kwargs = {}
if '{n}' in format_string:
format_kwargs.update({'n': nickname})
if '{N}' in format_string:
format_kwargs.update({'N': nickname.upper()})
if '{f}' in format_string:
format_kwargs.update({'f': romanized_first_name})
if '{F}' in format_string:
format_kwargs.update({'F': romanized_first_name.upper()})
if '{l}' in format_string:
format_kwargs.update({'l': romanized_last_name})
if '{L}' in format_string:
format_kwargs.update({'L': romanized_last_name.upper()})
if '{a}' in format_string:
format_kwargs.update({'a': non_romanized_first_name})
if '{A}' in format_string:
format_kwargs.update({'A': non_romanized_first_name.upper()})
if '{x}' in format_string:
format_kwargs.update({'x': non_romanized_last_name})
if '{X}' in format_string:
format_kwargs.update({'X': non_romanized_last_name.upper()})
if '{t}' in format_string:
format_kwargs.update({'t': title})
if '{T}' in format_string:
format_kwargs.update({'T': title.upper()})
if '{g}' in format_string:
format_kwargs.update({'g': gender})
if '{G}' in format_string:
format_kwargs.update({'G': gender.upper()})
return format_string.format(**format_kwargs)
|
def function[get_name, parameter[obj, setting_name]]:
constant[
Returns the correct order of the name according to the current language.
]
variable[nickname] assign[=] call[name[obj].get_nickname, parameter[]]
variable[romanized_first_name] assign[=] call[name[obj].get_romanized_first_name, parameter[]]
variable[romanized_last_name] assign[=] call[name[obj].get_romanized_last_name, parameter[]]
variable[non_romanized_first_name] assign[=] call[name[obj].get_non_romanized_first_name, parameter[]]
variable[non_romanized_last_name] assign[=] call[name[obj].get_non_romanized_last_name, parameter[]]
variable[non_translated_title] assign[=] call[name[obj].get_title, parameter[]]
variable[non_translated_gender] assign[=] call[name[obj].get_gender, parameter[]]
if name[non_translated_title] begin[:]
variable[title] assign[=] call[name[gettext], parameter[name[non_translated_title]]]
if name[non_translated_gender] begin[:]
variable[gender] assign[=] call[name[gettext], parameter[name[non_translated_gender]]]
variable[format_string] assign[=] call[constant[{}].format, parameter[call[name[get_format], parameter[name[setting_name]]]]]
variable[format_kwargs] assign[=] dictionary[[], []]
if compare[constant[{n}] in name[format_string]] begin[:]
call[name[format_kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da204565120>], [<ast.Name object at 0x7da2045659f0>]]]]
if compare[constant[{N}] in name[format_string]] begin[:]
call[name[format_kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da204566bf0>], [<ast.Call object at 0x7da2045675b0>]]]]
if compare[constant[{f}] in name[format_string]] begin[:]
call[name[format_kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da204567460>], [<ast.Name object at 0x7da204565e10>]]]]
if compare[constant[{F}] in name[format_string]] begin[:]
call[name[format_kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da204565f00>], [<ast.Call object at 0x7da204567010>]]]]
if compare[constant[{l}] in name[format_string]] begin[:]
call[name[format_kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da204620550>], [<ast.Name object at 0x7da204621840>]]]]
if compare[constant[{L}] in name[format_string]] begin[:]
call[name[format_kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da204621960>], [<ast.Call object at 0x7da204620e50>]]]]
if compare[constant[{a}] in name[format_string]] begin[:]
call[name[format_kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da204620f40>], [<ast.Name object at 0x7da204622830>]]]]
if compare[constant[{A}] in name[format_string]] begin[:]
call[name[format_kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da2046230a0>], [<ast.Call object at 0x7da204622f80>]]]]
if compare[constant[{x}] in name[format_string]] begin[:]
call[name[format_kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da204622800>], [<ast.Name object at 0x7da204622cb0>]]]]
if compare[constant[{X}] in name[format_string]] begin[:]
call[name[format_kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da204623190>], [<ast.Call object at 0x7da2046206a0>]]]]
if compare[constant[{t}] in name[format_string]] begin[:]
call[name[format_kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da204621780>], [<ast.Name object at 0x7da2046228c0>]]]]
if compare[constant[{T}] in name[format_string]] begin[:]
call[name[format_kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da2046218d0>], [<ast.Call object at 0x7da2046227a0>]]]]
if compare[constant[{g}] in name[format_string]] begin[:]
call[name[format_kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da204620130>], [<ast.Name object at 0x7da204621300>]]]]
if compare[constant[{G}] in name[format_string]] begin[:]
call[name[format_kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da2046235b0>], [<ast.Call object at 0x7da204622fb0>]]]]
return[call[name[format_string].format, parameter[]]]
|
keyword[def] identifier[get_name] ( identifier[obj] , identifier[setting_name] = literal[string] ):
literal[string]
identifier[nickname] = identifier[obj] . identifier[get_nickname] ()
identifier[romanized_first_name] = identifier[obj] . identifier[get_romanized_first_name] ()
identifier[romanized_last_name] = identifier[obj] . identifier[get_romanized_last_name] ()
identifier[non_romanized_first_name] = identifier[obj] . identifier[get_non_romanized_first_name] ()
identifier[non_romanized_last_name] = identifier[obj] . identifier[get_non_romanized_last_name] ()
identifier[non_translated_title] = identifier[obj] . identifier[get_title] ()
identifier[non_translated_gender] = identifier[obj] . identifier[get_gender] ()
keyword[if] identifier[non_translated_title] :
identifier[title] = identifier[gettext] ( identifier[non_translated_title] )
keyword[else] :
identifier[title] = identifier[non_translated_title]
keyword[if] identifier[non_translated_gender] :
identifier[gender] = identifier[gettext] ( identifier[non_translated_gender] )
keyword[else] :
identifier[gender] = identifier[non_translated_gender]
identifier[format_string] = literal[string] . identifier[format] ( identifier[get_format] ( identifier[setting_name] ))
identifier[format_kwargs] ={}
keyword[if] literal[string] keyword[in] identifier[format_string] :
identifier[format_kwargs] . identifier[update] ({ literal[string] : identifier[nickname] })
keyword[if] literal[string] keyword[in] identifier[format_string] :
identifier[format_kwargs] . identifier[update] ({ literal[string] : identifier[nickname] . identifier[upper] ()})
keyword[if] literal[string] keyword[in] identifier[format_string] :
identifier[format_kwargs] . identifier[update] ({ literal[string] : identifier[romanized_first_name] })
keyword[if] literal[string] keyword[in] identifier[format_string] :
identifier[format_kwargs] . identifier[update] ({ literal[string] : identifier[romanized_first_name] . identifier[upper] ()})
keyword[if] literal[string] keyword[in] identifier[format_string] :
identifier[format_kwargs] . identifier[update] ({ literal[string] : identifier[romanized_last_name] })
keyword[if] literal[string] keyword[in] identifier[format_string] :
identifier[format_kwargs] . identifier[update] ({ literal[string] : identifier[romanized_last_name] . identifier[upper] ()})
keyword[if] literal[string] keyword[in] identifier[format_string] :
identifier[format_kwargs] . identifier[update] ({ literal[string] : identifier[non_romanized_first_name] })
keyword[if] literal[string] keyword[in] identifier[format_string] :
identifier[format_kwargs] . identifier[update] ({ literal[string] : identifier[non_romanized_first_name] . identifier[upper] ()})
keyword[if] literal[string] keyword[in] identifier[format_string] :
identifier[format_kwargs] . identifier[update] ({ literal[string] : identifier[non_romanized_last_name] })
keyword[if] literal[string] keyword[in] identifier[format_string] :
identifier[format_kwargs] . identifier[update] ({ literal[string] : identifier[non_romanized_last_name] . identifier[upper] ()})
keyword[if] literal[string] keyword[in] identifier[format_string] :
identifier[format_kwargs] . identifier[update] ({ literal[string] : identifier[title] })
keyword[if] literal[string] keyword[in] identifier[format_string] :
identifier[format_kwargs] . identifier[update] ({ literal[string] : identifier[title] . identifier[upper] ()})
keyword[if] literal[string] keyword[in] identifier[format_string] :
identifier[format_kwargs] . identifier[update] ({ literal[string] : identifier[gender] })
keyword[if] literal[string] keyword[in] identifier[format_string] :
identifier[format_kwargs] . identifier[update] ({ literal[string] : identifier[gender] . identifier[upper] ()})
keyword[return] identifier[format_string] . identifier[format] (** identifier[format_kwargs] )
|
def get_name(obj, setting_name='LONG_NAME_FORMAT'):
"""
Returns the correct order of the name according to the current language.
"""
nickname = obj.get_nickname()
romanized_first_name = obj.get_romanized_first_name()
romanized_last_name = obj.get_romanized_last_name()
non_romanized_first_name = obj.get_non_romanized_first_name()
non_romanized_last_name = obj.get_non_romanized_last_name()
non_translated_title = obj.get_title()
non_translated_gender = obj.get_gender()
# when the title is blank, gettext returns weird header text. So if this
# occurs, we will pass it on blank without gettext
if non_translated_title:
title = gettext(non_translated_title) # depends on [control=['if'], data=[]]
else:
title = non_translated_title
if non_translated_gender:
gender = gettext(non_translated_gender) # depends on [control=['if'], data=[]]
else:
gender = non_translated_gender
format_string = u'{}'.format(get_format(setting_name))
format_kwargs = {}
if '{n}' in format_string:
format_kwargs.update({'n': nickname}) # depends on [control=['if'], data=[]]
if '{N}' in format_string:
format_kwargs.update({'N': nickname.upper()}) # depends on [control=['if'], data=[]]
if '{f}' in format_string:
format_kwargs.update({'f': romanized_first_name}) # depends on [control=['if'], data=[]]
if '{F}' in format_string:
format_kwargs.update({'F': romanized_first_name.upper()}) # depends on [control=['if'], data=[]]
if '{l}' in format_string:
format_kwargs.update({'l': romanized_last_name}) # depends on [control=['if'], data=[]]
if '{L}' in format_string:
format_kwargs.update({'L': romanized_last_name.upper()}) # depends on [control=['if'], data=[]]
if '{a}' in format_string:
format_kwargs.update({'a': non_romanized_first_name}) # depends on [control=['if'], data=[]]
if '{A}' in format_string:
format_kwargs.update({'A': non_romanized_first_name.upper()}) # depends on [control=['if'], data=[]]
if '{x}' in format_string:
format_kwargs.update({'x': non_romanized_last_name}) # depends on [control=['if'], data=[]]
if '{X}' in format_string:
format_kwargs.update({'X': non_romanized_last_name.upper()}) # depends on [control=['if'], data=[]]
if '{t}' in format_string:
format_kwargs.update({'t': title}) # depends on [control=['if'], data=[]]
if '{T}' in format_string:
format_kwargs.update({'T': title.upper()}) # depends on [control=['if'], data=[]]
if '{g}' in format_string:
format_kwargs.update({'g': gender}) # depends on [control=['if'], data=[]]
if '{G}' in format_string:
format_kwargs.update({'G': gender.upper()}) # depends on [control=['if'], data=[]]
return format_string.format(**format_kwargs)
|
def add(self, name, session, pict_url=None, parent_cid=None, sort_order=None):
'''taobao.sellercats.list.add 添加卖家自定义类目
此API添加卖家店铺内自定义类目 父类目parent_cid值等于0:表示此类目为店铺下的一级类目,值不等于0:表示此类目有父类目 注:因为缓存的关系,添加的新类目需8个小时后才可以在淘宝页面上正常显示,但是不影响在该类目下商品发布'''
request = TOPRequest('taobao.sellercats.list.add')
request['name'] = name
if pict_url!=None: request['pict_url'] = pict_url
if parent_cid!=None: request['parent_cid'] = parent_cid
if sort_order!=None: request['sort_order'] = sort_order
self.create(self.execute(request, session)['seller_cat'])
return self
|
def function[add, parameter[self, name, session, pict_url, parent_cid, sort_order]]:
constant[taobao.sellercats.list.add 添加卖家自定义类目
此API添加卖家店铺内自定义类目 父类目parent_cid值等于0:表示此类目为店铺下的一级类目,值不等于0:表示此类目有父类目 注:因为缓存的关系,添加的新类目需8个小时后才可以在淘宝页面上正常显示,但是不影响在该类目下商品发布]
variable[request] assign[=] call[name[TOPRequest], parameter[constant[taobao.sellercats.list.add]]]
call[name[request]][constant[name]] assign[=] name[name]
if compare[name[pict_url] not_equal[!=] constant[None]] begin[:]
call[name[request]][constant[pict_url]] assign[=] name[pict_url]
if compare[name[parent_cid] not_equal[!=] constant[None]] begin[:]
call[name[request]][constant[parent_cid]] assign[=] name[parent_cid]
if compare[name[sort_order] not_equal[!=] constant[None]] begin[:]
call[name[request]][constant[sort_order]] assign[=] name[sort_order]
call[name[self].create, parameter[call[call[name[self].execute, parameter[name[request], name[session]]]][constant[seller_cat]]]]
return[name[self]]
|
keyword[def] identifier[add] ( identifier[self] , identifier[name] , identifier[session] , identifier[pict_url] = keyword[None] , identifier[parent_cid] = keyword[None] , identifier[sort_order] = keyword[None] ):
literal[string]
identifier[request] = identifier[TOPRequest] ( literal[string] )
identifier[request] [ literal[string] ]= identifier[name]
keyword[if] identifier[pict_url] != keyword[None] : identifier[request] [ literal[string] ]= identifier[pict_url]
keyword[if] identifier[parent_cid] != keyword[None] : identifier[request] [ literal[string] ]= identifier[parent_cid]
keyword[if] identifier[sort_order] != keyword[None] : identifier[request] [ literal[string] ]= identifier[sort_order]
identifier[self] . identifier[create] ( identifier[self] . identifier[execute] ( identifier[request] , identifier[session] )[ literal[string] ])
keyword[return] identifier[self]
|
def add(self, name, session, pict_url=None, parent_cid=None, sort_order=None):
"""taobao.sellercats.list.add 添加卖家自定义类目
此API添加卖家店铺内自定义类目 父类目parent_cid值等于0:表示此类目为店铺下的一级类目,值不等于0:表示此类目有父类目 注:因为缓存的关系,添加的新类目需8个小时后才可以在淘宝页面上正常显示,但是不影响在该类目下商品发布"""
request = TOPRequest('taobao.sellercats.list.add')
request['name'] = name
if pict_url != None:
request['pict_url'] = pict_url # depends on [control=['if'], data=['pict_url']]
if parent_cid != None:
request['parent_cid'] = parent_cid # depends on [control=['if'], data=['parent_cid']]
if sort_order != None:
request['sort_order'] = sort_order # depends on [control=['if'], data=['sort_order']]
self.create(self.execute(request, session)['seller_cat'])
return self
|
def uninitialize(cls) -> None:
"""Removes the ``SIGCHLD`` handler."""
if not cls._initialized:
return
signal.signal(signal.SIGCHLD, cls._old_sigchld)
cls._initialized = False
|
def function[uninitialize, parameter[cls]]:
constant[Removes the ``SIGCHLD`` handler.]
if <ast.UnaryOp object at 0x7da1b20ca7a0> begin[:]
return[None]
call[name[signal].signal, parameter[name[signal].SIGCHLD, name[cls]._old_sigchld]]
name[cls]._initialized assign[=] constant[False]
|
keyword[def] identifier[uninitialize] ( identifier[cls] )-> keyword[None] :
literal[string]
keyword[if] keyword[not] identifier[cls] . identifier[_initialized] :
keyword[return]
identifier[signal] . identifier[signal] ( identifier[signal] . identifier[SIGCHLD] , identifier[cls] . identifier[_old_sigchld] )
identifier[cls] . identifier[_initialized] = keyword[False]
|
def uninitialize(cls) -> None:
"""Removes the ``SIGCHLD`` handler."""
if not cls._initialized:
return # depends on [control=['if'], data=[]]
signal.signal(signal.SIGCHLD, cls._old_sigchld)
cls._initialized = False
|
def auth_traps_enabled(name, status=True):
'''
Manage the sending of authentication traps.
:param bool status: The enabled status.
Example of usage:
.. code-block:: yaml
snmp-auth-traps:
win_snmp.auth_traps_enabled:
- status: True
'''
ret = {'name': name,
'changes': {},
'comment': six.text_type(),
'result': None}
vname = 'EnableAuthenticationTraps'
current_status = __salt__['win_snmp.get_auth_traps_enabled']()
if status == current_status:
ret['comment'] = '{0} already contains the provided value.'.format(vname)
ret['result'] = True
elif __opts__['test']:
ret['comment'] = '{0} will be changed.'.format(vname)
ret['changes'] = {'old': current_status,
'new': status}
else:
ret['comment'] = 'Set {0} to contain the provided value.'.format(vname)
ret['changes'] = {'old': current_status,
'new': status}
ret['result'] = __salt__['win_snmp.set_auth_traps_enabled'](status=status)
return ret
|
def function[auth_traps_enabled, parameter[name, status]]:
constant[
Manage the sending of authentication traps.
:param bool status: The enabled status.
Example of usage:
.. code-block:: yaml
snmp-auth-traps:
win_snmp.auth_traps_enabled:
- status: True
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da18eb55540>, <ast.Constant object at 0x7da18eb549d0>, <ast.Constant object at 0x7da18eb566e0>, <ast.Constant object at 0x7da18eb57280>], [<ast.Name object at 0x7da18eb57430>, <ast.Dict object at 0x7da18eb56f20>, <ast.Call object at 0x7da18eb541c0>, <ast.Constant object at 0x7da18eb57a90>]]
variable[vname] assign[=] constant[EnableAuthenticationTraps]
variable[current_status] assign[=] call[call[name[__salt__]][constant[win_snmp.get_auth_traps_enabled]], parameter[]]
if compare[name[status] equal[==] name[current_status]] begin[:]
call[name[ret]][constant[comment]] assign[=] call[constant[{0} already contains the provided value.].format, parameter[name[vname]]]
call[name[ret]][constant[result]] assign[=] constant[True]
return[name[ret]]
|
keyword[def] identifier[auth_traps_enabled] ( identifier[name] , identifier[status] = keyword[True] ):
literal[string]
identifier[ret] ={ literal[string] : identifier[name] ,
literal[string] :{},
literal[string] : identifier[six] . identifier[text_type] (),
literal[string] : keyword[None] }
identifier[vname] = literal[string]
identifier[current_status] = identifier[__salt__] [ literal[string] ]()
keyword[if] identifier[status] == identifier[current_status] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[vname] )
identifier[ret] [ literal[string] ]= keyword[True]
keyword[elif] identifier[__opts__] [ literal[string] ]:
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[vname] )
identifier[ret] [ literal[string] ]={ literal[string] : identifier[current_status] ,
literal[string] : identifier[status] }
keyword[else] :
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[vname] )
identifier[ret] [ literal[string] ]={ literal[string] : identifier[current_status] ,
literal[string] : identifier[status] }
identifier[ret] [ literal[string] ]= identifier[__salt__] [ literal[string] ]( identifier[status] = identifier[status] )
keyword[return] identifier[ret]
|
def auth_traps_enabled(name, status=True):
"""
Manage the sending of authentication traps.
:param bool status: The enabled status.
Example of usage:
.. code-block:: yaml
snmp-auth-traps:
win_snmp.auth_traps_enabled:
- status: True
"""
ret = {'name': name, 'changes': {}, 'comment': six.text_type(), 'result': None}
vname = 'EnableAuthenticationTraps'
current_status = __salt__['win_snmp.get_auth_traps_enabled']()
if status == current_status:
ret['comment'] = '{0} already contains the provided value.'.format(vname)
ret['result'] = True # depends on [control=['if'], data=[]]
elif __opts__['test']:
ret['comment'] = '{0} will be changed.'.format(vname)
ret['changes'] = {'old': current_status, 'new': status} # depends on [control=['if'], data=[]]
else:
ret['comment'] = 'Set {0} to contain the provided value.'.format(vname)
ret['changes'] = {'old': current_status, 'new': status}
ret['result'] = __salt__['win_snmp.set_auth_traps_enabled'](status=status)
return ret
|
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
r"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of data dirs should be
returned. By default, the first item from XDG_DATA_DIRS is
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
Typical site data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_DATA_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system == "win32":
if appauthor is None:
appauthor = appname
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname)
else:
path = os.path.join(path, appname)
elif system == 'darwin':
path = os.path.expanduser('/Library/Application Support')
if appname:
path = os.path.join(path, appname)
else:
# XDG default for $XDG_DATA_DIRS
# only first, if multipath is False
path = os.getenv('XDG_DATA_DIRS',
os.pathsep.join(['/usr/local/share', '/usr/share']))
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version)
pathlist = [os.sep.join([x, appname]) for x in pathlist]
if multipath:
path = os.pathsep.join(pathlist)
else:
path = pathlist[0]
return path
if appname and version:
path = os.path.join(path, version)
return path
|
def function[site_data_dir, parameter[appname, appauthor, version, multipath]]:
constant[Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of data dirs should be
returned. By default, the first item from XDG_DATA_DIRS is
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
Typical site data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_DATA_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
]
if compare[name[system] equal[==] constant[win32]] begin[:]
if compare[name[appauthor] is constant[None]] begin[:]
variable[appauthor] assign[=] name[appname]
variable[path] assign[=] call[name[os].path.normpath, parameter[call[name[_get_win_folder], parameter[constant[CSIDL_COMMON_APPDATA]]]]]
if name[appname] begin[:]
if compare[name[appauthor] is_not constant[False]] begin[:]
variable[path] assign[=] call[name[os].path.join, parameter[name[path], name[appauthor], name[appname]]]
if <ast.BoolOp object at 0x7da18ede4580> begin[:]
variable[path] assign[=] call[name[os].path.join, parameter[name[path], name[version]]]
return[name[path]]
|
keyword[def] identifier[site_data_dir] ( identifier[appname] = keyword[None] , identifier[appauthor] = keyword[None] , identifier[version] = keyword[None] , identifier[multipath] = keyword[False] ):
literal[string]
keyword[if] identifier[system] == literal[string] :
keyword[if] identifier[appauthor] keyword[is] keyword[None] :
identifier[appauthor] = identifier[appname]
identifier[path] = identifier[os] . identifier[path] . identifier[normpath] ( identifier[_get_win_folder] ( literal[string] ))
keyword[if] identifier[appname] :
keyword[if] identifier[appauthor] keyword[is] keyword[not] keyword[False] :
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[appauthor] , identifier[appname] )
keyword[else] :
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[appname] )
keyword[elif] identifier[system] == literal[string] :
identifier[path] = identifier[os] . identifier[path] . identifier[expanduser] ( literal[string] )
keyword[if] identifier[appname] :
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[appname] )
keyword[else] :
identifier[path] = identifier[os] . identifier[getenv] ( literal[string] ,
identifier[os] . identifier[pathsep] . identifier[join] ([ literal[string] , literal[string] ]))
identifier[pathlist] =[ identifier[os] . identifier[path] . identifier[expanduser] ( identifier[x] . identifier[rstrip] ( identifier[os] . identifier[sep] )) keyword[for] identifier[x] keyword[in] identifier[path] . identifier[split] ( identifier[os] . identifier[pathsep] )]
keyword[if] identifier[appname] :
keyword[if] identifier[version] :
identifier[appname] = identifier[os] . identifier[path] . identifier[join] ( identifier[appname] , identifier[version] )
identifier[pathlist] =[ identifier[os] . identifier[sep] . identifier[join] ([ identifier[x] , identifier[appname] ]) keyword[for] identifier[x] keyword[in] identifier[pathlist] ]
keyword[if] identifier[multipath] :
identifier[path] = identifier[os] . identifier[pathsep] . identifier[join] ( identifier[pathlist] )
keyword[else] :
identifier[path] = identifier[pathlist] [ literal[int] ]
keyword[return] identifier[path]
keyword[if] identifier[appname] keyword[and] identifier[version] :
identifier[path] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[version] )
keyword[return] identifier[path]
|
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
"""Return full path to the user-shared data dir for this application.
"appname" is the name of application.
If None, just the system directory is returned.
"appauthor" (only used on Windows) is the name of the
appauthor or distributing body for this application. Typically
it is the owning company name. This falls back to appname. You may
pass False to disable it.
"version" is an optional version path element to append to the
path. You might want to use this if you want multiple versions
of your app to be able to run independently. If used, this
would typically be "<major>.<minor>".
Only applied when appname is present.
"multipath" is an optional parameter only applicable to *nix
which indicates that the entire list of data dirs should be
returned. By default, the first item from XDG_DATA_DIRS is
returned, or '/usr/local/share/<AppName>',
if XDG_DATA_DIRS is not set
Typical site data directories are:
Mac OS X: /Library/Application Support/<AppName>
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
Win XP: C:\\Documents and Settings\\All Users\\Application Data\\<AppAuthor>\\<AppName>
Vista: (Fail! "C:\\ProgramData" is a hidden *system* directory on Vista.)
Win 7: C:\\ProgramData\\<AppAuthor>\\<AppName> # Hidden, but writeable on Win 7.
For Unix, this is using the $XDG_DATA_DIRS[0] default.
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
"""
if system == 'win32':
if appauthor is None:
appauthor = appname # depends on [control=['if'], data=['appauthor']]
path = os.path.normpath(_get_win_folder('CSIDL_COMMON_APPDATA'))
if appname:
if appauthor is not False:
path = os.path.join(path, appauthor, appname) # depends on [control=['if'], data=['appauthor']]
else:
path = os.path.join(path, appname) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif system == 'darwin':
path = os.path.expanduser('/Library/Application Support')
if appname:
path = os.path.join(path, appname) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
# XDG default for $XDG_DATA_DIRS
# only first, if multipath is False
path = os.getenv('XDG_DATA_DIRS', os.pathsep.join(['/usr/local/share', '/usr/share']))
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
if appname:
if version:
appname = os.path.join(appname, version) # depends on [control=['if'], data=[]]
pathlist = [os.sep.join([x, appname]) for x in pathlist] # depends on [control=['if'], data=[]]
if multipath:
path = os.pathsep.join(pathlist) # depends on [control=['if'], data=[]]
else:
path = pathlist[0]
return path
if appname and version:
path = os.path.join(path, version) # depends on [control=['if'], data=[]]
return path
|
def _set_access_log(self, config, level):
""" Configure access logs
"""
access_handler = self._get_param(
'global',
'log.access_handler',
config,
'syslog',
)
# log format for syslog
syslog_formatter = logging.Formatter(
"ldapcherry[%(process)d]: %(message)s"
)
# replace access log handler by a syslog handler
if access_handler == 'syslog':
cherrypy.log.access_log.handlers = []
handler = logging.handlers.SysLogHandler(
address='/dev/log',
facility='user',
)
handler.setFormatter(syslog_formatter)
cherrypy.log.access_log.addHandler(handler)
# if stdout, open a logger on stdout
elif access_handler == 'stdout':
cherrypy.log.access_log.handlers = []
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter(
'ldapcherry.access - %(levelname)s - %(message)s'
)
handler.setFormatter(formatter)
cherrypy.log.access_log.addHandler(handler)
# if file, we keep the default
elif access_handler == 'file':
pass
# replace access log handler by a null handler
elif access_handler == 'none':
cherrypy.log.access_log.handlers = []
handler = logging.NullHandler()
cherrypy.log.access_log.addHandler(handler)
# set log level
cherrypy.log.access_log.setLevel(level)
|
def function[_set_access_log, parameter[self, config, level]]:
constant[ Configure access logs
]
variable[access_handler] assign[=] call[name[self]._get_param, parameter[constant[global], constant[log.access_handler], name[config], constant[syslog]]]
variable[syslog_formatter] assign[=] call[name[logging].Formatter, parameter[constant[ldapcherry[%(process)d]: %(message)s]]]
if compare[name[access_handler] equal[==] constant[syslog]] begin[:]
name[cherrypy].log.access_log.handlers assign[=] list[[]]
variable[handler] assign[=] call[name[logging].handlers.SysLogHandler, parameter[]]
call[name[handler].setFormatter, parameter[name[syslog_formatter]]]
call[name[cherrypy].log.access_log.addHandler, parameter[name[handler]]]
call[name[cherrypy].log.access_log.setLevel, parameter[name[level]]]
|
keyword[def] identifier[_set_access_log] ( identifier[self] , identifier[config] , identifier[level] ):
literal[string]
identifier[access_handler] = identifier[self] . identifier[_get_param] (
literal[string] ,
literal[string] ,
identifier[config] ,
literal[string] ,
)
identifier[syslog_formatter] = identifier[logging] . identifier[Formatter] (
literal[string]
)
keyword[if] identifier[access_handler] == literal[string] :
identifier[cherrypy] . identifier[log] . identifier[access_log] . identifier[handlers] =[]
identifier[handler] = identifier[logging] . identifier[handlers] . identifier[SysLogHandler] (
identifier[address] = literal[string] ,
identifier[facility] = literal[string] ,
)
identifier[handler] . identifier[setFormatter] ( identifier[syslog_formatter] )
identifier[cherrypy] . identifier[log] . identifier[access_log] . identifier[addHandler] ( identifier[handler] )
keyword[elif] identifier[access_handler] == literal[string] :
identifier[cherrypy] . identifier[log] . identifier[access_log] . identifier[handlers] =[]
identifier[handler] = identifier[logging] . identifier[StreamHandler] ( identifier[sys] . identifier[stdout] )
identifier[formatter] = identifier[logging] . identifier[Formatter] (
literal[string]
)
identifier[handler] . identifier[setFormatter] ( identifier[formatter] )
identifier[cherrypy] . identifier[log] . identifier[access_log] . identifier[addHandler] ( identifier[handler] )
keyword[elif] identifier[access_handler] == literal[string] :
keyword[pass]
keyword[elif] identifier[access_handler] == literal[string] :
identifier[cherrypy] . identifier[log] . identifier[access_log] . identifier[handlers] =[]
identifier[handler] = identifier[logging] . identifier[NullHandler] ()
identifier[cherrypy] . identifier[log] . identifier[access_log] . identifier[addHandler] ( identifier[handler] )
identifier[cherrypy] . identifier[log] . identifier[access_log] . identifier[setLevel] ( identifier[level] )
|
def _set_access_log(self, config, level):
""" Configure access logs
"""
access_handler = self._get_param('global', 'log.access_handler', config, 'syslog')
# log format for syslog
syslog_formatter = logging.Formatter('ldapcherry[%(process)d]: %(message)s')
# replace access log handler by a syslog handler
if access_handler == 'syslog':
cherrypy.log.access_log.handlers = []
handler = logging.handlers.SysLogHandler(address='/dev/log', facility='user')
handler.setFormatter(syslog_formatter)
cherrypy.log.access_log.addHandler(handler) # depends on [control=['if'], data=[]]
# if stdout, open a logger on stdout
elif access_handler == 'stdout':
cherrypy.log.access_log.handlers = []
handler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('ldapcherry.access - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
cherrypy.log.access_log.addHandler(handler) # depends on [control=['if'], data=[]]
# if file, we keep the default
elif access_handler == 'file':
pass # depends on [control=['if'], data=[]]
# replace access log handler by a null handler
elif access_handler == 'none':
cherrypy.log.access_log.handlers = []
handler = logging.NullHandler()
cherrypy.log.access_log.addHandler(handler) # depends on [control=['if'], data=[]]
# set log level
cherrypy.log.access_log.setLevel(level)
|
def create_xml_file_from_string(self, content, destination=None):
"""
Creates XML file from text.
:param content: C++ source code
:type content: str
:param destination: file name for xml file
:type destination: str
:rtype: returns file name of xml file
"""
header_file = utils.create_temp_file_name(suffix='.h')
try:
with open(header_file, "w+") as header:
header.write(content)
xml_file = self.create_xml_file(header_file, destination)
finally:
utils.remove_file_no_raise(header_file, self.__config)
return xml_file
|
def function[create_xml_file_from_string, parameter[self, content, destination]]:
constant[
Creates XML file from text.
:param content: C++ source code
:type content: str
:param destination: file name for xml file
:type destination: str
:rtype: returns file name of xml file
]
variable[header_file] assign[=] call[name[utils].create_temp_file_name, parameter[]]
<ast.Try object at 0x7da1b26ac3d0>
return[name[xml_file]]
|
keyword[def] identifier[create_xml_file_from_string] ( identifier[self] , identifier[content] , identifier[destination] = keyword[None] ):
literal[string]
identifier[header_file] = identifier[utils] . identifier[create_temp_file_name] ( identifier[suffix] = literal[string] )
keyword[try] :
keyword[with] identifier[open] ( identifier[header_file] , literal[string] ) keyword[as] identifier[header] :
identifier[header] . identifier[write] ( identifier[content] )
identifier[xml_file] = identifier[self] . identifier[create_xml_file] ( identifier[header_file] , identifier[destination] )
keyword[finally] :
identifier[utils] . identifier[remove_file_no_raise] ( identifier[header_file] , identifier[self] . identifier[__config] )
keyword[return] identifier[xml_file]
|
def create_xml_file_from_string(self, content, destination=None):
"""
Creates XML file from text.
:param content: C++ source code
:type content: str
:param destination: file name for xml file
:type destination: str
:rtype: returns file name of xml file
"""
header_file = utils.create_temp_file_name(suffix='.h')
try:
with open(header_file, 'w+') as header:
header.write(content) # depends on [control=['with'], data=['header']]
xml_file = self.create_xml_file(header_file, destination) # depends on [control=['try'], data=[]]
finally:
utils.remove_file_no_raise(header_file, self.__config)
return xml_file
|
def update_from_dict(self, d):
"""
Apply options from a serialized body to the current instance. Modifies
the object in-place. Used mostly by ``from_dict``.
"""
d = d.copy()
if 'query' in d:
self.query._proxied = Q(d.pop('query'))
if 'post_filter' in d:
self.post_filter._proxied = Q(d.pop('post_filter'))
aggs = d.pop('aggs', d.pop('aggregations', {}))
if aggs:
self.aggs._params = {
'aggs': {
name: A(value) for (name, value) in iteritems(aggs)}
}
if 'sort' in d:
self._sort = d.pop('sort')
if '_source' in d:
self._source = d.pop('_source')
if 'highlight' in d:
high = d.pop('highlight').copy()
self._highlight = high.pop('fields')
self._highlight_opts = high
if 'suggest' in d:
self._suggest = d.pop('suggest')
if 'text' in self._suggest:
text = self._suggest.pop('text')
for s in self._suggest.values():
s.setdefault('text', text)
if 'script_fields' in d:
self._script_fields = d.pop('script_fields')
self._extra = d
return self
|
def function[update_from_dict, parameter[self, d]]:
constant[
Apply options from a serialized body to the current instance. Modifies
the object in-place. Used mostly by ``from_dict``.
]
variable[d] assign[=] call[name[d].copy, parameter[]]
if compare[constant[query] in name[d]] begin[:]
name[self].query._proxied assign[=] call[name[Q], parameter[call[name[d].pop, parameter[constant[query]]]]]
if compare[constant[post_filter] in name[d]] begin[:]
name[self].post_filter._proxied assign[=] call[name[Q], parameter[call[name[d].pop, parameter[constant[post_filter]]]]]
variable[aggs] assign[=] call[name[d].pop, parameter[constant[aggs], call[name[d].pop, parameter[constant[aggregations], dictionary[[], []]]]]]
if name[aggs] begin[:]
name[self].aggs._params assign[=] dictionary[[<ast.Constant object at 0x7da1b21205b0>], [<ast.DictComp object at 0x7da1b2120640>]]
if compare[constant[sort] in name[d]] begin[:]
name[self]._sort assign[=] call[name[d].pop, parameter[constant[sort]]]
if compare[constant[_source] in name[d]] begin[:]
name[self]._source assign[=] call[name[d].pop, parameter[constant[_source]]]
if compare[constant[highlight] in name[d]] begin[:]
variable[high] assign[=] call[call[name[d].pop, parameter[constant[highlight]]].copy, parameter[]]
name[self]._highlight assign[=] call[name[high].pop, parameter[constant[fields]]]
name[self]._highlight_opts assign[=] name[high]
if compare[constant[suggest] in name[d]] begin[:]
name[self]._suggest assign[=] call[name[d].pop, parameter[constant[suggest]]]
if compare[constant[text] in name[self]._suggest] begin[:]
variable[text] assign[=] call[name[self]._suggest.pop, parameter[constant[text]]]
for taget[name[s]] in starred[call[name[self]._suggest.values, parameter[]]] begin[:]
call[name[s].setdefault, parameter[constant[text], name[text]]]
if compare[constant[script_fields] in name[d]] begin[:]
name[self]._script_fields assign[=] call[name[d].pop, parameter[constant[script_fields]]]
name[self]._extra assign[=] name[d]
return[name[self]]
|
keyword[def] identifier[update_from_dict] ( identifier[self] , identifier[d] ):
literal[string]
identifier[d] = identifier[d] . identifier[copy] ()
keyword[if] literal[string] keyword[in] identifier[d] :
identifier[self] . identifier[query] . identifier[_proxied] = identifier[Q] ( identifier[d] . identifier[pop] ( literal[string] ))
keyword[if] literal[string] keyword[in] identifier[d] :
identifier[self] . identifier[post_filter] . identifier[_proxied] = identifier[Q] ( identifier[d] . identifier[pop] ( literal[string] ))
identifier[aggs] = identifier[d] . identifier[pop] ( literal[string] , identifier[d] . identifier[pop] ( literal[string] ,{}))
keyword[if] identifier[aggs] :
identifier[self] . identifier[aggs] . identifier[_params] ={
literal[string] :{
identifier[name] : identifier[A] ( identifier[value] ) keyword[for] ( identifier[name] , identifier[value] ) keyword[in] identifier[iteritems] ( identifier[aggs] )}
}
keyword[if] literal[string] keyword[in] identifier[d] :
identifier[self] . identifier[_sort] = identifier[d] . identifier[pop] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[d] :
identifier[self] . identifier[_source] = identifier[d] . identifier[pop] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[d] :
identifier[high] = identifier[d] . identifier[pop] ( literal[string] ). identifier[copy] ()
identifier[self] . identifier[_highlight] = identifier[high] . identifier[pop] ( literal[string] )
identifier[self] . identifier[_highlight_opts] = identifier[high]
keyword[if] literal[string] keyword[in] identifier[d] :
identifier[self] . identifier[_suggest] = identifier[d] . identifier[pop] ( literal[string] )
keyword[if] literal[string] keyword[in] identifier[self] . identifier[_suggest] :
identifier[text] = identifier[self] . identifier[_suggest] . identifier[pop] ( literal[string] )
keyword[for] identifier[s] keyword[in] identifier[self] . identifier[_suggest] . identifier[values] ():
identifier[s] . identifier[setdefault] ( literal[string] , identifier[text] )
keyword[if] literal[string] keyword[in] identifier[d] :
identifier[self] . identifier[_script_fields] = identifier[d] . identifier[pop] ( literal[string] )
identifier[self] . identifier[_extra] = identifier[d]
keyword[return] identifier[self]
|
def update_from_dict(self, d):
"""
Apply options from a serialized body to the current instance. Modifies
the object in-place. Used mostly by ``from_dict``.
"""
d = d.copy()
if 'query' in d:
self.query._proxied = Q(d.pop('query')) # depends on [control=['if'], data=['d']]
if 'post_filter' in d:
self.post_filter._proxied = Q(d.pop('post_filter')) # depends on [control=['if'], data=['d']]
aggs = d.pop('aggs', d.pop('aggregations', {}))
if aggs:
self.aggs._params = {'aggs': {name: A(value) for (name, value) in iteritems(aggs)}} # depends on [control=['if'], data=[]]
if 'sort' in d:
self._sort = d.pop('sort') # depends on [control=['if'], data=['d']]
if '_source' in d:
self._source = d.pop('_source') # depends on [control=['if'], data=['d']]
if 'highlight' in d:
high = d.pop('highlight').copy()
self._highlight = high.pop('fields')
self._highlight_opts = high # depends on [control=['if'], data=['d']]
if 'suggest' in d:
self._suggest = d.pop('suggest')
if 'text' in self._suggest:
text = self._suggest.pop('text')
for s in self._suggest.values():
s.setdefault('text', text) # depends on [control=['for'], data=['s']] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['d']]
if 'script_fields' in d:
self._script_fields = d.pop('script_fields') # depends on [control=['if'], data=['d']]
self._extra = d
return self
|
def get_pool(cls) -> Pool:
"""
Yields:
existing db connection pool
"""
if len(cls._connection_params) < 5:
raise ConnectionError('Please call SQLStore.connect before calling this method')
if not cls._pool:
cls._pool = yield from create_pool(**cls._connection_params)
return cls._pool
|
def function[get_pool, parameter[cls]]:
constant[
Yields:
existing db connection pool
]
if compare[call[name[len], parameter[name[cls]._connection_params]] less[<] constant[5]] begin[:]
<ast.Raise object at 0x7da1b00364d0>
if <ast.UnaryOp object at 0x7da1b0035a50> begin[:]
name[cls]._pool assign[=] <ast.YieldFrom object at 0x7da1b0034880>
return[name[cls]._pool]
|
keyword[def] identifier[get_pool] ( identifier[cls] )-> identifier[Pool] :
literal[string]
keyword[if] identifier[len] ( identifier[cls] . identifier[_connection_params] )< literal[int] :
keyword[raise] identifier[ConnectionError] ( literal[string] )
keyword[if] keyword[not] identifier[cls] . identifier[_pool] :
identifier[cls] . identifier[_pool] = keyword[yield] keyword[from] identifier[create_pool] (** identifier[cls] . identifier[_connection_params] )
keyword[return] identifier[cls] . identifier[_pool]
|
def get_pool(cls) -> Pool:
"""
Yields:
existing db connection pool
"""
if len(cls._connection_params) < 5:
raise ConnectionError('Please call SQLStore.connect before calling this method') # depends on [control=['if'], data=[]]
if not cls._pool:
cls._pool = (yield from create_pool(**cls._connection_params)) # depends on [control=['if'], data=[]]
return cls._pool
|
def add_evaluation_step(result_tensor, ground_truth_tensor):
"""Inserts the operations we need to evaluate the accuracy of our results.
Args:
result_tensor: The new final node that produces results.
ground_truth_tensor: The node we feed ground truth data
into.
Returns:
Tuple of (evaluation step, prediction).
"""
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
prediction = tf.argmax(result_tensor, 1)
correct_prediction = tf.equal(prediction, ground_truth_tensor)
with tf.name_scope('accuracy'):
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy', evaluation_step)
return evaluation_step, prediction
|
def function[add_evaluation_step, parameter[result_tensor, ground_truth_tensor]]:
constant[Inserts the operations we need to evaluate the accuracy of our results.
Args:
result_tensor: The new final node that produces results.
ground_truth_tensor: The node we feed ground truth data
into.
Returns:
Tuple of (evaluation step, prediction).
]
with call[name[tf].name_scope, parameter[constant[accuracy]]] begin[:]
with call[name[tf].name_scope, parameter[constant[correct_prediction]]] begin[:]
variable[prediction] assign[=] call[name[tf].argmax, parameter[name[result_tensor], constant[1]]]
variable[correct_prediction] assign[=] call[name[tf].equal, parameter[name[prediction], name[ground_truth_tensor]]]
with call[name[tf].name_scope, parameter[constant[accuracy]]] begin[:]
variable[evaluation_step] assign[=] call[name[tf].reduce_mean, parameter[call[name[tf].cast, parameter[name[correct_prediction], name[tf].float32]]]]
call[name[tf].summary.scalar, parameter[constant[accuracy], name[evaluation_step]]]
return[tuple[[<ast.Name object at 0x7da1b1f18340>, <ast.Name object at 0x7da1b1f1aad0>]]]
|
keyword[def] identifier[add_evaluation_step] ( identifier[result_tensor] , identifier[ground_truth_tensor] ):
literal[string]
keyword[with] identifier[tf] . identifier[name_scope] ( literal[string] ):
keyword[with] identifier[tf] . identifier[name_scope] ( literal[string] ):
identifier[prediction] = identifier[tf] . identifier[argmax] ( identifier[result_tensor] , literal[int] )
identifier[correct_prediction] = identifier[tf] . identifier[equal] ( identifier[prediction] , identifier[ground_truth_tensor] )
keyword[with] identifier[tf] . identifier[name_scope] ( literal[string] ):
identifier[evaluation_step] = identifier[tf] . identifier[reduce_mean] ( identifier[tf] . identifier[cast] ( identifier[correct_prediction] , identifier[tf] . identifier[float32] ))
identifier[tf] . identifier[summary] . identifier[scalar] ( literal[string] , identifier[evaluation_step] )
keyword[return] identifier[evaluation_step] , identifier[prediction]
|
def add_evaluation_step(result_tensor, ground_truth_tensor):
"""Inserts the operations we need to evaluate the accuracy of our results.
Args:
result_tensor: The new final node that produces results.
ground_truth_tensor: The node we feed ground truth data
into.
Returns:
Tuple of (evaluation step, prediction).
"""
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
prediction = tf.argmax(result_tensor, 1)
correct_prediction = tf.equal(prediction, ground_truth_tensor) # depends on [control=['with'], data=[]]
with tf.name_scope('accuracy'):
evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # depends on [control=['with'], data=[]] # depends on [control=['with'], data=[]]
tf.summary.scalar('accuracy', evaluation_step)
return (evaluation_step, prediction)
|
def get_full_page_box_list_assigning_media_and_crop(input_doc, quiet=False):
"""Get a list of all the full-page box values for each page. The argument
input_doc should be a PdfFileReader object. The boxes on the list are in the
simple 4-float list format used by this program, not RectangleObject format."""
full_page_box_list = []
rotation_list = []
if args.verbose and not quiet:
print("\nOriginal full page sizes, in PDF format (lbrt):")
for page_num in range(input_doc.getNumPages()):
# Get the current page and find the full-page box.
curr_page = input_doc.getPage(page_num)
full_page_box = get_full_page_box_assigning_media_and_crop(curr_page)
if args.verbose and not quiet:
# want to display page num numbering from 1, so add one
print("\t"+str(page_num+1), " rot =",
curr_page.rotationAngle, "\t", full_page_box)
# Convert the RectangleObject to floats in an ordinary list and append.
ordinary_box = [float(b) for b in full_page_box]
full_page_box_list.append(ordinary_box)
# Append the rotation value to the rotation_list.
rotation_list.append(curr_page.rotationAngle)
return full_page_box_list, rotation_list
|
def function[get_full_page_box_list_assigning_media_and_crop, parameter[input_doc, quiet]]:
constant[Get a list of all the full-page box values for each page. The argument
input_doc should be a PdfFileReader object. The boxes on the list are in the
simple 4-float list format used by this program, not RectangleObject format.]
variable[full_page_box_list] assign[=] list[[]]
variable[rotation_list] assign[=] list[[]]
if <ast.BoolOp object at 0x7da1b11d4b20> begin[:]
call[name[print], parameter[constant[
Original full page sizes, in PDF format (lbrt):]]]
for taget[name[page_num]] in starred[call[name[range], parameter[call[name[input_doc].getNumPages, parameter[]]]]] begin[:]
variable[curr_page] assign[=] call[name[input_doc].getPage, parameter[name[page_num]]]
variable[full_page_box] assign[=] call[name[get_full_page_box_assigning_media_and_crop], parameter[name[curr_page]]]
if <ast.BoolOp object at 0x7da1b11d5390> begin[:]
call[name[print], parameter[binary_operation[constant[ ] + call[name[str], parameter[binary_operation[name[page_num] + constant[1]]]]], constant[ rot =], name[curr_page].rotationAngle, constant[ ], name[full_page_box]]]
variable[ordinary_box] assign[=] <ast.ListComp object at 0x7da1b11d4220>
call[name[full_page_box_list].append, parameter[name[ordinary_box]]]
call[name[rotation_list].append, parameter[name[curr_page].rotationAngle]]
return[tuple[[<ast.Name object at 0x7da1b11d7a30>, <ast.Name object at 0x7da1b11d7fd0>]]]
|
keyword[def] identifier[get_full_page_box_list_assigning_media_and_crop] ( identifier[input_doc] , identifier[quiet] = keyword[False] ):
literal[string]
identifier[full_page_box_list] =[]
identifier[rotation_list] =[]
keyword[if] identifier[args] . identifier[verbose] keyword[and] keyword[not] identifier[quiet] :
identifier[print] ( literal[string] )
keyword[for] identifier[page_num] keyword[in] identifier[range] ( identifier[input_doc] . identifier[getNumPages] ()):
identifier[curr_page] = identifier[input_doc] . identifier[getPage] ( identifier[page_num] )
identifier[full_page_box] = identifier[get_full_page_box_assigning_media_and_crop] ( identifier[curr_page] )
keyword[if] identifier[args] . identifier[verbose] keyword[and] keyword[not] identifier[quiet] :
identifier[print] ( literal[string] + identifier[str] ( identifier[page_num] + literal[int] ), literal[string] ,
identifier[curr_page] . identifier[rotationAngle] , literal[string] , identifier[full_page_box] )
identifier[ordinary_box] =[ identifier[float] ( identifier[b] ) keyword[for] identifier[b] keyword[in] identifier[full_page_box] ]
identifier[full_page_box_list] . identifier[append] ( identifier[ordinary_box] )
identifier[rotation_list] . identifier[append] ( identifier[curr_page] . identifier[rotationAngle] )
keyword[return] identifier[full_page_box_list] , identifier[rotation_list]
|
def get_full_page_box_list_assigning_media_and_crop(input_doc, quiet=False):
"""Get a list of all the full-page box values for each page. The argument
input_doc should be a PdfFileReader object. The boxes on the list are in the
simple 4-float list format used by this program, not RectangleObject format."""
full_page_box_list = []
rotation_list = []
if args.verbose and (not quiet):
print('\nOriginal full page sizes, in PDF format (lbrt):') # depends on [control=['if'], data=[]]
for page_num in range(input_doc.getNumPages()):
# Get the current page and find the full-page box.
curr_page = input_doc.getPage(page_num)
full_page_box = get_full_page_box_assigning_media_and_crop(curr_page)
if args.verbose and (not quiet):
# want to display page num numbering from 1, so add one
print('\t' + str(page_num + 1), ' rot =', curr_page.rotationAngle, '\t', full_page_box) # depends on [control=['if'], data=[]]
# Convert the RectangleObject to floats in an ordinary list and append.
ordinary_box = [float(b) for b in full_page_box]
full_page_box_list.append(ordinary_box)
# Append the rotation value to the rotation_list.
rotation_list.append(curr_page.rotationAngle) # depends on [control=['for'], data=['page_num']]
return (full_page_box_list, rotation_list)
|
def token(function):
"""Attach a CSRF token for POST requests."""
def wrapped(session, *args):
"""Wrap function."""
resp = session.get(TOKEN_URL).json()
session.headers.update({'mopar-csrf-salt': resp['token']})
return function(session, *args)
return wrapped
|
def function[token, parameter[function]]:
constant[Attach a CSRF token for POST requests.]
def function[wrapped, parameter[session]]:
constant[Wrap function.]
variable[resp] assign[=] call[call[name[session].get, parameter[name[TOKEN_URL]]].json, parameter[]]
call[name[session].headers.update, parameter[dictionary[[<ast.Constant object at 0x7da1b0a4b010>], [<ast.Subscript object at 0x7da1b0a4aec0>]]]]
return[call[name[function], parameter[name[session], <ast.Starred object at 0x7da1b0a4a860>]]]
return[name[wrapped]]
|
keyword[def] identifier[token] ( identifier[function] ):
literal[string]
keyword[def] identifier[wrapped] ( identifier[session] ,* identifier[args] ):
literal[string]
identifier[resp] = identifier[session] . identifier[get] ( identifier[TOKEN_URL] ). identifier[json] ()
identifier[session] . identifier[headers] . identifier[update] ({ literal[string] : identifier[resp] [ literal[string] ]})
keyword[return] identifier[function] ( identifier[session] ,* identifier[args] )
keyword[return] identifier[wrapped]
|
def token(function):
"""Attach a CSRF token for POST requests."""
def wrapped(session, *args):
"""Wrap function."""
resp = session.get(TOKEN_URL).json()
session.headers.update({'mopar-csrf-salt': resp['token']})
return function(session, *args)
return wrapped
|
def PopupNonBlocking(*args, button_type=POPUP_BUTTONS_OK, button_color=None, background_color=None, text_color=None,
auto_close=False, auto_close_duration=None, non_blocking=True, icon=DEFAULT_WINDOW_ICON,
line_width=None, font=None, no_titlebar=False, grab_anywhere=False, keep_on_top=False,
location=(None, None)):
"""
Show Popup box and immediately return (does not block)
:param args:
:param button_type:
:param button_color:
:param background_color:
:param text_color:
:param auto_close:
:param auto_close_duration:
:param non_blocking:
:param icon:
:param line_width:
:param font:
:param no_titlebar:
:param grab_anywhere:
:param keep_on_top:
:param location:
:return:
"""
Popup(*args, button_color=button_color, background_color=background_color, text_color=text_color,
button_type=button_type,
auto_close=auto_close, auto_close_duration=auto_close_duration, non_blocking=non_blocking, icon=icon,
line_width=line_width,
font=font, no_titlebar=no_titlebar, grab_anywhere=grab_anywhere, keep_on_top=keep_on_top, location=location)
|
def function[PopupNonBlocking, parameter[]]:
constant[
Show Popup box and immediately return (does not block)
:param args:
:param button_type:
:param button_color:
:param background_color:
:param text_color:
:param auto_close:
:param auto_close_duration:
:param non_blocking:
:param icon:
:param line_width:
:param font:
:param no_titlebar:
:param grab_anywhere:
:param keep_on_top:
:param location:
:return:
]
call[name[Popup], parameter[<ast.Starred object at 0x7da1b2010040>]]
|
keyword[def] identifier[PopupNonBlocking] (* identifier[args] , identifier[button_type] = identifier[POPUP_BUTTONS_OK] , identifier[button_color] = keyword[None] , identifier[background_color] = keyword[None] , identifier[text_color] = keyword[None] ,
identifier[auto_close] = keyword[False] , identifier[auto_close_duration] = keyword[None] , identifier[non_blocking] = keyword[True] , identifier[icon] = identifier[DEFAULT_WINDOW_ICON] ,
identifier[line_width] = keyword[None] , identifier[font] = keyword[None] , identifier[no_titlebar] = keyword[False] , identifier[grab_anywhere] = keyword[False] , identifier[keep_on_top] = keyword[False] ,
identifier[location] =( keyword[None] , keyword[None] )):
literal[string]
identifier[Popup] (* identifier[args] , identifier[button_color] = identifier[button_color] , identifier[background_color] = identifier[background_color] , identifier[text_color] = identifier[text_color] ,
identifier[button_type] = identifier[button_type] ,
identifier[auto_close] = identifier[auto_close] , identifier[auto_close_duration] = identifier[auto_close_duration] , identifier[non_blocking] = identifier[non_blocking] , identifier[icon] = identifier[icon] ,
identifier[line_width] = identifier[line_width] ,
identifier[font] = identifier[font] , identifier[no_titlebar] = identifier[no_titlebar] , identifier[grab_anywhere] = identifier[grab_anywhere] , identifier[keep_on_top] = identifier[keep_on_top] , identifier[location] = identifier[location] )
|
def PopupNonBlocking(*args, button_type=POPUP_BUTTONS_OK, button_color=None, background_color=None, text_color=None, auto_close=False, auto_close_duration=None, non_blocking=True, icon=DEFAULT_WINDOW_ICON, line_width=None, font=None, no_titlebar=False, grab_anywhere=False, keep_on_top=False, location=(None, None)):
"""
Show Popup box and immediately return (does not block)
:param args:
:param button_type:
:param button_color:
:param background_color:
:param text_color:
:param auto_close:
:param auto_close_duration:
:param non_blocking:
:param icon:
:param line_width:
:param font:
:param no_titlebar:
:param grab_anywhere:
:param keep_on_top:
:param location:
:return:
"""
Popup(*args, button_color=button_color, background_color=background_color, text_color=text_color, button_type=button_type, auto_close=auto_close, auto_close_duration=auto_close_duration, non_blocking=non_blocking, icon=icon, line_width=line_width, font=font, no_titlebar=no_titlebar, grab_anywhere=grab_anywhere, keep_on_top=keep_on_top, location=location)
|
def get_embeddings(self, rand=None, dtype='float32', corpus='common_crawl_48', n_dim=300):
"""
Retrieves the embeddings for the vocabulary.
:param rand: Random initialization function for out-of-vocabulary words. Defaults to `np.random.uniform(-0.1, 0.1, size=shape)`.
:param dtype: Type of the matrix.
:param corpus: Corpus to use. Please see `GloveVocab.settings` for available corpus.
:param n_dim: dimension of vectors to use. Please see `GloveVocab.settings` for available corpus.
:return: embeddings corresponding to the vocab instance.
NOTE: this function will download potentially very large binary dumps the first time it is called.
"""
assert corpus in self.settings, '{} not in supported corpus {}'.format(corpus, self.settings.keys())
self.n_dim, self.corpus, self.setting = n_dim, corpus, self.settings[corpus]
assert n_dim in self.setting.n_dims, '{} not in supported dimensions {}'.format(n_dim, self.setting.n_dims)
rand = rand if rand else lambda shape: np.random.uniform(-0.1, 0.1, size=shape)
zip_file = get_data_or_download('glove', '{}.zip'.format(self.corpus), self.setting.url, size=self.setting.size)
E = rand((len(self), self.n_dim)).astype(dtype)
n_dim = str(self.n_dim)
with zipfile.ZipFile(zip_file) as zf:
# should be only 1 txt file
names = [info.filename for info in zf.infolist() if
info.filename.endswith('.txt') and n_dim in info.filename]
if not names:
s = 'no .txt files found in zip file that matches {}-dim!'.format(n_dim)
s += '\n available files: {}'.format(names)
raise IOError(s)
name = names[0]
seen = []
with zf.open(name) as f:
for line in f:
toks = str(line).rstrip().split(' ')
word = toks[0]
if word in self:
seen += [word]
E[self[word]] = np.array([float(w) for w in toks[1:]], dtype=dtype)
self.backfill_unk_emb(E, set(seen))
return E
|
def function[get_embeddings, parameter[self, rand, dtype, corpus, n_dim]]:
constant[
Retrieves the embeddings for the vocabulary.
:param rand: Random initialization function for out-of-vocabulary words. Defaults to `np.random.uniform(-0.1, 0.1, size=shape)`.
:param dtype: Type of the matrix.
:param corpus: Corpus to use. Please see `GloveVocab.settings` for available corpus.
:param n_dim: dimension of vectors to use. Please see `GloveVocab.settings` for available corpus.
:return: embeddings corresponding to the vocab instance.
NOTE: this function will download potentially very large binary dumps the first time it is called.
]
assert[compare[name[corpus] in name[self].settings]]
<ast.Tuple object at 0x7da1b106cf10> assign[=] tuple[[<ast.Name object at 0x7da1b106c6a0>, <ast.Name object at 0x7da1b106ea10>, <ast.Subscript object at 0x7da1b1042560>]]
assert[compare[name[n_dim] in name[self].setting.n_dims]]
variable[rand] assign[=] <ast.IfExp object at 0x7da1b106f8b0>
variable[zip_file] assign[=] call[name[get_data_or_download], parameter[constant[glove], call[constant[{}.zip].format, parameter[name[self].corpus]], name[self].setting.url]]
variable[E] assign[=] call[call[name[rand], parameter[tuple[[<ast.Call object at 0x7da1b106e0e0>, <ast.Attribute object at 0x7da1b106dde0>]]]].astype, parameter[name[dtype]]]
variable[n_dim] assign[=] call[name[str], parameter[name[self].n_dim]]
with call[name[zipfile].ZipFile, parameter[name[zip_file]]] begin[:]
variable[names] assign[=] <ast.ListComp object at 0x7da1b104b3a0>
if <ast.UnaryOp object at 0x7da1b1049c60> begin[:]
variable[s] assign[=] call[constant[no .txt files found in zip file that matches {}-dim!].format, parameter[name[n_dim]]]
<ast.AugAssign object at 0x7da1b104b6a0>
<ast.Raise object at 0x7da1b104b070>
variable[name] assign[=] call[name[names]][constant[0]]
variable[seen] assign[=] list[[]]
with call[name[zf].open, parameter[name[name]]] begin[:]
for taget[name[line]] in starred[name[f]] begin[:]
variable[toks] assign[=] call[call[call[name[str], parameter[name[line]]].rstrip, parameter[]].split, parameter[constant[ ]]]
variable[word] assign[=] call[name[toks]][constant[0]]
if compare[name[word] in name[self]] begin[:]
<ast.AugAssign object at 0x7da1b100d480>
call[name[E]][call[name[self]][name[word]]] assign[=] call[name[np].array, parameter[<ast.ListComp object at 0x7da1b100c340>]]
call[name[self].backfill_unk_emb, parameter[name[E], call[name[set], parameter[name[seen]]]]]
return[name[E]]
|
keyword[def] identifier[get_embeddings] ( identifier[self] , identifier[rand] = keyword[None] , identifier[dtype] = literal[string] , identifier[corpus] = literal[string] , identifier[n_dim] = literal[int] ):
literal[string]
keyword[assert] identifier[corpus] keyword[in] identifier[self] . identifier[settings] , literal[string] . identifier[format] ( identifier[corpus] , identifier[self] . identifier[settings] . identifier[keys] ())
identifier[self] . identifier[n_dim] , identifier[self] . identifier[corpus] , identifier[self] . identifier[setting] = identifier[n_dim] , identifier[corpus] , identifier[self] . identifier[settings] [ identifier[corpus] ]
keyword[assert] identifier[n_dim] keyword[in] identifier[self] . identifier[setting] . identifier[n_dims] , literal[string] . identifier[format] ( identifier[n_dim] , identifier[self] . identifier[setting] . identifier[n_dims] )
identifier[rand] = identifier[rand] keyword[if] identifier[rand] keyword[else] keyword[lambda] identifier[shape] : identifier[np] . identifier[random] . identifier[uniform] (- literal[int] , literal[int] , identifier[size] = identifier[shape] )
identifier[zip_file] = identifier[get_data_or_download] ( literal[string] , literal[string] . identifier[format] ( identifier[self] . identifier[corpus] ), identifier[self] . identifier[setting] . identifier[url] , identifier[size] = identifier[self] . identifier[setting] . identifier[size] )
identifier[E] = identifier[rand] (( identifier[len] ( identifier[self] ), identifier[self] . identifier[n_dim] )). identifier[astype] ( identifier[dtype] )
identifier[n_dim] = identifier[str] ( identifier[self] . identifier[n_dim] )
keyword[with] identifier[zipfile] . identifier[ZipFile] ( identifier[zip_file] ) keyword[as] identifier[zf] :
identifier[names] =[ identifier[info] . identifier[filename] keyword[for] identifier[info] keyword[in] identifier[zf] . identifier[infolist] () keyword[if]
identifier[info] . identifier[filename] . identifier[endswith] ( literal[string] ) keyword[and] identifier[n_dim] keyword[in] identifier[info] . identifier[filename] ]
keyword[if] keyword[not] identifier[names] :
identifier[s] = literal[string] . identifier[format] ( identifier[n_dim] )
identifier[s] += literal[string] . identifier[format] ( identifier[names] )
keyword[raise] identifier[IOError] ( identifier[s] )
identifier[name] = identifier[names] [ literal[int] ]
identifier[seen] =[]
keyword[with] identifier[zf] . identifier[open] ( identifier[name] ) keyword[as] identifier[f] :
keyword[for] identifier[line] keyword[in] identifier[f] :
identifier[toks] = identifier[str] ( identifier[line] ). identifier[rstrip] (). identifier[split] ( literal[string] )
identifier[word] = identifier[toks] [ literal[int] ]
keyword[if] identifier[word] keyword[in] identifier[self] :
identifier[seen] +=[ identifier[word] ]
identifier[E] [ identifier[self] [ identifier[word] ]]= identifier[np] . identifier[array] ([ identifier[float] ( identifier[w] ) keyword[for] identifier[w] keyword[in] identifier[toks] [ literal[int] :]], identifier[dtype] = identifier[dtype] )
identifier[self] . identifier[backfill_unk_emb] ( identifier[E] , identifier[set] ( identifier[seen] ))
keyword[return] identifier[E]
|
def get_embeddings(self, rand=None, dtype='float32', corpus='common_crawl_48', n_dim=300):
"""
Retrieves the embeddings for the vocabulary.
:param rand: Random initialization function for out-of-vocabulary words. Defaults to `np.random.uniform(-0.1, 0.1, size=shape)`.
:param dtype: Type of the matrix.
:param corpus: Corpus to use. Please see `GloveVocab.settings` for available corpus.
:param n_dim: dimension of vectors to use. Please see `GloveVocab.settings` for available corpus.
:return: embeddings corresponding to the vocab instance.
NOTE: this function will download potentially very large binary dumps the first time it is called.
"""
assert corpus in self.settings, '{} not in supported corpus {}'.format(corpus, self.settings.keys())
(self.n_dim, self.corpus, self.setting) = (n_dim, corpus, self.settings[corpus])
assert n_dim in self.setting.n_dims, '{} not in supported dimensions {}'.format(n_dim, self.setting.n_dims)
rand = rand if rand else lambda shape: np.random.uniform(-0.1, 0.1, size=shape)
zip_file = get_data_or_download('glove', '{}.zip'.format(self.corpus), self.setting.url, size=self.setting.size)
E = rand((len(self), self.n_dim)).astype(dtype)
n_dim = str(self.n_dim)
with zipfile.ZipFile(zip_file) as zf:
# should be only 1 txt file
names = [info.filename for info in zf.infolist() if info.filename.endswith('.txt') and n_dim in info.filename]
if not names:
s = 'no .txt files found in zip file that matches {}-dim!'.format(n_dim)
s += '\n available files: {}'.format(names)
raise IOError(s) # depends on [control=['if'], data=[]]
name = names[0]
seen = []
with zf.open(name) as f:
for line in f:
toks = str(line).rstrip().split(' ')
word = toks[0]
if word in self:
seen += [word]
E[self[word]] = np.array([float(w) for w in toks[1:]], dtype=dtype) # depends on [control=['if'], data=['word', 'self']] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['f']]
self.backfill_unk_emb(E, set(seen))
return E # depends on [control=['with'], data=['zf']]
|
def gen_radio_edit(sig_dic):
'''
editing for HTML radio control.
'''
edit_zuoxiang = '''7
<label for="{0}"><span>
<a class="glyphicon glyphicon-star" style="color: red;font-size: xx-small;">
</a>{1}</span>
'''.format(sig_dic['en'], sig_dic['zh'])
dic_tmp = sig_dic['dic']
for key in dic_tmp.keys():
tmp_str = '''
<input id="{0}" name="{0}" type="radio" class="form-control" value="{1}"
{{% if '{0}' in postinfo.extinfo and postinfo.extinfo['{0}'] == '{1}' %}}
checked
{{% end %}}
>{2}
'''.format(sig_dic['en'], key, dic_tmp[key])
edit_zuoxiang += tmp_str
edit_zuoxiang += '''</label>'''
return edit_zuoxiang
|
def function[gen_radio_edit, parameter[sig_dic]]:
constant[
editing for HTML radio control.
]
variable[edit_zuoxiang] assign[=] call[constant[7
<label for="{0}"><span>
<a class="glyphicon glyphicon-star" style="color: red;font-size: xx-small;">
</a>{1}</span>
].format, parameter[call[name[sig_dic]][constant[en]], call[name[sig_dic]][constant[zh]]]]
variable[dic_tmp] assign[=] call[name[sig_dic]][constant[dic]]
for taget[name[key]] in starred[call[name[dic_tmp].keys, parameter[]]] begin[:]
variable[tmp_str] assign[=] call[constant[
<input id="{0}" name="{0}" type="radio" class="form-control" value="{1}"
{{% if '{0}' in postinfo.extinfo and postinfo.extinfo['{0}'] == '{1}' %}}
checked
{{% end %}}
>{2}
].format, parameter[call[name[sig_dic]][constant[en]], name[key], call[name[dic_tmp]][name[key]]]]
<ast.AugAssign object at 0x7da1b0464250>
<ast.AugAssign object at 0x7da1b0467d30>
return[name[edit_zuoxiang]]
|
keyword[def] identifier[gen_radio_edit] ( identifier[sig_dic] ):
literal[string]
identifier[edit_zuoxiang] = literal[string] . identifier[format] ( identifier[sig_dic] [ literal[string] ], identifier[sig_dic] [ literal[string] ])
identifier[dic_tmp] = identifier[sig_dic] [ literal[string] ]
keyword[for] identifier[key] keyword[in] identifier[dic_tmp] . identifier[keys] ():
identifier[tmp_str] = literal[string] . identifier[format] ( identifier[sig_dic] [ literal[string] ], identifier[key] , identifier[dic_tmp] [ identifier[key] ])
identifier[edit_zuoxiang] += identifier[tmp_str]
identifier[edit_zuoxiang] += literal[string]
keyword[return] identifier[edit_zuoxiang]
|
def gen_radio_edit(sig_dic):
"""
editing for HTML radio control.
"""
edit_zuoxiang = '7\n <label for="{0}"><span>\n <a class="glyphicon glyphicon-star" style="color: red;font-size: xx-small;">\n </a>{1}</span>\n '.format(sig_dic['en'], sig_dic['zh'])
dic_tmp = sig_dic['dic']
for key in dic_tmp.keys():
tmp_str = '\n <input id="{0}" name="{0}" type="radio" class="form-control" value="{1}"\n {{% if \'{0}\' in postinfo.extinfo and postinfo.extinfo[\'{0}\'] == \'{1}\' %}}\n checked\n {{% end %}}\n >{2}\n '.format(sig_dic['en'], key, dic_tmp[key])
edit_zuoxiang += tmp_str # depends on [control=['for'], data=['key']]
edit_zuoxiang += '</label>'
return edit_zuoxiang
|
def needs_low_priority(self, priority):
"""
:return: None
"""
assert isinstance(priority, int)
if priority != velbus.LOW_PRIORITY:
self.parser_error("needs low priority set")
|
def function[needs_low_priority, parameter[self, priority]]:
constant[
:return: None
]
assert[call[name[isinstance], parameter[name[priority], name[int]]]]
if compare[name[priority] not_equal[!=] name[velbus].LOW_PRIORITY] begin[:]
call[name[self].parser_error, parameter[constant[needs low priority set]]]
|
keyword[def] identifier[needs_low_priority] ( identifier[self] , identifier[priority] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[priority] , identifier[int] )
keyword[if] identifier[priority] != identifier[velbus] . identifier[LOW_PRIORITY] :
identifier[self] . identifier[parser_error] ( literal[string] )
|
def needs_low_priority(self, priority):
"""
:return: None
"""
assert isinstance(priority, int)
if priority != velbus.LOW_PRIORITY:
self.parser_error('needs low priority set') # depends on [control=['if'], data=[]]
|
def get_variable_and_source(x: str):
""" Process the variable name to make it more human-readable. """
xs = x.replace("\/", "|").split("/")
xs = [x.replace("|", "/") for x in xs]
if xs[0] == "FAO":
return " ".join(xs[2:]), xs[0]
else:
return xs[-1], xs[0]
|
def function[get_variable_and_source, parameter[x]]:
constant[ Process the variable name to make it more human-readable. ]
variable[xs] assign[=] call[call[name[x].replace, parameter[constant[\/], constant[|]]].split, parameter[constant[/]]]
variable[xs] assign[=] <ast.ListComp object at 0x7da1b02e70a0>
if compare[call[name[xs]][constant[0]] equal[==] constant[FAO]] begin[:]
return[tuple[[<ast.Call object at 0x7da1b02e5090>, <ast.Subscript object at 0x7da1b02e7bb0>]]]
|
keyword[def] identifier[get_variable_and_source] ( identifier[x] : identifier[str] ):
literal[string]
identifier[xs] = identifier[x] . identifier[replace] ( literal[string] , literal[string] ). identifier[split] ( literal[string] )
identifier[xs] =[ identifier[x] . identifier[replace] ( literal[string] , literal[string] ) keyword[for] identifier[x] keyword[in] identifier[xs] ]
keyword[if] identifier[xs] [ literal[int] ]== literal[string] :
keyword[return] literal[string] . identifier[join] ( identifier[xs] [ literal[int] :]), identifier[xs] [ literal[int] ]
keyword[else] :
keyword[return] identifier[xs] [- literal[int] ], identifier[xs] [ literal[int] ]
|
def get_variable_and_source(x: str):
""" Process the variable name to make it more human-readable. """
xs = x.replace('\\/', '|').split('/')
xs = [x.replace('|', '/') for x in xs]
if xs[0] == 'FAO':
return (' '.join(xs[2:]), xs[0]) # depends on [control=['if'], data=[]]
else:
return (xs[-1], xs[0])
|
def import_txt(filename, **kwargs):
"""Import Syscal measurements from a text file, exported as 'Spreadsheet'.
Parameters
----------
filename: string
input filename
x0: float, optional
position of first electrode. If not given, then use the smallest
x-position in the data as the first electrode.
spacing: float
electrode spacing. This is important if not all electrodes are used in
a given measurement setup. If not given, then the smallest distance
between electrodes is assumed to be the electrode spacing. Naturally,
this requires measurements (or injections) with subsequent electrodes.
reciprocals: int, optional
if provided, then assume that this is a reciprocal measurements where
only the electrode cables were switched. The provided number N is
treated as the maximum electrode number, and denotations are renamed
according to the equation :math:`X_n = N - (X_a - 1)`
Returns
-------
data: :py:class:`pandas.DataFrame`
Contains the measurement data
electrodes: :py:class:`pandas.DataFrame`
Contains electrode positions (None at the moment)
topography: None
No topography information is contained in the text files, so we always
return None
Notes
-----
* TODO: we could try to infer electrode spacing from the file itself
"""
# read in text file into a buffer
with open(filename, 'r') as fid:
text = fid.read()
strings_to_replace = {
'Mixed / non conventional': 'Mixed/non-conventional',
'Date': 'Date Time AM-PM',
}
for key in strings_to_replace.keys():
text = text.replace(key, strings_to_replace[key])
buffer = StringIO(text)
# read data file
data_raw = pd.read_csv(
buffer,
# sep='\t',
delim_whitespace=True,
)
# clean up column names
data_raw.columns = [x.strip() for x in data_raw.columns.tolist()]
# generate electrode positions
data = _convert_coords_to_abmn_X(
data_raw[['Spa.1', 'Spa.2', 'Spa.3', 'Spa.4']],
**kwargs
)
# [mV] / [mA]
data['r'] = data_raw['Vp'] / data_raw['In']
data['Vmn'] = data_raw['Vp']
data['Iab'] = data_raw['In']
# rename electrode denotations
rec_max = kwargs.get('reciprocals', None)
if rec_max is not None:
print('renumbering electrode numbers')
data[['a', 'b', 'm', 'n']] = rec_max + 1 - data[['a', 'b', 'm', 'n']]
return data, None, None
|
def function[import_txt, parameter[filename]]:
constant[Import Syscal measurements from a text file, exported as 'Spreadsheet'.
Parameters
----------
filename: string
input filename
x0: float, optional
position of first electrode. If not given, then use the smallest
x-position in the data as the first electrode.
spacing: float
electrode spacing. This is important if not all electrodes are used in
a given measurement setup. If not given, then the smallest distance
between electrodes is assumed to be the electrode spacing. Naturally,
this requires measurements (or injections) with subsequent electrodes.
reciprocals: int, optional
if provided, then assume that this is a reciprocal measurements where
only the electrode cables were switched. The provided number N is
treated as the maximum electrode number, and denotations are renamed
according to the equation :math:`X_n = N - (X_a - 1)`
Returns
-------
data: :py:class:`pandas.DataFrame`
Contains the measurement data
electrodes: :py:class:`pandas.DataFrame`
Contains electrode positions (None at the moment)
topography: None
No topography information is contained in the text files, so we always
return None
Notes
-----
* TODO: we could try to infer electrode spacing from the file itself
]
with call[name[open], parameter[name[filename], constant[r]]] begin[:]
variable[text] assign[=] call[name[fid].read, parameter[]]
variable[strings_to_replace] assign[=] dictionary[[<ast.Constant object at 0x7da18bcca110>, <ast.Constant object at 0x7da18bcca920>], [<ast.Constant object at 0x7da18bccbdc0>, <ast.Constant object at 0x7da18bcca6e0>]]
for taget[name[key]] in starred[call[name[strings_to_replace].keys, parameter[]]] begin[:]
variable[text] assign[=] call[name[text].replace, parameter[name[key], call[name[strings_to_replace]][name[key]]]]
variable[buffer] assign[=] call[name[StringIO], parameter[name[text]]]
variable[data_raw] assign[=] call[name[pd].read_csv, parameter[name[buffer]]]
name[data_raw].columns assign[=] <ast.ListComp object at 0x7da18bcc8250>
variable[data] assign[=] call[name[_convert_coords_to_abmn_X], parameter[call[name[data_raw]][list[[<ast.Constant object at 0x7da18fe93280>, <ast.Constant object at 0x7da18fe912d0>, <ast.Constant object at 0x7da18fe924d0>, <ast.Constant object at 0x7da18fe92fb0>]]]]]
call[name[data]][constant[r]] assign[=] binary_operation[call[name[data_raw]][constant[Vp]] / call[name[data_raw]][constant[In]]]
call[name[data]][constant[Vmn]] assign[=] call[name[data_raw]][constant[Vp]]
call[name[data]][constant[Iab]] assign[=] call[name[data_raw]][constant[In]]
variable[rec_max] assign[=] call[name[kwargs].get, parameter[constant[reciprocals], constant[None]]]
if compare[name[rec_max] is_not constant[None]] begin[:]
call[name[print], parameter[constant[renumbering electrode numbers]]]
call[name[data]][list[[<ast.Constant object at 0x7da20e955d80>, <ast.Constant object at 0x7da20e954190>, <ast.Constant object at 0x7da20e955000>, <ast.Constant object at 0x7da20e954100>]]] assign[=] binary_operation[binary_operation[name[rec_max] + constant[1]] - call[name[data]][list[[<ast.Constant object at 0x7da20e956d70>, <ast.Constant object at 0x7da20e955540>, <ast.Constant object at 0x7da20e9546d0>, <ast.Constant object at 0x7da20e9564d0>]]]]
return[tuple[[<ast.Name object at 0x7da20e957e50>, <ast.Constant object at 0x7da20e954ac0>, <ast.Constant object at 0x7da20e955600>]]]
|
keyword[def] identifier[import_txt] ( identifier[filename] ,** identifier[kwargs] ):
literal[string]
keyword[with] identifier[open] ( identifier[filename] , literal[string] ) keyword[as] identifier[fid] :
identifier[text] = identifier[fid] . identifier[read] ()
identifier[strings_to_replace] ={
literal[string] : literal[string] ,
literal[string] : literal[string] ,
}
keyword[for] identifier[key] keyword[in] identifier[strings_to_replace] . identifier[keys] ():
identifier[text] = identifier[text] . identifier[replace] ( identifier[key] , identifier[strings_to_replace] [ identifier[key] ])
identifier[buffer] = identifier[StringIO] ( identifier[text] )
identifier[data_raw] = identifier[pd] . identifier[read_csv] (
identifier[buffer] ,
identifier[delim_whitespace] = keyword[True] ,
)
identifier[data_raw] . identifier[columns] =[ identifier[x] . identifier[strip] () keyword[for] identifier[x] keyword[in] identifier[data_raw] . identifier[columns] . identifier[tolist] ()]
identifier[data] = identifier[_convert_coords_to_abmn_X] (
identifier[data_raw] [[ literal[string] , literal[string] , literal[string] , literal[string] ]],
** identifier[kwargs]
)
identifier[data] [ literal[string] ]= identifier[data_raw] [ literal[string] ]/ identifier[data_raw] [ literal[string] ]
identifier[data] [ literal[string] ]= identifier[data_raw] [ literal[string] ]
identifier[data] [ literal[string] ]= identifier[data_raw] [ literal[string] ]
identifier[rec_max] = identifier[kwargs] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[rec_max] keyword[is] keyword[not] keyword[None] :
identifier[print] ( literal[string] )
identifier[data] [[ literal[string] , literal[string] , literal[string] , literal[string] ]]= identifier[rec_max] + literal[int] - identifier[data] [[ literal[string] , literal[string] , literal[string] , literal[string] ]]
keyword[return] identifier[data] , keyword[None] , keyword[None]
|
def import_txt(filename, **kwargs):
"""Import Syscal measurements from a text file, exported as 'Spreadsheet'.
Parameters
----------
filename: string
input filename
x0: float, optional
position of first electrode. If not given, then use the smallest
x-position in the data as the first electrode.
spacing: float
electrode spacing. This is important if not all electrodes are used in
a given measurement setup. If not given, then the smallest distance
between electrodes is assumed to be the electrode spacing. Naturally,
this requires measurements (or injections) with subsequent electrodes.
reciprocals: int, optional
if provided, then assume that this is a reciprocal measurements where
only the electrode cables were switched. The provided number N is
treated as the maximum electrode number, and denotations are renamed
according to the equation :math:`X_n = N - (X_a - 1)`
Returns
-------
data: :py:class:`pandas.DataFrame`
Contains the measurement data
electrodes: :py:class:`pandas.DataFrame`
Contains electrode positions (None at the moment)
topography: None
No topography information is contained in the text files, so we always
return None
Notes
-----
* TODO: we could try to infer electrode spacing from the file itself
"""
# read in text file into a buffer
with open(filename, 'r') as fid:
text = fid.read() # depends on [control=['with'], data=['fid']]
strings_to_replace = {'Mixed / non conventional': 'Mixed/non-conventional', 'Date': 'Date Time AM-PM'}
for key in strings_to_replace.keys():
text = text.replace(key, strings_to_replace[key]) # depends on [control=['for'], data=['key']]
buffer = StringIO(text)
# read data file
# sep='\t',
data_raw = pd.read_csv(buffer, delim_whitespace=True)
# clean up column names
data_raw.columns = [x.strip() for x in data_raw.columns.tolist()]
# generate electrode positions
data = _convert_coords_to_abmn_X(data_raw[['Spa.1', 'Spa.2', 'Spa.3', 'Spa.4']], **kwargs)
# [mV] / [mA]
data['r'] = data_raw['Vp'] / data_raw['In']
data['Vmn'] = data_raw['Vp']
data['Iab'] = data_raw['In']
# rename electrode denotations
rec_max = kwargs.get('reciprocals', None)
if rec_max is not None:
print('renumbering electrode numbers')
data[['a', 'b', 'm', 'n']] = rec_max + 1 - data[['a', 'b', 'm', 'n']] # depends on [control=['if'], data=['rec_max']]
return (data, None, None)
|
def nla_len(self, value):
"""Length setter."""
self.bytearray[self._get_slicers(0)] = bytearray(c_uint16(value or 0))
|
def function[nla_len, parameter[self, value]]:
constant[Length setter.]
call[name[self].bytearray][call[name[self]._get_slicers, parameter[constant[0]]]] assign[=] call[name[bytearray], parameter[call[name[c_uint16], parameter[<ast.BoolOp object at 0x7da1b28f0160>]]]]
|
keyword[def] identifier[nla_len] ( identifier[self] , identifier[value] ):
literal[string]
identifier[self] . identifier[bytearray] [ identifier[self] . identifier[_get_slicers] ( literal[int] )]= identifier[bytearray] ( identifier[c_uint16] ( identifier[value] keyword[or] literal[int] ))
|
def nla_len(self, value):
"""Length setter."""
self.bytearray[self._get_slicers(0)] = bytearray(c_uint16(value or 0))
|
def date_to_datetime_tz(
cr, table_name, user_field_name, date_field_name, datetime_field_name):
""" Take the related user's timezone into account when converting
date field to datetime in a given table.
This function must be call in post migration script.
:param table_name : Name of the table where the field is;
:param user_field_name : The name of the user field (res.users);
:param date_field_name : The name of the old date field; \
(Typically a legacy name, set in pre-migration script)
:param datetime_field_name : The name of the new date field;
.. versionadded:: 8.0
"""
cr.execute(
"""
SELECT distinct(rp.tz)
FROM %s my_table, res_users ru, res_partner rp
WHERE rp.tz IS NOT NULL
AND my_table.%s=ru.id
AND ru.partner_id=rp.id
""" % (table_name, user_field_name,))
for timezone, in cr.fetchall():
cr.execute("SET TIMEZONE=%s", (timezone,))
values = {
'table_name': table_name,
'date_field_name': date_field_name,
'datetime_field_name': datetime_field_name,
'timezone': timezone,
}
logged_query(
cr,
"""
UPDATE %(table_name)s my_table
SET %(datetime_field_name)s =
my_table.%(date_field_name)s::TIMESTAMP AT TIME ZONE 'UTC'
FROM res_partner rp, res_users ru
WHERE my_table.%(date_field_name)s IS NOT NULL
AND my_table.user_id=ru.id
AND ru.partner_id=rp.id
AND rp.tz='%(timezone)s';
""" % values)
cr.execute("RESET TIMEZONE")
|
def function[date_to_datetime_tz, parameter[cr, table_name, user_field_name, date_field_name, datetime_field_name]]:
constant[ Take the related user's timezone into account when converting
date field to datetime in a given table.
This function must be call in post migration script.
:param table_name : Name of the table where the field is;
:param user_field_name : The name of the user field (res.users);
:param date_field_name : The name of the old date field; (Typically a legacy name, set in pre-migration script)
:param datetime_field_name : The name of the new date field;
.. versionadded:: 8.0
]
call[name[cr].execute, parameter[binary_operation[constant[
SELECT distinct(rp.tz)
FROM %s my_table, res_users ru, res_partner rp
WHERE rp.tz IS NOT NULL
AND my_table.%s=ru.id
AND ru.partner_id=rp.id
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f09f0a0>, <ast.Name object at 0x7da18f09ff10>]]]]]
for taget[tuple[[<ast.Name object at 0x7da18f09fe50>]]] in starred[call[name[cr].fetchall, parameter[]]] begin[:]
call[name[cr].execute, parameter[constant[SET TIMEZONE=%s], tuple[[<ast.Name object at 0x7da18f09ca00>]]]]
variable[values] assign[=] dictionary[[<ast.Constant object at 0x7da18f09eaa0>, <ast.Constant object at 0x7da18f09e920>, <ast.Constant object at 0x7da18f09e4d0>, <ast.Constant object at 0x7da18f09e470>], [<ast.Name object at 0x7da18f09f9d0>, <ast.Name object at 0x7da18f09c610>, <ast.Name object at 0x7da18f09f8e0>, <ast.Name object at 0x7da18f09d720>]]
call[name[logged_query], parameter[name[cr], binary_operation[constant[
UPDATE %(table_name)s my_table
SET %(datetime_field_name)s =
my_table.%(date_field_name)s::TIMESTAMP AT TIME ZONE 'UTC'
FROM res_partner rp, res_users ru
WHERE my_table.%(date_field_name)s IS NOT NULL
AND my_table.user_id=ru.id
AND ru.partner_id=rp.id
AND rp.tz='%(timezone)s';
] <ast.Mod object at 0x7da2590d6920> name[values]]]]
call[name[cr].execute, parameter[constant[RESET TIMEZONE]]]
|
keyword[def] identifier[date_to_datetime_tz] (
identifier[cr] , identifier[table_name] , identifier[user_field_name] , identifier[date_field_name] , identifier[datetime_field_name] ):
literal[string]
identifier[cr] . identifier[execute] (
literal[string] %( identifier[table_name] , identifier[user_field_name] ,))
keyword[for] identifier[timezone] , keyword[in] identifier[cr] . identifier[fetchall] ():
identifier[cr] . identifier[execute] ( literal[string] ,( identifier[timezone] ,))
identifier[values] ={
literal[string] : identifier[table_name] ,
literal[string] : identifier[date_field_name] ,
literal[string] : identifier[datetime_field_name] ,
literal[string] : identifier[timezone] ,
}
identifier[logged_query] (
identifier[cr] ,
literal[string] % identifier[values] )
identifier[cr] . identifier[execute] ( literal[string] )
|
def date_to_datetime_tz(cr, table_name, user_field_name, date_field_name, datetime_field_name):
""" Take the related user's timezone into account when converting
date field to datetime in a given table.
This function must be call in post migration script.
:param table_name : Name of the table where the field is;
:param user_field_name : The name of the user field (res.users);
:param date_field_name : The name of the old date field; (Typically a legacy name, set in pre-migration script)
:param datetime_field_name : The name of the new date field;
.. versionadded:: 8.0
"""
cr.execute('\n SELECT distinct(rp.tz)\n FROM %s my_table, res_users ru, res_partner rp\n WHERE rp.tz IS NOT NULL\n AND my_table.%s=ru.id\n AND ru.partner_id=rp.id\n ' % (table_name, user_field_name))
for (timezone,) in cr.fetchall():
cr.execute('SET TIMEZONE=%s', (timezone,))
values = {'table_name': table_name, 'date_field_name': date_field_name, 'datetime_field_name': datetime_field_name, 'timezone': timezone}
logged_query(cr, "\n UPDATE %(table_name)s my_table\n SET %(datetime_field_name)s =\n my_table.%(date_field_name)s::TIMESTAMP AT TIME ZONE 'UTC'\n FROM res_partner rp, res_users ru\n WHERE my_table.%(date_field_name)s IS NOT NULL\n AND my_table.user_id=ru.id\n AND ru.partner_id=rp.id\n AND rp.tz='%(timezone)s';\n " % values) # depends on [control=['for'], data=[]]
cr.execute('RESET TIMEZONE')
|
def _parse_single(self, text, tagname):
"""
A hack to get the content of the XML responses from the CAS server.
## Arguments
* `text` (str): The XML string to parse.
* `tagname` (str): The tag that contains the info that we want.
## Returns
* `content` (str): The contents of the tag.
"""
return minidom.parseString(text)\
.getElementsByTagName(tagname)[0].firstChild.data
|
def function[_parse_single, parameter[self, text, tagname]]:
constant[
A hack to get the content of the XML responses from the CAS server.
## Arguments
* `text` (str): The XML string to parse.
* `tagname` (str): The tag that contains the info that we want.
## Returns
* `content` (str): The contents of the tag.
]
return[call[call[call[name[minidom].parseString, parameter[name[text]]].getElementsByTagName, parameter[name[tagname]]]][constant[0]].firstChild.data]
|
keyword[def] identifier[_parse_single] ( identifier[self] , identifier[text] , identifier[tagname] ):
literal[string]
keyword[return] identifier[minidom] . identifier[parseString] ( identifier[text] ). identifier[getElementsByTagName] ( identifier[tagname] )[ literal[int] ]. identifier[firstChild] . identifier[data]
|
def _parse_single(self, text, tagname):
"""
A hack to get the content of the XML responses from the CAS server.
## Arguments
* `text` (str): The XML string to parse.
* `tagname` (str): The tag that contains the info that we want.
## Returns
* `content` (str): The contents of the tag.
"""
return minidom.parseString(text).getElementsByTagName(tagname)[0].firstChild.data
|
def create(self, subscription_id, name, parameters, type='analysis', service='facebook'):
""" Create a PYLON task
:param subscription_id: The ID of the recording to create the task for
:type subscription_id: str
:param name: The name of the new task
:type name: str
:param parameters: The parameters for this task
:type parameters: dict
:param type: The type of analysis to create, currently only 'analysis' is accepted
:type type: str
:param service: The PYLON service (facebook)
:type service: str
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`,
:class:`requests.exceptions.HTTPError`
"""
params = {
'subscription_id': subscription_id,
'name': name,
'parameters': parameters,
'type': type
}
return self.request.post(service + '/task/', params)
|
def function[create, parameter[self, subscription_id, name, parameters, type, service]]:
constant[ Create a PYLON task
:param subscription_id: The ID of the recording to create the task for
:type subscription_id: str
:param name: The name of the new task
:type name: str
:param parameters: The parameters for this task
:type parameters: dict
:param type: The type of analysis to create, currently only 'analysis' is accepted
:type type: str
:param service: The PYLON service (facebook)
:type service: str
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`,
:class:`requests.exceptions.HTTPError`
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b0b3aa10>, <ast.Constant object at 0x7da1b0b3a800>, <ast.Constant object at 0x7da1b0b38fd0>, <ast.Constant object at 0x7da1b0b38be0>], [<ast.Name object at 0x7da1b0b39750>, <ast.Name object at 0x7da1b0b3a500>, <ast.Name object at 0x7da1b0b39ff0>, <ast.Name object at 0x7da1b0b3b2b0>]]
return[call[name[self].request.post, parameter[binary_operation[name[service] + constant[/task/]], name[params]]]]
|
keyword[def] identifier[create] ( identifier[self] , identifier[subscription_id] , identifier[name] , identifier[parameters] , identifier[type] = literal[string] , identifier[service] = literal[string] ):
literal[string]
identifier[params] ={
literal[string] : identifier[subscription_id] ,
literal[string] : identifier[name] ,
literal[string] : identifier[parameters] ,
literal[string] : identifier[type]
}
keyword[return] identifier[self] . identifier[request] . identifier[post] ( identifier[service] + literal[string] , identifier[params] )
|
def create(self, subscription_id, name, parameters, type='analysis', service='facebook'):
""" Create a PYLON task
:param subscription_id: The ID of the recording to create the task for
:type subscription_id: str
:param name: The name of the new task
:type name: str
:param parameters: The parameters for this task
:type parameters: dict
:param type: The type of analysis to create, currently only 'analysis' is accepted
:type type: str
:param service: The PYLON service (facebook)
:type service: str
:return: dict of REST API output with headers attached
:rtype: :class:`~datasift.request.DictResponse`
:raises: :class:`~datasift.exceptions.DataSiftApiException`,
:class:`requests.exceptions.HTTPError`
"""
params = {'subscription_id': subscription_id, 'name': name, 'parameters': parameters, 'type': type}
return self.request.post(service + '/task/', params)
|
def _validate(self):
"""
Ensure that our expression string has variables of the form x_0, x_1,
... x_(N - 1), where N is the length of our inputs.
"""
variable_names, _unused = getExprNames(self._expr, {})
expr_indices = []
for name in variable_names:
if name == 'inf':
continue
match = _VARIABLE_NAME_RE.match(name)
if not match:
raise ValueError("%r is not a valid variable name" % name)
expr_indices.append(int(match.group(2)))
expr_indices.sort()
expected_indices = list(range(len(self.inputs)))
if expr_indices != expected_indices:
raise ValueError(
"Expected %s for variable indices, but got %s" % (
expected_indices, expr_indices,
)
)
super(NumericalExpression, self)._validate()
|
def function[_validate, parameter[self]]:
constant[
Ensure that our expression string has variables of the form x_0, x_1,
... x_(N - 1), where N is the length of our inputs.
]
<ast.Tuple object at 0x7da18bc72fe0> assign[=] call[name[getExprNames], parameter[name[self]._expr, dictionary[[], []]]]
variable[expr_indices] assign[=] list[[]]
for taget[name[name]] in starred[name[variable_names]] begin[:]
if compare[name[name] equal[==] constant[inf]] begin[:]
continue
variable[match] assign[=] call[name[_VARIABLE_NAME_RE].match, parameter[name[name]]]
if <ast.UnaryOp object at 0x7da18bc73af0> begin[:]
<ast.Raise object at 0x7da18bc73610>
call[name[expr_indices].append, parameter[call[name[int], parameter[call[name[match].group, parameter[constant[2]]]]]]]
call[name[expr_indices].sort, parameter[]]
variable[expected_indices] assign[=] call[name[list], parameter[call[name[range], parameter[call[name[len], parameter[name[self].inputs]]]]]]
if compare[name[expr_indices] not_equal[!=] name[expected_indices]] begin[:]
<ast.Raise object at 0x7da18bc722f0>
call[call[name[super], parameter[name[NumericalExpression], name[self]]]._validate, parameter[]]
|
keyword[def] identifier[_validate] ( identifier[self] ):
literal[string]
identifier[variable_names] , identifier[_unused] = identifier[getExprNames] ( identifier[self] . identifier[_expr] ,{})
identifier[expr_indices] =[]
keyword[for] identifier[name] keyword[in] identifier[variable_names] :
keyword[if] identifier[name] == literal[string] :
keyword[continue]
identifier[match] = identifier[_VARIABLE_NAME_RE] . identifier[match] ( identifier[name] )
keyword[if] keyword[not] identifier[match] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[name] )
identifier[expr_indices] . identifier[append] ( identifier[int] ( identifier[match] . identifier[group] ( literal[int] )))
identifier[expr_indices] . identifier[sort] ()
identifier[expected_indices] = identifier[list] ( identifier[range] ( identifier[len] ( identifier[self] . identifier[inputs] )))
keyword[if] identifier[expr_indices] != identifier[expected_indices] :
keyword[raise] identifier[ValueError] (
literal[string] %(
identifier[expected_indices] , identifier[expr_indices] ,
)
)
identifier[super] ( identifier[NumericalExpression] , identifier[self] ). identifier[_validate] ()
|
def _validate(self):
"""
Ensure that our expression string has variables of the form x_0, x_1,
... x_(N - 1), where N is the length of our inputs.
"""
(variable_names, _unused) = getExprNames(self._expr, {})
expr_indices = []
for name in variable_names:
if name == 'inf':
continue # depends on [control=['if'], data=[]]
match = _VARIABLE_NAME_RE.match(name)
if not match:
raise ValueError('%r is not a valid variable name' % name) # depends on [control=['if'], data=[]]
expr_indices.append(int(match.group(2))) # depends on [control=['for'], data=['name']]
expr_indices.sort()
expected_indices = list(range(len(self.inputs)))
if expr_indices != expected_indices:
raise ValueError('Expected %s for variable indices, but got %s' % (expected_indices, expr_indices)) # depends on [control=['if'], data=['expr_indices', 'expected_indices']]
super(NumericalExpression, self)._validate()
|
def kill_conditional_comments(self, doc):
"""
IE conditional comments basically embed HTML that the parser
doesn't normally see. We can't allow anything like that, so
we'll kill any comments that could be conditional.
"""
bad = []
self._kill_elements(
doc, lambda el: _conditional_comment_re.search(el.text),
etree.Comment)
|
def function[kill_conditional_comments, parameter[self, doc]]:
constant[
IE conditional comments basically embed HTML that the parser
doesn't normally see. We can't allow anything like that, so
we'll kill any comments that could be conditional.
]
variable[bad] assign[=] list[[]]
call[name[self]._kill_elements, parameter[name[doc], <ast.Lambda object at 0x7da18f811db0>, name[etree].Comment]]
|
keyword[def] identifier[kill_conditional_comments] ( identifier[self] , identifier[doc] ):
literal[string]
identifier[bad] =[]
identifier[self] . identifier[_kill_elements] (
identifier[doc] , keyword[lambda] identifier[el] : identifier[_conditional_comment_re] . identifier[search] ( identifier[el] . identifier[text] ),
identifier[etree] . identifier[Comment] )
|
def kill_conditional_comments(self, doc):
"""
IE conditional comments basically embed HTML that the parser
doesn't normally see. We can't allow anything like that, so
we'll kill any comments that could be conditional.
"""
bad = []
self._kill_elements(doc, lambda el: _conditional_comment_re.search(el.text), etree.Comment)
|
def _init_solc_binary(version):
"""Figure out solc binary and version.
Only proper versions are supported. No nightlies, commits etc (such as available in remix).
"""
if not version:
return os.environ.get("SOLC") or "solc"
# tried converting input to semver, seemed not necessary so just slicing for now
main_version = solc.main.get_solc_version_string()
main_version_number = re.match(r"\d+.\d+.\d+", main_version)
if main_version is None:
raise CriticalError(
"Could not extract solc version from string {}".format(main_version)
)
if version == main_version_number:
log.info("Given version matches installed version")
solc_binary = os.environ.get("SOLC") or "solc"
else:
solc_binary = util.solc_exists(version)
if solc_binary:
log.info("Given version is already installed")
else:
try:
solc.install_solc("v" + version)
solc_binary = util.solc_exists(version)
if not solc_binary:
raise SolcError()
except SolcError:
raise CriticalError(
"There was an error when trying to install the specified solc version"
)
log.info("Setting the compiler to %s", solc_binary)
return solc_binary
|
def function[_init_solc_binary, parameter[version]]:
constant[Figure out solc binary and version.
Only proper versions are supported. No nightlies, commits etc (such as available in remix).
]
if <ast.UnaryOp object at 0x7da1b1dde530> begin[:]
return[<ast.BoolOp object at 0x7da1b1ddd6c0>]
variable[main_version] assign[=] call[name[solc].main.get_solc_version_string, parameter[]]
variable[main_version_number] assign[=] call[name[re].match, parameter[constant[\d+.\d+.\d+], name[main_version]]]
if compare[name[main_version] is constant[None]] begin[:]
<ast.Raise object at 0x7da1b1ddc6d0>
if compare[name[version] equal[==] name[main_version_number]] begin[:]
call[name[log].info, parameter[constant[Given version matches installed version]]]
variable[solc_binary] assign[=] <ast.BoolOp object at 0x7da1b1ddeb60>
return[name[solc_binary]]
|
keyword[def] identifier[_init_solc_binary] ( identifier[version] ):
literal[string]
keyword[if] keyword[not] identifier[version] :
keyword[return] identifier[os] . identifier[environ] . identifier[get] ( literal[string] ) keyword[or] literal[string]
identifier[main_version] = identifier[solc] . identifier[main] . identifier[get_solc_version_string] ()
identifier[main_version_number] = identifier[re] . identifier[match] ( literal[string] , identifier[main_version] )
keyword[if] identifier[main_version] keyword[is] keyword[None] :
keyword[raise] identifier[CriticalError] (
literal[string] . identifier[format] ( identifier[main_version] )
)
keyword[if] identifier[version] == identifier[main_version_number] :
identifier[log] . identifier[info] ( literal[string] )
identifier[solc_binary] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] ) keyword[or] literal[string]
keyword[else] :
identifier[solc_binary] = identifier[util] . identifier[solc_exists] ( identifier[version] )
keyword[if] identifier[solc_binary] :
identifier[log] . identifier[info] ( literal[string] )
keyword[else] :
keyword[try] :
identifier[solc] . identifier[install_solc] ( literal[string] + identifier[version] )
identifier[solc_binary] = identifier[util] . identifier[solc_exists] ( identifier[version] )
keyword[if] keyword[not] identifier[solc_binary] :
keyword[raise] identifier[SolcError] ()
keyword[except] identifier[SolcError] :
keyword[raise] identifier[CriticalError] (
literal[string]
)
identifier[log] . identifier[info] ( literal[string] , identifier[solc_binary] )
keyword[return] identifier[solc_binary]
|
def _init_solc_binary(version):
"""Figure out solc binary and version.
Only proper versions are supported. No nightlies, commits etc (such as available in remix).
"""
if not version:
return os.environ.get('SOLC') or 'solc' # depends on [control=['if'], data=[]]
# tried converting input to semver, seemed not necessary so just slicing for now
main_version = solc.main.get_solc_version_string()
main_version_number = re.match('\\d+.\\d+.\\d+', main_version)
if main_version is None:
raise CriticalError('Could not extract solc version from string {}'.format(main_version)) # depends on [control=['if'], data=['main_version']]
if version == main_version_number:
log.info('Given version matches installed version')
solc_binary = os.environ.get('SOLC') or 'solc' # depends on [control=['if'], data=[]]
else:
solc_binary = util.solc_exists(version)
if solc_binary:
log.info('Given version is already installed') # depends on [control=['if'], data=[]]
else:
try:
solc.install_solc('v' + version)
solc_binary = util.solc_exists(version)
if not solc_binary:
raise SolcError() # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except SolcError:
raise CriticalError('There was an error when trying to install the specified solc version') # depends on [control=['except'], data=[]]
log.info('Setting the compiler to %s', solc_binary)
return solc_binary
|
def evaluate_model(filepath,
train_start=0, train_end=60000, test_start=0,
test_end=10000, batch_size=128,
testing=False, num_threads=None):
"""
Run evaluation on a saved model
:param filepath: path to model to evaluate
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param batch_size: size of evaluation batches
"""
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Set logging level to see debug information
set_log_level(logging.INFO)
# Create TF session
if num_threads:
config_args = dict(intra_op_parallelism_threads=1)
else:
config_args = {}
sess = tf.Session(config=tf.ConfigProto(**config_args))
# Get MNIST test data
mnist = MNIST(train_start=train_start, train_end=train_end,
test_start=test_start, test_end=test_end)
x_train, y_train = mnist.get_set('train')
x_test, y_test = mnist.get_set('test')
# Use Image Parameters
img_rows, img_cols, nchannels = x_train.shape[1:4]
nb_classes = y_train.shape[1]
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols,
nchannels))
y = tf.placeholder(tf.float32, shape=(None, nb_classes))
eval_params = {'batch_size': batch_size}
fgsm_params = {
'eps': 0.3,
'clip_min': 0.,
'clip_max': 1.
}
def do_eval(preds, x_set, y_set, report_key, is_adv=None):
acc = model_eval(sess, x, y, preds, x_set, y_set, args=eval_params)
if is_adv is None:
report_text = None
elif is_adv:
report_text = 'adversarial'
else:
report_text = 'legitimate'
if report_text:
print('Test accuracy on %s examples: %0.4f' % (report_text, acc))
with sess.as_default():
model = load(filepath)
assert len(model.get_params()) > 0
# Initialize the Fast Gradient Sign Method (FGSM) attack object and
# graph
fgsm = FastGradientMethod(model, sess=sess)
adv_x = fgsm.generate(x, **fgsm_params)
preds_adv = model.get_logits(adv_x)
preds = model.get_logits(x)
# Evaluate the accuracy of the MNIST model on adversarial examples
do_eval(preds, x_test, y_test, 'train_clean_train_clean_eval', False)
do_eval(preds_adv, x_test, y_test, 'clean_train_adv_eval', True)
|
def function[evaluate_model, parameter[filepath, train_start, train_end, test_start, test_end, batch_size, testing, num_threads]]:
constant[
Run evaluation on a saved model
:param filepath: path to model to evaluate
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param batch_size: size of evaluation batches
]
call[name[tf].set_random_seed, parameter[constant[1234]]]
call[name[set_log_level], parameter[name[logging].INFO]]
if name[num_threads] begin[:]
variable[config_args] assign[=] call[name[dict], parameter[]]
variable[sess] assign[=] call[name[tf].Session, parameter[]]
variable[mnist] assign[=] call[name[MNIST], parameter[]]
<ast.Tuple object at 0x7da204623df0> assign[=] call[name[mnist].get_set, parameter[constant[train]]]
<ast.Tuple object at 0x7da204620340> assign[=] call[name[mnist].get_set, parameter[constant[test]]]
<ast.Tuple object at 0x7da204623940> assign[=] call[name[x_train].shape][<ast.Slice object at 0x7da2046208e0>]
variable[nb_classes] assign[=] call[name[y_train].shape][constant[1]]
variable[x] assign[=] call[name[tf].placeholder, parameter[name[tf].float32]]
variable[y] assign[=] call[name[tf].placeholder, parameter[name[tf].float32]]
variable[eval_params] assign[=] dictionary[[<ast.Constant object at 0x7da204621480>], [<ast.Name object at 0x7da2046217e0>]]
variable[fgsm_params] assign[=] dictionary[[<ast.Constant object at 0x7da2046227d0>, <ast.Constant object at 0x7da204622ef0>, <ast.Constant object at 0x7da204621ba0>], [<ast.Constant object at 0x7da2046210c0>, <ast.Constant object at 0x7da204621180>, <ast.Constant object at 0x7da204620eb0>]]
def function[do_eval, parameter[preds, x_set, y_set, report_key, is_adv]]:
variable[acc] assign[=] call[name[model_eval], parameter[name[sess], name[x], name[y], name[preds], name[x_set], name[y_set]]]
if compare[name[is_adv] is constant[None]] begin[:]
variable[report_text] assign[=] constant[None]
if name[report_text] begin[:]
call[name[print], parameter[binary_operation[constant[Test accuracy on %s examples: %0.4f] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f813d00>, <ast.Name object at 0x7da18f812470>]]]]]
with call[name[sess].as_default, parameter[]] begin[:]
variable[model] assign[=] call[name[load], parameter[name[filepath]]]
assert[compare[call[name[len], parameter[call[name[model].get_params, parameter[]]]] greater[>] constant[0]]]
variable[fgsm] assign[=] call[name[FastGradientMethod], parameter[name[model]]]
variable[adv_x] assign[=] call[name[fgsm].generate, parameter[name[x]]]
variable[preds_adv] assign[=] call[name[model].get_logits, parameter[name[adv_x]]]
variable[preds] assign[=] call[name[model].get_logits, parameter[name[x]]]
call[name[do_eval], parameter[name[preds], name[x_test], name[y_test], constant[train_clean_train_clean_eval], constant[False]]]
call[name[do_eval], parameter[name[preds_adv], name[x_test], name[y_test], constant[clean_train_adv_eval], constant[True]]]
|
keyword[def] identifier[evaluate_model] ( identifier[filepath] ,
identifier[train_start] = literal[int] , identifier[train_end] = literal[int] , identifier[test_start] = literal[int] ,
identifier[test_end] = literal[int] , identifier[batch_size] = literal[int] ,
identifier[testing] = keyword[False] , identifier[num_threads] = keyword[None] ):
literal[string]
identifier[tf] . identifier[set_random_seed] ( literal[int] )
identifier[set_log_level] ( identifier[logging] . identifier[INFO] )
keyword[if] identifier[num_threads] :
identifier[config_args] = identifier[dict] ( identifier[intra_op_parallelism_threads] = literal[int] )
keyword[else] :
identifier[config_args] ={}
identifier[sess] = identifier[tf] . identifier[Session] ( identifier[config] = identifier[tf] . identifier[ConfigProto] (** identifier[config_args] ))
identifier[mnist] = identifier[MNIST] ( identifier[train_start] = identifier[train_start] , identifier[train_end] = identifier[train_end] ,
identifier[test_start] = identifier[test_start] , identifier[test_end] = identifier[test_end] )
identifier[x_train] , identifier[y_train] = identifier[mnist] . identifier[get_set] ( literal[string] )
identifier[x_test] , identifier[y_test] = identifier[mnist] . identifier[get_set] ( literal[string] )
identifier[img_rows] , identifier[img_cols] , identifier[nchannels] = identifier[x_train] . identifier[shape] [ literal[int] : literal[int] ]
identifier[nb_classes] = identifier[y_train] . identifier[shape] [ literal[int] ]
identifier[x] = identifier[tf] . identifier[placeholder] ( identifier[tf] . identifier[float32] , identifier[shape] =( keyword[None] , identifier[img_rows] , identifier[img_cols] ,
identifier[nchannels] ))
identifier[y] = identifier[tf] . identifier[placeholder] ( identifier[tf] . identifier[float32] , identifier[shape] =( keyword[None] , identifier[nb_classes] ))
identifier[eval_params] ={ literal[string] : identifier[batch_size] }
identifier[fgsm_params] ={
literal[string] : literal[int] ,
literal[string] : literal[int] ,
literal[string] : literal[int]
}
keyword[def] identifier[do_eval] ( identifier[preds] , identifier[x_set] , identifier[y_set] , identifier[report_key] , identifier[is_adv] = keyword[None] ):
identifier[acc] = identifier[model_eval] ( identifier[sess] , identifier[x] , identifier[y] , identifier[preds] , identifier[x_set] , identifier[y_set] , identifier[args] = identifier[eval_params] )
keyword[if] identifier[is_adv] keyword[is] keyword[None] :
identifier[report_text] = keyword[None]
keyword[elif] identifier[is_adv] :
identifier[report_text] = literal[string]
keyword[else] :
identifier[report_text] = literal[string]
keyword[if] identifier[report_text] :
identifier[print] ( literal[string] %( identifier[report_text] , identifier[acc] ))
keyword[with] identifier[sess] . identifier[as_default] ():
identifier[model] = identifier[load] ( identifier[filepath] )
keyword[assert] identifier[len] ( identifier[model] . identifier[get_params] ())> literal[int]
identifier[fgsm] = identifier[FastGradientMethod] ( identifier[model] , identifier[sess] = identifier[sess] )
identifier[adv_x] = identifier[fgsm] . identifier[generate] ( identifier[x] ,** identifier[fgsm_params] )
identifier[preds_adv] = identifier[model] . identifier[get_logits] ( identifier[adv_x] )
identifier[preds] = identifier[model] . identifier[get_logits] ( identifier[x] )
identifier[do_eval] ( identifier[preds] , identifier[x_test] , identifier[y_test] , literal[string] , keyword[False] )
identifier[do_eval] ( identifier[preds_adv] , identifier[x_test] , identifier[y_test] , literal[string] , keyword[True] )
|
def evaluate_model(filepath, train_start=0, train_end=60000, test_start=0, test_end=10000, batch_size=128, testing=False, num_threads=None):
"""
Run evaluation on a saved model
:param filepath: path to model to evaluate
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:param batch_size: size of evaluation batches
"""
# Set TF random seed to improve reproducibility
tf.set_random_seed(1234)
# Set logging level to see debug information
set_log_level(logging.INFO)
# Create TF session
if num_threads:
config_args = dict(intra_op_parallelism_threads=1) # depends on [control=['if'], data=[]]
else:
config_args = {}
sess = tf.Session(config=tf.ConfigProto(**config_args))
# Get MNIST test data
mnist = MNIST(train_start=train_start, train_end=train_end, test_start=test_start, test_end=test_end)
(x_train, y_train) = mnist.get_set('train')
(x_test, y_test) = mnist.get_set('test')
# Use Image Parameters
(img_rows, img_cols, nchannels) = x_train.shape[1:4]
nb_classes = y_train.shape[1]
# Define input TF placeholder
x = tf.placeholder(tf.float32, shape=(None, img_rows, img_cols, nchannels))
y = tf.placeholder(tf.float32, shape=(None, nb_classes))
eval_params = {'batch_size': batch_size}
fgsm_params = {'eps': 0.3, 'clip_min': 0.0, 'clip_max': 1.0}
def do_eval(preds, x_set, y_set, report_key, is_adv=None):
acc = model_eval(sess, x, y, preds, x_set, y_set, args=eval_params)
if is_adv is None:
report_text = None # depends on [control=['if'], data=[]]
elif is_adv:
report_text = 'adversarial' # depends on [control=['if'], data=[]]
else:
report_text = 'legitimate'
if report_text:
print('Test accuracy on %s examples: %0.4f' % (report_text, acc)) # depends on [control=['if'], data=[]]
with sess.as_default():
model = load(filepath) # depends on [control=['with'], data=[]]
assert len(model.get_params()) > 0
# Initialize the Fast Gradient Sign Method (FGSM) attack object and
# graph
fgsm = FastGradientMethod(model, sess=sess)
adv_x = fgsm.generate(x, **fgsm_params)
preds_adv = model.get_logits(adv_x)
preds = model.get_logits(x)
# Evaluate the accuracy of the MNIST model on adversarial examples
do_eval(preds, x_test, y_test, 'train_clean_train_clean_eval', False)
do_eval(preds_adv, x_test, y_test, 'clean_train_adv_eval', True)
|
def create(cls, account_id, resource_id, action, timestamp, metrics):
""" Set properties for an enforcement action"""
enforcement = Enforcements()
enforcement.account_id = account_id
enforcement.resource_id = resource_id
enforcement.action = action
enforcement.timestamp = timestamp
enforcement.metrics = metrics
try:
db.session.add(enforcement)
except SQLAlchemyError as e:
logging.error('Could not add enforcement entry to database. {}'.format(e))
|
def function[create, parameter[cls, account_id, resource_id, action, timestamp, metrics]]:
constant[ Set properties for an enforcement action]
variable[enforcement] assign[=] call[name[Enforcements], parameter[]]
name[enforcement].account_id assign[=] name[account_id]
name[enforcement].resource_id assign[=] name[resource_id]
name[enforcement].action assign[=] name[action]
name[enforcement].timestamp assign[=] name[timestamp]
name[enforcement].metrics assign[=] name[metrics]
<ast.Try object at 0x7da1b20f8c40>
|
keyword[def] identifier[create] ( identifier[cls] , identifier[account_id] , identifier[resource_id] , identifier[action] , identifier[timestamp] , identifier[metrics] ):
literal[string]
identifier[enforcement] = identifier[Enforcements] ()
identifier[enforcement] . identifier[account_id] = identifier[account_id]
identifier[enforcement] . identifier[resource_id] = identifier[resource_id]
identifier[enforcement] . identifier[action] = identifier[action]
identifier[enforcement] . identifier[timestamp] = identifier[timestamp]
identifier[enforcement] . identifier[metrics] = identifier[metrics]
keyword[try] :
identifier[db] . identifier[session] . identifier[add] ( identifier[enforcement] )
keyword[except] identifier[SQLAlchemyError] keyword[as] identifier[e] :
identifier[logging] . identifier[error] ( literal[string] . identifier[format] ( identifier[e] ))
|
def create(cls, account_id, resource_id, action, timestamp, metrics):
""" Set properties for an enforcement action"""
enforcement = Enforcements()
enforcement.account_id = account_id
enforcement.resource_id = resource_id
enforcement.action = action
enforcement.timestamp = timestamp
enforcement.metrics = metrics
try:
db.session.add(enforcement) # depends on [control=['try'], data=[]]
except SQLAlchemyError as e:
logging.error('Could not add enforcement entry to database. {}'.format(e)) # depends on [control=['except'], data=['e']]
|
def _update_from_database(self):
"""Updates map to latest state in database.
Should be called prior to major object events to assure that an
assessment being taken on multiple devices are reasonably synchronized.
"""
collection = JSONClientValidated('assessment',
collection='AssessmentSection',
runtime=self._runtime)
self._my_map = collection.find_one({'_id': self._my_map['_id']})
|
def function[_update_from_database, parameter[self]]:
constant[Updates map to latest state in database.
Should be called prior to major object events to assure that an
assessment being taken on multiple devices are reasonably synchronized.
]
variable[collection] assign[=] call[name[JSONClientValidated], parameter[constant[assessment]]]
name[self]._my_map assign[=] call[name[collection].find_one, parameter[dictionary[[<ast.Constant object at 0x7da20c992e30>], [<ast.Subscript object at 0x7da20c9918d0>]]]]
|
keyword[def] identifier[_update_from_database] ( identifier[self] ):
literal[string]
identifier[collection] = identifier[JSONClientValidated] ( literal[string] ,
identifier[collection] = literal[string] ,
identifier[runtime] = identifier[self] . identifier[_runtime] )
identifier[self] . identifier[_my_map] = identifier[collection] . identifier[find_one] ({ literal[string] : identifier[self] . identifier[_my_map] [ literal[string] ]})
|
def _update_from_database(self):
"""Updates map to latest state in database.
Should be called prior to major object events to assure that an
assessment being taken on multiple devices are reasonably synchronized.
"""
collection = JSONClientValidated('assessment', collection='AssessmentSection', runtime=self._runtime)
self._my_map = collection.find_one({'_id': self._my_map['_id']})
|
def layout(args):
"""
%prog layout query.subject.simple query.seqids subject.seqids
Compute optimal seqids order in a second genome, based on seqids on one
genome, given the pairwise blocks in .simple format.
"""
from jcvi.algorithms.ec import GA_setup, GA_run
p = OptionParser(layout.__doc__)
p.set_beds()
p.set_cpus(cpus=32)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
simplefile, qseqids, sseqids = args
qbed, sbed, qorder, sorder, is_self = check_beds(simplefile, p, opts)
qseqids = qseqids.strip().split(",")
sseqids = sseqids.strip().split(",")
qseqids_ii = dict((s, i) for i, s in enumerate(qseqids))
sseqids_ii = dict((s, i) for i, s in enumerate(sseqids))
blocks = SimpleFile(simplefile).blocks
scores = defaultdict(int)
for a, b, c, d, score, orientation, hl in blocks:
qi, q = qorder[a]
si, s = sorder[c]
qseqid, sseqid = q.seqid, s.seqid
if sseqid not in sseqids:
continue
scores[sseqids_ii[sseqid], qseqid] += score
data = []
for (a, b), score in sorted(scores.items()):
if b not in qseqids_ii:
continue
data.append((qseqids_ii[b], score))
tour = range(len(qseqids))
toolbox = GA_setup(tour)
toolbox.register("evaluate", colinear_evaluate_weights, data=data)
tour, fitness = GA_run(toolbox, ngen=100, npop=100, cpus=opts.cpus)
tour = [qseqids[x] for x in tour]
print(",".join(tour))
|
def function[layout, parameter[args]]:
constant[
%prog layout query.subject.simple query.seqids subject.seqids
Compute optimal seqids order in a second genome, based on seqids on one
genome, given the pairwise blocks in .simple format.
]
from relative_module[jcvi.algorithms.ec] import module[GA_setup], module[GA_run]
variable[p] assign[=] call[name[OptionParser], parameter[name[layout].__doc__]]
call[name[p].set_beds, parameter[]]
call[name[p].set_cpus, parameter[]]
<ast.Tuple object at 0x7da20c6e4d60> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[3]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da20c6e5d50>]]
<ast.Tuple object at 0x7da20c6e5c30> assign[=] name[args]
<ast.Tuple object at 0x7da20c6e54e0> assign[=] call[name[check_beds], parameter[name[simplefile], name[p], name[opts]]]
variable[qseqids] assign[=] call[call[name[qseqids].strip, parameter[]].split, parameter[constant[,]]]
variable[sseqids] assign[=] call[call[name[sseqids].strip, parameter[]].split, parameter[constant[,]]]
variable[qseqids_ii] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da20c6e7d30>]]
variable[sseqids_ii] assign[=] call[name[dict], parameter[<ast.GeneratorExp object at 0x7da20c6e4fa0>]]
variable[blocks] assign[=] call[name[SimpleFile], parameter[name[simplefile]]].blocks
variable[scores] assign[=] call[name[defaultdict], parameter[name[int]]]
for taget[tuple[[<ast.Name object at 0x7da1b084ee60>, <ast.Name object at 0x7da1b084d930>, <ast.Name object at 0x7da1b084ea10>, <ast.Name object at 0x7da1b084e1a0>, <ast.Name object at 0x7da1b084ea70>, <ast.Name object at 0x7da1b084e0e0>, <ast.Name object at 0x7da1b084ee90>]]] in starred[name[blocks]] begin[:]
<ast.Tuple object at 0x7da1b084eb60> assign[=] call[name[qorder]][name[a]]
<ast.Tuple object at 0x7da1b084eb00> assign[=] call[name[sorder]][name[c]]
<ast.Tuple object at 0x7da1b084eb90> assign[=] tuple[[<ast.Attribute object at 0x7da1b084d6c0>, <ast.Attribute object at 0x7da1b084e200>]]
if compare[name[sseqid] <ast.NotIn object at 0x7da2590d7190> name[sseqids]] begin[:]
continue
<ast.AugAssign object at 0x7da1b084dba0>
variable[data] assign[=] list[[]]
for taget[tuple[[<ast.Tuple object at 0x7da1b084d7e0>, <ast.Name object at 0x7da1b084ed40>]]] in starred[call[name[sorted], parameter[call[name[scores].items, parameter[]]]]] begin[:]
if compare[name[b] <ast.NotIn object at 0x7da2590d7190> name[qseqids_ii]] begin[:]
continue
call[name[data].append, parameter[tuple[[<ast.Subscript object at 0x7da1b0854370>, <ast.Name object at 0x7da1b0854940>]]]]
variable[tour] assign[=] call[name[range], parameter[call[name[len], parameter[name[qseqids]]]]]
variable[toolbox] assign[=] call[name[GA_setup], parameter[name[tour]]]
call[name[toolbox].register, parameter[constant[evaluate], name[colinear_evaluate_weights]]]
<ast.Tuple object at 0x7da1b08540a0> assign[=] call[name[GA_run], parameter[name[toolbox]]]
variable[tour] assign[=] <ast.ListComp object at 0x7da1b08545b0>
call[name[print], parameter[call[constant[,].join, parameter[name[tour]]]]]
|
keyword[def] identifier[layout] ( identifier[args] ):
literal[string]
keyword[from] identifier[jcvi] . identifier[algorithms] . identifier[ec] keyword[import] identifier[GA_setup] , identifier[GA_run]
identifier[p] = identifier[OptionParser] ( identifier[layout] . identifier[__doc__] )
identifier[p] . identifier[set_beds] ()
identifier[p] . identifier[set_cpus] ( identifier[cpus] = literal[int] )
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[simplefile] , identifier[qseqids] , identifier[sseqids] = identifier[args]
identifier[qbed] , identifier[sbed] , identifier[qorder] , identifier[sorder] , identifier[is_self] = identifier[check_beds] ( identifier[simplefile] , identifier[p] , identifier[opts] )
identifier[qseqids] = identifier[qseqids] . identifier[strip] (). identifier[split] ( literal[string] )
identifier[sseqids] = identifier[sseqids] . identifier[strip] (). identifier[split] ( literal[string] )
identifier[qseqids_ii] = identifier[dict] (( identifier[s] , identifier[i] ) keyword[for] identifier[i] , identifier[s] keyword[in] identifier[enumerate] ( identifier[qseqids] ))
identifier[sseqids_ii] = identifier[dict] (( identifier[s] , identifier[i] ) keyword[for] identifier[i] , identifier[s] keyword[in] identifier[enumerate] ( identifier[sseqids] ))
identifier[blocks] = identifier[SimpleFile] ( identifier[simplefile] ). identifier[blocks]
identifier[scores] = identifier[defaultdict] ( identifier[int] )
keyword[for] identifier[a] , identifier[b] , identifier[c] , identifier[d] , identifier[score] , identifier[orientation] , identifier[hl] keyword[in] identifier[blocks] :
identifier[qi] , identifier[q] = identifier[qorder] [ identifier[a] ]
identifier[si] , identifier[s] = identifier[sorder] [ identifier[c] ]
identifier[qseqid] , identifier[sseqid] = identifier[q] . identifier[seqid] , identifier[s] . identifier[seqid]
keyword[if] identifier[sseqid] keyword[not] keyword[in] identifier[sseqids] :
keyword[continue]
identifier[scores] [ identifier[sseqids_ii] [ identifier[sseqid] ], identifier[qseqid] ]+= identifier[score]
identifier[data] =[]
keyword[for] ( identifier[a] , identifier[b] ), identifier[score] keyword[in] identifier[sorted] ( identifier[scores] . identifier[items] ()):
keyword[if] identifier[b] keyword[not] keyword[in] identifier[qseqids_ii] :
keyword[continue]
identifier[data] . identifier[append] (( identifier[qseqids_ii] [ identifier[b] ], identifier[score] ))
identifier[tour] = identifier[range] ( identifier[len] ( identifier[qseqids] ))
identifier[toolbox] = identifier[GA_setup] ( identifier[tour] )
identifier[toolbox] . identifier[register] ( literal[string] , identifier[colinear_evaluate_weights] , identifier[data] = identifier[data] )
identifier[tour] , identifier[fitness] = identifier[GA_run] ( identifier[toolbox] , identifier[ngen] = literal[int] , identifier[npop] = literal[int] , identifier[cpus] = identifier[opts] . identifier[cpus] )
identifier[tour] =[ identifier[qseqids] [ identifier[x] ] keyword[for] identifier[x] keyword[in] identifier[tour] ]
identifier[print] ( literal[string] . identifier[join] ( identifier[tour] ))
|
def layout(args):
"""
%prog layout query.subject.simple query.seqids subject.seqids
Compute optimal seqids order in a second genome, based on seqids on one
genome, given the pairwise blocks in .simple format.
"""
from jcvi.algorithms.ec import GA_setup, GA_run
p = OptionParser(layout.__doc__)
p.set_beds()
p.set_cpus(cpus=32)
(opts, args) = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(simplefile, qseqids, sseqids) = args
(qbed, sbed, qorder, sorder, is_self) = check_beds(simplefile, p, opts)
qseqids = qseqids.strip().split(',')
sseqids = sseqids.strip().split(',')
qseqids_ii = dict(((s, i) for (i, s) in enumerate(qseqids)))
sseqids_ii = dict(((s, i) for (i, s) in enumerate(sseqids)))
blocks = SimpleFile(simplefile).blocks
scores = defaultdict(int)
for (a, b, c, d, score, orientation, hl) in blocks:
(qi, q) = qorder[a]
(si, s) = sorder[c]
(qseqid, sseqid) = (q.seqid, s.seqid)
if sseqid not in sseqids:
continue # depends on [control=['if'], data=[]]
scores[sseqids_ii[sseqid], qseqid] += score # depends on [control=['for'], data=[]]
data = []
for ((a, b), score) in sorted(scores.items()):
if b not in qseqids_ii:
continue # depends on [control=['if'], data=[]]
data.append((qseqids_ii[b], score)) # depends on [control=['for'], data=[]]
tour = range(len(qseqids))
toolbox = GA_setup(tour)
toolbox.register('evaluate', colinear_evaluate_weights, data=data)
(tour, fitness) = GA_run(toolbox, ngen=100, npop=100, cpus=opts.cpus)
tour = [qseqids[x] for x in tour]
print(','.join(tour))
|
def prepare(self, strict=True):
""" preparation for loaded json
:param bool strict: when in strict mode, exception would be raised if not valid.
"""
self.__root = self.prepare_obj(self.raw, self.__url)
self.validate(strict=strict)
if hasattr(self.__root, 'schemes') and self.__root.schemes:
if len(self.__root.schemes) > 0:
self.__schemes = self.__root.schemes
else:
# extract schemes from the url to load spec
self.__schemes = [six.moves.urlparse(self.__url).schemes]
s = Scanner(self)
s.scan(root=self.__root, route=[Merge()])
s.scan(root=self.__root, route=[PatchObject()])
s.scan(root=self.__root, route=[Aggregate()])
# reducer for Operation
tr = TypeReduce(self.__sep)
cy = CycleDetector()
s.scan(root=self.__root, route=[tr, cy])
# 'op' -- shortcut for Operation with tag and operaionId
self.__op = utils.ScopeDict(tr.op)
# 'm' -- shortcut for model in Swagger 1.2
if hasattr(self.__root, 'definitions') and self.__root.definitions != None:
self.__m = utils.ScopeDict(self.__root.definitions)
else:
self.__m = utils.ScopeDict({})
# update scope-separater
self.__m.sep = self.__sep
self.__op.sep = self.__sep
# cycle detection
if len(cy.cycles['schema']) > 0 and strict:
raise errs.CycleDetectionError('Cycles detected in Schema Object: {0}'.format(cy.cycles['schema']))
|
def function[prepare, parameter[self, strict]]:
constant[ preparation for loaded json
:param bool strict: when in strict mode, exception would be raised if not valid.
]
name[self].__root assign[=] call[name[self].prepare_obj, parameter[name[self].raw, name[self].__url]]
call[name[self].validate, parameter[]]
if <ast.BoolOp object at 0x7da20c6aa590> begin[:]
if compare[call[name[len], parameter[name[self].__root.schemes]] greater[>] constant[0]] begin[:]
name[self].__schemes assign[=] name[self].__root.schemes
variable[s] assign[=] call[name[Scanner], parameter[name[self]]]
call[name[s].scan, parameter[]]
call[name[s].scan, parameter[]]
call[name[s].scan, parameter[]]
variable[tr] assign[=] call[name[TypeReduce], parameter[name[self].__sep]]
variable[cy] assign[=] call[name[CycleDetector], parameter[]]
call[name[s].scan, parameter[]]
name[self].__op assign[=] call[name[utils].ScopeDict, parameter[name[tr].op]]
if <ast.BoolOp object at 0x7da1b26acf40> begin[:]
name[self].__m assign[=] call[name[utils].ScopeDict, parameter[name[self].__root.definitions]]
name[self].__m.sep assign[=] name[self].__sep
name[self].__op.sep assign[=] name[self].__sep
if <ast.BoolOp object at 0x7da1b26afdf0> begin[:]
<ast.Raise object at 0x7da1b26ac3d0>
|
keyword[def] identifier[prepare] ( identifier[self] , identifier[strict] = keyword[True] ):
literal[string]
identifier[self] . identifier[__root] = identifier[self] . identifier[prepare_obj] ( identifier[self] . identifier[raw] , identifier[self] . identifier[__url] )
identifier[self] . identifier[validate] ( identifier[strict] = identifier[strict] )
keyword[if] identifier[hasattr] ( identifier[self] . identifier[__root] , literal[string] ) keyword[and] identifier[self] . identifier[__root] . identifier[schemes] :
keyword[if] identifier[len] ( identifier[self] . identifier[__root] . identifier[schemes] )> literal[int] :
identifier[self] . identifier[__schemes] = identifier[self] . identifier[__root] . identifier[schemes]
keyword[else] :
identifier[self] . identifier[__schemes] =[ identifier[six] . identifier[moves] . identifier[urlparse] ( identifier[self] . identifier[__url] ). identifier[schemes] ]
identifier[s] = identifier[Scanner] ( identifier[self] )
identifier[s] . identifier[scan] ( identifier[root] = identifier[self] . identifier[__root] , identifier[route] =[ identifier[Merge] ()])
identifier[s] . identifier[scan] ( identifier[root] = identifier[self] . identifier[__root] , identifier[route] =[ identifier[PatchObject] ()])
identifier[s] . identifier[scan] ( identifier[root] = identifier[self] . identifier[__root] , identifier[route] =[ identifier[Aggregate] ()])
identifier[tr] = identifier[TypeReduce] ( identifier[self] . identifier[__sep] )
identifier[cy] = identifier[CycleDetector] ()
identifier[s] . identifier[scan] ( identifier[root] = identifier[self] . identifier[__root] , identifier[route] =[ identifier[tr] , identifier[cy] ])
identifier[self] . identifier[__op] = identifier[utils] . identifier[ScopeDict] ( identifier[tr] . identifier[op] )
keyword[if] identifier[hasattr] ( identifier[self] . identifier[__root] , literal[string] ) keyword[and] identifier[self] . identifier[__root] . identifier[definitions] != keyword[None] :
identifier[self] . identifier[__m] = identifier[utils] . identifier[ScopeDict] ( identifier[self] . identifier[__root] . identifier[definitions] )
keyword[else] :
identifier[self] . identifier[__m] = identifier[utils] . identifier[ScopeDict] ({})
identifier[self] . identifier[__m] . identifier[sep] = identifier[self] . identifier[__sep]
identifier[self] . identifier[__op] . identifier[sep] = identifier[self] . identifier[__sep]
keyword[if] identifier[len] ( identifier[cy] . identifier[cycles] [ literal[string] ])> literal[int] keyword[and] identifier[strict] :
keyword[raise] identifier[errs] . identifier[CycleDetectionError] ( literal[string] . identifier[format] ( identifier[cy] . identifier[cycles] [ literal[string] ]))
|
def prepare(self, strict=True):
""" preparation for loaded json
:param bool strict: when in strict mode, exception would be raised if not valid.
"""
self.__root = self.prepare_obj(self.raw, self.__url)
self.validate(strict=strict)
if hasattr(self.__root, 'schemes') and self.__root.schemes:
if len(self.__root.schemes) > 0:
self.__schemes = self.__root.schemes # depends on [control=['if'], data=[]]
else:
# extract schemes from the url to load spec
self.__schemes = [six.moves.urlparse(self.__url).schemes] # depends on [control=['if'], data=[]]
s = Scanner(self)
s.scan(root=self.__root, route=[Merge()])
s.scan(root=self.__root, route=[PatchObject()])
s.scan(root=self.__root, route=[Aggregate()])
# reducer for Operation
tr = TypeReduce(self.__sep)
cy = CycleDetector()
s.scan(root=self.__root, route=[tr, cy])
# 'op' -- shortcut for Operation with tag and operaionId
self.__op = utils.ScopeDict(tr.op)
# 'm' -- shortcut for model in Swagger 1.2
if hasattr(self.__root, 'definitions') and self.__root.definitions != None:
self.__m = utils.ScopeDict(self.__root.definitions) # depends on [control=['if'], data=[]]
else:
self.__m = utils.ScopeDict({})
# update scope-separater
self.__m.sep = self.__sep
self.__op.sep = self.__sep
# cycle detection
if len(cy.cycles['schema']) > 0 and strict:
raise errs.CycleDetectionError('Cycles detected in Schema Object: {0}'.format(cy.cycles['schema'])) # depends on [control=['if'], data=[]]
|
def muteThreadMentions(self, mute=True, thread_id=None):
"""
Mutes thread mentions
:param mute: Boolean. True to mute, False to unmute
:param thread_id: User/Group ID to mute. See :ref:`intro_threads`
"""
thread_id, thread_type = self._getThread(thread_id, None)
data = {"mentions_mute_mode": int(mute), "thread_fbid": thread_id}
r = self._post(self.req_url.MUTE_MENTIONS, data, fix_request=True)
|
def function[muteThreadMentions, parameter[self, mute, thread_id]]:
constant[
Mutes thread mentions
:param mute: Boolean. True to mute, False to unmute
:param thread_id: User/Group ID to mute. See :ref:`intro_threads`
]
<ast.Tuple object at 0x7da1b188d4b0> assign[=] call[name[self]._getThread, parameter[name[thread_id], constant[None]]]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da1b188c1c0>, <ast.Constant object at 0x7da1b188d0f0>], [<ast.Call object at 0x7da1b188d9f0>, <ast.Name object at 0x7da1b188c6a0>]]
variable[r] assign[=] call[name[self]._post, parameter[name[self].req_url.MUTE_MENTIONS, name[data]]]
|
keyword[def] identifier[muteThreadMentions] ( identifier[self] , identifier[mute] = keyword[True] , identifier[thread_id] = keyword[None] ):
literal[string]
identifier[thread_id] , identifier[thread_type] = identifier[self] . identifier[_getThread] ( identifier[thread_id] , keyword[None] )
identifier[data] ={ literal[string] : identifier[int] ( identifier[mute] ), literal[string] : identifier[thread_id] }
identifier[r] = identifier[self] . identifier[_post] ( identifier[self] . identifier[req_url] . identifier[MUTE_MENTIONS] , identifier[data] , identifier[fix_request] = keyword[True] )
|
def muteThreadMentions(self, mute=True, thread_id=None):
"""
Mutes thread mentions
:param mute: Boolean. True to mute, False to unmute
:param thread_id: User/Group ID to mute. See :ref:`intro_threads`
"""
(thread_id, thread_type) = self._getThread(thread_id, None)
data = {'mentions_mute_mode': int(mute), 'thread_fbid': thread_id}
r = self._post(self.req_url.MUTE_MENTIONS, data, fix_request=True)
|
def add_node(self, node_id, name, labels):
"""Add the node with name and labels.
Args:
node_id: Id for the node.
name: Name for the node.
labels: Label for the node.
Raises:
NotImplementedError: When adding labels is not supported.
"""
node = self.graph_db.get_or_create_indexed_node('Node', 'node_id', node_id, {'node_id': node_id, 'name': name})
try:
node.add_labels(*labels)
except NotImplementedError:
pass
|
def function[add_node, parameter[self, node_id, name, labels]]:
constant[Add the node with name and labels.
Args:
node_id: Id for the node.
name: Name for the node.
labels: Label for the node.
Raises:
NotImplementedError: When adding labels is not supported.
]
variable[node] assign[=] call[name[self].graph_db.get_or_create_indexed_node, parameter[constant[Node], constant[node_id], name[node_id], dictionary[[<ast.Constant object at 0x7da18bc70490>, <ast.Constant object at 0x7da18bc71ed0>], [<ast.Name object at 0x7da18bc70ee0>, <ast.Name object at 0x7da18bc73d60>]]]]
<ast.Try object at 0x7da18bc713f0>
|
keyword[def] identifier[add_node] ( identifier[self] , identifier[node_id] , identifier[name] , identifier[labels] ):
literal[string]
identifier[node] = identifier[self] . identifier[graph_db] . identifier[get_or_create_indexed_node] ( literal[string] , literal[string] , identifier[node_id] ,{ literal[string] : identifier[node_id] , literal[string] : identifier[name] })
keyword[try] :
identifier[node] . identifier[add_labels] (* identifier[labels] )
keyword[except] identifier[NotImplementedError] :
keyword[pass]
|
def add_node(self, node_id, name, labels):
"""Add the node with name and labels.
Args:
node_id: Id for the node.
name: Name for the node.
labels: Label for the node.
Raises:
NotImplementedError: When adding labels is not supported.
"""
node = self.graph_db.get_or_create_indexed_node('Node', 'node_id', node_id, {'node_id': node_id, 'name': name})
try:
node.add_labels(*labels) # depends on [control=['try'], data=[]]
except NotImplementedError:
pass # depends on [control=['except'], data=[]]
|
def _get_lattice_parameters(lattice):
"""Return basis vector lengths
Parameters
----------
lattice : array_like
Basis vectors given as column vectors
shape=(3, 3), dtype='double'
Returns
-------
ndarray, shape=(3,), dtype='double'
"""
return np.array(np.sqrt(np.dot(lattice.T, lattice).diagonal()),
dtype='double')
|
def function[_get_lattice_parameters, parameter[lattice]]:
constant[Return basis vector lengths
Parameters
----------
lattice : array_like
Basis vectors given as column vectors
shape=(3, 3), dtype='double'
Returns
-------
ndarray, shape=(3,), dtype='double'
]
return[call[name[np].array, parameter[call[name[np].sqrt, parameter[call[call[name[np].dot, parameter[name[lattice].T, name[lattice]]].diagonal, parameter[]]]]]]]
|
keyword[def] identifier[_get_lattice_parameters] ( identifier[lattice] ):
literal[string]
keyword[return] identifier[np] . identifier[array] ( identifier[np] . identifier[sqrt] ( identifier[np] . identifier[dot] ( identifier[lattice] . identifier[T] , identifier[lattice] ). identifier[diagonal] ()),
identifier[dtype] = literal[string] )
|
def _get_lattice_parameters(lattice):
"""Return basis vector lengths
Parameters
----------
lattice : array_like
Basis vectors given as column vectors
shape=(3, 3), dtype='double'
Returns
-------
ndarray, shape=(3,), dtype='double'
"""
return np.array(np.sqrt(np.dot(lattice.T, lattice).diagonal()), dtype='double')
|
def scale_pixels(color, layer):
"""Scales the pixel to the virtual pixelmap."""
pixelmap = []
# Scaling the pixel offsets.
for pix_x in range(MAX_X + 1):
for pix_y in range(MAX_Y + 1):
# Horizontal pixels
y1 = pix_y * dotsize[0]
x1 = pix_x * dotsize[1]
# Vertical pixels
y2 = pix_y * dotsize[0] + (dotsize[0] - 1)
x2 = pix_x * dotsize[1] + (dotsize[1] - 1)
if (y1 <= MAX_Y) and (y2 <= MAX_Y):
if (x1 <= MAX_X) and (x2 <= MAX_X):
if (pix_x, pix_y) in layer:
pixelmap.append([(y1, x1), (y2, x2), color])
return pixelmap
|
def function[scale_pixels, parameter[color, layer]]:
constant[Scales the pixel to the virtual pixelmap.]
variable[pixelmap] assign[=] list[[]]
for taget[name[pix_x]] in starred[call[name[range], parameter[binary_operation[name[MAX_X] + constant[1]]]]] begin[:]
for taget[name[pix_y]] in starred[call[name[range], parameter[binary_operation[name[MAX_Y] + constant[1]]]]] begin[:]
variable[y1] assign[=] binary_operation[name[pix_y] * call[name[dotsize]][constant[0]]]
variable[x1] assign[=] binary_operation[name[pix_x] * call[name[dotsize]][constant[1]]]
variable[y2] assign[=] binary_operation[binary_operation[name[pix_y] * call[name[dotsize]][constant[0]]] + binary_operation[call[name[dotsize]][constant[0]] - constant[1]]]
variable[x2] assign[=] binary_operation[binary_operation[name[pix_x] * call[name[dotsize]][constant[1]]] + binary_operation[call[name[dotsize]][constant[1]] - constant[1]]]
if <ast.BoolOp object at 0x7da20c7c8160> begin[:]
if <ast.BoolOp object at 0x7da20c7cb880> begin[:]
if compare[tuple[[<ast.Name object at 0x7da20c7cb100>, <ast.Name object at 0x7da20c7cbe50>]] in name[layer]] begin[:]
call[name[pixelmap].append, parameter[list[[<ast.Tuple object at 0x7da20c7c86d0>, <ast.Tuple object at 0x7da20c7ca800>, <ast.Name object at 0x7da20c7c9300>]]]]
return[name[pixelmap]]
|
keyword[def] identifier[scale_pixels] ( identifier[color] , identifier[layer] ):
literal[string]
identifier[pixelmap] =[]
keyword[for] identifier[pix_x] keyword[in] identifier[range] ( identifier[MAX_X] + literal[int] ):
keyword[for] identifier[pix_y] keyword[in] identifier[range] ( identifier[MAX_Y] + literal[int] ):
identifier[y1] = identifier[pix_y] * identifier[dotsize] [ literal[int] ]
identifier[x1] = identifier[pix_x] * identifier[dotsize] [ literal[int] ]
identifier[y2] = identifier[pix_y] * identifier[dotsize] [ literal[int] ]+( identifier[dotsize] [ literal[int] ]- literal[int] )
identifier[x2] = identifier[pix_x] * identifier[dotsize] [ literal[int] ]+( identifier[dotsize] [ literal[int] ]- literal[int] )
keyword[if] ( identifier[y1] <= identifier[MAX_Y] ) keyword[and] ( identifier[y2] <= identifier[MAX_Y] ):
keyword[if] ( identifier[x1] <= identifier[MAX_X] ) keyword[and] ( identifier[x2] <= identifier[MAX_X] ):
keyword[if] ( identifier[pix_x] , identifier[pix_y] ) keyword[in] identifier[layer] :
identifier[pixelmap] . identifier[append] ([( identifier[y1] , identifier[x1] ),( identifier[y2] , identifier[x2] ), identifier[color] ])
keyword[return] identifier[pixelmap]
|
def scale_pixels(color, layer):
"""Scales the pixel to the virtual pixelmap."""
pixelmap = []
# Scaling the pixel offsets.
for pix_x in range(MAX_X + 1):
for pix_y in range(MAX_Y + 1):
# Horizontal pixels
y1 = pix_y * dotsize[0]
x1 = pix_x * dotsize[1]
# Vertical pixels
y2 = pix_y * dotsize[0] + (dotsize[0] - 1)
x2 = pix_x * dotsize[1] + (dotsize[1] - 1)
if y1 <= MAX_Y and y2 <= MAX_Y:
if x1 <= MAX_X and x2 <= MAX_X:
if (pix_x, pix_y) in layer:
pixelmap.append([(y1, x1), (y2, x2), color]) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pix_y']] # depends on [control=['for'], data=['pix_x']]
return pixelmap
|
def _get_nodal_planes_from_ndk_string(self, ndk_string):
"""
Reads the nodal plane information (represented by 5th line [57:] of the
tensor representation) and returns an instance of the GCMTNodalPlanes
class
"""
planes = GCMTNodalPlanes()
planes.nodal_plane_1 = {'strike': float(ndk_string[0:3]),
'dip': float(ndk_string[3:6]),
'rake': float(ndk_string[6:11])}
planes.nodal_plane_2 = {'strike': float(ndk_string[11:15]),
'dip': float(ndk_string[15:18]),
'rake': float(ndk_string[18:])}
return planes
|
def function[_get_nodal_planes_from_ndk_string, parameter[self, ndk_string]]:
constant[
Reads the nodal plane information (represented by 5th line [57:] of the
tensor representation) and returns an instance of the GCMTNodalPlanes
class
]
variable[planes] assign[=] call[name[GCMTNodalPlanes], parameter[]]
name[planes].nodal_plane_1 assign[=] dictionary[[<ast.Constant object at 0x7da20c7943d0>, <ast.Constant object at 0x7da20c795120>, <ast.Constant object at 0x7da20c795c60>], [<ast.Call object at 0x7da20c795600>, <ast.Call object at 0x7da20c7946a0>, <ast.Call object at 0x7da1b138ece0>]]
name[planes].nodal_plane_2 assign[=] dictionary[[<ast.Constant object at 0x7da1b138e890>, <ast.Constant object at 0x7da1b138c0d0>, <ast.Constant object at 0x7da1b138ec80>], [<ast.Call object at 0x7da1b138df30>, <ast.Call object at 0x7da1b138fe50>, <ast.Call object at 0x7da1b138c460>]]
return[name[planes]]
|
keyword[def] identifier[_get_nodal_planes_from_ndk_string] ( identifier[self] , identifier[ndk_string] ):
literal[string]
identifier[planes] = identifier[GCMTNodalPlanes] ()
identifier[planes] . identifier[nodal_plane_1] ={ literal[string] : identifier[float] ( identifier[ndk_string] [ literal[int] : literal[int] ]),
literal[string] : identifier[float] ( identifier[ndk_string] [ literal[int] : literal[int] ]),
literal[string] : identifier[float] ( identifier[ndk_string] [ literal[int] : literal[int] ])}
identifier[planes] . identifier[nodal_plane_2] ={ literal[string] : identifier[float] ( identifier[ndk_string] [ literal[int] : literal[int] ]),
literal[string] : identifier[float] ( identifier[ndk_string] [ literal[int] : literal[int] ]),
literal[string] : identifier[float] ( identifier[ndk_string] [ literal[int] :])}
keyword[return] identifier[planes]
|
def _get_nodal_planes_from_ndk_string(self, ndk_string):
"""
Reads the nodal plane information (represented by 5th line [57:] of the
tensor representation) and returns an instance of the GCMTNodalPlanes
class
"""
planes = GCMTNodalPlanes()
planes.nodal_plane_1 = {'strike': float(ndk_string[0:3]), 'dip': float(ndk_string[3:6]), 'rake': float(ndk_string[6:11])}
planes.nodal_plane_2 = {'strike': float(ndk_string[11:15]), 'dip': float(ndk_string[15:18]), 'rake': float(ndk_string[18:])}
return planes
|
def install(args, console, env, ciprcfg, opts):
"""
Install a package from github and make it available for use.
"""
if len(args) == 0:
# Is this a cipr project?
if ciprcfg.exists:
# Install all the packages for this project
console.quiet('Installing current project packages...')
for name, source in ciprcfg.packages.items():
if opts.upgrade:
app.command.run(['install', '--upgrade', source])
else:
app.command.run(['install', source])
else:
console.error('No cipr project or package found.')
return
else:
for source in args:
package, name, version, type = _package_info(source)
if not path.exists(env.package_dir):
os.makedirs(env.package_dir)
package_dir = path.join(env.package_dir, name)
if path.exists(package_dir):
if opts.upgrade:
app.command.run(['uninstall', name])
else:
console.quiet('Package %s already exists. Use --upgrade to force a re-install.' % name)
return
console.quiet('Installing %s...' % name)
if type == 'git':
tmpdir = tempfile.mkdtemp(prefix='cipr')
clom.git.clone(package, tmpdir).shell.execute()
if version:
cmd = AND(clom.cd(tmpdir), clom.git.checkout(version))
cmd.shell.execute()
package_json = path.join(tmpdir, 'package.json')
if path.exists(package_json):
# Looks like a cipr package, copy directly
shutil.move(tmpdir, package_dir)
else:
# Not a cipr package, sandbox in sub-directory
shutil.move(tmpdir, path.join(package_dir, name))
console.quiet('`%s` installed from git repo to `%s`' % (name, package_dir))
elif path.exists(package):
# Local
os.symlink(package, package_dir)
else:
console.error('Package `%s` type not recognized' % package)
return
pkg = Package(package_dir, source)
ciprcfg.add_package(pkg)
if pkg.dependencies:
console.quiet('Installing dependancies...')
for name, require in pkg.dependencies.items():
if opts.upgrade:
app.command.run(['install', '--upgrade', require])
else:
app.command.run(['install', require])
|
def function[install, parameter[args, console, env, ciprcfg, opts]]:
constant[
Install a package from github and make it available for use.
]
if compare[call[name[len], parameter[name[args]]] equal[==] constant[0]] begin[:]
if name[ciprcfg].exists begin[:]
call[name[console].quiet, parameter[constant[Installing current project packages...]]]
for taget[tuple[[<ast.Name object at 0x7da18dc990f0>, <ast.Name object at 0x7da18dc9b130>]]] in starred[call[name[ciprcfg].packages.items, parameter[]]] begin[:]
if name[opts].upgrade begin[:]
call[name[app].command.run, parameter[list[[<ast.Constant object at 0x7da18dc9beb0>, <ast.Constant object at 0x7da18dc9b970>, <ast.Name object at 0x7da18dc98eb0>]]]]
return[None]
|
keyword[def] identifier[install] ( identifier[args] , identifier[console] , identifier[env] , identifier[ciprcfg] , identifier[opts] ):
literal[string]
keyword[if] identifier[len] ( identifier[args] )== literal[int] :
keyword[if] identifier[ciprcfg] . identifier[exists] :
identifier[console] . identifier[quiet] ( literal[string] )
keyword[for] identifier[name] , identifier[source] keyword[in] identifier[ciprcfg] . identifier[packages] . identifier[items] ():
keyword[if] identifier[opts] . identifier[upgrade] :
identifier[app] . identifier[command] . identifier[run] ([ literal[string] , literal[string] , identifier[source] ])
keyword[else] :
identifier[app] . identifier[command] . identifier[run] ([ literal[string] , identifier[source] ])
keyword[else] :
identifier[console] . identifier[error] ( literal[string] )
keyword[return]
keyword[else] :
keyword[for] identifier[source] keyword[in] identifier[args] :
identifier[package] , identifier[name] , identifier[version] , identifier[type] = identifier[_package_info] ( identifier[source] )
keyword[if] keyword[not] identifier[path] . identifier[exists] ( identifier[env] . identifier[package_dir] ):
identifier[os] . identifier[makedirs] ( identifier[env] . identifier[package_dir] )
identifier[package_dir] = identifier[path] . identifier[join] ( identifier[env] . identifier[package_dir] , identifier[name] )
keyword[if] identifier[path] . identifier[exists] ( identifier[package_dir] ):
keyword[if] identifier[opts] . identifier[upgrade] :
identifier[app] . identifier[command] . identifier[run] ([ literal[string] , identifier[name] ])
keyword[else] :
identifier[console] . identifier[quiet] ( literal[string] % identifier[name] )
keyword[return]
identifier[console] . identifier[quiet] ( literal[string] % identifier[name] )
keyword[if] identifier[type] == literal[string] :
identifier[tmpdir] = identifier[tempfile] . identifier[mkdtemp] ( identifier[prefix] = literal[string] )
identifier[clom] . identifier[git] . identifier[clone] ( identifier[package] , identifier[tmpdir] ). identifier[shell] . identifier[execute] ()
keyword[if] identifier[version] :
identifier[cmd] = identifier[AND] ( identifier[clom] . identifier[cd] ( identifier[tmpdir] ), identifier[clom] . identifier[git] . identifier[checkout] ( identifier[version] ))
identifier[cmd] . identifier[shell] . identifier[execute] ()
identifier[package_json] = identifier[path] . identifier[join] ( identifier[tmpdir] , literal[string] )
keyword[if] identifier[path] . identifier[exists] ( identifier[package_json] ):
identifier[shutil] . identifier[move] ( identifier[tmpdir] , identifier[package_dir] )
keyword[else] :
identifier[shutil] . identifier[move] ( identifier[tmpdir] , identifier[path] . identifier[join] ( identifier[package_dir] , identifier[name] ))
identifier[console] . identifier[quiet] ( literal[string] %( identifier[name] , identifier[package_dir] ))
keyword[elif] identifier[path] . identifier[exists] ( identifier[package] ):
identifier[os] . identifier[symlink] ( identifier[package] , identifier[package_dir] )
keyword[else] :
identifier[console] . identifier[error] ( literal[string] % identifier[package] )
keyword[return]
identifier[pkg] = identifier[Package] ( identifier[package_dir] , identifier[source] )
identifier[ciprcfg] . identifier[add_package] ( identifier[pkg] )
keyword[if] identifier[pkg] . identifier[dependencies] :
identifier[console] . identifier[quiet] ( literal[string] )
keyword[for] identifier[name] , identifier[require] keyword[in] identifier[pkg] . identifier[dependencies] . identifier[items] ():
keyword[if] identifier[opts] . identifier[upgrade] :
identifier[app] . identifier[command] . identifier[run] ([ literal[string] , literal[string] , identifier[require] ])
keyword[else] :
identifier[app] . identifier[command] . identifier[run] ([ literal[string] , identifier[require] ])
|
def install(args, console, env, ciprcfg, opts):
"""
Install a package from github and make it available for use.
"""
if len(args) == 0:
# Is this a cipr project?
if ciprcfg.exists:
# Install all the packages for this project
console.quiet('Installing current project packages...')
for (name, source) in ciprcfg.packages.items():
if opts.upgrade:
app.command.run(['install', '--upgrade', source]) # depends on [control=['if'], data=[]]
else:
app.command.run(['install', source]) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
console.error('No cipr project or package found.')
return # depends on [control=['if'], data=[]]
else:
for source in args:
(package, name, version, type) = _package_info(source)
if not path.exists(env.package_dir):
os.makedirs(env.package_dir) # depends on [control=['if'], data=[]]
package_dir = path.join(env.package_dir, name)
if path.exists(package_dir):
if opts.upgrade:
app.command.run(['uninstall', name]) # depends on [control=['if'], data=[]]
else:
console.quiet('Package %s already exists. Use --upgrade to force a re-install.' % name)
return # depends on [control=['if'], data=[]]
console.quiet('Installing %s...' % name)
if type == 'git':
tmpdir = tempfile.mkdtemp(prefix='cipr')
clom.git.clone(package, tmpdir).shell.execute()
if version:
cmd = AND(clom.cd(tmpdir), clom.git.checkout(version))
cmd.shell.execute() # depends on [control=['if'], data=[]]
package_json = path.join(tmpdir, 'package.json')
if path.exists(package_json):
# Looks like a cipr package, copy directly
shutil.move(tmpdir, package_dir) # depends on [control=['if'], data=[]]
else:
# Not a cipr package, sandbox in sub-directory
shutil.move(tmpdir, path.join(package_dir, name))
console.quiet('`%s` installed from git repo to `%s`' % (name, package_dir)) # depends on [control=['if'], data=[]]
elif path.exists(package):
# Local
os.symlink(package, package_dir) # depends on [control=['if'], data=[]]
else:
console.error('Package `%s` type not recognized' % package)
return
pkg = Package(package_dir, source)
ciprcfg.add_package(pkg)
if pkg.dependencies:
console.quiet('Installing dependancies...')
for (name, require) in pkg.dependencies.items():
if opts.upgrade:
app.command.run(['install', '--upgrade', require]) # depends on [control=['if'], data=[]]
else:
app.command.run(['install', require]) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['source']]
|
def check_consistent_parameter_grouping(self):
"""
Ensures this object does not have conflicting groups of parameters.
:raises ValueError: For conflicting or absent parameters.
"""
parameter_groups = {}
if self.indices_per_axis is not None:
parameter_groups["indices_per_axis"] = \
{"self.indices_per_axis": self.indices_per_axis}
if (self.split_size is not None) or (self.split_num_slices_per_axis is not None):
parameter_groups["split_size"] = \
{
"self.split_size": self.split_size,
"self.split_num_slices_per_axis": self.split_num_slices_per_axis,
}
if self.tile_shape is not None:
parameter_groups["tile_shape"] = \
{"self.tile_shape": self.tile_shape}
if self.max_tile_bytes is not None:
parameter_groups["max_tile_bytes"] = \
{"self.max_tile_bytes": self.max_tile_bytes}
if self.max_tile_shape is not None:
if "max_tile_bytes" not in parameter_groups.keys():
parameter_groups["max_tile_bytes"] = {}
parameter_groups["max_tile_bytes"]["self.max_tile_shape"] = self.max_tile_shape
if self.sub_tile_shape is not None:
if "max_tile_bytes" not in parameter_groups.keys():
parameter_groups["max_tile_bytes"] = {}
parameter_groups["max_tile_bytes"]["self.sub_tile_shape"] = self.sub_tile_shape
self.logger.debug("parameter_groups=%s", parameter_groups)
if len(parameter_groups.keys()) > 1:
group_keys = sorted(parameter_groups.keys())
raise ValueError(
"Got conflicting parameter groups specified, "
+
"should only specify one group to define the split:\n"
+
(
"\n".join(
[
(
("Group %18s: " % ("'%s'" % group_key))
+
str(parameter_groups[group_key])
)
for group_key in group_keys
]
)
)
)
if len(parameter_groups.keys()) <= 0:
raise ValueError(
"No split parameters specified, need parameters from one of the groups: "
+
"'indices_per_axis', 'split_size', 'tile_shape' or 'max_tile_bytes'"
)
|
def function[check_consistent_parameter_grouping, parameter[self]]:
constant[
Ensures this object does not have conflicting groups of parameters.
:raises ValueError: For conflicting or absent parameters.
]
variable[parameter_groups] assign[=] dictionary[[], []]
if compare[name[self].indices_per_axis is_not constant[None]] begin[:]
call[name[parameter_groups]][constant[indices_per_axis]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0b6ed70>], [<ast.Attribute object at 0x7da1b0b6d0c0>]]
if <ast.BoolOp object at 0x7da1b0b6cb50> begin[:]
call[name[parameter_groups]][constant[split_size]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0b6e7d0>, <ast.Constant object at 0x7da1b0b6de10>], [<ast.Attribute object at 0x7da1b0b6df00>, <ast.Attribute object at 0x7da1b0b6e500>]]
if compare[name[self].tile_shape is_not constant[None]] begin[:]
call[name[parameter_groups]][constant[tile_shape]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0b6fe50>], [<ast.Attribute object at 0x7da1b0b6fca0>]]
if compare[name[self].max_tile_bytes is_not constant[None]] begin[:]
call[name[parameter_groups]][constant[max_tile_bytes]] assign[=] dictionary[[<ast.Constant object at 0x7da1b0b6cdc0>], [<ast.Attribute object at 0x7da1b0b6d030>]]
if compare[name[self].max_tile_shape is_not constant[None]] begin[:]
if compare[constant[max_tile_bytes] <ast.NotIn object at 0x7da2590d7190> call[name[parameter_groups].keys, parameter[]]] begin[:]
call[name[parameter_groups]][constant[max_tile_bytes]] assign[=] dictionary[[], []]
call[call[name[parameter_groups]][constant[max_tile_bytes]]][constant[self.max_tile_shape]] assign[=] name[self].max_tile_shape
if compare[name[self].sub_tile_shape is_not constant[None]] begin[:]
if compare[constant[max_tile_bytes] <ast.NotIn object at 0x7da2590d7190> call[name[parameter_groups].keys, parameter[]]] begin[:]
call[name[parameter_groups]][constant[max_tile_bytes]] assign[=] dictionary[[], []]
call[call[name[parameter_groups]][constant[max_tile_bytes]]][constant[self.sub_tile_shape]] assign[=] name[self].sub_tile_shape
call[name[self].logger.debug, parameter[constant[parameter_groups=%s], name[parameter_groups]]]
if compare[call[name[len], parameter[call[name[parameter_groups].keys, parameter[]]]] greater[>] constant[1]] begin[:]
variable[group_keys] assign[=] call[name[sorted], parameter[call[name[parameter_groups].keys, parameter[]]]]
<ast.Raise object at 0x7da1b0a21cc0>
if compare[call[name[len], parameter[call[name[parameter_groups].keys, parameter[]]]] less_or_equal[<=] constant[0]] begin[:]
<ast.Raise object at 0x7da1b0a20ee0>
|
keyword[def] identifier[check_consistent_parameter_grouping] ( identifier[self] ):
literal[string]
identifier[parameter_groups] ={}
keyword[if] identifier[self] . identifier[indices_per_axis] keyword[is] keyword[not] keyword[None] :
identifier[parameter_groups] [ literal[string] ]={ literal[string] : identifier[self] . identifier[indices_per_axis] }
keyword[if] ( identifier[self] . identifier[split_size] keyword[is] keyword[not] keyword[None] ) keyword[or] ( identifier[self] . identifier[split_num_slices_per_axis] keyword[is] keyword[not] keyword[None] ):
identifier[parameter_groups] [ literal[string] ]={
literal[string] : identifier[self] . identifier[split_size] ,
literal[string] : identifier[self] . identifier[split_num_slices_per_axis] ,
}
keyword[if] identifier[self] . identifier[tile_shape] keyword[is] keyword[not] keyword[None] :
identifier[parameter_groups] [ literal[string] ]={ literal[string] : identifier[self] . identifier[tile_shape] }
keyword[if] identifier[self] . identifier[max_tile_bytes] keyword[is] keyword[not] keyword[None] :
identifier[parameter_groups] [ literal[string] ]={ literal[string] : identifier[self] . identifier[max_tile_bytes] }
keyword[if] identifier[self] . identifier[max_tile_shape] keyword[is] keyword[not] keyword[None] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[parameter_groups] . identifier[keys] ():
identifier[parameter_groups] [ literal[string] ]={}
identifier[parameter_groups] [ literal[string] ][ literal[string] ]= identifier[self] . identifier[max_tile_shape]
keyword[if] identifier[self] . identifier[sub_tile_shape] keyword[is] keyword[not] keyword[None] :
keyword[if] literal[string] keyword[not] keyword[in] identifier[parameter_groups] . identifier[keys] ():
identifier[parameter_groups] [ literal[string] ]={}
identifier[parameter_groups] [ literal[string] ][ literal[string] ]= identifier[self] . identifier[sub_tile_shape]
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] , identifier[parameter_groups] )
keyword[if] identifier[len] ( identifier[parameter_groups] . identifier[keys] ())> literal[int] :
identifier[group_keys] = identifier[sorted] ( identifier[parameter_groups] . identifier[keys] ())
keyword[raise] identifier[ValueError] (
literal[string]
+
literal[string]
+
(
literal[string] . identifier[join] (
[
(
( literal[string] %( literal[string] % identifier[group_key] ))
+
identifier[str] ( identifier[parameter_groups] [ identifier[group_key] ])
)
keyword[for] identifier[group_key] keyword[in] identifier[group_keys]
]
)
)
)
keyword[if] identifier[len] ( identifier[parameter_groups] . identifier[keys] ())<= literal[int] :
keyword[raise] identifier[ValueError] (
literal[string]
+
literal[string]
)
|
def check_consistent_parameter_grouping(self):
"""
Ensures this object does not have conflicting groups of parameters.
:raises ValueError: For conflicting or absent parameters.
"""
parameter_groups = {}
if self.indices_per_axis is not None:
parameter_groups['indices_per_axis'] = {'self.indices_per_axis': self.indices_per_axis} # depends on [control=['if'], data=[]]
if self.split_size is not None or self.split_num_slices_per_axis is not None:
parameter_groups['split_size'] = {'self.split_size': self.split_size, 'self.split_num_slices_per_axis': self.split_num_slices_per_axis} # depends on [control=['if'], data=[]]
if self.tile_shape is not None:
parameter_groups['tile_shape'] = {'self.tile_shape': self.tile_shape} # depends on [control=['if'], data=[]]
if self.max_tile_bytes is not None:
parameter_groups['max_tile_bytes'] = {'self.max_tile_bytes': self.max_tile_bytes} # depends on [control=['if'], data=[]]
if self.max_tile_shape is not None:
if 'max_tile_bytes' not in parameter_groups.keys():
parameter_groups['max_tile_bytes'] = {} # depends on [control=['if'], data=[]]
parameter_groups['max_tile_bytes']['self.max_tile_shape'] = self.max_tile_shape # depends on [control=['if'], data=[]]
if self.sub_tile_shape is not None:
if 'max_tile_bytes' not in parameter_groups.keys():
parameter_groups['max_tile_bytes'] = {} # depends on [control=['if'], data=[]]
parameter_groups['max_tile_bytes']['self.sub_tile_shape'] = self.sub_tile_shape # depends on [control=['if'], data=[]]
self.logger.debug('parameter_groups=%s', parameter_groups)
if len(parameter_groups.keys()) > 1:
group_keys = sorted(parameter_groups.keys())
raise ValueError('Got conflicting parameter groups specified, ' + 'should only specify one group to define the split:\n' + '\n'.join(['Group %18s: ' % ("'%s'" % group_key) + str(parameter_groups[group_key]) for group_key in group_keys])) # depends on [control=['if'], data=[]]
if len(parameter_groups.keys()) <= 0:
raise ValueError('No split parameters specified, need parameters from one of the groups: ' + "'indices_per_axis', 'split_size', 'tile_shape' or 'max_tile_bytes'") # depends on [control=['if'], data=[]]
|
def _twos_comp_conditional(orig_wire, sign_bit, bw=None):
"""Returns two's complement of wire (using bitwidth bw) if sign_bit == 1"""
if bw is None:
bw = len(orig_wire)
new_wire = pyrtl.WireVector(bw)
with pyrtl.conditional_assignment:
with sign_bit:
new_wire |= ~orig_wire + 1
with pyrtl.otherwise:
new_wire |= orig_wire
return new_wire
|
def function[_twos_comp_conditional, parameter[orig_wire, sign_bit, bw]]:
constant[Returns two's complement of wire (using bitwidth bw) if sign_bit == 1]
if compare[name[bw] is constant[None]] begin[:]
variable[bw] assign[=] call[name[len], parameter[name[orig_wire]]]
variable[new_wire] assign[=] call[name[pyrtl].WireVector, parameter[name[bw]]]
with name[pyrtl].conditional_assignment begin[:]
with name[sign_bit] begin[:]
<ast.AugAssign object at 0x7da20c794c70>
with name[pyrtl].otherwise begin[:]
<ast.AugAssign object at 0x7da20c795030>
return[name[new_wire]]
|
keyword[def] identifier[_twos_comp_conditional] ( identifier[orig_wire] , identifier[sign_bit] , identifier[bw] = keyword[None] ):
literal[string]
keyword[if] identifier[bw] keyword[is] keyword[None] :
identifier[bw] = identifier[len] ( identifier[orig_wire] )
identifier[new_wire] = identifier[pyrtl] . identifier[WireVector] ( identifier[bw] )
keyword[with] identifier[pyrtl] . identifier[conditional_assignment] :
keyword[with] identifier[sign_bit] :
identifier[new_wire] |=~ identifier[orig_wire] + literal[int]
keyword[with] identifier[pyrtl] . identifier[otherwise] :
identifier[new_wire] |= identifier[orig_wire]
keyword[return] identifier[new_wire]
|
def _twos_comp_conditional(orig_wire, sign_bit, bw=None):
"""Returns two's complement of wire (using bitwidth bw) if sign_bit == 1"""
if bw is None:
bw = len(orig_wire) # depends on [control=['if'], data=['bw']]
new_wire = pyrtl.WireVector(bw)
with pyrtl.conditional_assignment:
with sign_bit:
new_wire |= ~orig_wire + 1 # depends on [control=['with'], data=[]]
with pyrtl.otherwise:
new_wire |= orig_wire # depends on [control=['with'], data=[]] # depends on [control=['with'], data=[]]
return new_wire
|
def schema_list(dbname, user=None,
db_user=None, db_password=None,
db_host=None, db_port=None):
'''
Return a dict with information about schemas in a Postgres database.
CLI Example:
.. code-block:: bash
salt '*' postgres.schema_list dbname
dbname
Database name we query on
user
The system user the operation should be performed on behalf of
db_user
database username if different from config or default
db_password
user password if any password for a specified user
db_host
Database host if different from config or default
db_port
Database port if different from config or default
'''
ret = {}
query = (''.join([
'SELECT '
'pg_namespace.nspname as "name",'
'pg_namespace.nspacl as "acl", '
'pg_roles.rolname as "owner" '
'FROM pg_namespace '
'LEFT JOIN pg_roles ON pg_roles.oid = pg_namespace.nspowner '
]))
rows = psql_query(query, runas=user,
host=db_host,
user=db_user,
port=db_port,
maintenance_db=dbname,
password=db_password)
for row in rows:
retrow = {}
for key in ('owner', 'acl'):
retrow[key] = row[key]
ret[row['name']] = retrow
return ret
|
def function[schema_list, parameter[dbname, user, db_user, db_password, db_host, db_port]]:
constant[
Return a dict with information about schemas in a Postgres database.
CLI Example:
.. code-block:: bash
salt '*' postgres.schema_list dbname
dbname
Database name we query on
user
The system user the operation should be performed on behalf of
db_user
database username if different from config or default
db_password
user password if any password for a specified user
db_host
Database host if different from config or default
db_port
Database port if different from config or default
]
variable[ret] assign[=] dictionary[[], []]
variable[query] assign[=] call[constant[].join, parameter[list[[<ast.Constant object at 0x7da20c7c8070>]]]]
variable[rows] assign[=] call[name[psql_query], parameter[name[query]]]
for taget[name[row]] in starred[name[rows]] begin[:]
variable[retrow] assign[=] dictionary[[], []]
for taget[name[key]] in starred[tuple[[<ast.Constant object at 0x7da20c7c8a90>, <ast.Constant object at 0x7da20c7c89d0>]]] begin[:]
call[name[retrow]][name[key]] assign[=] call[name[row]][name[key]]
call[name[ret]][call[name[row]][constant[name]]] assign[=] name[retrow]
return[name[ret]]
|
keyword[def] identifier[schema_list] ( identifier[dbname] , identifier[user] = keyword[None] ,
identifier[db_user] = keyword[None] , identifier[db_password] = keyword[None] ,
identifier[db_host] = keyword[None] , identifier[db_port] = keyword[None] ):
literal[string]
identifier[ret] ={}
identifier[query] =( literal[string] . identifier[join] ([
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
literal[string]
]))
identifier[rows] = identifier[psql_query] ( identifier[query] , identifier[runas] = identifier[user] ,
identifier[host] = identifier[db_host] ,
identifier[user] = identifier[db_user] ,
identifier[port] = identifier[db_port] ,
identifier[maintenance_db] = identifier[dbname] ,
identifier[password] = identifier[db_password] )
keyword[for] identifier[row] keyword[in] identifier[rows] :
identifier[retrow] ={}
keyword[for] identifier[key] keyword[in] ( literal[string] , literal[string] ):
identifier[retrow] [ identifier[key] ]= identifier[row] [ identifier[key] ]
identifier[ret] [ identifier[row] [ literal[string] ]]= identifier[retrow]
keyword[return] identifier[ret]
|
def schema_list(dbname, user=None, db_user=None, db_password=None, db_host=None, db_port=None):
"""
Return a dict with information about schemas in a Postgres database.
CLI Example:
.. code-block:: bash
salt '*' postgres.schema_list dbname
dbname
Database name we query on
user
The system user the operation should be performed on behalf of
db_user
database username if different from config or default
db_password
user password if any password for a specified user
db_host
Database host if different from config or default
db_port
Database port if different from config or default
"""
ret = {}
query = ''.join(['SELECT pg_namespace.nspname as "name",pg_namespace.nspacl as "acl", pg_roles.rolname as "owner" FROM pg_namespace LEFT JOIN pg_roles ON pg_roles.oid = pg_namespace.nspowner '])
rows = psql_query(query, runas=user, host=db_host, user=db_user, port=db_port, maintenance_db=dbname, password=db_password)
for row in rows:
retrow = {}
for key in ('owner', 'acl'):
retrow[key] = row[key] # depends on [control=['for'], data=['key']]
ret[row['name']] = retrow # depends on [control=['for'], data=['row']]
return ret
|
def complete_get(self, text, line, begidx, endidx):
"""completion for find command"""
options = self.GET_OPTS
if not text:
completions = options
else:
completions = [f
for f in options
if f.startswith(text)
]
return completions
|
def function[complete_get, parameter[self, text, line, begidx, endidx]]:
constant[completion for find command]
variable[options] assign[=] name[self].GET_OPTS
if <ast.UnaryOp object at 0x7da1b10109d0> begin[:]
variable[completions] assign[=] name[options]
return[name[completions]]
|
keyword[def] identifier[complete_get] ( identifier[self] , identifier[text] , identifier[line] , identifier[begidx] , identifier[endidx] ):
literal[string]
identifier[options] = identifier[self] . identifier[GET_OPTS]
keyword[if] keyword[not] identifier[text] :
identifier[completions] = identifier[options]
keyword[else] :
identifier[completions] =[ identifier[f]
keyword[for] identifier[f] keyword[in] identifier[options]
keyword[if] identifier[f] . identifier[startswith] ( identifier[text] )
]
keyword[return] identifier[completions]
|
def complete_get(self, text, line, begidx, endidx):
"""completion for find command"""
options = self.GET_OPTS
if not text:
completions = options # depends on [control=['if'], data=[]]
else:
completions = [f for f in options if f.startswith(text)]
return completions
|
def init_registered(self, request):
""" Create default price list items for each registered resource. """
created_items = models.DefaultPriceListItem.init_from_registered_resources()
if created_items:
message = ungettext(
_('Price item was created: %s.') % created_items[0].name,
_('Price items were created: %s.') % ', '.join(item.name for item in created_items),
len(created_items)
)
self.message_user(request, message)
else:
self.message_user(request, _('Price items for all registered resources have been updated.'))
return redirect(reverse('admin:cost_tracking_defaultpricelistitem_changelist'))
|
def function[init_registered, parameter[self, request]]:
constant[ Create default price list items for each registered resource. ]
variable[created_items] assign[=] call[name[models].DefaultPriceListItem.init_from_registered_resources, parameter[]]
if name[created_items] begin[:]
variable[message] assign[=] call[name[ungettext], parameter[binary_operation[call[name[_], parameter[constant[Price item was created: %s.]]] <ast.Mod object at 0x7da2590d6920> call[name[created_items]][constant[0]].name], binary_operation[call[name[_], parameter[constant[Price items were created: %s.]]] <ast.Mod object at 0x7da2590d6920> call[constant[, ].join, parameter[<ast.GeneratorExp object at 0x7da1b0fc4670>]]], call[name[len], parameter[name[created_items]]]]]
call[name[self].message_user, parameter[name[request], name[message]]]
return[call[name[redirect], parameter[call[name[reverse], parameter[constant[admin:cost_tracking_defaultpricelistitem_changelist]]]]]]
|
keyword[def] identifier[init_registered] ( identifier[self] , identifier[request] ):
literal[string]
identifier[created_items] = identifier[models] . identifier[DefaultPriceListItem] . identifier[init_from_registered_resources] ()
keyword[if] identifier[created_items] :
identifier[message] = identifier[ungettext] (
identifier[_] ( literal[string] )% identifier[created_items] [ literal[int] ]. identifier[name] ,
identifier[_] ( literal[string] )% literal[string] . identifier[join] ( identifier[item] . identifier[name] keyword[for] identifier[item] keyword[in] identifier[created_items] ),
identifier[len] ( identifier[created_items] )
)
identifier[self] . identifier[message_user] ( identifier[request] , identifier[message] )
keyword[else] :
identifier[self] . identifier[message_user] ( identifier[request] , identifier[_] ( literal[string] ))
keyword[return] identifier[redirect] ( identifier[reverse] ( literal[string] ))
|
def init_registered(self, request):
""" Create default price list items for each registered resource. """
created_items = models.DefaultPriceListItem.init_from_registered_resources()
if created_items:
message = ungettext(_('Price item was created: %s.') % created_items[0].name, _('Price items were created: %s.') % ', '.join((item.name for item in created_items)), len(created_items))
self.message_user(request, message) # depends on [control=['if'], data=[]]
else:
self.message_user(request, _('Price items for all registered resources have been updated.'))
return redirect(reverse('admin:cost_tracking_defaultpricelistitem_changelist'))
|
def stop_image_acquisition(self):
"""
Stops image acquisition.
:return: None.
"""
if self.is_acquiring_images:
#
self._is_acquiring_images = False
#
if self.thread_image_acquisition.is_running: # TODO
self.thread_image_acquisition.stop()
with MutexLocker(self.thread_image_acquisition):
#
self.device.node_map.AcquisitionStop.execute()
try:
# Unlock TLParamsLocked in order to allow full device
# configuration:
self.device.node_map.TLParamsLocked.value = 0
except LogicalErrorException:
# SFNC < 2.0
pass
for data_stream in self._data_streams:
# Stop image acquisition.
try:
data_stream.stop_acquisition(
ACQ_STOP_FLAGS_LIST.ACQ_STOP_FLAGS_KILL
)
except (ResourceInUseException, TimeoutException) as e:
self._logger.error(e, exc_info=True)
# Flash the queue for image acquisition process.
data_stream.flush_buffer_queue(
ACQ_QUEUE_TYPE_LIST.ACQ_QUEUE_ALL_DISCARD
)
for event_manager in self._event_new_buffer_managers:
event_manager.flush_event_queue()
if self._create_ds_at_connection:
self._release_buffers()
else:
self._release_data_streams()
#
self._has_acquired_1st_image = False
#
self._chunk_adapter.detach_buffer()
#
self._logger.info(
'{0} stopped image acquisition.'.format(self._device.id_)
)
if self._profiler:
self._profiler.print_diff()
|
def function[stop_image_acquisition, parameter[self]]:
constant[
Stops image acquisition.
:return: None.
]
if name[self].is_acquiring_images begin[:]
name[self]._is_acquiring_images assign[=] constant[False]
if name[self].thread_image_acquisition.is_running begin[:]
call[name[self].thread_image_acquisition.stop, parameter[]]
with call[name[MutexLocker], parameter[name[self].thread_image_acquisition]] begin[:]
call[name[self].device.node_map.AcquisitionStop.execute, parameter[]]
<ast.Try object at 0x7da1b021d870>
for taget[name[data_stream]] in starred[name[self]._data_streams] begin[:]
<ast.Try object at 0x7da1b021f940>
call[name[data_stream].flush_buffer_queue, parameter[name[ACQ_QUEUE_TYPE_LIST].ACQ_QUEUE_ALL_DISCARD]]
for taget[name[event_manager]] in starred[name[self]._event_new_buffer_managers] begin[:]
call[name[event_manager].flush_event_queue, parameter[]]
if name[self]._create_ds_at_connection begin[:]
call[name[self]._release_buffers, parameter[]]
name[self]._has_acquired_1st_image assign[=] constant[False]
call[name[self]._chunk_adapter.detach_buffer, parameter[]]
call[name[self]._logger.info, parameter[call[constant[{0} stopped image acquisition.].format, parameter[name[self]._device.id_]]]]
if name[self]._profiler begin[:]
call[name[self]._profiler.print_diff, parameter[]]
|
keyword[def] identifier[stop_image_acquisition] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[is_acquiring_images] :
identifier[self] . identifier[_is_acquiring_images] = keyword[False]
keyword[if] identifier[self] . identifier[thread_image_acquisition] . identifier[is_running] :
identifier[self] . identifier[thread_image_acquisition] . identifier[stop] ()
keyword[with] identifier[MutexLocker] ( identifier[self] . identifier[thread_image_acquisition] ):
identifier[self] . identifier[device] . identifier[node_map] . identifier[AcquisitionStop] . identifier[execute] ()
keyword[try] :
identifier[self] . identifier[device] . identifier[node_map] . identifier[TLParamsLocked] . identifier[value] = literal[int]
keyword[except] identifier[LogicalErrorException] :
keyword[pass]
keyword[for] identifier[data_stream] keyword[in] identifier[self] . identifier[_data_streams] :
keyword[try] :
identifier[data_stream] . identifier[stop_acquisition] (
identifier[ACQ_STOP_FLAGS_LIST] . identifier[ACQ_STOP_FLAGS_KILL]
)
keyword[except] ( identifier[ResourceInUseException] , identifier[TimeoutException] ) keyword[as] identifier[e] :
identifier[self] . identifier[_logger] . identifier[error] ( identifier[e] , identifier[exc_info] = keyword[True] )
identifier[data_stream] . identifier[flush_buffer_queue] (
identifier[ACQ_QUEUE_TYPE_LIST] . identifier[ACQ_QUEUE_ALL_DISCARD]
)
keyword[for] identifier[event_manager] keyword[in] identifier[self] . identifier[_event_new_buffer_managers] :
identifier[event_manager] . identifier[flush_event_queue] ()
keyword[if] identifier[self] . identifier[_create_ds_at_connection] :
identifier[self] . identifier[_release_buffers] ()
keyword[else] :
identifier[self] . identifier[_release_data_streams] ()
identifier[self] . identifier[_has_acquired_1st_image] = keyword[False]
identifier[self] . identifier[_chunk_adapter] . identifier[detach_buffer] ()
identifier[self] . identifier[_logger] . identifier[info] (
literal[string] . identifier[format] ( identifier[self] . identifier[_device] . identifier[id_] )
)
keyword[if] identifier[self] . identifier[_profiler] :
identifier[self] . identifier[_profiler] . identifier[print_diff] ()
|
def stop_image_acquisition(self):
"""
Stops image acquisition.
:return: None.
"""
if self.is_acquiring_images:
#
self._is_acquiring_images = False
#
if self.thread_image_acquisition.is_running: # TODO
self.thread_image_acquisition.stop() # depends on [control=['if'], data=[]]
with MutexLocker(self.thread_image_acquisition):
#
self.device.node_map.AcquisitionStop.execute()
try:
# Unlock TLParamsLocked in order to allow full device
# configuration:
self.device.node_map.TLParamsLocked.value = 0 # depends on [control=['try'], data=[]]
except LogicalErrorException:
# SFNC < 2.0
pass # depends on [control=['except'], data=[]]
for data_stream in self._data_streams:
# Stop image acquisition.
try:
data_stream.stop_acquisition(ACQ_STOP_FLAGS_LIST.ACQ_STOP_FLAGS_KILL) # depends on [control=['try'], data=[]]
except (ResourceInUseException, TimeoutException) as e:
self._logger.error(e, exc_info=True) # depends on [control=['except'], data=['e']]
# Flash the queue for image acquisition process.
data_stream.flush_buffer_queue(ACQ_QUEUE_TYPE_LIST.ACQ_QUEUE_ALL_DISCARD) # depends on [control=['for'], data=['data_stream']]
for event_manager in self._event_new_buffer_managers:
event_manager.flush_event_queue() # depends on [control=['for'], data=['event_manager']]
if self._create_ds_at_connection:
self._release_buffers() # depends on [control=['if'], data=[]]
else:
self._release_data_streams() # depends on [control=['with'], data=[]]
#
self._has_acquired_1st_image = False
#
self._chunk_adapter.detach_buffer()
#
self._logger.info('{0} stopped image acquisition.'.format(self._device.id_)) # depends on [control=['if'], data=[]]
if self._profiler:
self._profiler.print_diff() # depends on [control=['if'], data=[]]
|
def _build(self, inputs):
"""Assembles the module network and adds it to the graph.
The internal computation graph is assembled according to the set of
constraints provided at construction time.
Args:
inputs: Tensor containing a batch of transformation parameters.
Returns:
A batch of warped grids.
Raises:
Error: If the input tensor size is not consistent with the constraints
passed at construction time.
"""
input_shape = tf.shape(inputs)
input_dtype = inputs.dtype.as_numpy_dtype
batch_size = tf.expand_dims(input_shape[0], 0)
number_of_params = inputs.get_shape()[1]
if number_of_params != self._constraints.num_free_params:
raise base.Error('Input size is not consistent with constraint '
'definition: {} parameters expected, {} provided.'
.format(self._constraints.num_free_params,
number_of_params))
num_output_dimensions = len(self._psi) // 3
def get_input_slice(start, size):
"""Extracts a subset of columns from the input 2D Tensor."""
return basic.SliceByDim([1], [start], [size])(inputs)
warped_grid = []
var_index_offset = 0
number_of_points = np.prod(self._output_shape)
for i in xrange(num_output_dimensions):
if self._psi[i] is not None:
# The i-th output dimension is not fully specified by the constraints,
# the graph is setup to perform matrix multiplication in batch mode.
grid_coord = self._psi[i].astype(input_dtype)
num_active_vars = self._psi[i].shape[0]
active_vars = get_input_slice(var_index_offset, num_active_vars)
warped_coord = tf.matmul(active_vars, grid_coord)
warped_coord = tf.expand_dims(warped_coord, 1)
var_index_offset += num_active_vars
offset = self._psi[num_output_dimensions + i]
if offset is not None:
offset = offset.astype(input_dtype)
# Some entries in the i-th row of the affine matrix were constrained
# and the corresponding matrix multiplications have been precomputed.
tiling_params = tf.concat(
[
batch_size, tf.constant(
1, shape=(1,)), tf.ones_like(offset.shape)
],
0)
offset = offset.reshape((1, 1) + offset.shape)
warped_coord += tf.tile(offset, tiling_params)
else:
# The i-th output dimension is fully specified by the constraints, and
# the corresponding matrix multiplications have been precomputed.
warped_coord = self._psi[num_output_dimensions + i].astype(input_dtype)
tiling_params = tf.concat(
[
batch_size, tf.constant(
1, shape=(1,)), tf.ones_like(warped_coord.shape)
],
0)
warped_coord = warped_coord.reshape((1, 1) + warped_coord.shape)
warped_coord = tf.tile(warped_coord, tiling_params)
warped_coord += self._psi[i + 2 * num_output_dimensions]
# Need to help TF figuring out shape inference since tiling information
# is held in Tensors which are not known until run time.
warped_coord.set_shape([None, 1, number_of_points])
warped_grid.append(warped_coord)
# Reshape all the warped coordinates tensors to match the specified output
# shape and concatenate into a single matrix.
grid_shape = self._output_shape + (1,)
warped_grid = [basic.BatchReshape(grid_shape)(grid) for grid in warped_grid]
return tf.concat(warped_grid, len(grid_shape))
|
def function[_build, parameter[self, inputs]]:
constant[Assembles the module network and adds it to the graph.
The internal computation graph is assembled according to the set of
constraints provided at construction time.
Args:
inputs: Tensor containing a batch of transformation parameters.
Returns:
A batch of warped grids.
Raises:
Error: If the input tensor size is not consistent with the constraints
passed at construction time.
]
variable[input_shape] assign[=] call[name[tf].shape, parameter[name[inputs]]]
variable[input_dtype] assign[=] name[inputs].dtype.as_numpy_dtype
variable[batch_size] assign[=] call[name[tf].expand_dims, parameter[call[name[input_shape]][constant[0]], constant[0]]]
variable[number_of_params] assign[=] call[call[name[inputs].get_shape, parameter[]]][constant[1]]
if compare[name[number_of_params] not_equal[!=] name[self]._constraints.num_free_params] begin[:]
<ast.Raise object at 0x7da1b1f95630>
variable[num_output_dimensions] assign[=] binary_operation[call[name[len], parameter[name[self]._psi]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[3]]
def function[get_input_slice, parameter[start, size]]:
constant[Extracts a subset of columns from the input 2D Tensor.]
return[call[call[name[basic].SliceByDim, parameter[list[[<ast.Constant object at 0x7da1b1f970a0>]], list[[<ast.Name object at 0x7da1b1f976a0>]], list[[<ast.Name object at 0x7da1b1f950c0>]]]], parameter[name[inputs]]]]
variable[warped_grid] assign[=] list[[]]
variable[var_index_offset] assign[=] constant[0]
variable[number_of_points] assign[=] call[name[np].prod, parameter[name[self]._output_shape]]
for taget[name[i]] in starred[call[name[xrange], parameter[name[num_output_dimensions]]]] begin[:]
if compare[call[name[self]._psi][name[i]] is_not constant[None]] begin[:]
variable[grid_coord] assign[=] call[call[name[self]._psi][name[i]].astype, parameter[name[input_dtype]]]
variable[num_active_vars] assign[=] call[call[name[self]._psi][name[i]].shape][constant[0]]
variable[active_vars] assign[=] call[name[get_input_slice], parameter[name[var_index_offset], name[num_active_vars]]]
variable[warped_coord] assign[=] call[name[tf].matmul, parameter[name[active_vars], name[grid_coord]]]
variable[warped_coord] assign[=] call[name[tf].expand_dims, parameter[name[warped_coord], constant[1]]]
<ast.AugAssign object at 0x7da1b1f964a0>
variable[offset] assign[=] call[name[self]._psi][binary_operation[name[num_output_dimensions] + name[i]]]
if compare[name[offset] is_not constant[None]] begin[:]
variable[offset] assign[=] call[name[offset].astype, parameter[name[input_dtype]]]
variable[tiling_params] assign[=] call[name[tf].concat, parameter[list[[<ast.Name object at 0x7da1b2122920>, <ast.Call object at 0x7da1b2122890>, <ast.Call object at 0x7da1b2122380>]], constant[0]]]
variable[offset] assign[=] call[name[offset].reshape, parameter[binary_operation[tuple[[<ast.Constant object at 0x7da1b2122500>, <ast.Constant object at 0x7da1b2123280>]] + name[offset].shape]]]
<ast.AugAssign object at 0x7da1b2122740>
<ast.AugAssign object at 0x7da1b1f945b0>
call[name[warped_coord].set_shape, parameter[list[[<ast.Constant object at 0x7da1b1f94ee0>, <ast.Constant object at 0x7da1b1f94b20>, <ast.Name object at 0x7da1b1f97970>]]]]
call[name[warped_grid].append, parameter[name[warped_coord]]]
variable[grid_shape] assign[=] binary_operation[name[self]._output_shape + tuple[[<ast.Constant object at 0x7da1b1f96c50>]]]
variable[warped_grid] assign[=] <ast.ListComp object at 0x7da1b1f96a40>
return[call[name[tf].concat, parameter[name[warped_grid], call[name[len], parameter[name[grid_shape]]]]]]
|
keyword[def] identifier[_build] ( identifier[self] , identifier[inputs] ):
literal[string]
identifier[input_shape] = identifier[tf] . identifier[shape] ( identifier[inputs] )
identifier[input_dtype] = identifier[inputs] . identifier[dtype] . identifier[as_numpy_dtype]
identifier[batch_size] = identifier[tf] . identifier[expand_dims] ( identifier[input_shape] [ literal[int] ], literal[int] )
identifier[number_of_params] = identifier[inputs] . identifier[get_shape] ()[ literal[int] ]
keyword[if] identifier[number_of_params] != identifier[self] . identifier[_constraints] . identifier[num_free_params] :
keyword[raise] identifier[base] . identifier[Error] ( literal[string]
literal[string]
. identifier[format] ( identifier[self] . identifier[_constraints] . identifier[num_free_params] ,
identifier[number_of_params] ))
identifier[num_output_dimensions] = identifier[len] ( identifier[self] . identifier[_psi] )// literal[int]
keyword[def] identifier[get_input_slice] ( identifier[start] , identifier[size] ):
literal[string]
keyword[return] identifier[basic] . identifier[SliceByDim] ([ literal[int] ],[ identifier[start] ],[ identifier[size] ])( identifier[inputs] )
identifier[warped_grid] =[]
identifier[var_index_offset] = literal[int]
identifier[number_of_points] = identifier[np] . identifier[prod] ( identifier[self] . identifier[_output_shape] )
keyword[for] identifier[i] keyword[in] identifier[xrange] ( identifier[num_output_dimensions] ):
keyword[if] identifier[self] . identifier[_psi] [ identifier[i] ] keyword[is] keyword[not] keyword[None] :
identifier[grid_coord] = identifier[self] . identifier[_psi] [ identifier[i] ]. identifier[astype] ( identifier[input_dtype] )
identifier[num_active_vars] = identifier[self] . identifier[_psi] [ identifier[i] ]. identifier[shape] [ literal[int] ]
identifier[active_vars] = identifier[get_input_slice] ( identifier[var_index_offset] , identifier[num_active_vars] )
identifier[warped_coord] = identifier[tf] . identifier[matmul] ( identifier[active_vars] , identifier[grid_coord] )
identifier[warped_coord] = identifier[tf] . identifier[expand_dims] ( identifier[warped_coord] , literal[int] )
identifier[var_index_offset] += identifier[num_active_vars]
identifier[offset] = identifier[self] . identifier[_psi] [ identifier[num_output_dimensions] + identifier[i] ]
keyword[if] identifier[offset] keyword[is] keyword[not] keyword[None] :
identifier[offset] = identifier[offset] . identifier[astype] ( identifier[input_dtype] )
identifier[tiling_params] = identifier[tf] . identifier[concat] (
[
identifier[batch_size] , identifier[tf] . identifier[constant] (
literal[int] , identifier[shape] =( literal[int] ,)), identifier[tf] . identifier[ones_like] ( identifier[offset] . identifier[shape] )
],
literal[int] )
identifier[offset] = identifier[offset] . identifier[reshape] (( literal[int] , literal[int] )+ identifier[offset] . identifier[shape] )
identifier[warped_coord] += identifier[tf] . identifier[tile] ( identifier[offset] , identifier[tiling_params] )
keyword[else] :
identifier[warped_coord] = identifier[self] . identifier[_psi] [ identifier[num_output_dimensions] + identifier[i] ]. identifier[astype] ( identifier[input_dtype] )
identifier[tiling_params] = identifier[tf] . identifier[concat] (
[
identifier[batch_size] , identifier[tf] . identifier[constant] (
literal[int] , identifier[shape] =( literal[int] ,)), identifier[tf] . identifier[ones_like] ( identifier[warped_coord] . identifier[shape] )
],
literal[int] )
identifier[warped_coord] = identifier[warped_coord] . identifier[reshape] (( literal[int] , literal[int] )+ identifier[warped_coord] . identifier[shape] )
identifier[warped_coord] = identifier[tf] . identifier[tile] ( identifier[warped_coord] , identifier[tiling_params] )
identifier[warped_coord] += identifier[self] . identifier[_psi] [ identifier[i] + literal[int] * identifier[num_output_dimensions] ]
identifier[warped_coord] . identifier[set_shape] ([ keyword[None] , literal[int] , identifier[number_of_points] ])
identifier[warped_grid] . identifier[append] ( identifier[warped_coord] )
identifier[grid_shape] = identifier[self] . identifier[_output_shape] +( literal[int] ,)
identifier[warped_grid] =[ identifier[basic] . identifier[BatchReshape] ( identifier[grid_shape] )( identifier[grid] ) keyword[for] identifier[grid] keyword[in] identifier[warped_grid] ]
keyword[return] identifier[tf] . identifier[concat] ( identifier[warped_grid] , identifier[len] ( identifier[grid_shape] ))
|
def _build(self, inputs):
"""Assembles the module network and adds it to the graph.
The internal computation graph is assembled according to the set of
constraints provided at construction time.
Args:
inputs: Tensor containing a batch of transformation parameters.
Returns:
A batch of warped grids.
Raises:
Error: If the input tensor size is not consistent with the constraints
passed at construction time.
"""
input_shape = tf.shape(inputs)
input_dtype = inputs.dtype.as_numpy_dtype
batch_size = tf.expand_dims(input_shape[0], 0)
number_of_params = inputs.get_shape()[1]
if number_of_params != self._constraints.num_free_params:
raise base.Error('Input size is not consistent with constraint definition: {} parameters expected, {} provided.'.format(self._constraints.num_free_params, number_of_params)) # depends on [control=['if'], data=['number_of_params']]
num_output_dimensions = len(self._psi) // 3
def get_input_slice(start, size):
"""Extracts a subset of columns from the input 2D Tensor."""
return basic.SliceByDim([1], [start], [size])(inputs)
warped_grid = []
var_index_offset = 0
number_of_points = np.prod(self._output_shape)
for i in xrange(num_output_dimensions):
if self._psi[i] is not None:
# The i-th output dimension is not fully specified by the constraints,
# the graph is setup to perform matrix multiplication in batch mode.
grid_coord = self._psi[i].astype(input_dtype)
num_active_vars = self._psi[i].shape[0]
active_vars = get_input_slice(var_index_offset, num_active_vars)
warped_coord = tf.matmul(active_vars, grid_coord)
warped_coord = tf.expand_dims(warped_coord, 1)
var_index_offset += num_active_vars
offset = self._psi[num_output_dimensions + i]
if offset is not None:
offset = offset.astype(input_dtype)
# Some entries in the i-th row of the affine matrix were constrained
# and the corresponding matrix multiplications have been precomputed.
tiling_params = tf.concat([batch_size, tf.constant(1, shape=(1,)), tf.ones_like(offset.shape)], 0)
offset = offset.reshape((1, 1) + offset.shape)
warped_coord += tf.tile(offset, tiling_params) # depends on [control=['if'], data=['offset']] # depends on [control=['if'], data=[]]
else:
# The i-th output dimension is fully specified by the constraints, and
# the corresponding matrix multiplications have been precomputed.
warped_coord = self._psi[num_output_dimensions + i].astype(input_dtype)
tiling_params = tf.concat([batch_size, tf.constant(1, shape=(1,)), tf.ones_like(warped_coord.shape)], 0)
warped_coord = warped_coord.reshape((1, 1) + warped_coord.shape)
warped_coord = tf.tile(warped_coord, tiling_params)
warped_coord += self._psi[i + 2 * num_output_dimensions]
# Need to help TF figuring out shape inference since tiling information
# is held in Tensors which are not known until run time.
warped_coord.set_shape([None, 1, number_of_points])
warped_grid.append(warped_coord) # depends on [control=['for'], data=['i']]
# Reshape all the warped coordinates tensors to match the specified output
# shape and concatenate into a single matrix.
grid_shape = self._output_shape + (1,)
warped_grid = [basic.BatchReshape(grid_shape)(grid) for grid in warped_grid]
return tf.concat(warped_grid, len(grid_shape))
|
def _validate_calibration_params(strategy='accuracy', min_rate=None,
beta=1.):
"""Ensure that calibration parameters have allowed values"""
if strategy not in ('accuracy', 'f_beta', 'max_tpr',
'max_tnr'):
raise ValueError('Strategy can either be "accuracy", "f_beta" or '
'"max_tpr" or "max_tnr". Got "{}" instead.'
.format(strategy))
if strategy == 'max_tpr' or strategy == 'max_tnr':
if (min_rate is None or not isinstance(min_rate, (int, float)) or
not min_rate >= 0 or not min_rate <= 1):
raise ValueError('Parameter min_rate must be a number in'
'[0, 1]. '
'Got {} instead.'.format(min_rate))
if strategy == 'f_beta':
if beta is None or not isinstance(beta, (int, float)):
raise ValueError('Parameter beta must be a real number. '
'Got {} instead.'.format(type(beta)))
|
def function[_validate_calibration_params, parameter[strategy, min_rate, beta]]:
constant[Ensure that calibration parameters have allowed values]
if compare[name[strategy] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da1b1da1f60>, <ast.Constant object at 0x7da1b1da1870>, <ast.Constant object at 0x7da1b1da2590>, <ast.Constant object at 0x7da1b1da3370>]]] begin[:]
<ast.Raise object at 0x7da1b1da24a0>
if <ast.BoolOp object at 0x7da1b1da0a90> begin[:]
if <ast.BoolOp object at 0x7da1b1da3790> begin[:]
<ast.Raise object at 0x7da1b1e67850>
if compare[name[strategy] equal[==] constant[f_beta]] begin[:]
if <ast.BoolOp object at 0x7da1b1e65210> begin[:]
<ast.Raise object at 0x7da1b1e65f30>
|
keyword[def] identifier[_validate_calibration_params] ( identifier[strategy] = literal[string] , identifier[min_rate] = keyword[None] ,
identifier[beta] = literal[int] ):
literal[string]
keyword[if] identifier[strategy] keyword[not] keyword[in] ( literal[string] , literal[string] , literal[string] ,
literal[string] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
. identifier[format] ( identifier[strategy] ))
keyword[if] identifier[strategy] == literal[string] keyword[or] identifier[strategy] == literal[string] :
keyword[if] ( identifier[min_rate] keyword[is] keyword[None] keyword[or] keyword[not] identifier[isinstance] ( identifier[min_rate] ,( identifier[int] , identifier[float] )) keyword[or]
keyword[not] identifier[min_rate] >= literal[int] keyword[or] keyword[not] identifier[min_rate] <= literal[int] ):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[min_rate] ))
keyword[if] identifier[strategy] == literal[string] :
keyword[if] identifier[beta] keyword[is] keyword[None] keyword[or] keyword[not] identifier[isinstance] ( identifier[beta] ,( identifier[int] , identifier[float] )):
keyword[raise] identifier[ValueError] ( literal[string]
literal[string] . identifier[format] ( identifier[type] ( identifier[beta] )))
|
def _validate_calibration_params(strategy='accuracy', min_rate=None, beta=1.0):
"""Ensure that calibration parameters have allowed values"""
if strategy not in ('accuracy', 'f_beta', 'max_tpr', 'max_tnr'):
raise ValueError('Strategy can either be "accuracy", "f_beta" or "max_tpr" or "max_tnr". Got "{}" instead.'.format(strategy)) # depends on [control=['if'], data=['strategy']]
if strategy == 'max_tpr' or strategy == 'max_tnr':
if min_rate is None or not isinstance(min_rate, (int, float)) or (not min_rate >= 0) or (not min_rate <= 1):
raise ValueError('Parameter min_rate must be a number in[0, 1]. Got {} instead.'.format(min_rate)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if strategy == 'f_beta':
if beta is None or not isinstance(beta, (int, float)):
raise ValueError('Parameter beta must be a real number. Got {} instead.'.format(type(beta))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
|
def _get_function_wrapper(
self, func: typing.Callable[..., typing.Union["typing.Awaitable[typing.Any]", typing.Any]]
) -> typing.Callable[..., "typing.Union[concurrent.futures.Future[typing.Any], typing.Awaitable[typing.Any]]"]:
"""Here should be constructed and returned real decorator.
:param func: Wrapped function
:type func: typing.Callable
:return: wrapped coroutine or function
:rtype: typing.Callable[..., typing.Union[typing.Awaitable, concurrent.futures.Future]]
"""
prepared = self._await_if_required(func)
# noinspection PyMissingOrEmptyDocstring
@functools.wraps(prepared) # pylint: disable=missing-docstring
def wrapper(
*args: typing.Any, **kwargs: typing.Any
) -> typing.Union[
"concurrent.futures.Future[typing.Any]",
"typing.Awaitable[typing.Any]",
typing.Callable[..., "typing.Union[concurrent.futures.Future[typing.Any], typing.Awaitable[typing.Any]]"],
]:
loop: typing.Optional[asyncio.AbstractEventLoop] = self._get_loop(*args, **kwargs)
if loop is None:
return self.executor.submit(prepared, *args, **kwargs)
return loop.run_in_executor(self.executor, functools.partial(prepared, *args, **kwargs))
return wrapper
|
def function[_get_function_wrapper, parameter[self, func]]:
constant[Here should be constructed and returned real decorator.
:param func: Wrapped function
:type func: typing.Callable
:return: wrapped coroutine or function
:rtype: typing.Callable[..., typing.Union[typing.Awaitable, concurrent.futures.Future]]
]
variable[prepared] assign[=] call[name[self]._await_if_required, parameter[name[func]]]
def function[wrapper, parameter[]]:
<ast.AnnAssign object at 0x7da20c7cba00>
if compare[name[loop] is constant[None]] begin[:]
return[call[name[self].executor.submit, parameter[name[prepared], <ast.Starred object at 0x7da20c7c8220>]]]
return[call[name[loop].run_in_executor, parameter[name[self].executor, call[name[functools].partial, parameter[name[prepared], <ast.Starred object at 0x7da20c7c8dc0>]]]]]
return[name[wrapper]]
|
keyword[def] identifier[_get_function_wrapper] (
identifier[self] , identifier[func] : identifier[typing] . identifier[Callable] [..., identifier[typing] . identifier[Union] [ literal[string] , identifier[typing] . identifier[Any] ]]
)-> identifier[typing] . identifier[Callable] [..., literal[string] ]:
literal[string]
identifier[prepared] = identifier[self] . identifier[_await_if_required] ( identifier[func] )
@ identifier[functools] . identifier[wraps] ( identifier[prepared] )
keyword[def] identifier[wrapper] (
* identifier[args] : identifier[typing] . identifier[Any] ,** identifier[kwargs] : identifier[typing] . identifier[Any]
)-> identifier[typing] . identifier[Union] [
literal[string] ,
literal[string] ,
identifier[typing] . identifier[Callable] [..., literal[string] ],
]:
identifier[loop] : identifier[typing] . identifier[Optional] [ identifier[asyncio] . identifier[AbstractEventLoop] ]= identifier[self] . identifier[_get_loop] (* identifier[args] ,** identifier[kwargs] )
keyword[if] identifier[loop] keyword[is] keyword[None] :
keyword[return] identifier[self] . identifier[executor] . identifier[submit] ( identifier[prepared] ,* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[loop] . identifier[run_in_executor] ( identifier[self] . identifier[executor] , identifier[functools] . identifier[partial] ( identifier[prepared] ,* identifier[args] ,** identifier[kwargs] ))
keyword[return] identifier[wrapper]
|
def _get_function_wrapper(self, func: typing.Callable[..., typing.Union['typing.Awaitable[typing.Any]', typing.Any]]) -> typing.Callable[..., 'typing.Union[concurrent.futures.Future[typing.Any], typing.Awaitable[typing.Any]]']:
"""Here should be constructed and returned real decorator.
:param func: Wrapped function
:type func: typing.Callable
:return: wrapped coroutine or function
:rtype: typing.Callable[..., typing.Union[typing.Awaitable, concurrent.futures.Future]]
"""
prepared = self._await_if_required(func)
# noinspection PyMissingOrEmptyDocstring
# pylint: disable=missing-docstring
@functools.wraps(prepared)
def wrapper(*args: typing.Any, **kwargs: typing.Any) -> typing.Union['concurrent.futures.Future[typing.Any]', 'typing.Awaitable[typing.Any]', typing.Callable[..., 'typing.Union[concurrent.futures.Future[typing.Any], typing.Awaitable[typing.Any]]']]:
loop: typing.Optional[asyncio.AbstractEventLoop] = self._get_loop(*args, **kwargs)
if loop is None:
return self.executor.submit(prepared, *args, **kwargs) # depends on [control=['if'], data=[]]
return loop.run_in_executor(self.executor, functools.partial(prepared, *args, **kwargs))
return wrapper
|
def read(self, size):
"""
Read wrapper.
Parameters
----------
size : int
Number of bytes to read
"""
data = None
while True:
try:
data = self.handle.recv(size)
except socket.timeout as socket_error:
self._reconnect(socket_error)
except socket.error as socket_error:
# this is fine, just retry
if socket_error.errno == errno.EINTR:
continue
self._reconnect(IOError)
if not data:
self._reconnect(IOError)
break
return data
|
def function[read, parameter[self, size]]:
constant[
Read wrapper.
Parameters
----------
size : int
Number of bytes to read
]
variable[data] assign[=] constant[None]
while constant[True] begin[:]
<ast.Try object at 0x7da2054a5120>
if <ast.UnaryOp object at 0x7da2054a6f50> begin[:]
call[name[self]._reconnect, parameter[name[IOError]]]
break
return[name[data]]
|
keyword[def] identifier[read] ( identifier[self] , identifier[size] ):
literal[string]
identifier[data] = keyword[None]
keyword[while] keyword[True] :
keyword[try] :
identifier[data] = identifier[self] . identifier[handle] . identifier[recv] ( identifier[size] )
keyword[except] identifier[socket] . identifier[timeout] keyword[as] identifier[socket_error] :
identifier[self] . identifier[_reconnect] ( identifier[socket_error] )
keyword[except] identifier[socket] . identifier[error] keyword[as] identifier[socket_error] :
keyword[if] identifier[socket_error] . identifier[errno] == identifier[errno] . identifier[EINTR] :
keyword[continue]
identifier[self] . identifier[_reconnect] ( identifier[IOError] )
keyword[if] keyword[not] identifier[data] :
identifier[self] . identifier[_reconnect] ( identifier[IOError] )
keyword[break]
keyword[return] identifier[data]
|
def read(self, size):
"""
Read wrapper.
Parameters
----------
size : int
Number of bytes to read
"""
data = None
while True:
try:
data = self.handle.recv(size) # depends on [control=['try'], data=[]]
except socket.timeout as socket_error:
self._reconnect(socket_error) # depends on [control=['except'], data=['socket_error']]
except socket.error as socket_error:
# this is fine, just retry
if socket_error.errno == errno.EINTR:
continue # depends on [control=['if'], data=[]]
self._reconnect(IOError) # depends on [control=['except'], data=['socket_error']]
if not data:
self._reconnect(IOError) # depends on [control=['if'], data=[]]
break # depends on [control=['while'], data=[]]
return data
|
def record_to_fs(self):
"""Create a filesystem file from a File"""
fr = self.record
if fr.contents:
with self._fs.open(self.file_name, 'w', encoding='utf-8') as f:
self.record_to_fh(f)
|
def function[record_to_fs, parameter[self]]:
constant[Create a filesystem file from a File]
variable[fr] assign[=] name[self].record
if name[fr].contents begin[:]
with call[name[self]._fs.open, parameter[name[self].file_name, constant[w]]] begin[:]
call[name[self].record_to_fh, parameter[name[f]]]
|
keyword[def] identifier[record_to_fs] ( identifier[self] ):
literal[string]
identifier[fr] = identifier[self] . identifier[record]
keyword[if] identifier[fr] . identifier[contents] :
keyword[with] identifier[self] . identifier[_fs] . identifier[open] ( identifier[self] . identifier[file_name] , literal[string] , identifier[encoding] = literal[string] ) keyword[as] identifier[f] :
identifier[self] . identifier[record_to_fh] ( identifier[f] )
|
def record_to_fs(self):
"""Create a filesystem file from a File"""
fr = self.record
if fr.contents:
with self._fs.open(self.file_name, 'w', encoding='utf-8') as f:
self.record_to_fh(f) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
|
def update(old, new, collection, sneaky_update_filter=None):
"""
update an existing object with a new one, only saving it and
setting updated_at if something has changed
old
old object
new
new object
collection
collection to save changed object to
sneaky_update_filter
a filter for updates to object that should be ignored
format is a dict mapping field names to a comparison function
that returns True iff there is a change
"""
# need_save = something has changed
need_save = False
locked_fields = old.get('_locked_fields', [])
for key, value in new.items():
# don't update locked fields
if key in locked_fields:
continue
if old.get(key) != value:
if sneaky_update_filter and key in sneaky_update_filter:
if sneaky_update_filter[key](old[key], value):
old[key] = value
need_save = True
else:
old[key] = value
need_save = True
# remove old +key field if this field no longer has a +
plus_key = '+%s' % key
if plus_key in old:
del old[plus_key]
need_save = True
if need_save:
old['updated_at'] = datetime.datetime.utcnow()
collection.save(old, safe=True)
return need_save
|
def function[update, parameter[old, new, collection, sneaky_update_filter]]:
constant[
update an existing object with a new one, only saving it and
setting updated_at if something has changed
old
old object
new
new object
collection
collection to save changed object to
sneaky_update_filter
a filter for updates to object that should be ignored
format is a dict mapping field names to a comparison function
that returns True iff there is a change
]
variable[need_save] assign[=] constant[False]
variable[locked_fields] assign[=] call[name[old].get, parameter[constant[_locked_fields], list[[]]]]
for taget[tuple[[<ast.Name object at 0x7da1b26acaf0>, <ast.Name object at 0x7da1b26afe80>]]] in starred[call[name[new].items, parameter[]]] begin[:]
if compare[name[key] in name[locked_fields]] begin[:]
continue
if compare[call[name[old].get, parameter[name[key]]] not_equal[!=] name[value]] begin[:]
if <ast.BoolOp object at 0x7da1b26aefe0> begin[:]
if call[call[name[sneaky_update_filter]][name[key]], parameter[call[name[old]][name[key]], name[value]]] begin[:]
call[name[old]][name[key]] assign[=] name[value]
variable[need_save] assign[=] constant[True]
variable[plus_key] assign[=] binary_operation[constant[+%s] <ast.Mod object at 0x7da2590d6920> name[key]]
if compare[name[plus_key] in name[old]] begin[:]
<ast.Delete object at 0x7da1b26ae320>
variable[need_save] assign[=] constant[True]
if name[need_save] begin[:]
call[name[old]][constant[updated_at]] assign[=] call[name[datetime].datetime.utcnow, parameter[]]
call[name[collection].save, parameter[name[old]]]
return[name[need_save]]
|
keyword[def] identifier[update] ( identifier[old] , identifier[new] , identifier[collection] , identifier[sneaky_update_filter] = keyword[None] ):
literal[string]
identifier[need_save] = keyword[False]
identifier[locked_fields] = identifier[old] . identifier[get] ( literal[string] ,[])
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[new] . identifier[items] ():
keyword[if] identifier[key] keyword[in] identifier[locked_fields] :
keyword[continue]
keyword[if] identifier[old] . identifier[get] ( identifier[key] )!= identifier[value] :
keyword[if] identifier[sneaky_update_filter] keyword[and] identifier[key] keyword[in] identifier[sneaky_update_filter] :
keyword[if] identifier[sneaky_update_filter] [ identifier[key] ]( identifier[old] [ identifier[key] ], identifier[value] ):
identifier[old] [ identifier[key] ]= identifier[value]
identifier[need_save] = keyword[True]
keyword[else] :
identifier[old] [ identifier[key] ]= identifier[value]
identifier[need_save] = keyword[True]
identifier[plus_key] = literal[string] % identifier[key]
keyword[if] identifier[plus_key] keyword[in] identifier[old] :
keyword[del] identifier[old] [ identifier[plus_key] ]
identifier[need_save] = keyword[True]
keyword[if] identifier[need_save] :
identifier[old] [ literal[string] ]= identifier[datetime] . identifier[datetime] . identifier[utcnow] ()
identifier[collection] . identifier[save] ( identifier[old] , identifier[safe] = keyword[True] )
keyword[return] identifier[need_save]
|
def update(old, new, collection, sneaky_update_filter=None):
"""
update an existing object with a new one, only saving it and
setting updated_at if something has changed
old
old object
new
new object
collection
collection to save changed object to
sneaky_update_filter
a filter for updates to object that should be ignored
format is a dict mapping field names to a comparison function
that returns True iff there is a change
"""
# need_save = something has changed
need_save = False
locked_fields = old.get('_locked_fields', [])
for (key, value) in new.items():
# don't update locked fields
if key in locked_fields:
continue # depends on [control=['if'], data=[]]
if old.get(key) != value:
if sneaky_update_filter and key in sneaky_update_filter:
if sneaky_update_filter[key](old[key], value):
old[key] = value
need_save = True # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
old[key] = value
need_save = True # depends on [control=['if'], data=['value']]
# remove old +key field if this field no longer has a +
plus_key = '+%s' % key
if plus_key in old:
del old[plus_key]
need_save = True # depends on [control=['if'], data=['plus_key', 'old']] # depends on [control=['for'], data=[]]
if need_save:
old['updated_at'] = datetime.datetime.utcnow()
collection.save(old, safe=True) # depends on [control=['if'], data=[]]
return need_save
|
def create_from_file(self, file=None, fmt='gaf', skim=True, **args):
"""
Creates from a file. If fmt is set to None then the file suffixes will
be used to choose a parser.
Arguments
---------
file : str or file
input file or filename
fmt : str
name of format e.g. gaf
"""
if fmt is not None and not fmt.startswith('.'):
fmt = '.{}'.format(fmt)
d = {
'.gaf' : GafParser,
'.gpad' : GpadParser,
'.hpoa' : HpoaParser,
}
if fmt is None:
filename = file if isinstance(file, str) else file.name
suffixes = pathlib.Path(filename).suffixes
iterator = (fn() for ext, fn in d.items() if ext in suffixes)
else:
iterator = (fn() for ext, fn in d.items() if ext == fmt)
try:
parser = next(iterator)
except StopIteration:
logging.error("Format not recognized: {}".format(fmt))
logging.info("Parsing {} with {}/{}".format(file, fmt, parser))
if skim:
results = parser.skim(file)
return self.create_from_tuples(results, **args)
else:
assocs = parser.parse(file, skipheader=True)
return self.create_from_assocs(assocs, **args)
|
def function[create_from_file, parameter[self, file, fmt, skim]]:
constant[
Creates from a file. If fmt is set to None then the file suffixes will
be used to choose a parser.
Arguments
---------
file : str or file
input file or filename
fmt : str
name of format e.g. gaf
]
if <ast.BoolOp object at 0x7da204567910> begin[:]
variable[fmt] assign[=] call[constant[.{}].format, parameter[name[fmt]]]
variable[d] assign[=] dictionary[[<ast.Constant object at 0x7da2045653f0>, <ast.Constant object at 0x7da204566740>, <ast.Constant object at 0x7da2045665f0>], [<ast.Name object at 0x7da204566320>, <ast.Name object at 0x7da204564f40>, <ast.Name object at 0x7da204566620>]]
if compare[name[fmt] is constant[None]] begin[:]
variable[filename] assign[=] <ast.IfExp object at 0x7da2045660e0>
variable[suffixes] assign[=] call[name[pathlib].Path, parameter[name[filename]]].suffixes
variable[iterator] assign[=] <ast.GeneratorExp object at 0x7da1b0893430>
<ast.Try object at 0x7da1b0893bb0>
call[name[logging].info, parameter[call[constant[Parsing {} with {}/{}].format, parameter[name[file], name[fmt], name[parser]]]]]
if name[skim] begin[:]
variable[results] assign[=] call[name[parser].skim, parameter[name[file]]]
return[call[name[self].create_from_tuples, parameter[name[results]]]]
|
keyword[def] identifier[create_from_file] ( identifier[self] , identifier[file] = keyword[None] , identifier[fmt] = literal[string] , identifier[skim] = keyword[True] ,** identifier[args] ):
literal[string]
keyword[if] identifier[fmt] keyword[is] keyword[not] keyword[None] keyword[and] keyword[not] identifier[fmt] . identifier[startswith] ( literal[string] ):
identifier[fmt] = literal[string] . identifier[format] ( identifier[fmt] )
identifier[d] ={
literal[string] : identifier[GafParser] ,
literal[string] : identifier[GpadParser] ,
literal[string] : identifier[HpoaParser] ,
}
keyword[if] identifier[fmt] keyword[is] keyword[None] :
identifier[filename] = identifier[file] keyword[if] identifier[isinstance] ( identifier[file] , identifier[str] ) keyword[else] identifier[file] . identifier[name]
identifier[suffixes] = identifier[pathlib] . identifier[Path] ( identifier[filename] ). identifier[suffixes]
identifier[iterator] =( identifier[fn] () keyword[for] identifier[ext] , identifier[fn] keyword[in] identifier[d] . identifier[items] () keyword[if] identifier[ext] keyword[in] identifier[suffixes] )
keyword[else] :
identifier[iterator] =( identifier[fn] () keyword[for] identifier[ext] , identifier[fn] keyword[in] identifier[d] . identifier[items] () keyword[if] identifier[ext] == identifier[fmt] )
keyword[try] :
identifier[parser] = identifier[next] ( identifier[iterator] )
keyword[except] identifier[StopIteration] :
identifier[logging] . identifier[error] ( literal[string] . identifier[format] ( identifier[fmt] ))
identifier[logging] . identifier[info] ( literal[string] . identifier[format] ( identifier[file] , identifier[fmt] , identifier[parser] ))
keyword[if] identifier[skim] :
identifier[results] = identifier[parser] . identifier[skim] ( identifier[file] )
keyword[return] identifier[self] . identifier[create_from_tuples] ( identifier[results] ,** identifier[args] )
keyword[else] :
identifier[assocs] = identifier[parser] . identifier[parse] ( identifier[file] , identifier[skipheader] = keyword[True] )
keyword[return] identifier[self] . identifier[create_from_assocs] ( identifier[assocs] ,** identifier[args] )
|
def create_from_file(self, file=None, fmt='gaf', skim=True, **args):
"""
Creates from a file. If fmt is set to None then the file suffixes will
be used to choose a parser.
Arguments
---------
file : str or file
input file or filename
fmt : str
name of format e.g. gaf
"""
if fmt is not None and (not fmt.startswith('.')):
fmt = '.{}'.format(fmt) # depends on [control=['if'], data=[]]
d = {'.gaf': GafParser, '.gpad': GpadParser, '.hpoa': HpoaParser}
if fmt is None:
filename = file if isinstance(file, str) else file.name
suffixes = pathlib.Path(filename).suffixes
iterator = (fn() for (ext, fn) in d.items() if ext in suffixes) # depends on [control=['if'], data=[]]
else:
iterator = (fn() for (ext, fn) in d.items() if ext == fmt)
try:
parser = next(iterator) # depends on [control=['try'], data=[]]
except StopIteration:
logging.error('Format not recognized: {}'.format(fmt)) # depends on [control=['except'], data=[]]
logging.info('Parsing {} with {}/{}'.format(file, fmt, parser))
if skim:
results = parser.skim(file)
return self.create_from_tuples(results, **args) # depends on [control=['if'], data=[]]
else:
assocs = parser.parse(file, skipheader=True)
return self.create_from_assocs(assocs, **args)
|
def get_modules():
'''Returns a list of available modules.'''
modules = set()
cwd = os.getcwd()
for d in os.listdir(cwd):
if d == 'module.yml':
modules.add(Module(cwd))
path = unipath(cwd, d)
if utils.is_module(path):
modules.add(Module(cwd))
module_paths = get_module_paths()
for module_path in module_paths:
for d in os.listdir(module_path):
path = unipath(module_path, d)
if utils.is_module(path):
modules.add(Module(path))
return sorted(list(modules), key=lambda x: x.name)
|
def function[get_modules, parameter[]]:
constant[Returns a list of available modules.]
variable[modules] assign[=] call[name[set], parameter[]]
variable[cwd] assign[=] call[name[os].getcwd, parameter[]]
for taget[name[d]] in starred[call[name[os].listdir, parameter[name[cwd]]]] begin[:]
if compare[name[d] equal[==] constant[module.yml]] begin[:]
call[name[modules].add, parameter[call[name[Module], parameter[name[cwd]]]]]
variable[path] assign[=] call[name[unipath], parameter[name[cwd], name[d]]]
if call[name[utils].is_module, parameter[name[path]]] begin[:]
call[name[modules].add, parameter[call[name[Module], parameter[name[cwd]]]]]
variable[module_paths] assign[=] call[name[get_module_paths], parameter[]]
for taget[name[module_path]] in starred[name[module_paths]] begin[:]
for taget[name[d]] in starred[call[name[os].listdir, parameter[name[module_path]]]] begin[:]
variable[path] assign[=] call[name[unipath], parameter[name[module_path], name[d]]]
if call[name[utils].is_module, parameter[name[path]]] begin[:]
call[name[modules].add, parameter[call[name[Module], parameter[name[path]]]]]
return[call[name[sorted], parameter[call[name[list], parameter[name[modules]]]]]]
|
keyword[def] identifier[get_modules] ():
literal[string]
identifier[modules] = identifier[set] ()
identifier[cwd] = identifier[os] . identifier[getcwd] ()
keyword[for] identifier[d] keyword[in] identifier[os] . identifier[listdir] ( identifier[cwd] ):
keyword[if] identifier[d] == literal[string] :
identifier[modules] . identifier[add] ( identifier[Module] ( identifier[cwd] ))
identifier[path] = identifier[unipath] ( identifier[cwd] , identifier[d] )
keyword[if] identifier[utils] . identifier[is_module] ( identifier[path] ):
identifier[modules] . identifier[add] ( identifier[Module] ( identifier[cwd] ))
identifier[module_paths] = identifier[get_module_paths] ()
keyword[for] identifier[module_path] keyword[in] identifier[module_paths] :
keyword[for] identifier[d] keyword[in] identifier[os] . identifier[listdir] ( identifier[module_path] ):
identifier[path] = identifier[unipath] ( identifier[module_path] , identifier[d] )
keyword[if] identifier[utils] . identifier[is_module] ( identifier[path] ):
identifier[modules] . identifier[add] ( identifier[Module] ( identifier[path] ))
keyword[return] identifier[sorted] ( identifier[list] ( identifier[modules] ), identifier[key] = keyword[lambda] identifier[x] : identifier[x] . identifier[name] )
|
def get_modules():
"""Returns a list of available modules."""
modules = set()
cwd = os.getcwd()
for d in os.listdir(cwd):
if d == 'module.yml':
modules.add(Module(cwd)) # depends on [control=['if'], data=[]]
path = unipath(cwd, d)
if utils.is_module(path):
modules.add(Module(cwd)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['d']]
module_paths = get_module_paths()
for module_path in module_paths:
for d in os.listdir(module_path):
path = unipath(module_path, d)
if utils.is_module(path):
modules.add(Module(path)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['d']] # depends on [control=['for'], data=['module_path']]
return sorted(list(modules), key=lambda x: x.name)
|
def type(self, col):
"""
The type for the given column.
:param col: either a name, or an index of the column to look up
:returns: type of the column, one of: ``str``, ``int``, ``real``, ``enum``, ``time``, ``bool``.
:raises H2OValueError: if such column does not exist in the frame.
"""
assert_is_type(col, int, str)
if not self._ex._cache.types_valid() or not self._ex._cache.names_valid():
self._ex._cache.flush()
self._frame(fill_cache=True)
types = self._ex._cache.types
if is_type(col, str):
if col in types:
return types[col]
else:
names = self._ex._cache.names
if -len(names) <= col < len(names):
return types[names[col]]
raise H2OValueError("Column '%r' does not exist in the frame" % col)
|
def function[type, parameter[self, col]]:
constant[
The type for the given column.
:param col: either a name, or an index of the column to look up
:returns: type of the column, one of: ``str``, ``int``, ``real``, ``enum``, ``time``, ``bool``.
:raises H2OValueError: if such column does not exist in the frame.
]
call[name[assert_is_type], parameter[name[col], name[int], name[str]]]
if <ast.BoolOp object at 0x7da18dc98b50> begin[:]
call[name[self]._ex._cache.flush, parameter[]]
call[name[self]._frame, parameter[]]
variable[types] assign[=] name[self]._ex._cache.types
if call[name[is_type], parameter[name[col], name[str]]] begin[:]
if compare[name[col] in name[types]] begin[:]
return[call[name[types]][name[col]]]
<ast.Raise object at 0x7da18dc98c10>
|
keyword[def] identifier[type] ( identifier[self] , identifier[col] ):
literal[string]
identifier[assert_is_type] ( identifier[col] , identifier[int] , identifier[str] )
keyword[if] keyword[not] identifier[self] . identifier[_ex] . identifier[_cache] . identifier[types_valid] () keyword[or] keyword[not] identifier[self] . identifier[_ex] . identifier[_cache] . identifier[names_valid] ():
identifier[self] . identifier[_ex] . identifier[_cache] . identifier[flush] ()
identifier[self] . identifier[_frame] ( identifier[fill_cache] = keyword[True] )
identifier[types] = identifier[self] . identifier[_ex] . identifier[_cache] . identifier[types]
keyword[if] identifier[is_type] ( identifier[col] , identifier[str] ):
keyword[if] identifier[col] keyword[in] identifier[types] :
keyword[return] identifier[types] [ identifier[col] ]
keyword[else] :
identifier[names] = identifier[self] . identifier[_ex] . identifier[_cache] . identifier[names]
keyword[if] - identifier[len] ( identifier[names] )<= identifier[col] < identifier[len] ( identifier[names] ):
keyword[return] identifier[types] [ identifier[names] [ identifier[col] ]]
keyword[raise] identifier[H2OValueError] ( literal[string] % identifier[col] )
|
def type(self, col):
"""
The type for the given column.
:param col: either a name, or an index of the column to look up
:returns: type of the column, one of: ``str``, ``int``, ``real``, ``enum``, ``time``, ``bool``.
:raises H2OValueError: if such column does not exist in the frame.
"""
assert_is_type(col, int, str)
if not self._ex._cache.types_valid() or not self._ex._cache.names_valid():
self._ex._cache.flush()
self._frame(fill_cache=True) # depends on [control=['if'], data=[]]
types = self._ex._cache.types
if is_type(col, str):
if col in types:
return types[col] # depends on [control=['if'], data=['col', 'types']] # depends on [control=['if'], data=[]]
else:
names = self._ex._cache.names
if -len(names) <= col < len(names):
return types[names[col]] # depends on [control=['if'], data=['col']]
raise H2OValueError("Column '%r' does not exist in the frame" % col)
|
def determine_rotation(rotation, mark):
"""
Determines the number of degrees to rotate the watermark image.
"""
if isinstance(rotation, six.string_types) and rotation.lower() == 'r':
rotation = random.randint(0, 359)
else:
rotation = _int(rotation)
return rotation
|
def function[determine_rotation, parameter[rotation, mark]]:
constant[
Determines the number of degrees to rotate the watermark image.
]
if <ast.BoolOp object at 0x7da1b2865630> begin[:]
variable[rotation] assign[=] call[name[random].randint, parameter[constant[0], constant[359]]]
return[name[rotation]]
|
keyword[def] identifier[determine_rotation] ( identifier[rotation] , identifier[mark] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[rotation] , identifier[six] . identifier[string_types] ) keyword[and] identifier[rotation] . identifier[lower] ()== literal[string] :
identifier[rotation] = identifier[random] . identifier[randint] ( literal[int] , literal[int] )
keyword[else] :
identifier[rotation] = identifier[_int] ( identifier[rotation] )
keyword[return] identifier[rotation]
|
def determine_rotation(rotation, mark):
"""
Determines the number of degrees to rotate the watermark image.
"""
if isinstance(rotation, six.string_types) and rotation.lower() == 'r':
rotation = random.randint(0, 359) # depends on [control=['if'], data=[]]
else:
rotation = _int(rotation)
return rotation
|
def scale(self, n):
""" Scale cluster to n workers
Parameters
----------
n: int
Target number of workers
Example
-------
>>> cluster.scale(10) # scale cluster to ten workers
See Also
--------
KubeCluster.scale_up
KubeCluster.scale_down
"""
pods = self._cleanup_terminated_pods(self.pods())
if n >= len(pods):
return self.scale_up(n, pods=pods)
else:
n_to_delete = len(pods) - n
# Before trying to close running workers, check if we can cancel
# pending pods (in case the kubernetes cluster was too full to
# provision those pods in the first place).
running_workers = list(self.scheduler.workers.keys())
running_ips = set(urlparse(worker).hostname
for worker in running_workers)
pending_pods = [p for p in pods
if p.status.pod_ip not in running_ips]
if pending_pods:
pending_to_delete = pending_pods[:n_to_delete]
logger.debug("Deleting pending pods: %s", pending_to_delete)
self._delete_pods(pending_to_delete)
n_to_delete = n_to_delete - len(pending_to_delete)
if n_to_delete <= 0:
return
to_close = select_workers_to_close(self.scheduler, n_to_delete)
logger.debug("Closing workers: %s", to_close)
if len(to_close) < len(self.scheduler.workers):
# Close workers cleanly to migrate any temporary results to
# remaining workers.
@gen.coroutine
def f(to_close):
yield self.scheduler.retire_workers(
workers=to_close, remove=True, close_workers=True)
yield offload(self.scale_down, to_close)
self.scheduler.loop.add_callback(f, to_close)
return
# Terminate all pods without waiting for clean worker shutdown
self.scale_down(to_close)
|
def function[scale, parameter[self, n]]:
constant[ Scale cluster to n workers
Parameters
----------
n: int
Target number of workers
Example
-------
>>> cluster.scale(10) # scale cluster to ten workers
See Also
--------
KubeCluster.scale_up
KubeCluster.scale_down
]
variable[pods] assign[=] call[name[self]._cleanup_terminated_pods, parameter[call[name[self].pods, parameter[]]]]
if compare[name[n] greater_or_equal[>=] call[name[len], parameter[name[pods]]]] begin[:]
return[call[name[self].scale_up, parameter[name[n]]]]
|
keyword[def] identifier[scale] ( identifier[self] , identifier[n] ):
literal[string]
identifier[pods] = identifier[self] . identifier[_cleanup_terminated_pods] ( identifier[self] . identifier[pods] ())
keyword[if] identifier[n] >= identifier[len] ( identifier[pods] ):
keyword[return] identifier[self] . identifier[scale_up] ( identifier[n] , identifier[pods] = identifier[pods] )
keyword[else] :
identifier[n_to_delete] = identifier[len] ( identifier[pods] )- identifier[n]
identifier[running_workers] = identifier[list] ( identifier[self] . identifier[scheduler] . identifier[workers] . identifier[keys] ())
identifier[running_ips] = identifier[set] ( identifier[urlparse] ( identifier[worker] ). identifier[hostname]
keyword[for] identifier[worker] keyword[in] identifier[running_workers] )
identifier[pending_pods] =[ identifier[p] keyword[for] identifier[p] keyword[in] identifier[pods]
keyword[if] identifier[p] . identifier[status] . identifier[pod_ip] keyword[not] keyword[in] identifier[running_ips] ]
keyword[if] identifier[pending_pods] :
identifier[pending_to_delete] = identifier[pending_pods] [: identifier[n_to_delete] ]
identifier[logger] . identifier[debug] ( literal[string] , identifier[pending_to_delete] )
identifier[self] . identifier[_delete_pods] ( identifier[pending_to_delete] )
identifier[n_to_delete] = identifier[n_to_delete] - identifier[len] ( identifier[pending_to_delete] )
keyword[if] identifier[n_to_delete] <= literal[int] :
keyword[return]
identifier[to_close] = identifier[select_workers_to_close] ( identifier[self] . identifier[scheduler] , identifier[n_to_delete] )
identifier[logger] . identifier[debug] ( literal[string] , identifier[to_close] )
keyword[if] identifier[len] ( identifier[to_close] )< identifier[len] ( identifier[self] . identifier[scheduler] . identifier[workers] ):
@ identifier[gen] . identifier[coroutine]
keyword[def] identifier[f] ( identifier[to_close] ):
keyword[yield] identifier[self] . identifier[scheduler] . identifier[retire_workers] (
identifier[workers] = identifier[to_close] , identifier[remove] = keyword[True] , identifier[close_workers] = keyword[True] )
keyword[yield] identifier[offload] ( identifier[self] . identifier[scale_down] , identifier[to_close] )
identifier[self] . identifier[scheduler] . identifier[loop] . identifier[add_callback] ( identifier[f] , identifier[to_close] )
keyword[return]
identifier[self] . identifier[scale_down] ( identifier[to_close] )
|
def scale(self, n):
""" Scale cluster to n workers
Parameters
----------
n: int
Target number of workers
Example
-------
>>> cluster.scale(10) # scale cluster to ten workers
See Also
--------
KubeCluster.scale_up
KubeCluster.scale_down
"""
pods = self._cleanup_terminated_pods(self.pods())
if n >= len(pods):
return self.scale_up(n, pods=pods) # depends on [control=['if'], data=['n']]
else:
n_to_delete = len(pods) - n
# Before trying to close running workers, check if we can cancel
# pending pods (in case the kubernetes cluster was too full to
# provision those pods in the first place).
running_workers = list(self.scheduler.workers.keys())
running_ips = set((urlparse(worker).hostname for worker in running_workers))
pending_pods = [p for p in pods if p.status.pod_ip not in running_ips]
if pending_pods:
pending_to_delete = pending_pods[:n_to_delete]
logger.debug('Deleting pending pods: %s', pending_to_delete)
self._delete_pods(pending_to_delete)
n_to_delete = n_to_delete - len(pending_to_delete)
if n_to_delete <= 0:
return # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
to_close = select_workers_to_close(self.scheduler, n_to_delete)
logger.debug('Closing workers: %s', to_close)
if len(to_close) < len(self.scheduler.workers):
# Close workers cleanly to migrate any temporary results to
# remaining workers.
@gen.coroutine
def f(to_close):
yield self.scheduler.retire_workers(workers=to_close, remove=True, close_workers=True)
yield offload(self.scale_down, to_close)
self.scheduler.loop.add_callback(f, to_close)
return # depends on [control=['if'], data=[]]
# Terminate all pods without waiting for clean worker shutdown
self.scale_down(to_close)
|
def update_mock_repo():
"""
Clone and gzip the memote-mock-repo used for CLI and integration tests.
The repo is hosted at
'https://github.com/ChristianLieven/memote-mock-repo.git' and maintained
separately from
"""
target_file = os.path.abspath(
join("tests", "data", "memote-mock-repo.tar.gz")
)
temp_dir = mkdtemp(prefix='tmp_mock')
previous_wd = os.getcwd()
try:
LOGGER.info("Cloning repository.")
os.chdir(temp_dir)
check_output(
['git', 'clone',
'https://github.com/ChristianLieven/memote-mock-repo.git']
)
os.chdir('memote-mock-repo/')
LOGGER.info("Setting git to ignore filemode changes.")
call(
['git', 'config',
'core.fileMode', 'false']
)
call(
['git', 'config',
'user.email', 'memote@opencobra.com']
)
call(
['git', 'config',
'user.name', 'memote-bot']
)
finally:
LOGGER.info("Compressing to tarball.")
tar = tarfile.open(target_file, "w:gz")
tar.add(
join(temp_dir, 'memote-mock-repo/'),
arcname="memote-mock-repo"
)
tar.close()
LOGGER.info("Success!")
LOGGER.info("Removing temporary directory.")
rmtree(temp_dir)
LOGGER.info("Success! The mock repo has been updated.")
os.chdir(previous_wd)
|
def function[update_mock_repo, parameter[]]:
constant[
Clone and gzip the memote-mock-repo used for CLI and integration tests.
The repo is hosted at
'https://github.com/ChristianLieven/memote-mock-repo.git' and maintained
separately from
]
variable[target_file] assign[=] call[name[os].path.abspath, parameter[call[name[join], parameter[constant[tests], constant[data], constant[memote-mock-repo.tar.gz]]]]]
variable[temp_dir] assign[=] call[name[mkdtemp], parameter[]]
variable[previous_wd] assign[=] call[name[os].getcwd, parameter[]]
<ast.Try object at 0x7da1b0666fb0>
|
keyword[def] identifier[update_mock_repo] ():
literal[string]
identifier[target_file] = identifier[os] . identifier[path] . identifier[abspath] (
identifier[join] ( literal[string] , literal[string] , literal[string] )
)
identifier[temp_dir] = identifier[mkdtemp] ( identifier[prefix] = literal[string] )
identifier[previous_wd] = identifier[os] . identifier[getcwd] ()
keyword[try] :
identifier[LOGGER] . identifier[info] ( literal[string] )
identifier[os] . identifier[chdir] ( identifier[temp_dir] )
identifier[check_output] (
[ literal[string] , literal[string] ,
literal[string] ]
)
identifier[os] . identifier[chdir] ( literal[string] )
identifier[LOGGER] . identifier[info] ( literal[string] )
identifier[call] (
[ literal[string] , literal[string] ,
literal[string] , literal[string] ]
)
identifier[call] (
[ literal[string] , literal[string] ,
literal[string] , literal[string] ]
)
identifier[call] (
[ literal[string] , literal[string] ,
literal[string] , literal[string] ]
)
keyword[finally] :
identifier[LOGGER] . identifier[info] ( literal[string] )
identifier[tar] = identifier[tarfile] . identifier[open] ( identifier[target_file] , literal[string] )
identifier[tar] . identifier[add] (
identifier[join] ( identifier[temp_dir] , literal[string] ),
identifier[arcname] = literal[string]
)
identifier[tar] . identifier[close] ()
identifier[LOGGER] . identifier[info] ( literal[string] )
identifier[LOGGER] . identifier[info] ( literal[string] )
identifier[rmtree] ( identifier[temp_dir] )
identifier[LOGGER] . identifier[info] ( literal[string] )
identifier[os] . identifier[chdir] ( identifier[previous_wd] )
|
def update_mock_repo():
"""
Clone and gzip the memote-mock-repo used for CLI and integration tests.
The repo is hosted at
'https://github.com/ChristianLieven/memote-mock-repo.git' and maintained
separately from
"""
target_file = os.path.abspath(join('tests', 'data', 'memote-mock-repo.tar.gz'))
temp_dir = mkdtemp(prefix='tmp_mock')
previous_wd = os.getcwd()
try:
LOGGER.info('Cloning repository.')
os.chdir(temp_dir)
check_output(['git', 'clone', 'https://github.com/ChristianLieven/memote-mock-repo.git'])
os.chdir('memote-mock-repo/')
LOGGER.info('Setting git to ignore filemode changes.')
call(['git', 'config', 'core.fileMode', 'false'])
call(['git', 'config', 'user.email', 'memote@opencobra.com'])
call(['git', 'config', 'user.name', 'memote-bot']) # depends on [control=['try'], data=[]]
finally:
LOGGER.info('Compressing to tarball.')
tar = tarfile.open(target_file, 'w:gz')
tar.add(join(temp_dir, 'memote-mock-repo/'), arcname='memote-mock-repo')
tar.close()
LOGGER.info('Success!')
LOGGER.info('Removing temporary directory.')
rmtree(temp_dir)
LOGGER.info('Success! The mock repo has been updated.')
os.chdir(previous_wd)
|
def fsencode(path, os_name=os.name, fs_encoding=FS_ENCODING, errors=None):
'''
Encode given path.
:param path: path will be encoded if not using bytes
:type path: bytes or str
:param os_name: operative system name, defaults to os.name
:type os_name: str
:param fs_encoding: current filesystem encoding, defaults to autodetected
:type fs_encoding: str
:return: encoded path
:rtype: bytes
'''
if isinstance(path, bytes):
return path
if not errors:
use_strict = PY_LEGACY or os_name == 'nt'
errors = 'strict' if use_strict else 'surrogateescape'
return path.encode(fs_encoding, errors=errors)
|
def function[fsencode, parameter[path, os_name, fs_encoding, errors]]:
constant[
Encode given path.
:param path: path will be encoded if not using bytes
:type path: bytes or str
:param os_name: operative system name, defaults to os.name
:type os_name: str
:param fs_encoding: current filesystem encoding, defaults to autodetected
:type fs_encoding: str
:return: encoded path
:rtype: bytes
]
if call[name[isinstance], parameter[name[path], name[bytes]]] begin[:]
return[name[path]]
if <ast.UnaryOp object at 0x7da20c76e470> begin[:]
variable[use_strict] assign[=] <ast.BoolOp object at 0x7da20c76e9e0>
variable[errors] assign[=] <ast.IfExp object at 0x7da20c76d900>
return[call[name[path].encode, parameter[name[fs_encoding]]]]
|
keyword[def] identifier[fsencode] ( identifier[path] , identifier[os_name] = identifier[os] . identifier[name] , identifier[fs_encoding] = identifier[FS_ENCODING] , identifier[errors] = keyword[None] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[path] , identifier[bytes] ):
keyword[return] identifier[path]
keyword[if] keyword[not] identifier[errors] :
identifier[use_strict] = identifier[PY_LEGACY] keyword[or] identifier[os_name] == literal[string]
identifier[errors] = literal[string] keyword[if] identifier[use_strict] keyword[else] literal[string]
keyword[return] identifier[path] . identifier[encode] ( identifier[fs_encoding] , identifier[errors] = identifier[errors] )
|
def fsencode(path, os_name=os.name, fs_encoding=FS_ENCODING, errors=None):
"""
Encode given path.
:param path: path will be encoded if not using bytes
:type path: bytes or str
:param os_name: operative system name, defaults to os.name
:type os_name: str
:param fs_encoding: current filesystem encoding, defaults to autodetected
:type fs_encoding: str
:return: encoded path
:rtype: bytes
"""
if isinstance(path, bytes):
return path # depends on [control=['if'], data=[]]
if not errors:
use_strict = PY_LEGACY or os_name == 'nt'
errors = 'strict' if use_strict else 'surrogateescape' # depends on [control=['if'], data=[]]
return path.encode(fs_encoding, errors=errors)
|
def load_cloudformation_template(path=None):
"""Load cloudformation template from path.
:param path: Absolute or relative path of cloudformation template. Defaults to cwd.
:return: module, success
"""
if not path:
path = os.path.abspath('cloudformation.py')
else:
path = os.path.abspath(path)
if isinstance(path, six.string_types):
try:
sp = sys.path
# temporarily add folder to allow relative path
sys.path.append(os.path.abspath(os.path.dirname(path)))
cloudformation = imp.load_source('cloudformation', path)
sys.path = sp # restore
# use cfn template hooks
if not check_hook_mechanism_is_intact(cloudformation):
# no hooks - do nothing
log.debug(
'No valid hook configuration: \'%s\'. Not using hooks!',
path)
else:
if check_register_present(cloudformation):
# register the template hooks so they listen to gcdt_signals
cloudformation.register()
return cloudformation, True
except GracefulExit:
raise
except ImportError as e:
print('could not find package for import: %s' % e)
except Exception as e:
print('could not import cloudformation.py, maybe something wrong ',
'with your code?')
print(e)
return None, False
|
def function[load_cloudformation_template, parameter[path]]:
constant[Load cloudformation template from path.
:param path: Absolute or relative path of cloudformation template. Defaults to cwd.
:return: module, success
]
if <ast.UnaryOp object at 0x7da204346ce0> begin[:]
variable[path] assign[=] call[name[os].path.abspath, parameter[constant[cloudformation.py]]]
if call[name[isinstance], parameter[name[path], name[six].string_types]] begin[:]
<ast.Try object at 0x7da212db4ee0>
return[tuple[[<ast.Constant object at 0x7da1b0ff1c00>, <ast.Constant object at 0x7da1b0ff3010>]]]
|
keyword[def] identifier[load_cloudformation_template] ( identifier[path] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[path] :
identifier[path] = identifier[os] . identifier[path] . identifier[abspath] ( literal[string] )
keyword[else] :
identifier[path] = identifier[os] . identifier[path] . identifier[abspath] ( identifier[path] )
keyword[if] identifier[isinstance] ( identifier[path] , identifier[six] . identifier[string_types] ):
keyword[try] :
identifier[sp] = identifier[sys] . identifier[path]
identifier[sys] . identifier[path] . identifier[append] ( identifier[os] . identifier[path] . identifier[abspath] ( identifier[os] . identifier[path] . identifier[dirname] ( identifier[path] )))
identifier[cloudformation] = identifier[imp] . identifier[load_source] ( literal[string] , identifier[path] )
identifier[sys] . identifier[path] = identifier[sp]
keyword[if] keyword[not] identifier[check_hook_mechanism_is_intact] ( identifier[cloudformation] ):
identifier[log] . identifier[debug] (
literal[string] ,
identifier[path] )
keyword[else] :
keyword[if] identifier[check_register_present] ( identifier[cloudformation] ):
identifier[cloudformation] . identifier[register] ()
keyword[return] identifier[cloudformation] , keyword[True]
keyword[except] identifier[GracefulExit] :
keyword[raise]
keyword[except] identifier[ImportError] keyword[as] identifier[e] :
identifier[print] ( literal[string] % identifier[e] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[print] ( literal[string] ,
literal[string] )
identifier[print] ( identifier[e] )
keyword[return] keyword[None] , keyword[False]
|
def load_cloudformation_template(path=None):
"""Load cloudformation template from path.
:param path: Absolute or relative path of cloudformation template. Defaults to cwd.
:return: module, success
"""
if not path:
path = os.path.abspath('cloudformation.py') # depends on [control=['if'], data=[]]
else:
path = os.path.abspath(path)
if isinstance(path, six.string_types):
try:
sp = sys.path
# temporarily add folder to allow relative path
sys.path.append(os.path.abspath(os.path.dirname(path)))
cloudformation = imp.load_source('cloudformation', path)
sys.path = sp # restore
# use cfn template hooks
if not check_hook_mechanism_is_intact(cloudformation):
# no hooks - do nothing
log.debug("No valid hook configuration: '%s'. Not using hooks!", path) # depends on [control=['if'], data=[]]
elif check_register_present(cloudformation):
# register the template hooks so they listen to gcdt_signals
cloudformation.register() # depends on [control=['if'], data=[]]
return (cloudformation, True) # depends on [control=['try'], data=[]]
except GracefulExit:
raise # depends on [control=['except'], data=[]]
except ImportError as e:
print('could not find package for import: %s' % e) # depends on [control=['except'], data=['e']]
except Exception as e:
print('could not import cloudformation.py, maybe something wrong ', 'with your code?')
print(e) # depends on [control=['except'], data=['e']] # depends on [control=['if'], data=[]]
return (None, False)
|
def _generate_next_token_helper(self, past_states, transitions):
""" generates next token based previous states """
key = tuple(past_states)
assert key in transitions, "%s" % str(key)
return utils.weighted_choice(transitions[key].items())
|
def function[_generate_next_token_helper, parameter[self, past_states, transitions]]:
constant[ generates next token based previous states ]
variable[key] assign[=] call[name[tuple], parameter[name[past_states]]]
assert[compare[name[key] in name[transitions]]]
return[call[name[utils].weighted_choice, parameter[call[call[name[transitions]][name[key]].items, parameter[]]]]]
|
keyword[def] identifier[_generate_next_token_helper] ( identifier[self] , identifier[past_states] , identifier[transitions] ):
literal[string]
identifier[key] = identifier[tuple] ( identifier[past_states] )
keyword[assert] identifier[key] keyword[in] identifier[transitions] , literal[string] % identifier[str] ( identifier[key] )
keyword[return] identifier[utils] . identifier[weighted_choice] ( identifier[transitions] [ identifier[key] ]. identifier[items] ())
|
def _generate_next_token_helper(self, past_states, transitions):
""" generates next token based previous states """
key = tuple(past_states)
assert key in transitions, '%s' % str(key)
return utils.weighted_choice(transitions[key].items())
|
def _get_exponential_spaced_values(mmin, mmax, number_samples):
'''
Function to return a set of exponentially spaced values between mmin and
mmax
:param float mmin:
Minimum value
:param float mmax:
Maximum value
:param float number_samples:
Number of exponentially spaced samples
:return np.ndarray:
Set of 'number_samples' exponentially spaced values
'''
lhs = np.exp(mmin) + np.arange(0., number_samples - 1., 1.) *\
((np.exp(mmax) - np.exp(mmin)) / (number_samples - 1.))
magval = np.hstack([lhs, np.exp(mmax)])
return np.log(magval)
|
def function[_get_exponential_spaced_values, parameter[mmin, mmax, number_samples]]:
constant[
Function to return a set of exponentially spaced values between mmin and
mmax
:param float mmin:
Minimum value
:param float mmax:
Maximum value
:param float number_samples:
Number of exponentially spaced samples
:return np.ndarray:
Set of 'number_samples' exponentially spaced values
]
variable[lhs] assign[=] binary_operation[call[name[np].exp, parameter[name[mmin]]] + binary_operation[call[name[np].arange, parameter[constant[0.0], binary_operation[name[number_samples] - constant[1.0]], constant[1.0]]] * binary_operation[binary_operation[call[name[np].exp, parameter[name[mmax]]] - call[name[np].exp, parameter[name[mmin]]]] / binary_operation[name[number_samples] - constant[1.0]]]]]
variable[magval] assign[=] call[name[np].hstack, parameter[list[[<ast.Name object at 0x7da2054a7790>, <ast.Call object at 0x7da2054a5780>]]]]
return[call[name[np].log, parameter[name[magval]]]]
|
keyword[def] identifier[_get_exponential_spaced_values] ( identifier[mmin] , identifier[mmax] , identifier[number_samples] ):
literal[string]
identifier[lhs] = identifier[np] . identifier[exp] ( identifier[mmin] )+ identifier[np] . identifier[arange] ( literal[int] , identifier[number_samples] - literal[int] , literal[int] )*(( identifier[np] . identifier[exp] ( identifier[mmax] )- identifier[np] . identifier[exp] ( identifier[mmin] ))/( identifier[number_samples] - literal[int] ))
identifier[magval] = identifier[np] . identifier[hstack] ([ identifier[lhs] , identifier[np] . identifier[exp] ( identifier[mmax] )])
keyword[return] identifier[np] . identifier[log] ( identifier[magval] )
|
def _get_exponential_spaced_values(mmin, mmax, number_samples):
"""
Function to return a set of exponentially spaced values between mmin and
mmax
:param float mmin:
Minimum value
:param float mmax:
Maximum value
:param float number_samples:
Number of exponentially spaced samples
:return np.ndarray:
Set of 'number_samples' exponentially spaced values
"""
lhs = np.exp(mmin) + np.arange(0.0, number_samples - 1.0, 1.0) * ((np.exp(mmax) - np.exp(mmin)) / (number_samples - 1.0))
magval = np.hstack([lhs, np.exp(mmax)])
return np.log(magval)
|
def get_hdrs(self, **kws):
"""Initialize column headers."""
hdrs = get_hdrs(self.prt_flds, **kws)
# Values in a "format_txt" "column" are used for formatting, not printing
return [h for h in hdrs if h != "format_txt"]
|
def function[get_hdrs, parameter[self]]:
constant[Initialize column headers.]
variable[hdrs] assign[=] call[name[get_hdrs], parameter[name[self].prt_flds]]
return[<ast.ListComp object at 0x7da18bcc9060>]
|
keyword[def] identifier[get_hdrs] ( identifier[self] ,** identifier[kws] ):
literal[string]
identifier[hdrs] = identifier[get_hdrs] ( identifier[self] . identifier[prt_flds] ,** identifier[kws] )
keyword[return] [ identifier[h] keyword[for] identifier[h] keyword[in] identifier[hdrs] keyword[if] identifier[h] != literal[string] ]
|
def get_hdrs(self, **kws):
"""Initialize column headers."""
hdrs = get_hdrs(self.prt_flds, **kws)
# Values in a "format_txt" "column" are used for formatting, not printing
return [h for h in hdrs if h != 'format_txt']
|
def cmd():
'''Return a command to launch a subshell'''
if platform == 'win':
return ['cmd.exe', '/K']
elif platform == 'linux':
ppid = os.getppid()
ppid_cmdline_file = '/proc/{0}/cmdline'.format(ppid)
try:
with open(ppid_cmdline_file) as f:
cmd = f.read()
if cmd.endswith('\x00'):
cmd = cmd[:-1]
cmd = cmd.split('\x00')
return cmd + [binpath('subshell.sh')]
except:
cmd = 'bash'
else:
cmd = 'bash'
return [cmd, binpath('subshell.sh')]
|
def function[cmd, parameter[]]:
constant[Return a command to launch a subshell]
if compare[name[platform] equal[==] constant[win]] begin[:]
return[list[[<ast.Constant object at 0x7da1b004b550>, <ast.Constant object at 0x7da1b004be20>]]]
return[list[[<ast.Name object at 0x7da1afe89b10>, <ast.Call object at 0x7da1afe8ae00>]]]
|
keyword[def] identifier[cmd] ():
literal[string]
keyword[if] identifier[platform] == literal[string] :
keyword[return] [ literal[string] , literal[string] ]
keyword[elif] identifier[platform] == literal[string] :
identifier[ppid] = identifier[os] . identifier[getppid] ()
identifier[ppid_cmdline_file] = literal[string] . identifier[format] ( identifier[ppid] )
keyword[try] :
keyword[with] identifier[open] ( identifier[ppid_cmdline_file] ) keyword[as] identifier[f] :
identifier[cmd] = identifier[f] . identifier[read] ()
keyword[if] identifier[cmd] . identifier[endswith] ( literal[string] ):
identifier[cmd] = identifier[cmd] [:- literal[int] ]
identifier[cmd] = identifier[cmd] . identifier[split] ( literal[string] )
keyword[return] identifier[cmd] +[ identifier[binpath] ( literal[string] )]
keyword[except] :
identifier[cmd] = literal[string]
keyword[else] :
identifier[cmd] = literal[string]
keyword[return] [ identifier[cmd] , identifier[binpath] ( literal[string] )]
|
def cmd():
"""Return a command to launch a subshell"""
if platform == 'win':
return ['cmd.exe', '/K'] # depends on [control=['if'], data=[]]
elif platform == 'linux':
ppid = os.getppid()
ppid_cmdline_file = '/proc/{0}/cmdline'.format(ppid)
try:
with open(ppid_cmdline_file) as f:
cmd = f.read() # depends on [control=['with'], data=['f']]
if cmd.endswith('\x00'):
cmd = cmd[:-1] # depends on [control=['if'], data=[]]
cmd = cmd.split('\x00')
return cmd + [binpath('subshell.sh')] # depends on [control=['try'], data=[]]
except:
cmd = 'bash' # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
else:
cmd = 'bash'
return [cmd, binpath('subshell.sh')]
|
def iso_date(d) -> str:
"""
Return iso format of a date
Args:
d:
Returns: str
"""
if isinstance(d, datetime):
return d.isoformat()
elif isinstance(d, date):
return datetime.combine(d, datetime.min.time()).isoformat()
else:
try:
datetime.strptime(d, '%Y-%m-%dT%H:%M:%S')
return d
except ValueError:
try:
datetime.strptime(d, '%Y-%m-%d')
return d + "T00:00:00"
except ValueError:
pass
raise ISODateError("Can not convert value to ISO format for kg")
|
def function[iso_date, parameter[d]]:
constant[
Return iso format of a date
Args:
d:
Returns: str
]
if call[name[isinstance], parameter[name[d], name[datetime]]] begin[:]
return[call[name[d].isoformat, parameter[]]]
<ast.Raise object at 0x7da1b0bef2b0>
|
keyword[def] identifier[iso_date] ( identifier[d] )-> identifier[str] :
literal[string]
keyword[if] identifier[isinstance] ( identifier[d] , identifier[datetime] ):
keyword[return] identifier[d] . identifier[isoformat] ()
keyword[elif] identifier[isinstance] ( identifier[d] , identifier[date] ):
keyword[return] identifier[datetime] . identifier[combine] ( identifier[d] , identifier[datetime] . identifier[min] . identifier[time] ()). identifier[isoformat] ()
keyword[else] :
keyword[try] :
identifier[datetime] . identifier[strptime] ( identifier[d] , literal[string] )
keyword[return] identifier[d]
keyword[except] identifier[ValueError] :
keyword[try] :
identifier[datetime] . identifier[strptime] ( identifier[d] , literal[string] )
keyword[return] identifier[d] + literal[string]
keyword[except] identifier[ValueError] :
keyword[pass]
keyword[raise] identifier[ISODateError] ( literal[string] )
|
def iso_date(d) -> str:
"""
Return iso format of a date
Args:
d:
Returns: str
"""
if isinstance(d, datetime):
return d.isoformat() # depends on [control=['if'], data=[]]
elif isinstance(d, date):
return datetime.combine(d, datetime.min.time()).isoformat() # depends on [control=['if'], data=[]]
else:
try:
datetime.strptime(d, '%Y-%m-%dT%H:%M:%S')
return d # depends on [control=['try'], data=[]]
except ValueError:
try:
datetime.strptime(d, '%Y-%m-%d')
return d + 'T00:00:00' # depends on [control=['try'], data=[]]
except ValueError:
pass # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
raise ISODateError('Can not convert value to ISO format for kg')
|
def _main_loop(self):
"""
Run jobs until must_stop returns True
"""
fetch_priorities_delay = timedelta(seconds=self.fetch_priorities_delay)
fetch_delayed_delay = timedelta(seconds=self.fetch_delayed_delay)
while not self.must_stop():
self.set_status('waiting')
if self.last_update_keys + fetch_priorities_delay < datetime.utcnow():
self.update_keys()
if self.last_requeue_delayed + fetch_delayed_delay < datetime.utcnow():
self.requeue_delayed_jobs()
try:
queue_and_job = self.wait_for_job()
if queue_and_job is None:
# timeout for blpop
continue
queue, job = queue_and_job
except Exception as e:
self.log('Unable to get job: %s\n%s'
% (str(e), traceback.format_exc()), level='error')
else:
self.num_loops += 1
try:
identifier = 'pk:%s' % job.pk.get()
except Exception as e:
identifier = '??'
try:
self.set_status('running')
identifier, status = job.hmget('identifier', 'status')
# some cache, don't count on it on subclasses
job._cached_identifier = identifier
job._cached_status = status
queue._cached_name = queue.name.hget()
if status == STATUSES.DELAYED:
self.job_delayed(job, queue)
elif status != STATUSES.WAITING:
self.job_skipped(job, queue)
else:
try:
self.job_started(job, queue)
job_result = self.callback(job, queue)
except Exception as e:
trace = None
if self.save_tracebacks:
trace = traceback.format_exc()
self.job_error(job, queue, e, trace)
else:
job._cached_status = job.status.hget()
if job._cached_status == STATUSES.DELAYED:
self.job_delayed(job, queue)
elif job._cached_status == STATUSES.CANCELED:
self.job_skipped(job, queue)
else:
self.job_success(job, queue, job_result)
except Exception as e:
self.log('[%s] unexpected error: %s\n%s'
% (identifier, str(e), traceback.format_exc()), level='error')
try:
queue.errors.rpush(job.ident)
except Exception as e:
self.log('[%s] unable to add the error in the queue: %s\n%s'
% (identifier, str(e), traceback.format_exc()), level='error')
|
def function[_main_loop, parameter[self]]:
constant[
Run jobs until must_stop returns True
]
variable[fetch_priorities_delay] assign[=] call[name[timedelta], parameter[]]
variable[fetch_delayed_delay] assign[=] call[name[timedelta], parameter[]]
while <ast.UnaryOp object at 0x7da18dc99360> begin[:]
call[name[self].set_status, parameter[constant[waiting]]]
if compare[binary_operation[name[self].last_update_keys + name[fetch_priorities_delay]] less[<] call[name[datetime].utcnow, parameter[]]] begin[:]
call[name[self].update_keys, parameter[]]
if compare[binary_operation[name[self].last_requeue_delayed + name[fetch_delayed_delay]] less[<] call[name[datetime].utcnow, parameter[]]] begin[:]
call[name[self].requeue_delayed_jobs, parameter[]]
<ast.Try object at 0x7da2047eb400>
|
keyword[def] identifier[_main_loop] ( identifier[self] ):
literal[string]
identifier[fetch_priorities_delay] = identifier[timedelta] ( identifier[seconds] = identifier[self] . identifier[fetch_priorities_delay] )
identifier[fetch_delayed_delay] = identifier[timedelta] ( identifier[seconds] = identifier[self] . identifier[fetch_delayed_delay] )
keyword[while] keyword[not] identifier[self] . identifier[must_stop] ():
identifier[self] . identifier[set_status] ( literal[string] )
keyword[if] identifier[self] . identifier[last_update_keys] + identifier[fetch_priorities_delay] < identifier[datetime] . identifier[utcnow] ():
identifier[self] . identifier[update_keys] ()
keyword[if] identifier[self] . identifier[last_requeue_delayed] + identifier[fetch_delayed_delay] < identifier[datetime] . identifier[utcnow] ():
identifier[self] . identifier[requeue_delayed_jobs] ()
keyword[try] :
identifier[queue_and_job] = identifier[self] . identifier[wait_for_job] ()
keyword[if] identifier[queue_and_job] keyword[is] keyword[None] :
keyword[continue]
identifier[queue] , identifier[job] = identifier[queue_and_job]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[log] ( literal[string]
%( identifier[str] ( identifier[e] ), identifier[traceback] . identifier[format_exc] ()), identifier[level] = literal[string] )
keyword[else] :
identifier[self] . identifier[num_loops] += literal[int]
keyword[try] :
identifier[identifier] = literal[string] % identifier[job] . identifier[pk] . identifier[get] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[identifier] = literal[string]
keyword[try] :
identifier[self] . identifier[set_status] ( literal[string] )
identifier[identifier] , identifier[status] = identifier[job] . identifier[hmget] ( literal[string] , literal[string] )
identifier[job] . identifier[_cached_identifier] = identifier[identifier]
identifier[job] . identifier[_cached_status] = identifier[status]
identifier[queue] . identifier[_cached_name] = identifier[queue] . identifier[name] . identifier[hget] ()
keyword[if] identifier[status] == identifier[STATUSES] . identifier[DELAYED] :
identifier[self] . identifier[job_delayed] ( identifier[job] , identifier[queue] )
keyword[elif] identifier[status] != identifier[STATUSES] . identifier[WAITING] :
identifier[self] . identifier[job_skipped] ( identifier[job] , identifier[queue] )
keyword[else] :
keyword[try] :
identifier[self] . identifier[job_started] ( identifier[job] , identifier[queue] )
identifier[job_result] = identifier[self] . identifier[callback] ( identifier[job] , identifier[queue] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[trace] = keyword[None]
keyword[if] identifier[self] . identifier[save_tracebacks] :
identifier[trace] = identifier[traceback] . identifier[format_exc] ()
identifier[self] . identifier[job_error] ( identifier[job] , identifier[queue] , identifier[e] , identifier[trace] )
keyword[else] :
identifier[job] . identifier[_cached_status] = identifier[job] . identifier[status] . identifier[hget] ()
keyword[if] identifier[job] . identifier[_cached_status] == identifier[STATUSES] . identifier[DELAYED] :
identifier[self] . identifier[job_delayed] ( identifier[job] , identifier[queue] )
keyword[elif] identifier[job] . identifier[_cached_status] == identifier[STATUSES] . identifier[CANCELED] :
identifier[self] . identifier[job_skipped] ( identifier[job] , identifier[queue] )
keyword[else] :
identifier[self] . identifier[job_success] ( identifier[job] , identifier[queue] , identifier[job_result] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[log] ( literal[string]
%( identifier[identifier] , identifier[str] ( identifier[e] ), identifier[traceback] . identifier[format_exc] ()), identifier[level] = literal[string] )
keyword[try] :
identifier[queue] . identifier[errors] . identifier[rpush] ( identifier[job] . identifier[ident] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[log] ( literal[string]
%( identifier[identifier] , identifier[str] ( identifier[e] ), identifier[traceback] . identifier[format_exc] ()), identifier[level] = literal[string] )
|
def _main_loop(self):
"""
Run jobs until must_stop returns True
"""
fetch_priorities_delay = timedelta(seconds=self.fetch_priorities_delay)
fetch_delayed_delay = timedelta(seconds=self.fetch_delayed_delay)
while not self.must_stop():
self.set_status('waiting')
if self.last_update_keys + fetch_priorities_delay < datetime.utcnow():
self.update_keys() # depends on [control=['if'], data=[]]
if self.last_requeue_delayed + fetch_delayed_delay < datetime.utcnow():
self.requeue_delayed_jobs() # depends on [control=['if'], data=[]]
try:
queue_and_job = self.wait_for_job()
if queue_and_job is None:
# timeout for blpop
continue # depends on [control=['if'], data=[]]
(queue, job) = queue_and_job # depends on [control=['try'], data=[]]
except Exception as e:
self.log('Unable to get job: %s\n%s' % (str(e), traceback.format_exc()), level='error') # depends on [control=['except'], data=['e']]
else:
self.num_loops += 1
try:
identifier = 'pk:%s' % job.pk.get() # depends on [control=['try'], data=[]]
except Exception as e:
identifier = '??' # depends on [control=['except'], data=[]]
try:
self.set_status('running')
(identifier, status) = job.hmget('identifier', 'status')
# some cache, don't count on it on subclasses
job._cached_identifier = identifier
job._cached_status = status
queue._cached_name = queue.name.hget()
if status == STATUSES.DELAYED:
self.job_delayed(job, queue) # depends on [control=['if'], data=[]]
elif status != STATUSES.WAITING:
self.job_skipped(job, queue) # depends on [control=['if'], data=[]]
else:
try:
self.job_started(job, queue)
job_result = self.callback(job, queue) # depends on [control=['try'], data=[]]
except Exception as e:
trace = None
if self.save_tracebacks:
trace = traceback.format_exc() # depends on [control=['if'], data=[]]
self.job_error(job, queue, e, trace) # depends on [control=['except'], data=['e']]
else:
job._cached_status = job.status.hget()
if job._cached_status == STATUSES.DELAYED:
self.job_delayed(job, queue) # depends on [control=['if'], data=[]]
elif job._cached_status == STATUSES.CANCELED:
self.job_skipped(job, queue) # depends on [control=['if'], data=[]]
else:
self.job_success(job, queue, job_result) # depends on [control=['try'], data=[]]
except Exception as e:
self.log('[%s] unexpected error: %s\n%s' % (identifier, str(e), traceback.format_exc()), level='error')
try:
queue.errors.rpush(job.ident) # depends on [control=['try'], data=[]]
except Exception as e:
self.log('[%s] unable to add the error in the queue: %s\n%s' % (identifier, str(e), traceback.format_exc()), level='error') # depends on [control=['except'], data=['e']] # depends on [control=['except'], data=['e']] # depends on [control=['while'], data=[]]
|
def get_id(opts, cache_minion_id=False):
'''
Guess the id of the minion.
If CONFIG_DIR/minion_id exists, use the cached minion ID from that file.
If no minion id is configured, use multiple sources to find a FQDN.
If no FQDN is found you may get an ip address.
Returns two values: the detected ID, and a boolean value noting whether or
not an IP address is being used for the ID.
'''
if opts['root_dir'] is None:
root_dir = salt.syspaths.ROOT_DIR
else:
root_dir = opts['root_dir']
config_dir = salt.syspaths.CONFIG_DIR
if config_dir.startswith(salt.syspaths.ROOT_DIR):
config_dir = config_dir.split(salt.syspaths.ROOT_DIR, 1)[-1]
# Check for cached minion ID
id_cache = os.path.join(root_dir,
config_dir.lstrip(os.path.sep),
'minion_id')
if opts.get('minion_id_caching', True):
try:
with salt.utils.files.fopen(id_cache) as idf:
name = salt.utils.stringutils.to_unicode(idf.readline().strip())
bname = salt.utils.stringutils.to_bytes(name)
if bname.startswith(codecs.BOM): # Remove BOM if exists
name = salt.utils.stringutils.to_str(bname.replace(codecs.BOM, '', 1))
if name and name != 'localhost':
log.debug('Using cached minion ID from %s: %s', id_cache, name)
return name, False
except (IOError, OSError):
pass
if '__role' in opts and opts.get('__role') == 'minion':
log.debug(
'Guessing ID. The id can be explicitly set in %s',
os.path.join(salt.syspaths.CONFIG_DIR, 'minion')
)
if opts.get('id_function'):
newid = call_id_function(opts)
else:
newid = salt.utils.network.generate_minion_id()
if opts.get('minion_id_lowercase'):
newid = newid.lower()
log.debug('Changed minion id %s to lowercase.', newid)
# Optionally remove one or many domains in a generated minion id
if opts.get('minion_id_remove_domain'):
newid = remove_domain_from_fqdn(opts, newid)
if '__role' in opts and opts.get('__role') == 'minion':
if opts.get('id_function'):
log.debug(
'Found minion id from external function %s: %s',
opts['id_function'], newid
)
else:
log.debug('Found minion id from generate_minion_id(): %s', newid)
if cache_minion_id and opts.get('minion_id_caching', True):
_cache_id(newid, id_cache)
is_ipv4 = salt.utils.network.is_ipv4(newid)
return newid, is_ipv4
|
def function[get_id, parameter[opts, cache_minion_id]]:
constant[
Guess the id of the minion.
If CONFIG_DIR/minion_id exists, use the cached minion ID from that file.
If no minion id is configured, use multiple sources to find a FQDN.
If no FQDN is found you may get an ip address.
Returns two values: the detected ID, and a boolean value noting whether or
not an IP address is being used for the ID.
]
if compare[call[name[opts]][constant[root_dir]] is constant[None]] begin[:]
variable[root_dir] assign[=] name[salt].syspaths.ROOT_DIR
variable[config_dir] assign[=] name[salt].syspaths.CONFIG_DIR
if call[name[config_dir].startswith, parameter[name[salt].syspaths.ROOT_DIR]] begin[:]
variable[config_dir] assign[=] call[call[name[config_dir].split, parameter[name[salt].syspaths.ROOT_DIR, constant[1]]]][<ast.UnaryOp object at 0x7da1b215c910>]
variable[id_cache] assign[=] call[name[os].path.join, parameter[name[root_dir], call[name[config_dir].lstrip, parameter[name[os].path.sep]], constant[minion_id]]]
if call[name[opts].get, parameter[constant[minion_id_caching], constant[True]]] begin[:]
<ast.Try object at 0x7da1b215d7e0>
if <ast.BoolOp object at 0x7da1b215d2d0> begin[:]
call[name[log].debug, parameter[constant[Guessing ID. The id can be explicitly set in %s], call[name[os].path.join, parameter[name[salt].syspaths.CONFIG_DIR, constant[minion]]]]]
if call[name[opts].get, parameter[constant[id_function]]] begin[:]
variable[newid] assign[=] call[name[call_id_function], parameter[name[opts]]]
if call[name[opts].get, parameter[constant[minion_id_lowercase]]] begin[:]
variable[newid] assign[=] call[name[newid].lower, parameter[]]
call[name[log].debug, parameter[constant[Changed minion id %s to lowercase.], name[newid]]]
if call[name[opts].get, parameter[constant[minion_id_remove_domain]]] begin[:]
variable[newid] assign[=] call[name[remove_domain_from_fqdn], parameter[name[opts], name[newid]]]
if <ast.BoolOp object at 0x7da1b215e710> begin[:]
if call[name[opts].get, parameter[constant[id_function]]] begin[:]
call[name[log].debug, parameter[constant[Found minion id from external function %s: %s], call[name[opts]][constant[id_function]], name[newid]]]
if <ast.BoolOp object at 0x7da1b1f94640> begin[:]
call[name[_cache_id], parameter[name[newid], name[id_cache]]]
variable[is_ipv4] assign[=] call[name[salt].utils.network.is_ipv4, parameter[name[newid]]]
return[tuple[[<ast.Name object at 0x7da1b1f94f10>, <ast.Name object at 0x7da1b1f94160>]]]
|
keyword[def] identifier[get_id] ( identifier[opts] , identifier[cache_minion_id] = keyword[False] ):
literal[string]
keyword[if] identifier[opts] [ literal[string] ] keyword[is] keyword[None] :
identifier[root_dir] = identifier[salt] . identifier[syspaths] . identifier[ROOT_DIR]
keyword[else] :
identifier[root_dir] = identifier[opts] [ literal[string] ]
identifier[config_dir] = identifier[salt] . identifier[syspaths] . identifier[CONFIG_DIR]
keyword[if] identifier[config_dir] . identifier[startswith] ( identifier[salt] . identifier[syspaths] . identifier[ROOT_DIR] ):
identifier[config_dir] = identifier[config_dir] . identifier[split] ( identifier[salt] . identifier[syspaths] . identifier[ROOT_DIR] , literal[int] )[- literal[int] ]
identifier[id_cache] = identifier[os] . identifier[path] . identifier[join] ( identifier[root_dir] ,
identifier[config_dir] . identifier[lstrip] ( identifier[os] . identifier[path] . identifier[sep] ),
literal[string] )
keyword[if] identifier[opts] . identifier[get] ( literal[string] , keyword[True] ):
keyword[try] :
keyword[with] identifier[salt] . identifier[utils] . identifier[files] . identifier[fopen] ( identifier[id_cache] ) keyword[as] identifier[idf] :
identifier[name] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_unicode] ( identifier[idf] . identifier[readline] (). identifier[strip] ())
identifier[bname] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_bytes] ( identifier[name] )
keyword[if] identifier[bname] . identifier[startswith] ( identifier[codecs] . identifier[BOM] ):
identifier[name] = identifier[salt] . identifier[utils] . identifier[stringutils] . identifier[to_str] ( identifier[bname] . identifier[replace] ( identifier[codecs] . identifier[BOM] , literal[string] , literal[int] ))
keyword[if] identifier[name] keyword[and] identifier[name] != literal[string] :
identifier[log] . identifier[debug] ( literal[string] , identifier[id_cache] , identifier[name] )
keyword[return] identifier[name] , keyword[False]
keyword[except] ( identifier[IOError] , identifier[OSError] ):
keyword[pass]
keyword[if] literal[string] keyword[in] identifier[opts] keyword[and] identifier[opts] . identifier[get] ( literal[string] )== literal[string] :
identifier[log] . identifier[debug] (
literal[string] ,
identifier[os] . identifier[path] . identifier[join] ( identifier[salt] . identifier[syspaths] . identifier[CONFIG_DIR] , literal[string] )
)
keyword[if] identifier[opts] . identifier[get] ( literal[string] ):
identifier[newid] = identifier[call_id_function] ( identifier[opts] )
keyword[else] :
identifier[newid] = identifier[salt] . identifier[utils] . identifier[network] . identifier[generate_minion_id] ()
keyword[if] identifier[opts] . identifier[get] ( literal[string] ):
identifier[newid] = identifier[newid] . identifier[lower] ()
identifier[log] . identifier[debug] ( literal[string] , identifier[newid] )
keyword[if] identifier[opts] . identifier[get] ( literal[string] ):
identifier[newid] = identifier[remove_domain_from_fqdn] ( identifier[opts] , identifier[newid] )
keyword[if] literal[string] keyword[in] identifier[opts] keyword[and] identifier[opts] . identifier[get] ( literal[string] )== literal[string] :
keyword[if] identifier[opts] . identifier[get] ( literal[string] ):
identifier[log] . identifier[debug] (
literal[string] ,
identifier[opts] [ literal[string] ], identifier[newid]
)
keyword[else] :
identifier[log] . identifier[debug] ( literal[string] , identifier[newid] )
keyword[if] identifier[cache_minion_id] keyword[and] identifier[opts] . identifier[get] ( literal[string] , keyword[True] ):
identifier[_cache_id] ( identifier[newid] , identifier[id_cache] )
identifier[is_ipv4] = identifier[salt] . identifier[utils] . identifier[network] . identifier[is_ipv4] ( identifier[newid] )
keyword[return] identifier[newid] , identifier[is_ipv4]
|
def get_id(opts, cache_minion_id=False):
"""
Guess the id of the minion.
If CONFIG_DIR/minion_id exists, use the cached minion ID from that file.
If no minion id is configured, use multiple sources to find a FQDN.
If no FQDN is found you may get an ip address.
Returns two values: the detected ID, and a boolean value noting whether or
not an IP address is being used for the ID.
"""
if opts['root_dir'] is None:
root_dir = salt.syspaths.ROOT_DIR # depends on [control=['if'], data=[]]
else:
root_dir = opts['root_dir']
config_dir = salt.syspaths.CONFIG_DIR
if config_dir.startswith(salt.syspaths.ROOT_DIR):
config_dir = config_dir.split(salt.syspaths.ROOT_DIR, 1)[-1] # depends on [control=['if'], data=[]]
# Check for cached minion ID
id_cache = os.path.join(root_dir, config_dir.lstrip(os.path.sep), 'minion_id')
if opts.get('minion_id_caching', True):
try:
with salt.utils.files.fopen(id_cache) as idf:
name = salt.utils.stringutils.to_unicode(idf.readline().strip())
bname = salt.utils.stringutils.to_bytes(name)
if bname.startswith(codecs.BOM): # Remove BOM if exists
name = salt.utils.stringutils.to_str(bname.replace(codecs.BOM, '', 1)) # depends on [control=['if'], data=[]] # depends on [control=['with'], data=['idf']]
if name and name != 'localhost':
log.debug('Using cached minion ID from %s: %s', id_cache, name)
return (name, False) # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except (IOError, OSError):
pass # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
if '__role' in opts and opts.get('__role') == 'minion':
log.debug('Guessing ID. The id can be explicitly set in %s', os.path.join(salt.syspaths.CONFIG_DIR, 'minion')) # depends on [control=['if'], data=[]]
if opts.get('id_function'):
newid = call_id_function(opts) # depends on [control=['if'], data=[]]
else:
newid = salt.utils.network.generate_minion_id()
if opts.get('minion_id_lowercase'):
newid = newid.lower()
log.debug('Changed minion id %s to lowercase.', newid) # depends on [control=['if'], data=[]]
# Optionally remove one or many domains in a generated minion id
if opts.get('minion_id_remove_domain'):
newid = remove_domain_from_fqdn(opts, newid) # depends on [control=['if'], data=[]]
if '__role' in opts and opts.get('__role') == 'minion':
if opts.get('id_function'):
log.debug('Found minion id from external function %s: %s', opts['id_function'], newid) # depends on [control=['if'], data=[]]
else:
log.debug('Found minion id from generate_minion_id(): %s', newid) # depends on [control=['if'], data=[]]
if cache_minion_id and opts.get('minion_id_caching', True):
_cache_id(newid, id_cache) # depends on [control=['if'], data=[]]
is_ipv4 = salt.utils.network.is_ipv4(newid)
return (newid, is_ipv4)
|
def compile_kernel(self, instance, verbose):
"""compile the kernel for this specific instance"""
logging.debug('compile_kernel ' + instance.name)
#compile kernel_string into device func
func = None
try:
func = self.dev.compile(instance.name, instance.kernel_string)
except Exception as e:
#compiles may fail because certain kernel configurations use too
#much shared memory for example, the desired behavior is to simply
#skip over this configuration and try the next one
if "uses too much shared data" in str(e):
logging.debug('compile_kernel failed due to kernel using too much shared memory')
if verbose:
print("skipping config", instance.name, "reason: too much shared memory used")
else:
logging.debug('compile_kernel failed due to error: ' + str(e))
print("Error while compiling:", instance.name)
raise e
return func
|
def function[compile_kernel, parameter[self, instance, verbose]]:
constant[compile the kernel for this specific instance]
call[name[logging].debug, parameter[binary_operation[constant[compile_kernel ] + name[instance].name]]]
variable[func] assign[=] constant[None]
<ast.Try object at 0x7da1b05dbfd0>
return[name[func]]
|
keyword[def] identifier[compile_kernel] ( identifier[self] , identifier[instance] , identifier[verbose] ):
literal[string]
identifier[logging] . identifier[debug] ( literal[string] + identifier[instance] . identifier[name] )
identifier[func] = keyword[None]
keyword[try] :
identifier[func] = identifier[self] . identifier[dev] . identifier[compile] ( identifier[instance] . identifier[name] , identifier[instance] . identifier[kernel_string] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[if] literal[string] keyword[in] identifier[str] ( identifier[e] ):
identifier[logging] . identifier[debug] ( literal[string] )
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] , identifier[instance] . identifier[name] , literal[string] )
keyword[else] :
identifier[logging] . identifier[debug] ( literal[string] + identifier[str] ( identifier[e] ))
identifier[print] ( literal[string] , identifier[instance] . identifier[name] )
keyword[raise] identifier[e]
keyword[return] identifier[func]
|
def compile_kernel(self, instance, verbose):
"""compile the kernel for this specific instance"""
logging.debug('compile_kernel ' + instance.name)
#compile kernel_string into device func
func = None
try:
func = self.dev.compile(instance.name, instance.kernel_string) # depends on [control=['try'], data=[]]
except Exception as e:
#compiles may fail because certain kernel configurations use too
#much shared memory for example, the desired behavior is to simply
#skip over this configuration and try the next one
if 'uses too much shared data' in str(e):
logging.debug('compile_kernel failed due to kernel using too much shared memory')
if verbose:
print('skipping config', instance.name, 'reason: too much shared memory used') # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
logging.debug('compile_kernel failed due to error: ' + str(e))
print('Error while compiling:', instance.name)
raise e # depends on [control=['except'], data=['e']]
return func
|
def cv_compute(self, b, A, B, C, mK, f, m1, m2):
'''
Compute the model (cross-validation step only) for chunk :py:obj:`b`.
'''
A = np.sum([l * a for l, a in zip(self.lam[b], A)
if l is not None], axis=0)
B = np.sum([l * b for l, b in zip(self.lam[b], B)
if l is not None], axis=0)
W = np.linalg.solve(mK + A + C, f)
if self.transit_model is None:
model = np.dot(B, W)
else:
w_pld = np.concatenate([l * np.dot(self.X(n, m2).T, W)
for n, l in enumerate(self.lam[b])
if l is not None])
model = np.dot(np.hstack(
[self.X(n, m1) for n, l in enumerate(self.lam[b])
if l is not None]), w_pld)
model -= np.nanmedian(model)
return model
|
def function[cv_compute, parameter[self, b, A, B, C, mK, f, m1, m2]]:
constant[
Compute the model (cross-validation step only) for chunk :py:obj:`b`.
]
variable[A] assign[=] call[name[np].sum, parameter[<ast.ListComp object at 0x7da1b0e32680>]]
variable[B] assign[=] call[name[np].sum, parameter[<ast.ListComp object at 0x7da1b0e33c10>]]
variable[W] assign[=] call[name[np].linalg.solve, parameter[binary_operation[binary_operation[name[mK] + name[A]] + name[C]], name[f]]]
if compare[name[self].transit_model is constant[None]] begin[:]
variable[model] assign[=] call[name[np].dot, parameter[name[B], name[W]]]
<ast.AugAssign object at 0x7da1b0e31810>
return[name[model]]
|
keyword[def] identifier[cv_compute] ( identifier[self] , identifier[b] , identifier[A] , identifier[B] , identifier[C] , identifier[mK] , identifier[f] , identifier[m1] , identifier[m2] ):
literal[string]
identifier[A] = identifier[np] . identifier[sum] ([ identifier[l] * identifier[a] keyword[for] identifier[l] , identifier[a] keyword[in] identifier[zip] ( identifier[self] . identifier[lam] [ identifier[b] ], identifier[A] )
keyword[if] identifier[l] keyword[is] keyword[not] keyword[None] ], identifier[axis] = literal[int] )
identifier[B] = identifier[np] . identifier[sum] ([ identifier[l] * identifier[b] keyword[for] identifier[l] , identifier[b] keyword[in] identifier[zip] ( identifier[self] . identifier[lam] [ identifier[b] ], identifier[B] )
keyword[if] identifier[l] keyword[is] keyword[not] keyword[None] ], identifier[axis] = literal[int] )
identifier[W] = identifier[np] . identifier[linalg] . identifier[solve] ( identifier[mK] + identifier[A] + identifier[C] , identifier[f] )
keyword[if] identifier[self] . identifier[transit_model] keyword[is] keyword[None] :
identifier[model] = identifier[np] . identifier[dot] ( identifier[B] , identifier[W] )
keyword[else] :
identifier[w_pld] = identifier[np] . identifier[concatenate] ([ identifier[l] * identifier[np] . identifier[dot] ( identifier[self] . identifier[X] ( identifier[n] , identifier[m2] ). identifier[T] , identifier[W] )
keyword[for] identifier[n] , identifier[l] keyword[in] identifier[enumerate] ( identifier[self] . identifier[lam] [ identifier[b] ])
keyword[if] identifier[l] keyword[is] keyword[not] keyword[None] ])
identifier[model] = identifier[np] . identifier[dot] ( identifier[np] . identifier[hstack] (
[ identifier[self] . identifier[X] ( identifier[n] , identifier[m1] ) keyword[for] identifier[n] , identifier[l] keyword[in] identifier[enumerate] ( identifier[self] . identifier[lam] [ identifier[b] ])
keyword[if] identifier[l] keyword[is] keyword[not] keyword[None] ]), identifier[w_pld] )
identifier[model] -= identifier[np] . identifier[nanmedian] ( identifier[model] )
keyword[return] identifier[model]
|
def cv_compute(self, b, A, B, C, mK, f, m1, m2):
"""
Compute the model (cross-validation step only) for chunk :py:obj:`b`.
"""
A = np.sum([l * a for (l, a) in zip(self.lam[b], A) if l is not None], axis=0)
B = np.sum([l * b for (l, b) in zip(self.lam[b], B) if l is not None], axis=0)
W = np.linalg.solve(mK + A + C, f)
if self.transit_model is None:
model = np.dot(B, W) # depends on [control=['if'], data=[]]
else:
w_pld = np.concatenate([l * np.dot(self.X(n, m2).T, W) for (n, l) in enumerate(self.lam[b]) if l is not None])
model = np.dot(np.hstack([self.X(n, m1) for (n, l) in enumerate(self.lam[b]) if l is not None]), w_pld)
model -= np.nanmedian(model)
return model
|
def flip_axis_multi(x, axis, is_random=False):
"""Flip the axises of multiple images together, such as flip left and right, up and down, randomly or non-randomly,
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.flip_axis``.
Returns
-------
numpy.array
A list of processed images.
"""
if is_random:
factor = np.random.uniform(-1, 1)
if factor > 0:
# x = np.asarray(x).swapaxes(axis, 0)
# x = x[::-1, ...]
# x = x.swapaxes(0, axis)
# return x
results = []
for data in x:
data = np.asarray(data).swapaxes(axis, 0)
data = data[::-1, ...]
data = data.swapaxes(0, axis)
results.append(data)
return np.asarray(results)
else:
return np.asarray(x)
else:
# x = np.asarray(x).swapaxes(axis, 0)
# x = x[::-1, ...]
# x = x.swapaxes(0, axis)
# return x
results = []
for data in x:
data = np.asarray(data).swapaxes(axis, 0)
data = data[::-1, ...]
data = data.swapaxes(0, axis)
results.append(data)
return np.asarray(results)
|
def function[flip_axis_multi, parameter[x, axis, is_random]]:
constant[Flip the axises of multiple images together, such as flip left and right, up and down, randomly or non-randomly,
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.flip_axis``.
Returns
-------
numpy.array
A list of processed images.
]
if name[is_random] begin[:]
variable[factor] assign[=] call[name[np].random.uniform, parameter[<ast.UnaryOp object at 0x7da20c6a8040>, constant[1]]]
if compare[name[factor] greater[>] constant[0]] begin[:]
variable[results] assign[=] list[[]]
for taget[name[data]] in starred[name[x]] begin[:]
variable[data] assign[=] call[call[name[np].asarray, parameter[name[data]]].swapaxes, parameter[name[axis], constant[0]]]
variable[data] assign[=] call[name[data]][tuple[[<ast.Slice object at 0x7da20c6a92d0>, <ast.Constant object at 0x7da20c6a9300>]]]
variable[data] assign[=] call[name[data].swapaxes, parameter[constant[0], name[axis]]]
call[name[results].append, parameter[name[data]]]
return[call[name[np].asarray, parameter[name[results]]]]
|
keyword[def] identifier[flip_axis_multi] ( identifier[x] , identifier[axis] , identifier[is_random] = keyword[False] ):
literal[string]
keyword[if] identifier[is_random] :
identifier[factor] = identifier[np] . identifier[random] . identifier[uniform] (- literal[int] , literal[int] )
keyword[if] identifier[factor] > literal[int] :
identifier[results] =[]
keyword[for] identifier[data] keyword[in] identifier[x] :
identifier[data] = identifier[np] . identifier[asarray] ( identifier[data] ). identifier[swapaxes] ( identifier[axis] , literal[int] )
identifier[data] = identifier[data] [::- literal[int] ,...]
identifier[data] = identifier[data] . identifier[swapaxes] ( literal[int] , identifier[axis] )
identifier[results] . identifier[append] ( identifier[data] )
keyword[return] identifier[np] . identifier[asarray] ( identifier[results] )
keyword[else] :
keyword[return] identifier[np] . identifier[asarray] ( identifier[x] )
keyword[else] :
identifier[results] =[]
keyword[for] identifier[data] keyword[in] identifier[x] :
identifier[data] = identifier[np] . identifier[asarray] ( identifier[data] ). identifier[swapaxes] ( identifier[axis] , literal[int] )
identifier[data] = identifier[data] [::- literal[int] ,...]
identifier[data] = identifier[data] . identifier[swapaxes] ( literal[int] , identifier[axis] )
identifier[results] . identifier[append] ( identifier[data] )
keyword[return] identifier[np] . identifier[asarray] ( identifier[results] )
|
def flip_axis_multi(x, axis, is_random=False):
"""Flip the axises of multiple images together, such as flip left and right, up and down, randomly or non-randomly,
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.flip_axis``.
Returns
-------
numpy.array
A list of processed images.
"""
if is_random:
factor = np.random.uniform(-1, 1)
if factor > 0:
# x = np.asarray(x).swapaxes(axis, 0)
# x = x[::-1, ...]
# x = x.swapaxes(0, axis)
# return x
results = []
for data in x:
data = np.asarray(data).swapaxes(axis, 0)
data = data[::-1, ...]
data = data.swapaxes(0, axis)
results.append(data) # depends on [control=['for'], data=['data']]
return np.asarray(results) # depends on [control=['if'], data=[]]
else:
return np.asarray(x) # depends on [control=['if'], data=[]]
else:
# x = np.asarray(x).swapaxes(axis, 0)
# x = x[::-1, ...]
# x = x.swapaxes(0, axis)
# return x
results = []
for data in x:
data = np.asarray(data).swapaxes(axis, 0)
data = data[::-1, ...]
data = data.swapaxes(0, axis)
results.append(data) # depends on [control=['for'], data=['data']]
return np.asarray(results)
|
def _delete_reverses(self):
"""
Delete all objects that would have been cloned
on a clone command. This is done separately because
there may be m2m and other relationships that
would have not been deleted otherwise.
"""
for reverse in self.clone_related:
self._delete_reverse(reverse)
for field in self._meta.local_many_to_many:
if field.rel.through and \
field.rel.through._meta.auto_created and not \
field.name in self.clone_related:
man = getattr(self, field.name)
man.clear()
|
def function[_delete_reverses, parameter[self]]:
constant[
Delete all objects that would have been cloned
on a clone command. This is done separately because
there may be m2m and other relationships that
would have not been deleted otherwise.
]
for taget[name[reverse]] in starred[name[self].clone_related] begin[:]
call[name[self]._delete_reverse, parameter[name[reverse]]]
for taget[name[field]] in starred[name[self]._meta.local_many_to_many] begin[:]
if <ast.BoolOp object at 0x7da18f721810> begin[:]
variable[man] assign[=] call[name[getattr], parameter[name[self], name[field].name]]
call[name[man].clear, parameter[]]
|
keyword[def] identifier[_delete_reverses] ( identifier[self] ):
literal[string]
keyword[for] identifier[reverse] keyword[in] identifier[self] . identifier[clone_related] :
identifier[self] . identifier[_delete_reverse] ( identifier[reverse] )
keyword[for] identifier[field] keyword[in] identifier[self] . identifier[_meta] . identifier[local_many_to_many] :
keyword[if] identifier[field] . identifier[rel] . identifier[through] keyword[and] identifier[field] . identifier[rel] . identifier[through] . identifier[_meta] . identifier[auto_created] keyword[and] keyword[not] identifier[field] . identifier[name] keyword[in] identifier[self] . identifier[clone_related] :
identifier[man] = identifier[getattr] ( identifier[self] , identifier[field] . identifier[name] )
identifier[man] . identifier[clear] ()
|
def _delete_reverses(self):
"""
Delete all objects that would have been cloned
on a clone command. This is done separately because
there may be m2m and other relationships that
would have not been deleted otherwise.
"""
for reverse in self.clone_related:
self._delete_reverse(reverse) # depends on [control=['for'], data=['reverse']]
for field in self._meta.local_many_to_many:
if field.rel.through and field.rel.through._meta.auto_created and (not field.name in self.clone_related):
man = getattr(self, field.name)
man.clear() # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['field']]
|
def unmap_volume_from_sdc(self, volObj, sdcObj=None, **kwargs):
"""
Unmap a Volume from SDC or all SDCs
:param volObj: ScaleIO Volume object
:param sdcObj: ScaleIO SDC object
:param \**kwargs:
:Keyword Arguments:
*disableMapAllSdcs* (``bool``) -- True to disable all SDCs mapping
:return: POST request response
:rtype: Requests POST response object
:raise RuntimeError: If failure happen during communication with REST Gateway - Need to be cleaned up and made consistent to return understandable errors
"""
# TODO:
# Check if object parameters are the correct ones, otherwise throw error
# ADD logic for ALL SDC UNMAP
# For all SDC unmapVolumeFromDict = {'allSdc':'True'} False can be used
self.conn.connection._check_login()
if kwargs:
for key, value in kwargs.iteritems():
if key == 'enableMapAllSdcs' and value == False:
if self.get_volume_all_sdcs_mapped(volObj): # Check if allSdc?s is True before continuing
unmapVolumeFromSdcDict = {'allSdcs': 'False'}
else:
unmapVolumeFromSdcDict = {'sdcId': sdcObj.id}
try:
response = self.conn.connection._do_post("{}/{}{}/{}".format(self.conn.connection._api_url, "instances/Volume::", volObj.id, 'action/removeMappedSdc'), json=unmapVolumeFromSdcDict)
except:
raise RuntimeError("unmap_volume_from_sdc() - Cannot unmap volume")
return response
|
def function[unmap_volume_from_sdc, parameter[self, volObj, sdcObj]]:
constant[
Unmap a Volume from SDC or all SDCs
:param volObj: ScaleIO Volume object
:param sdcObj: ScaleIO SDC object
:param \**kwargs:
:Keyword Arguments:
*disableMapAllSdcs* (``bool``) -- True to disable all SDCs mapping
:return: POST request response
:rtype: Requests POST response object
:raise RuntimeError: If failure happen during communication with REST Gateway - Need to be cleaned up and made consistent to return understandable errors
]
call[name[self].conn.connection._check_login, parameter[]]
if name[kwargs] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b2539b10>, <ast.Name object at 0x7da1b2538eb0>]]] in starred[call[name[kwargs].iteritems, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da1b2538ca0> begin[:]
if call[name[self].get_volume_all_sdcs_mapped, parameter[name[volObj]]] begin[:]
variable[unmapVolumeFromSdcDict] assign[=] dictionary[[<ast.Constant object at 0x7da1b25398a0>], [<ast.Constant object at 0x7da1b2538c10>]]
<ast.Try object at 0x7da1b253b250>
return[name[response]]
|
keyword[def] identifier[unmap_volume_from_sdc] ( identifier[self] , identifier[volObj] , identifier[sdcObj] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[conn] . identifier[connection] . identifier[_check_login] ()
keyword[if] identifier[kwargs] :
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[kwargs] . identifier[iteritems] ():
keyword[if] identifier[key] == literal[string] keyword[and] identifier[value] == keyword[False] :
keyword[if] identifier[self] . identifier[get_volume_all_sdcs_mapped] ( identifier[volObj] ):
identifier[unmapVolumeFromSdcDict] ={ literal[string] : literal[string] }
keyword[else] :
identifier[unmapVolumeFromSdcDict] ={ literal[string] : identifier[sdcObj] . identifier[id] }
keyword[try] :
identifier[response] = identifier[self] . identifier[conn] . identifier[connection] . identifier[_do_post] ( literal[string] . identifier[format] ( identifier[self] . identifier[conn] . identifier[connection] . identifier[_api_url] , literal[string] , identifier[volObj] . identifier[id] , literal[string] ), identifier[json] = identifier[unmapVolumeFromSdcDict] )
keyword[except] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[return] identifier[response]
|
def unmap_volume_from_sdc(self, volObj, sdcObj=None, **kwargs):
"""
Unmap a Volume from SDC or all SDCs
:param volObj: ScaleIO Volume object
:param sdcObj: ScaleIO SDC object
:param \\**kwargs:
:Keyword Arguments:
*disableMapAllSdcs* (``bool``) -- True to disable all SDCs mapping
:return: POST request response
:rtype: Requests POST response object
:raise RuntimeError: If failure happen during communication with REST Gateway - Need to be cleaned up and made consistent to return understandable errors
"""
# TODO:
# Check if object parameters are the correct ones, otherwise throw error
# ADD logic for ALL SDC UNMAP
# For all SDC unmapVolumeFromDict = {'allSdc':'True'} False can be used
self.conn.connection._check_login()
if kwargs:
for (key, value) in kwargs.iteritems():
if key == 'enableMapAllSdcs' and value == False:
if self.get_volume_all_sdcs_mapped(volObj): # Check if allSdc?s is True before continuing
unmapVolumeFromSdcDict = {'allSdcs': 'False'} # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
unmapVolumeFromSdcDict = {'sdcId': sdcObj.id}
try:
response = self.conn.connection._do_post('{}/{}{}/{}'.format(self.conn.connection._api_url, 'instances/Volume::', volObj.id, 'action/removeMappedSdc'), json=unmapVolumeFromSdcDict) # depends on [control=['try'], data=[]]
except:
raise RuntimeError('unmap_volume_from_sdc() - Cannot unmap volume') # depends on [control=['except'], data=[]]
return response
|
def issymmetric(am):
r"""
A method to check if a square matrix is symmetric
Returns ``True`` if the sparse adjacency matrix is symmetric
"""
if am.shape[0] != am.shape[1]:
logger.warning('Matrix is not square, symmetrical is irrelevant')
return False
if am.format != 'coo':
am = am.tocoo(copy=False)
if istril(am) or istriu(am):
return False
# Compare am with its transpose, element wise
sym = ((am != am.T).size) == 0
return sym
|
def function[issymmetric, parameter[am]]:
constant[
A method to check if a square matrix is symmetric
Returns ``True`` if the sparse adjacency matrix is symmetric
]
if compare[call[name[am].shape][constant[0]] not_equal[!=] call[name[am].shape][constant[1]]] begin[:]
call[name[logger].warning, parameter[constant[Matrix is not square, symmetrical is irrelevant]]]
return[constant[False]]
if compare[name[am].format not_equal[!=] constant[coo]] begin[:]
variable[am] assign[=] call[name[am].tocoo, parameter[]]
if <ast.BoolOp object at 0x7da18fe90070> begin[:]
return[constant[False]]
variable[sym] assign[=] compare[compare[name[am] not_equal[!=] name[am].T].size equal[==] constant[0]]
return[name[sym]]
|
keyword[def] identifier[issymmetric] ( identifier[am] ):
literal[string]
keyword[if] identifier[am] . identifier[shape] [ literal[int] ]!= identifier[am] . identifier[shape] [ literal[int] ]:
identifier[logger] . identifier[warning] ( literal[string] )
keyword[return] keyword[False]
keyword[if] identifier[am] . identifier[format] != literal[string] :
identifier[am] = identifier[am] . identifier[tocoo] ( identifier[copy] = keyword[False] )
keyword[if] identifier[istril] ( identifier[am] ) keyword[or] identifier[istriu] ( identifier[am] ):
keyword[return] keyword[False]
identifier[sym] =(( identifier[am] != identifier[am] . identifier[T] ). identifier[size] )== literal[int]
keyword[return] identifier[sym]
|
def issymmetric(am):
"""
A method to check if a square matrix is symmetric
Returns ``True`` if the sparse adjacency matrix is symmetric
"""
if am.shape[0] != am.shape[1]:
logger.warning('Matrix is not square, symmetrical is irrelevant')
return False # depends on [control=['if'], data=[]]
if am.format != 'coo':
am = am.tocoo(copy=False) # depends on [control=['if'], data=[]]
if istril(am) or istriu(am):
return False # depends on [control=['if'], data=[]]
# Compare am with its transpose, element wise
sym = (am != am.T).size == 0
return sym
|
def get_out_segmentlistdict(self, process_ids = None):
"""
Return a segmentlistdict mapping instrument to out segment
list. If process_ids is a sequence of process IDs, then
only rows with matching IDs are included otherwise all rows
are included.
Note: the result is not coalesced, each segmentlist
contains the segments listed for that instrument as they
appeared in the table.
"""
seglists = segments.segmentlistdict()
for row in self:
ifos = row.instruments or (None,)
if process_ids is None or row.process_id in process_ids:
seglists.extend(dict((ifo, segments.segmentlist([row.out_segment])) for ifo in ifos))
return seglists
|
def function[get_out_segmentlistdict, parameter[self, process_ids]]:
constant[
Return a segmentlistdict mapping instrument to out segment
list. If process_ids is a sequence of process IDs, then
only rows with matching IDs are included otherwise all rows
are included.
Note: the result is not coalesced, each segmentlist
contains the segments listed for that instrument as they
appeared in the table.
]
variable[seglists] assign[=] call[name[segments].segmentlistdict, parameter[]]
for taget[name[row]] in starred[name[self]] begin[:]
variable[ifos] assign[=] <ast.BoolOp object at 0x7da1b0b570d0>
if <ast.BoolOp object at 0x7da1b0b54850> begin[:]
call[name[seglists].extend, parameter[call[name[dict], parameter[<ast.GeneratorExp object at 0x7da1b0b82080>]]]]
return[name[seglists]]
|
keyword[def] identifier[get_out_segmentlistdict] ( identifier[self] , identifier[process_ids] = keyword[None] ):
literal[string]
identifier[seglists] = identifier[segments] . identifier[segmentlistdict] ()
keyword[for] identifier[row] keyword[in] identifier[self] :
identifier[ifos] = identifier[row] . identifier[instruments] keyword[or] ( keyword[None] ,)
keyword[if] identifier[process_ids] keyword[is] keyword[None] keyword[or] identifier[row] . identifier[process_id] keyword[in] identifier[process_ids] :
identifier[seglists] . identifier[extend] ( identifier[dict] (( identifier[ifo] , identifier[segments] . identifier[segmentlist] ([ identifier[row] . identifier[out_segment] ])) keyword[for] identifier[ifo] keyword[in] identifier[ifos] ))
keyword[return] identifier[seglists]
|
def get_out_segmentlistdict(self, process_ids=None):
"""
Return a segmentlistdict mapping instrument to out segment
list. If process_ids is a sequence of process IDs, then
only rows with matching IDs are included otherwise all rows
are included.
Note: the result is not coalesced, each segmentlist
contains the segments listed for that instrument as they
appeared in the table.
"""
seglists = segments.segmentlistdict()
for row in self:
ifos = row.instruments or (None,)
if process_ids is None or row.process_id in process_ids:
seglists.extend(dict(((ifo, segments.segmentlist([row.out_segment])) for ifo in ifos))) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['row']]
return seglists
|
def _eval(self, text):
"""
Evaluate text and return (obj, valid)
where *obj* is the object represented by *text*
and *valid* is True if object evaluation did not raise any exception
"""
from spyder_kernels.py3compat import is_text_string
assert is_text_string(text)
ns = self._get_current_namespace(with_magics=True)
try:
return eval(text, ns), True
except:
return None, False
|
def function[_eval, parameter[self, text]]:
constant[
Evaluate text and return (obj, valid)
where *obj* is the object represented by *text*
and *valid* is True if object evaluation did not raise any exception
]
from relative_module[spyder_kernels.py3compat] import module[is_text_string]
assert[call[name[is_text_string], parameter[name[text]]]]
variable[ns] assign[=] call[name[self]._get_current_namespace, parameter[]]
<ast.Try object at 0x7da20e956830>
|
keyword[def] identifier[_eval] ( identifier[self] , identifier[text] ):
literal[string]
keyword[from] identifier[spyder_kernels] . identifier[py3compat] keyword[import] identifier[is_text_string]
keyword[assert] identifier[is_text_string] ( identifier[text] )
identifier[ns] = identifier[self] . identifier[_get_current_namespace] ( identifier[with_magics] = keyword[True] )
keyword[try] :
keyword[return] identifier[eval] ( identifier[text] , identifier[ns] ), keyword[True]
keyword[except] :
keyword[return] keyword[None] , keyword[False]
|
def _eval(self, text):
"""
Evaluate text and return (obj, valid)
where *obj* is the object represented by *text*
and *valid* is True if object evaluation did not raise any exception
"""
from spyder_kernels.py3compat import is_text_string
assert is_text_string(text)
ns = self._get_current_namespace(with_magics=True)
try:
return (eval(text, ns), True) # depends on [control=['try'], data=[]]
except:
return (None, False) # depends on [control=['except'], data=[]]
|
def main_func(args=None):
"""Main funcion when executing this module as script
:param args: commandline arguments
:type args: list
:returns: None
:rtype: None
:raises: None
"""
# we have to initialize a gui even if we dont need one right now.
# as soon as you call maya.standalone.initialize(), a QApplication
# with type Tty is created. This is the type for conosle apps.
# Because i have not found a way to replace that, we just init the gui.
guimain.init_gui()
main.init()
launcher = Launcher()
parsed, unknown = launcher.parse_args(args)
parsed.func(parsed, unknown)
|
def function[main_func, parameter[args]]:
constant[Main funcion when executing this module as script
:param args: commandline arguments
:type args: list
:returns: None
:rtype: None
:raises: None
]
call[name[guimain].init_gui, parameter[]]
call[name[main].init, parameter[]]
variable[launcher] assign[=] call[name[Launcher], parameter[]]
<ast.Tuple object at 0x7da1b1eca380> assign[=] call[name[launcher].parse_args, parameter[name[args]]]
call[name[parsed].func, parameter[name[parsed], name[unknown]]]
|
keyword[def] identifier[main_func] ( identifier[args] = keyword[None] ):
literal[string]
identifier[guimain] . identifier[init_gui] ()
identifier[main] . identifier[init] ()
identifier[launcher] = identifier[Launcher] ()
identifier[parsed] , identifier[unknown] = identifier[launcher] . identifier[parse_args] ( identifier[args] )
identifier[parsed] . identifier[func] ( identifier[parsed] , identifier[unknown] )
|
def main_func(args=None):
"""Main funcion when executing this module as script
:param args: commandline arguments
:type args: list
:returns: None
:rtype: None
:raises: None
"""
# we have to initialize a gui even if we dont need one right now.
# as soon as you call maya.standalone.initialize(), a QApplication
# with type Tty is created. This is the type for conosle apps.
# Because i have not found a way to replace that, we just init the gui.
guimain.init_gui()
main.init()
launcher = Launcher()
(parsed, unknown) = launcher.parse_args(args)
parsed.func(parsed, unknown)
|
def drop_invalid_columns(feed: "Feed") -> "Feed":
"""
Drop all DataFrame columns of the given "Feed" that are not
listed in the GTFS.
Return the resulting new "Feed".
"""
feed = feed.copy()
for table, group in cs.GTFS_REF.groupby("table"):
f = getattr(feed, table)
if f is None:
continue
valid_columns = group["column"].values
for col in f.columns:
if col not in valid_columns:
print(f"{table}: dropping invalid column {col}")
del f[col]
setattr(feed, table, f)
return feed
|
def function[drop_invalid_columns, parameter[feed]]:
constant[
Drop all DataFrame columns of the given "Feed" that are not
listed in the GTFS.
Return the resulting new "Feed".
]
variable[feed] assign[=] call[name[feed].copy, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da1b0b94730>, <ast.Name object at 0x7da1b0b94760>]]] in starred[call[name[cs].GTFS_REF.groupby, parameter[constant[table]]]] begin[:]
variable[f] assign[=] call[name[getattr], parameter[name[feed], name[table]]]
if compare[name[f] is constant[None]] begin[:]
continue
variable[valid_columns] assign[=] call[name[group]][constant[column]].values
for taget[name[col]] in starred[name[f].columns] begin[:]
if compare[name[col] <ast.NotIn object at 0x7da2590d7190> name[valid_columns]] begin[:]
call[name[print], parameter[<ast.JoinedStr object at 0x7da1b0b95a50>]]
<ast.Delete object at 0x7da1b0b963e0>
call[name[setattr], parameter[name[feed], name[table], name[f]]]
return[name[feed]]
|
keyword[def] identifier[drop_invalid_columns] ( identifier[feed] : literal[string] )-> literal[string] :
literal[string]
identifier[feed] = identifier[feed] . identifier[copy] ()
keyword[for] identifier[table] , identifier[group] keyword[in] identifier[cs] . identifier[GTFS_REF] . identifier[groupby] ( literal[string] ):
identifier[f] = identifier[getattr] ( identifier[feed] , identifier[table] )
keyword[if] identifier[f] keyword[is] keyword[None] :
keyword[continue]
identifier[valid_columns] = identifier[group] [ literal[string] ]. identifier[values]
keyword[for] identifier[col] keyword[in] identifier[f] . identifier[columns] :
keyword[if] identifier[col] keyword[not] keyword[in] identifier[valid_columns] :
identifier[print] ( literal[string] )
keyword[del] identifier[f] [ identifier[col] ]
identifier[setattr] ( identifier[feed] , identifier[table] , identifier[f] )
keyword[return] identifier[feed]
|
def drop_invalid_columns(feed: 'Feed') -> 'Feed':
"""
Drop all DataFrame columns of the given "Feed" that are not
listed in the GTFS.
Return the resulting new "Feed".
"""
feed = feed.copy()
for (table, group) in cs.GTFS_REF.groupby('table'):
f = getattr(feed, table)
if f is None:
continue # depends on [control=['if'], data=[]]
valid_columns = group['column'].values
for col in f.columns:
if col not in valid_columns:
print(f'{table}: dropping invalid column {col}')
del f[col] # depends on [control=['if'], data=['col']] # depends on [control=['for'], data=['col']]
setattr(feed, table, f) # depends on [control=['for'], data=[]]
return feed
|
def addDependency(self, item):
"""
Creates a dependency for this item to the next item. This item will
be treated as the source, the other as the target.
:param item | <QGanttWidgetItem>
"""
if item in self._dependencies:
return
viewItem = XGanttDepItem(self, item)
self._dependencies[item] = viewItem
item._reverseDependencies[self] = viewItem
self.syncDependencies()
|
def function[addDependency, parameter[self, item]]:
constant[
Creates a dependency for this item to the next item. This item will
be treated as the source, the other as the target.
:param item | <QGanttWidgetItem>
]
if compare[name[item] in name[self]._dependencies] begin[:]
return[None]
variable[viewItem] assign[=] call[name[XGanttDepItem], parameter[name[self], name[item]]]
call[name[self]._dependencies][name[item]] assign[=] name[viewItem]
call[name[item]._reverseDependencies][name[self]] assign[=] name[viewItem]
call[name[self].syncDependencies, parameter[]]
|
keyword[def] identifier[addDependency] ( identifier[self] , identifier[item] ):
literal[string]
keyword[if] identifier[item] keyword[in] identifier[self] . identifier[_dependencies] :
keyword[return]
identifier[viewItem] = identifier[XGanttDepItem] ( identifier[self] , identifier[item] )
identifier[self] . identifier[_dependencies] [ identifier[item] ]= identifier[viewItem]
identifier[item] . identifier[_reverseDependencies] [ identifier[self] ]= identifier[viewItem]
identifier[self] . identifier[syncDependencies] ()
|
def addDependency(self, item):
"""
Creates a dependency for this item to the next item. This item will
be treated as the source, the other as the target.
:param item | <QGanttWidgetItem>
"""
if item in self._dependencies:
return # depends on [control=['if'], data=[]]
viewItem = XGanttDepItem(self, item)
self._dependencies[item] = viewItem
item._reverseDependencies[self] = viewItem
self.syncDependencies()
|
def parse_filename(fname):
"""Parse a notebook filename.
This function takes a notebook filename and returns the notebook
format (json/py) and the notebook name. This logic can be
summarized as follows:
* notebook.ipynb -> (notebook.ipynb, notebook, json)
* notebook.json -> (notebook.json, notebook, json)
* notebook.py -> (notebook.py, notebook, py)
* notebook -> (notebook.ipynb, notebook, json)
Parameters
----------
fname : unicode
The notebook filename. The filename can use a specific filename
extention (.ipynb, .json, .py) or none, in which case .ipynb will
be assumed.
Returns
-------
(fname, name, format) : (unicode, unicode, unicode)
The filename, notebook name and format.
"""
if fname.endswith(u'.ipynb'):
format = u'json'
elif fname.endswith(u'.json'):
format = u'json'
elif fname.endswith(u'.py'):
format = u'py'
else:
fname = fname + u'.ipynb'
format = u'json'
name = fname.split('.')[0]
return fname, name, format
|
def function[parse_filename, parameter[fname]]:
constant[Parse a notebook filename.
This function takes a notebook filename and returns the notebook
format (json/py) and the notebook name. This logic can be
summarized as follows:
* notebook.ipynb -> (notebook.ipynb, notebook, json)
* notebook.json -> (notebook.json, notebook, json)
* notebook.py -> (notebook.py, notebook, py)
* notebook -> (notebook.ipynb, notebook, json)
Parameters
----------
fname : unicode
The notebook filename. The filename can use a specific filename
extention (.ipynb, .json, .py) or none, in which case .ipynb will
be assumed.
Returns
-------
(fname, name, format) : (unicode, unicode, unicode)
The filename, notebook name and format.
]
if call[name[fname].endswith, parameter[constant[.ipynb]]] begin[:]
variable[format] assign[=] constant[json]
variable[name] assign[=] call[call[name[fname].split, parameter[constant[.]]]][constant[0]]
return[tuple[[<ast.Name object at 0x7da18fe92650>, <ast.Name object at 0x7da18fe90b80>, <ast.Name object at 0x7da18fe91840>]]]
|
keyword[def] identifier[parse_filename] ( identifier[fname] ):
literal[string]
keyword[if] identifier[fname] . identifier[endswith] ( literal[string] ):
identifier[format] = literal[string]
keyword[elif] identifier[fname] . identifier[endswith] ( literal[string] ):
identifier[format] = literal[string]
keyword[elif] identifier[fname] . identifier[endswith] ( literal[string] ):
identifier[format] = literal[string]
keyword[else] :
identifier[fname] = identifier[fname] + literal[string]
identifier[format] = literal[string]
identifier[name] = identifier[fname] . identifier[split] ( literal[string] )[ literal[int] ]
keyword[return] identifier[fname] , identifier[name] , identifier[format]
|
def parse_filename(fname):
"""Parse a notebook filename.
This function takes a notebook filename and returns the notebook
format (json/py) and the notebook name. This logic can be
summarized as follows:
* notebook.ipynb -> (notebook.ipynb, notebook, json)
* notebook.json -> (notebook.json, notebook, json)
* notebook.py -> (notebook.py, notebook, py)
* notebook -> (notebook.ipynb, notebook, json)
Parameters
----------
fname : unicode
The notebook filename. The filename can use a specific filename
extention (.ipynb, .json, .py) or none, in which case .ipynb will
be assumed.
Returns
-------
(fname, name, format) : (unicode, unicode, unicode)
The filename, notebook name and format.
"""
if fname.endswith(u'.ipynb'):
format = u'json' # depends on [control=['if'], data=[]]
elif fname.endswith(u'.json'):
format = u'json' # depends on [control=['if'], data=[]]
elif fname.endswith(u'.py'):
format = u'py' # depends on [control=['if'], data=[]]
else:
fname = fname + u'.ipynb'
format = u'json'
name = fname.split('.')[0]
return (fname, name, format)
|
def run(configobj=None):
"""
TEAL interface for the `acscteforwardmodel` function.
"""
acscteforwardmodel(configobj['input'],
exec_path=configobj['exec_path'],
time_stamps=configobj['time_stamps'],
verbose=configobj['verbose'],
quiet=configobj['quiet'],
single_core=configobj['single_core']
)
|
def function[run, parameter[configobj]]:
constant[
TEAL interface for the `acscteforwardmodel` function.
]
call[name[acscteforwardmodel], parameter[call[name[configobj]][constant[input]]]]
|
keyword[def] identifier[run] ( identifier[configobj] = keyword[None] ):
literal[string]
identifier[acscteforwardmodel] ( identifier[configobj] [ literal[string] ],
identifier[exec_path] = identifier[configobj] [ literal[string] ],
identifier[time_stamps] = identifier[configobj] [ literal[string] ],
identifier[verbose] = identifier[configobj] [ literal[string] ],
identifier[quiet] = identifier[configobj] [ literal[string] ],
identifier[single_core] = identifier[configobj] [ literal[string] ]
)
|
def run(configobj=None):
"""
TEAL interface for the `acscteforwardmodel` function.
"""
acscteforwardmodel(configobj['input'], exec_path=configobj['exec_path'], time_stamps=configobj['time_stamps'], verbose=configobj['verbose'], quiet=configobj['quiet'], single_core=configobj['single_core'])
|
def block(seed):
""" Return block of normal random numbers
Parameters
----------
seed : {None, int}
The seed to generate the noise.sd
Returns
--------
noise : numpy.ndarray
Array of random numbers
"""
num = SAMPLE_RATE * BLOCK_SIZE
rng = RandomState(seed % 2**32)
variance = SAMPLE_RATE / 2
return rng.normal(size=num, scale=variance**0.5)
|
def function[block, parameter[seed]]:
constant[ Return block of normal random numbers
Parameters
----------
seed : {None, int}
The seed to generate the noise.sd
Returns
--------
noise : numpy.ndarray
Array of random numbers
]
variable[num] assign[=] binary_operation[name[SAMPLE_RATE] * name[BLOCK_SIZE]]
variable[rng] assign[=] call[name[RandomState], parameter[binary_operation[name[seed] <ast.Mod object at 0x7da2590d6920> binary_operation[constant[2] ** constant[32]]]]]
variable[variance] assign[=] binary_operation[name[SAMPLE_RATE] / constant[2]]
return[call[name[rng].normal, parameter[]]]
|
keyword[def] identifier[block] ( identifier[seed] ):
literal[string]
identifier[num] = identifier[SAMPLE_RATE] * identifier[BLOCK_SIZE]
identifier[rng] = identifier[RandomState] ( identifier[seed] % literal[int] ** literal[int] )
identifier[variance] = identifier[SAMPLE_RATE] / literal[int]
keyword[return] identifier[rng] . identifier[normal] ( identifier[size] = identifier[num] , identifier[scale] = identifier[variance] ** literal[int] )
|
def block(seed):
""" Return block of normal random numbers
Parameters
----------
seed : {None, int}
The seed to generate the noise.sd
Returns
--------
noise : numpy.ndarray
Array of random numbers
"""
num = SAMPLE_RATE * BLOCK_SIZE
rng = RandomState(seed % 2 ** 32)
variance = SAMPLE_RATE / 2
return rng.normal(size=num, scale=variance ** 0.5)
|
def add_weekdays2df(time_df, holidays=None, holiday_is_sunday=False):
r"""Giving back a DataFrame containing weekdays and optionally holidays for
the given year.
Parameters
----------
time_df : pandas DataFrame
DataFrame to which the weekdays should be added
Optional Parameters
-------------------
holidays : array with information for every hour of the year, if holiday or
not (0: holiday, 1: no holiday)
holiday_is_sunday : boolean
If set to True, all holidays (0) will be set to sundays (7).
Returns
-------
pandas.DataFrame : DataFrame with weekdays
Notes
-----
Using Pandas > 0.16
"""
time_df['weekday'] = time_df.index.weekday + 1
time_df['date'] = time_df.index.date
# Set weekday to Holiday (0) for all holidays
if holidays is not None:
if isinstance(holidays, dict):
holidays = list(holidays.keys())
time_df['weekday'].mask(pd.to_datetime(time_df['date']).isin(
pd.to_datetime(holidays)), 0, True)
if holiday_is_sunday:
time_df.weekday.mask(time_df.weekday == 0, 7, True)
return time_df
|
def function[add_weekdays2df, parameter[time_df, holidays, holiday_is_sunday]]:
constant[Giving back a DataFrame containing weekdays and optionally holidays for
the given year.
Parameters
----------
time_df : pandas DataFrame
DataFrame to which the weekdays should be added
Optional Parameters
-------------------
holidays : array with information for every hour of the year, if holiday or
not (0: holiday, 1: no holiday)
holiday_is_sunday : boolean
If set to True, all holidays (0) will be set to sundays (7).
Returns
-------
pandas.DataFrame : DataFrame with weekdays
Notes
-----
Using Pandas > 0.16
]
call[name[time_df]][constant[weekday]] assign[=] binary_operation[name[time_df].index.weekday + constant[1]]
call[name[time_df]][constant[date]] assign[=] name[time_df].index.date
if compare[name[holidays] is_not constant[None]] begin[:]
if call[name[isinstance], parameter[name[holidays], name[dict]]] begin[:]
variable[holidays] assign[=] call[name[list], parameter[call[name[holidays].keys, parameter[]]]]
call[call[name[time_df]][constant[weekday]].mask, parameter[call[call[name[pd].to_datetime, parameter[call[name[time_df]][constant[date]]]].isin, parameter[call[name[pd].to_datetime, parameter[name[holidays]]]]], constant[0], constant[True]]]
if name[holiday_is_sunday] begin[:]
call[name[time_df].weekday.mask, parameter[compare[name[time_df].weekday equal[==] constant[0]], constant[7], constant[True]]]
return[name[time_df]]
|
keyword[def] identifier[add_weekdays2df] ( identifier[time_df] , identifier[holidays] = keyword[None] , identifier[holiday_is_sunday] = keyword[False] ):
literal[string]
identifier[time_df] [ literal[string] ]= identifier[time_df] . identifier[index] . identifier[weekday] + literal[int]
identifier[time_df] [ literal[string] ]= identifier[time_df] . identifier[index] . identifier[date]
keyword[if] identifier[holidays] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[isinstance] ( identifier[holidays] , identifier[dict] ):
identifier[holidays] = identifier[list] ( identifier[holidays] . identifier[keys] ())
identifier[time_df] [ literal[string] ]. identifier[mask] ( identifier[pd] . identifier[to_datetime] ( identifier[time_df] [ literal[string] ]). identifier[isin] (
identifier[pd] . identifier[to_datetime] ( identifier[holidays] )), literal[int] , keyword[True] )
keyword[if] identifier[holiday_is_sunday] :
identifier[time_df] . identifier[weekday] . identifier[mask] ( identifier[time_df] . identifier[weekday] == literal[int] , literal[int] , keyword[True] )
keyword[return] identifier[time_df]
|
def add_weekdays2df(time_df, holidays=None, holiday_is_sunday=False):
"""Giving back a DataFrame containing weekdays and optionally holidays for
the given year.
Parameters
----------
time_df : pandas DataFrame
DataFrame to which the weekdays should be added
Optional Parameters
-------------------
holidays : array with information for every hour of the year, if holiday or
not (0: holiday, 1: no holiday)
holiday_is_sunday : boolean
If set to True, all holidays (0) will be set to sundays (7).
Returns
-------
pandas.DataFrame : DataFrame with weekdays
Notes
-----
Using Pandas > 0.16
"""
time_df['weekday'] = time_df.index.weekday + 1
time_df['date'] = time_df.index.date
# Set weekday to Holiday (0) for all holidays
if holidays is not None:
if isinstance(holidays, dict):
holidays = list(holidays.keys()) # depends on [control=['if'], data=[]]
time_df['weekday'].mask(pd.to_datetime(time_df['date']).isin(pd.to_datetime(holidays)), 0, True) # depends on [control=['if'], data=['holidays']]
if holiday_is_sunday:
time_df.weekday.mask(time_df.weekday == 0, 7, True) # depends on [control=['if'], data=[]]
return time_df
|
def produce_examples(shard_ids, wikis_dir, refs_dir, urls_dir, vocab_path,
out_filepaths):
"""Produce examples from shard_ids to out_filepaths."""
# * Join the Wikipedia articles with their references
# * Run Tf-idf to sort reference paragraphs
# * Encode the Wikipedia and reference text with the vocabulary
# * Write out TFRecords of tensorflow.Example
tf.logging.info("Processing %d input shards into %d output files.",
len(shard_ids), len(out_filepaths))
vocab = text_encoder.SubwordTextEncoder(vocab_path)
eot_ids = vocab.encode(EOT)
def example_generator():
"""Generate Example dicts."""
stats = dict(total_original_wikis=0, total_original_refs=0,
total_found_refs=0, ref_lengths=[], wiki_original_refs=[],
wiki_found_refs=[], wikis_skipped_no_refs=0,
wikis_skipped_short_lead=0, num_wikis_written=0)
ref_files_by_shard = _references_files_by_shard(refs_dir)
for shard_id in shard_ids:
tf.logging.info("Processing shard %d", shard_id)
wiki_urls = _wiki_urls_for_shard(shard_id, urls_dir)
tf.logging.info("Loaded wiki URLs for shard")
refs_content = _references_content(ref_files_by_shard[shard_id])
tf.logging.info("Loaded reference content for shard")
for i, wiki in enumerate(_wiki_articles(shard_id, wikis_dir)):
if not i % 1000:
tf.logging.info("Processing wiki index %d for shard %d", i, shard_id)
stats["total_original_wikis"] += 1
# Get reference content
wiki_ref_content = []
ref_urls = wiki_urls[wiki.url]["refs"]
stats["total_original_refs"] += len(ref_urls)
stats_wiki_original_refs = len(ref_urls)
stats_wiki_found_refs = 0
for ref_url in ref_urls:
ref_content = refs_content.get(ref_url)
if not ref_content:
continue
stats["total_found_refs"] += 1
stats["ref_lengths"].append(len(ref_content))
stats_wiki_found_refs += 1
wiki_ref_content.append(ref_content)
stats["wiki_original_refs"].append(stats_wiki_original_refs)
stats["wiki_found_refs"].append(stats_wiki_found_refs)
if not wiki_ref_content or len(wiki_ref_content) < _MIN_REFS:
# No/few refs were found
stats["wikis_skipped_no_refs"] += 1
continue
# Rank reference paragraphs with TFIDF
wiki_title = _normalize_text(wiki.title)
ranked_paragraphs = rank_reference_paragraphs(wiki_title,
wiki_ref_content)
# Construct inputs from Wiki title and references
inputs = []
inputs.extend(vocab.encode(wiki_title))
inputs.extend(eot_ids)
for paragraph in ranked_paragraphs:
if len(inputs) >= 1e6:
break
paragraph += " "
inputs.extend(vocab.encode(paragraph))
# Construct targets from article sections
targets, section_boundaries = _encode_wiki_sections(
wiki.sections, vocab)
# Skip if lead section is too short
if (not section_boundaries or
section_boundaries[0] < _MIN_LEADSECTION_TOKENS):
stats["wikis_skipped_short_lead"] += 1
continue
inputs.append(text_encoder.EOS_ID)
targets.append(text_encoder.EOS_ID)
stats["num_wikis_written"] += 1
yield {
"inputs": inputs,
"targets": targets,
"section_boundaries": section_boundaries,
}
tf.logging.info("Total: %d, Skipped: %d",
stats["num_wikis_written"],
stats["total_original_wikis"] - stats["num_wikis_written"])
tf.logging.info("Total refs: %d, Skipped refs: %d",
stats["total_found_refs"],
stats["total_original_refs"] - stats["total_found_refs"])
stats_fname = os.path.join(os.path.split(out_filepaths[0])[0],
"stats.%d.json" % shard_ids[0])
with tf.gfile.Open(stats_fname, "w") as f:
f.write(json.dumps(stats))
generator_utils.generate_files(example_generator(), out_filepaths)
|
def function[produce_examples, parameter[shard_ids, wikis_dir, refs_dir, urls_dir, vocab_path, out_filepaths]]:
constant[Produce examples from shard_ids to out_filepaths.]
call[name[tf].logging.info, parameter[constant[Processing %d input shards into %d output files.], call[name[len], parameter[name[shard_ids]]], call[name[len], parameter[name[out_filepaths]]]]]
variable[vocab] assign[=] call[name[text_encoder].SubwordTextEncoder, parameter[name[vocab_path]]]
variable[eot_ids] assign[=] call[name[vocab].encode, parameter[name[EOT]]]
def function[example_generator, parameter[]]:
constant[Generate Example dicts.]
variable[stats] assign[=] call[name[dict], parameter[]]
variable[ref_files_by_shard] assign[=] call[name[_references_files_by_shard], parameter[name[refs_dir]]]
for taget[name[shard_id]] in starred[name[shard_ids]] begin[:]
call[name[tf].logging.info, parameter[constant[Processing shard %d], name[shard_id]]]
variable[wiki_urls] assign[=] call[name[_wiki_urls_for_shard], parameter[name[shard_id], name[urls_dir]]]
call[name[tf].logging.info, parameter[constant[Loaded wiki URLs for shard]]]
variable[refs_content] assign[=] call[name[_references_content], parameter[call[name[ref_files_by_shard]][name[shard_id]]]]
call[name[tf].logging.info, parameter[constant[Loaded reference content for shard]]]
for taget[tuple[[<ast.Name object at 0x7da1b20f9ae0>, <ast.Name object at 0x7da1b20f8880>]]] in starred[call[name[enumerate], parameter[call[name[_wiki_articles], parameter[name[shard_id], name[wikis_dir]]]]]] begin[:]
if <ast.UnaryOp object at 0x7da1b20f83a0> begin[:]
call[name[tf].logging.info, parameter[constant[Processing wiki index %d for shard %d], name[i], name[shard_id]]]
<ast.AugAssign object at 0x7da1b20fa680>
variable[wiki_ref_content] assign[=] list[[]]
variable[ref_urls] assign[=] call[call[name[wiki_urls]][name[wiki].url]][constant[refs]]
<ast.AugAssign object at 0x7da1b20f9600>
variable[stats_wiki_original_refs] assign[=] call[name[len], parameter[name[ref_urls]]]
variable[stats_wiki_found_refs] assign[=] constant[0]
for taget[name[ref_url]] in starred[name[ref_urls]] begin[:]
variable[ref_content] assign[=] call[name[refs_content].get, parameter[name[ref_url]]]
if <ast.UnaryOp object at 0x7da1b20fa710> begin[:]
continue
<ast.AugAssign object at 0x7da1b20fb0a0>
call[call[name[stats]][constant[ref_lengths]].append, parameter[call[name[len], parameter[name[ref_content]]]]]
<ast.AugAssign object at 0x7da1b20f8ca0>
call[name[wiki_ref_content].append, parameter[name[ref_content]]]
call[call[name[stats]][constant[wiki_original_refs]].append, parameter[name[stats_wiki_original_refs]]]
call[call[name[stats]][constant[wiki_found_refs]].append, parameter[name[stats_wiki_found_refs]]]
if <ast.BoolOp object at 0x7da1b20fb8e0> begin[:]
<ast.AugAssign object at 0x7da1b20f8c70>
continue
variable[wiki_title] assign[=] call[name[_normalize_text], parameter[name[wiki].title]]
variable[ranked_paragraphs] assign[=] call[name[rank_reference_paragraphs], parameter[name[wiki_title], name[wiki_ref_content]]]
variable[inputs] assign[=] list[[]]
call[name[inputs].extend, parameter[call[name[vocab].encode, parameter[name[wiki_title]]]]]
call[name[inputs].extend, parameter[name[eot_ids]]]
for taget[name[paragraph]] in starred[name[ranked_paragraphs]] begin[:]
if compare[call[name[len], parameter[name[inputs]]] greater_or_equal[>=] constant[1000000.0]] begin[:]
break
<ast.AugAssign object at 0x7da1b2088d00>
call[name[inputs].extend, parameter[call[name[vocab].encode, parameter[name[paragraph]]]]]
<ast.Tuple object at 0x7da1b20899f0> assign[=] call[name[_encode_wiki_sections], parameter[name[wiki].sections, name[vocab]]]
if <ast.BoolOp object at 0x7da1b2089c30> begin[:]
<ast.AugAssign object at 0x7da1b2089e40>
continue
call[name[inputs].append, parameter[name[text_encoder].EOS_ID]]
call[name[targets].append, parameter[name[text_encoder].EOS_ID]]
<ast.AugAssign object at 0x7da1b2088a60>
<ast.Yield object at 0x7da1b2088dc0>
call[name[tf].logging.info, parameter[constant[Total: %d, Skipped: %d], call[name[stats]][constant[num_wikis_written]], binary_operation[call[name[stats]][constant[total_original_wikis]] - call[name[stats]][constant[num_wikis_written]]]]]
call[name[tf].logging.info, parameter[constant[Total refs: %d, Skipped refs: %d], call[name[stats]][constant[total_found_refs]], binary_operation[call[name[stats]][constant[total_original_refs]] - call[name[stats]][constant[total_found_refs]]]]]
variable[stats_fname] assign[=] call[name[os].path.join, parameter[call[call[name[os].path.split, parameter[call[name[out_filepaths]][constant[0]]]]][constant[0]], binary_operation[constant[stats.%d.json] <ast.Mod object at 0x7da2590d6920> call[name[shard_ids]][constant[0]]]]]
with call[name[tf].gfile.Open, parameter[name[stats_fname], constant[w]]] begin[:]
call[name[f].write, parameter[call[name[json].dumps, parameter[name[stats]]]]]
call[name[generator_utils].generate_files, parameter[call[name[example_generator], parameter[]], name[out_filepaths]]]
|
keyword[def] identifier[produce_examples] ( identifier[shard_ids] , identifier[wikis_dir] , identifier[refs_dir] , identifier[urls_dir] , identifier[vocab_path] ,
identifier[out_filepaths] ):
literal[string]
identifier[tf] . identifier[logging] . identifier[info] ( literal[string] ,
identifier[len] ( identifier[shard_ids] ), identifier[len] ( identifier[out_filepaths] ))
identifier[vocab] = identifier[text_encoder] . identifier[SubwordTextEncoder] ( identifier[vocab_path] )
identifier[eot_ids] = identifier[vocab] . identifier[encode] ( identifier[EOT] )
keyword[def] identifier[example_generator] ():
literal[string]
identifier[stats] = identifier[dict] ( identifier[total_original_wikis] = literal[int] , identifier[total_original_refs] = literal[int] ,
identifier[total_found_refs] = literal[int] , identifier[ref_lengths] =[], identifier[wiki_original_refs] =[],
identifier[wiki_found_refs] =[], identifier[wikis_skipped_no_refs] = literal[int] ,
identifier[wikis_skipped_short_lead] = literal[int] , identifier[num_wikis_written] = literal[int] )
identifier[ref_files_by_shard] = identifier[_references_files_by_shard] ( identifier[refs_dir] )
keyword[for] identifier[shard_id] keyword[in] identifier[shard_ids] :
identifier[tf] . identifier[logging] . identifier[info] ( literal[string] , identifier[shard_id] )
identifier[wiki_urls] = identifier[_wiki_urls_for_shard] ( identifier[shard_id] , identifier[urls_dir] )
identifier[tf] . identifier[logging] . identifier[info] ( literal[string] )
identifier[refs_content] = identifier[_references_content] ( identifier[ref_files_by_shard] [ identifier[shard_id] ])
identifier[tf] . identifier[logging] . identifier[info] ( literal[string] )
keyword[for] identifier[i] , identifier[wiki] keyword[in] identifier[enumerate] ( identifier[_wiki_articles] ( identifier[shard_id] , identifier[wikis_dir] )):
keyword[if] keyword[not] identifier[i] % literal[int] :
identifier[tf] . identifier[logging] . identifier[info] ( literal[string] , identifier[i] , identifier[shard_id] )
identifier[stats] [ literal[string] ]+= literal[int]
identifier[wiki_ref_content] =[]
identifier[ref_urls] = identifier[wiki_urls] [ identifier[wiki] . identifier[url] ][ literal[string] ]
identifier[stats] [ literal[string] ]+= identifier[len] ( identifier[ref_urls] )
identifier[stats_wiki_original_refs] = identifier[len] ( identifier[ref_urls] )
identifier[stats_wiki_found_refs] = literal[int]
keyword[for] identifier[ref_url] keyword[in] identifier[ref_urls] :
identifier[ref_content] = identifier[refs_content] . identifier[get] ( identifier[ref_url] )
keyword[if] keyword[not] identifier[ref_content] :
keyword[continue]
identifier[stats] [ literal[string] ]+= literal[int]
identifier[stats] [ literal[string] ]. identifier[append] ( identifier[len] ( identifier[ref_content] ))
identifier[stats_wiki_found_refs] += literal[int]
identifier[wiki_ref_content] . identifier[append] ( identifier[ref_content] )
identifier[stats] [ literal[string] ]. identifier[append] ( identifier[stats_wiki_original_refs] )
identifier[stats] [ literal[string] ]. identifier[append] ( identifier[stats_wiki_found_refs] )
keyword[if] keyword[not] identifier[wiki_ref_content] keyword[or] identifier[len] ( identifier[wiki_ref_content] )< identifier[_MIN_REFS] :
identifier[stats] [ literal[string] ]+= literal[int]
keyword[continue]
identifier[wiki_title] = identifier[_normalize_text] ( identifier[wiki] . identifier[title] )
identifier[ranked_paragraphs] = identifier[rank_reference_paragraphs] ( identifier[wiki_title] ,
identifier[wiki_ref_content] )
identifier[inputs] =[]
identifier[inputs] . identifier[extend] ( identifier[vocab] . identifier[encode] ( identifier[wiki_title] ))
identifier[inputs] . identifier[extend] ( identifier[eot_ids] )
keyword[for] identifier[paragraph] keyword[in] identifier[ranked_paragraphs] :
keyword[if] identifier[len] ( identifier[inputs] )>= literal[int] :
keyword[break]
identifier[paragraph] += literal[string]
identifier[inputs] . identifier[extend] ( identifier[vocab] . identifier[encode] ( identifier[paragraph] ))
identifier[targets] , identifier[section_boundaries] = identifier[_encode_wiki_sections] (
identifier[wiki] . identifier[sections] , identifier[vocab] )
keyword[if] ( keyword[not] identifier[section_boundaries] keyword[or]
identifier[section_boundaries] [ literal[int] ]< identifier[_MIN_LEADSECTION_TOKENS] ):
identifier[stats] [ literal[string] ]+= literal[int]
keyword[continue]
identifier[inputs] . identifier[append] ( identifier[text_encoder] . identifier[EOS_ID] )
identifier[targets] . identifier[append] ( identifier[text_encoder] . identifier[EOS_ID] )
identifier[stats] [ literal[string] ]+= literal[int]
keyword[yield] {
literal[string] : identifier[inputs] ,
literal[string] : identifier[targets] ,
literal[string] : identifier[section_boundaries] ,
}
identifier[tf] . identifier[logging] . identifier[info] ( literal[string] ,
identifier[stats] [ literal[string] ],
identifier[stats] [ literal[string] ]- identifier[stats] [ literal[string] ])
identifier[tf] . identifier[logging] . identifier[info] ( literal[string] ,
identifier[stats] [ literal[string] ],
identifier[stats] [ literal[string] ]- identifier[stats] [ literal[string] ])
identifier[stats_fname] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[path] . identifier[split] ( identifier[out_filepaths] [ literal[int] ])[ literal[int] ],
literal[string] % identifier[shard_ids] [ literal[int] ])
keyword[with] identifier[tf] . identifier[gfile] . identifier[Open] ( identifier[stats_fname] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[json] . identifier[dumps] ( identifier[stats] ))
identifier[generator_utils] . identifier[generate_files] ( identifier[example_generator] (), identifier[out_filepaths] )
|
def produce_examples(shard_ids, wikis_dir, refs_dir, urls_dir, vocab_path, out_filepaths):
"""Produce examples from shard_ids to out_filepaths."""
# * Join the Wikipedia articles with their references
# * Run Tf-idf to sort reference paragraphs
# * Encode the Wikipedia and reference text with the vocabulary
# * Write out TFRecords of tensorflow.Example
tf.logging.info('Processing %d input shards into %d output files.', len(shard_ids), len(out_filepaths))
vocab = text_encoder.SubwordTextEncoder(vocab_path)
eot_ids = vocab.encode(EOT)
def example_generator():
"""Generate Example dicts."""
stats = dict(total_original_wikis=0, total_original_refs=0, total_found_refs=0, ref_lengths=[], wiki_original_refs=[], wiki_found_refs=[], wikis_skipped_no_refs=0, wikis_skipped_short_lead=0, num_wikis_written=0)
ref_files_by_shard = _references_files_by_shard(refs_dir)
for shard_id in shard_ids:
tf.logging.info('Processing shard %d', shard_id)
wiki_urls = _wiki_urls_for_shard(shard_id, urls_dir)
tf.logging.info('Loaded wiki URLs for shard')
refs_content = _references_content(ref_files_by_shard[shard_id])
tf.logging.info('Loaded reference content for shard')
for (i, wiki) in enumerate(_wiki_articles(shard_id, wikis_dir)):
if not i % 1000:
tf.logging.info('Processing wiki index %d for shard %d', i, shard_id) # depends on [control=['if'], data=[]]
stats['total_original_wikis'] += 1
# Get reference content
wiki_ref_content = []
ref_urls = wiki_urls[wiki.url]['refs']
stats['total_original_refs'] += len(ref_urls)
stats_wiki_original_refs = len(ref_urls)
stats_wiki_found_refs = 0
for ref_url in ref_urls:
ref_content = refs_content.get(ref_url)
if not ref_content:
continue # depends on [control=['if'], data=[]]
stats['total_found_refs'] += 1
stats['ref_lengths'].append(len(ref_content))
stats_wiki_found_refs += 1
wiki_ref_content.append(ref_content) # depends on [control=['for'], data=['ref_url']]
stats['wiki_original_refs'].append(stats_wiki_original_refs)
stats['wiki_found_refs'].append(stats_wiki_found_refs)
if not wiki_ref_content or len(wiki_ref_content) < _MIN_REFS:
# No/few refs were found
stats['wikis_skipped_no_refs'] += 1
continue # depends on [control=['if'], data=[]]
# Rank reference paragraphs with TFIDF
wiki_title = _normalize_text(wiki.title)
ranked_paragraphs = rank_reference_paragraphs(wiki_title, wiki_ref_content)
# Construct inputs from Wiki title and references
inputs = []
inputs.extend(vocab.encode(wiki_title))
inputs.extend(eot_ids)
for paragraph in ranked_paragraphs:
if len(inputs) >= 1000000.0:
break # depends on [control=['if'], data=[]]
paragraph += ' '
inputs.extend(vocab.encode(paragraph)) # depends on [control=['for'], data=['paragraph']]
# Construct targets from article sections
(targets, section_boundaries) = _encode_wiki_sections(wiki.sections, vocab)
# Skip if lead section is too short
if not section_boundaries or section_boundaries[0] < _MIN_LEADSECTION_TOKENS:
stats['wikis_skipped_short_lead'] += 1
continue # depends on [control=['if'], data=[]]
inputs.append(text_encoder.EOS_ID)
targets.append(text_encoder.EOS_ID)
stats['num_wikis_written'] += 1
yield {'inputs': inputs, 'targets': targets, 'section_boundaries': section_boundaries} # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['shard_id']]
tf.logging.info('Total: %d, Skipped: %d', stats['num_wikis_written'], stats['total_original_wikis'] - stats['num_wikis_written'])
tf.logging.info('Total refs: %d, Skipped refs: %d', stats['total_found_refs'], stats['total_original_refs'] - stats['total_found_refs'])
stats_fname = os.path.join(os.path.split(out_filepaths[0])[0], 'stats.%d.json' % shard_ids[0])
with tf.gfile.Open(stats_fname, 'w') as f:
f.write(json.dumps(stats)) # depends on [control=['with'], data=['f']]
generator_utils.generate_files(example_generator(), out_filepaths)
|
def kl_divergence(V_a, V_b):
"""
Calculate Kullback-Leibler distance.
Uses the smoothing method described in `Bigi 2003
<http://lvk.cs.msu.su/~bruzz/articles/classification/Using%20Kullback-Leibler%20Distance%20for%20Text%20Categorization.pdf>`_
to facilitate better comparisons between vectors describing wordcounts.
Parameters
----------
V_a : list
V_b : list
Returns
-------
divergence : float
KL divergence.
"""
# Find shared features.
Ndiff = _shared_features(V_a, V_b)
# aprob and bprob should each sum to 1.0
aprob = map(lambda v: float(v)/sum(V_a), V_a)
bprob = map(lambda v: float(v)/sum(V_b), V_b)
# Smooth according to Bigi 2003.
aprob, bprob = _smooth(aprob, bprob, Ndiff)
return sum(map(lambda a, b: (a-b)*log(a/b), aprob, bprob))
|
def function[kl_divergence, parameter[V_a, V_b]]:
constant[
Calculate Kullback-Leibler distance.
Uses the smoothing method described in `Bigi 2003
<http://lvk.cs.msu.su/~bruzz/articles/classification/Using%20Kullback-Leibler%20Distance%20for%20Text%20Categorization.pdf>`_
to facilitate better comparisons between vectors describing wordcounts.
Parameters
----------
V_a : list
V_b : list
Returns
-------
divergence : float
KL divergence.
]
variable[Ndiff] assign[=] call[name[_shared_features], parameter[name[V_a], name[V_b]]]
variable[aprob] assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da1b12f17e0>, name[V_a]]]
variable[bprob] assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da1b12f3640>, name[V_b]]]
<ast.Tuple object at 0x7da1b11951b0> assign[=] call[name[_smooth], parameter[name[aprob], name[bprob], name[Ndiff]]]
return[call[name[sum], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da1b1197fa0>, name[aprob], name[bprob]]]]]]
|
keyword[def] identifier[kl_divergence] ( identifier[V_a] , identifier[V_b] ):
literal[string]
identifier[Ndiff] = identifier[_shared_features] ( identifier[V_a] , identifier[V_b] )
identifier[aprob] = identifier[map] ( keyword[lambda] identifier[v] : identifier[float] ( identifier[v] )/ identifier[sum] ( identifier[V_a] ), identifier[V_a] )
identifier[bprob] = identifier[map] ( keyword[lambda] identifier[v] : identifier[float] ( identifier[v] )/ identifier[sum] ( identifier[V_b] ), identifier[V_b] )
identifier[aprob] , identifier[bprob] = identifier[_smooth] ( identifier[aprob] , identifier[bprob] , identifier[Ndiff] )
keyword[return] identifier[sum] ( identifier[map] ( keyword[lambda] identifier[a] , identifier[b] :( identifier[a] - identifier[b] )* identifier[log] ( identifier[a] / identifier[b] ), identifier[aprob] , identifier[bprob] ))
|
def kl_divergence(V_a, V_b):
"""
Calculate Kullback-Leibler distance.
Uses the smoothing method described in `Bigi 2003
<http://lvk.cs.msu.su/~bruzz/articles/classification/Using%20Kullback-Leibler%20Distance%20for%20Text%20Categorization.pdf>`_
to facilitate better comparisons between vectors describing wordcounts.
Parameters
----------
V_a : list
V_b : list
Returns
-------
divergence : float
KL divergence.
"""
# Find shared features.
Ndiff = _shared_features(V_a, V_b)
# aprob and bprob should each sum to 1.0
aprob = map(lambda v: float(v) / sum(V_a), V_a)
bprob = map(lambda v: float(v) / sum(V_b), V_b)
# Smooth according to Bigi 2003.
(aprob, bprob) = _smooth(aprob, bprob, Ndiff)
return sum(map(lambda a, b: (a - b) * log(a / b), aprob, bprob))
|
def enum(self):
"""
Enumeration of allowed object values.
The enumeration must not contain duplicates.
"""
value = self._schema.get("enum", None)
if value is None:
return
if not isinstance(value, list):
raise SchemaError(
"enum value {0!r} is not a list".format(value))
if len(value) == 0:
raise SchemaError(
"enum value {0!r} does not contain any"
" elements".format(value))
seen = set()
for item in value:
if item in seen:
raise SchemaError(
"enum value {0!r} contains duplicate element"
" {1!r}".format(value, item))
else:
seen.add(item)
return value
|
def function[enum, parameter[self]]:
constant[
Enumeration of allowed object values.
The enumeration must not contain duplicates.
]
variable[value] assign[=] call[name[self]._schema.get, parameter[constant[enum], constant[None]]]
if compare[name[value] is constant[None]] begin[:]
return[None]
if <ast.UnaryOp object at 0x7da2054a5900> begin[:]
<ast.Raise object at 0x7da2054a4b80>
if compare[call[name[len], parameter[name[value]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da20c6a8cd0>
variable[seen] assign[=] call[name[set], parameter[]]
for taget[name[item]] in starred[name[value]] begin[:]
if compare[name[item] in name[seen]] begin[:]
<ast.Raise object at 0x7da20c6aa920>
return[name[value]]
|
keyword[def] identifier[enum] ( identifier[self] ):
literal[string]
identifier[value] = identifier[self] . identifier[_schema] . identifier[get] ( literal[string] , keyword[None] )
keyword[if] identifier[value] keyword[is] keyword[None] :
keyword[return]
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[list] ):
keyword[raise] identifier[SchemaError] (
literal[string] . identifier[format] ( identifier[value] ))
keyword[if] identifier[len] ( identifier[value] )== literal[int] :
keyword[raise] identifier[SchemaError] (
literal[string]
literal[string] . identifier[format] ( identifier[value] ))
identifier[seen] = identifier[set] ()
keyword[for] identifier[item] keyword[in] identifier[value] :
keyword[if] identifier[item] keyword[in] identifier[seen] :
keyword[raise] identifier[SchemaError] (
literal[string]
literal[string] . identifier[format] ( identifier[value] , identifier[item] ))
keyword[else] :
identifier[seen] . identifier[add] ( identifier[item] )
keyword[return] identifier[value]
|
def enum(self):
"""
Enumeration of allowed object values.
The enumeration must not contain duplicates.
"""
value = self._schema.get('enum', None)
if value is None:
return # depends on [control=['if'], data=[]]
if not isinstance(value, list):
raise SchemaError('enum value {0!r} is not a list'.format(value)) # depends on [control=['if'], data=[]]
if len(value) == 0:
raise SchemaError('enum value {0!r} does not contain any elements'.format(value)) # depends on [control=['if'], data=[]]
seen = set()
for item in value:
if item in seen:
raise SchemaError('enum value {0!r} contains duplicate element {1!r}'.format(value, item)) # depends on [control=['if'], data=['item']]
else:
seen.add(item) # depends on [control=['for'], data=['item']]
return value
|
def initialize_arg_parser():
"""
Initializes the option parser with the options -
-q --image-query {[str]} -- [Image Search Query]
-n --image-count {[int]} -- [Count of images that need to be downloaded]
-f --destination-folder {[str]} -- [Download Destination Folder]
-t --thread-count {[int]} -- [Count of Threads, to parallelize download of images]
"""
GoogleImageExtractor.__argParser = ArgumentParser(
description='Utility to search and download images from google')
# Add the required arguments
requiredArguments = GoogleImageExtractor.__argParser.add_argument_group(
'required arguments')
requiredArguments.add_argument('-q', '--image-query', dest='imageQuery',
type=str, required=True, help='Image Search Query',
metavar='<image_query>')
# Add the optional arguments
optionalArguments = GoogleImageExtractor.__argParser.add_argument_group(
'optional arguments')
optionalArguments.add_argument('-f', '--destination-folder', dest='destinationFolder',
type=str, help='Download Destination Folder, default is the current folder',
metavar='<destination_folder>', default="./")
optionalArguments.add_argument('-n', '--image-count', dest='imageCount',
type=int, help='Count of images that neeed to be extracted',
metavar='<image_count>', default=100)
optionalArguments.add_argument('-t', '--thread-count', dest='threadCount',
type=int, help='Count of threads, to parallelize download of images',
metavar='<thread_count>', default=4)
|
def function[initialize_arg_parser, parameter[]]:
constant[
Initializes the option parser with the options -
-q --image-query {[str]} -- [Image Search Query]
-n --image-count {[int]} -- [Count of images that need to be downloaded]
-f --destination-folder {[str]} -- [Download Destination Folder]
-t --thread-count {[int]} -- [Count of Threads, to parallelize download of images]
]
name[GoogleImageExtractor].__argParser assign[=] call[name[ArgumentParser], parameter[]]
variable[requiredArguments] assign[=] call[name[GoogleImageExtractor].__argParser.add_argument_group, parameter[constant[required arguments]]]
call[name[requiredArguments].add_argument, parameter[constant[-q], constant[--image-query]]]
variable[optionalArguments] assign[=] call[name[GoogleImageExtractor].__argParser.add_argument_group, parameter[constant[optional arguments]]]
call[name[optionalArguments].add_argument, parameter[constant[-f], constant[--destination-folder]]]
call[name[optionalArguments].add_argument, parameter[constant[-n], constant[--image-count]]]
call[name[optionalArguments].add_argument, parameter[constant[-t], constant[--thread-count]]]
|
keyword[def] identifier[initialize_arg_parser] ():
literal[string]
identifier[GoogleImageExtractor] . identifier[__argParser] = identifier[ArgumentParser] (
identifier[description] = literal[string] )
identifier[requiredArguments] = identifier[GoogleImageExtractor] . identifier[__argParser] . identifier[add_argument_group] (
literal[string] )
identifier[requiredArguments] . identifier[add_argument] ( literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[type] = identifier[str] , identifier[required] = keyword[True] , identifier[help] = literal[string] ,
identifier[metavar] = literal[string] )
identifier[optionalArguments] = identifier[GoogleImageExtractor] . identifier[__argParser] . identifier[add_argument_group] (
literal[string] )
identifier[optionalArguments] . identifier[add_argument] ( literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[type] = identifier[str] , identifier[help] = literal[string] ,
identifier[metavar] = literal[string] , identifier[default] = literal[string] )
identifier[optionalArguments] . identifier[add_argument] ( literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[type] = identifier[int] , identifier[help] = literal[string] ,
identifier[metavar] = literal[string] , identifier[default] = literal[int] )
identifier[optionalArguments] . identifier[add_argument] ( literal[string] , literal[string] , identifier[dest] = literal[string] ,
identifier[type] = identifier[int] , identifier[help] = literal[string] ,
identifier[metavar] = literal[string] , identifier[default] = literal[int] )
|
def initialize_arg_parser():
"""
Initializes the option parser with the options -
-q --image-query {[str]} -- [Image Search Query]
-n --image-count {[int]} -- [Count of images that need to be downloaded]
-f --destination-folder {[str]} -- [Download Destination Folder]
-t --thread-count {[int]} -- [Count of Threads, to parallelize download of images]
"""
GoogleImageExtractor.__argParser = ArgumentParser(description='Utility to search and download images from google')
# Add the required arguments
requiredArguments = GoogleImageExtractor.__argParser.add_argument_group('required arguments')
requiredArguments.add_argument('-q', '--image-query', dest='imageQuery', type=str, required=True, help='Image Search Query', metavar='<image_query>')
# Add the optional arguments
optionalArguments = GoogleImageExtractor.__argParser.add_argument_group('optional arguments')
optionalArguments.add_argument('-f', '--destination-folder', dest='destinationFolder', type=str, help='Download Destination Folder, default is the current folder', metavar='<destination_folder>', default='./')
optionalArguments.add_argument('-n', '--image-count', dest='imageCount', type=int, help='Count of images that neeed to be extracted', metavar='<image_count>', default=100)
optionalArguments.add_argument('-t', '--thread-count', dest='threadCount', type=int, help='Count of threads, to parallelize download of images', metavar='<thread_count>', default=4)
|
def _load_from_environ(metadata, value_func=None):
"""
Load configuration from environment variables.
Any environment variable prefixed with the metadata's name will be
used to recursively set dictionary keys, splitting on '__'.
:param value_func: a mutator for the envvar's value (if any)
"""
# We'll match the ennvar name against the metadata's name. The ennvar
# name must be uppercase and hyphens in names converted to underscores.
#
# | envar | name | matches? |
# +-------------+---------+----------+
# | FOO_BAR | foo | yes |
# | FOO_BAR | bar | no |
# | foo_bar | bar | no |
# | FOO_BAR_BAZ | foo_bar | yes |
# | FOO_BAR_BAZ | foo-bar | yes |
# +-------------+---------+----------+
prefix = metadata.name.upper().replace("-", "_")
return expand_config(
environ,
separator="__",
skip_to=1,
key_parts_filter=lambda key_parts: len(key_parts) > 1 and key_parts[0] == prefix,
value_func=lambda value: value_func(value) if value_func else value,
)
|
def function[_load_from_environ, parameter[metadata, value_func]]:
constant[
Load configuration from environment variables.
Any environment variable prefixed with the metadata's name will be
used to recursively set dictionary keys, splitting on '__'.
:param value_func: a mutator for the envvar's value (if any)
]
variable[prefix] assign[=] call[call[name[metadata].name.upper, parameter[]].replace, parameter[constant[-], constant[_]]]
return[call[name[expand_config], parameter[name[environ]]]]
|
keyword[def] identifier[_load_from_environ] ( identifier[metadata] , identifier[value_func] = keyword[None] ):
literal[string]
identifier[prefix] = identifier[metadata] . identifier[name] . identifier[upper] (). identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[expand_config] (
identifier[environ] ,
identifier[separator] = literal[string] ,
identifier[skip_to] = literal[int] ,
identifier[key_parts_filter] = keyword[lambda] identifier[key_parts] : identifier[len] ( identifier[key_parts] )> literal[int] keyword[and] identifier[key_parts] [ literal[int] ]== identifier[prefix] ,
identifier[value_func] = keyword[lambda] identifier[value] : identifier[value_func] ( identifier[value] ) keyword[if] identifier[value_func] keyword[else] identifier[value] ,
)
|
def _load_from_environ(metadata, value_func=None):
"""
Load configuration from environment variables.
Any environment variable prefixed with the metadata's name will be
used to recursively set dictionary keys, splitting on '__'.
:param value_func: a mutator for the envvar's value (if any)
"""
# We'll match the ennvar name against the metadata's name. The ennvar
# name must be uppercase and hyphens in names converted to underscores.
#
# | envar | name | matches? |
# +-------------+---------+----------+
# | FOO_BAR | foo | yes |
# | FOO_BAR | bar | no |
# | foo_bar | bar | no |
# | FOO_BAR_BAZ | foo_bar | yes |
# | FOO_BAR_BAZ | foo-bar | yes |
# +-------------+---------+----------+
prefix = metadata.name.upper().replace('-', '_')
return expand_config(environ, separator='__', skip_to=1, key_parts_filter=lambda key_parts: len(key_parts) > 1 and key_parts[0] == prefix, value_func=lambda value: value_func(value) if value_func else value)
|
def fix_report(self, report, errors="drop", prefer="before"):
"""Perform utc assignment on all readings in a report.
The returned report will have all reading timestamps in UTC. This only
works on SignedListReport objects. Note that the report should
typically have previously been added to the UTC assigner using
add_report or no reference points from the report will be used.
Args:
report (SignedListReport): The report that we should fix.
errors (str): The behavior that we should have when we can't
fix a given reading. The only currently support behavior is
drop, which means that the reading will be dropped and not
included in the new report.
prefer (str): Whether to prefer fixing readings by looking for
reference points after the reading or before, all other things
being equal. See the description of ``assign_utc``.
Returns:
SignedListReport: The report with UTC timestamps.
"""
if not isinstance(report, SignedListReport):
raise ArgumentError("Report must be a SignedListReport", report=report)
if errors not in ('drop',):
raise ArgumentError("Unknown errors handler: {}, supported=['drop']".format(errors))
self.ensure_prepared()
fixed_readings = []
dropped_readings = 0
for reading in report.visible_readings:
assignment = self.assign_utc(reading.reading_id, reading.raw_time, prefer=prefer)
if assignment is None:
dropped_readings += 1
continue
fixed_reading = IOTileReading(assignment.rtc_value, reading.stream, reading.value,
reading_time=assignment.utc, reading_id=reading.reading_id)
fixed_readings.append(fixed_reading)
fixed_report = SignedListReport.FromReadings(report.origin, fixed_readings, report_id=report.report_id,
selector=report.streamer_selector, streamer=report.origin_streamer,
sent_timestamp=report.sent_timestamp)
fixed_report.received_time = report.received_time
if dropped_readings > 0:
self._logger.warning("Dropped %d readings of %d when fixing UTC timestamps in report 0x%08X for device 0x%08X",
dropped_readings, len(report.visible_readings), report.report_id, report.origin)
return fixed_report
|
def function[fix_report, parameter[self, report, errors, prefer]]:
constant[Perform utc assignment on all readings in a report.
The returned report will have all reading timestamps in UTC. This only
works on SignedListReport objects. Note that the report should
typically have previously been added to the UTC assigner using
add_report or no reference points from the report will be used.
Args:
report (SignedListReport): The report that we should fix.
errors (str): The behavior that we should have when we can't
fix a given reading. The only currently support behavior is
drop, which means that the reading will be dropped and not
included in the new report.
prefer (str): Whether to prefer fixing readings by looking for
reference points after the reading or before, all other things
being equal. See the description of ``assign_utc``.
Returns:
SignedListReport: The report with UTC timestamps.
]
if <ast.UnaryOp object at 0x7da20c6c5540> begin[:]
<ast.Raise object at 0x7da20c6c7880>
if compare[name[errors] <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Constant object at 0x7da20c6c5d50>]]] begin[:]
<ast.Raise object at 0x7da20c6c5780>
call[name[self].ensure_prepared, parameter[]]
variable[fixed_readings] assign[=] list[[]]
variable[dropped_readings] assign[=] constant[0]
for taget[name[reading]] in starred[name[report].visible_readings] begin[:]
variable[assignment] assign[=] call[name[self].assign_utc, parameter[name[reading].reading_id, name[reading].raw_time]]
if compare[name[assignment] is constant[None]] begin[:]
<ast.AugAssign object at 0x7da20c6c7460>
continue
variable[fixed_reading] assign[=] call[name[IOTileReading], parameter[name[assignment].rtc_value, name[reading].stream, name[reading].value]]
call[name[fixed_readings].append, parameter[name[fixed_reading]]]
variable[fixed_report] assign[=] call[name[SignedListReport].FromReadings, parameter[name[report].origin, name[fixed_readings]]]
name[fixed_report].received_time assign[=] name[report].received_time
if compare[name[dropped_readings] greater[>] constant[0]] begin[:]
call[name[self]._logger.warning, parameter[constant[Dropped %d readings of %d when fixing UTC timestamps in report 0x%08X for device 0x%08X], name[dropped_readings], call[name[len], parameter[name[report].visible_readings]], name[report].report_id, name[report].origin]]
return[name[fixed_report]]
|
keyword[def] identifier[fix_report] ( identifier[self] , identifier[report] , identifier[errors] = literal[string] , identifier[prefer] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[report] , identifier[SignedListReport] ):
keyword[raise] identifier[ArgumentError] ( literal[string] , identifier[report] = identifier[report] )
keyword[if] identifier[errors] keyword[not] keyword[in] ( literal[string] ,):
keyword[raise] identifier[ArgumentError] ( literal[string] . identifier[format] ( identifier[errors] ))
identifier[self] . identifier[ensure_prepared] ()
identifier[fixed_readings] =[]
identifier[dropped_readings] = literal[int]
keyword[for] identifier[reading] keyword[in] identifier[report] . identifier[visible_readings] :
identifier[assignment] = identifier[self] . identifier[assign_utc] ( identifier[reading] . identifier[reading_id] , identifier[reading] . identifier[raw_time] , identifier[prefer] = identifier[prefer] )
keyword[if] identifier[assignment] keyword[is] keyword[None] :
identifier[dropped_readings] += literal[int]
keyword[continue]
identifier[fixed_reading] = identifier[IOTileReading] ( identifier[assignment] . identifier[rtc_value] , identifier[reading] . identifier[stream] , identifier[reading] . identifier[value] ,
identifier[reading_time] = identifier[assignment] . identifier[utc] , identifier[reading_id] = identifier[reading] . identifier[reading_id] )
identifier[fixed_readings] . identifier[append] ( identifier[fixed_reading] )
identifier[fixed_report] = identifier[SignedListReport] . identifier[FromReadings] ( identifier[report] . identifier[origin] , identifier[fixed_readings] , identifier[report_id] = identifier[report] . identifier[report_id] ,
identifier[selector] = identifier[report] . identifier[streamer_selector] , identifier[streamer] = identifier[report] . identifier[origin_streamer] ,
identifier[sent_timestamp] = identifier[report] . identifier[sent_timestamp] )
identifier[fixed_report] . identifier[received_time] = identifier[report] . identifier[received_time]
keyword[if] identifier[dropped_readings] > literal[int] :
identifier[self] . identifier[_logger] . identifier[warning] ( literal[string] ,
identifier[dropped_readings] , identifier[len] ( identifier[report] . identifier[visible_readings] ), identifier[report] . identifier[report_id] , identifier[report] . identifier[origin] )
keyword[return] identifier[fixed_report]
|
def fix_report(self, report, errors='drop', prefer='before'):
"""Perform utc assignment on all readings in a report.
The returned report will have all reading timestamps in UTC. This only
works on SignedListReport objects. Note that the report should
typically have previously been added to the UTC assigner using
add_report or no reference points from the report will be used.
Args:
report (SignedListReport): The report that we should fix.
errors (str): The behavior that we should have when we can't
fix a given reading. The only currently support behavior is
drop, which means that the reading will be dropped and not
included in the new report.
prefer (str): Whether to prefer fixing readings by looking for
reference points after the reading or before, all other things
being equal. See the description of ``assign_utc``.
Returns:
SignedListReport: The report with UTC timestamps.
"""
if not isinstance(report, SignedListReport):
raise ArgumentError('Report must be a SignedListReport', report=report) # depends on [control=['if'], data=[]]
if errors not in ('drop',):
raise ArgumentError("Unknown errors handler: {}, supported=['drop']".format(errors)) # depends on [control=['if'], data=['errors']]
self.ensure_prepared()
fixed_readings = []
dropped_readings = 0
for reading in report.visible_readings:
assignment = self.assign_utc(reading.reading_id, reading.raw_time, prefer=prefer)
if assignment is None:
dropped_readings += 1
continue # depends on [control=['if'], data=[]]
fixed_reading = IOTileReading(assignment.rtc_value, reading.stream, reading.value, reading_time=assignment.utc, reading_id=reading.reading_id)
fixed_readings.append(fixed_reading) # depends on [control=['for'], data=['reading']]
fixed_report = SignedListReport.FromReadings(report.origin, fixed_readings, report_id=report.report_id, selector=report.streamer_selector, streamer=report.origin_streamer, sent_timestamp=report.sent_timestamp)
fixed_report.received_time = report.received_time
if dropped_readings > 0:
self._logger.warning('Dropped %d readings of %d when fixing UTC timestamps in report 0x%08X for device 0x%08X', dropped_readings, len(report.visible_readings), report.report_id, report.origin) # depends on [control=['if'], data=['dropped_readings']]
return fixed_report
|
def _is_writable(self, obj):
"""Check if the argument is a writable file-like object."""
try:
write = getattr(obj, 'write')
except AttributeError:
return False
else:
return is_method(write, min_arity=1, max_arity=1)
|
def function[_is_writable, parameter[self, obj]]:
constant[Check if the argument is a writable file-like object.]
<ast.Try object at 0x7da1b0e63670>
|
keyword[def] identifier[_is_writable] ( identifier[self] , identifier[obj] ):
literal[string]
keyword[try] :
identifier[write] = identifier[getattr] ( identifier[obj] , literal[string] )
keyword[except] identifier[AttributeError] :
keyword[return] keyword[False]
keyword[else] :
keyword[return] identifier[is_method] ( identifier[write] , identifier[min_arity] = literal[int] , identifier[max_arity] = literal[int] )
|
def _is_writable(self, obj):
"""Check if the argument is a writable file-like object."""
try:
write = getattr(obj, 'write') # depends on [control=['try'], data=[]]
except AttributeError:
return False # depends on [control=['except'], data=[]]
else:
return is_method(write, min_arity=1, max_arity=1)
|
def _find_sections(md_ast, sections, last, last_class, total_lines=None):
"""
Walks through a CommonMark AST to find section headers that delineate
content that should be updated by this script
:param md_ast:
The AST of the markdown document
:param sections:
A dict to store the start and end lines of a section. The key will be
a two-element tuple of the section type ("class", "function",
"method" or "attribute") and identifier. The values are a two-element
tuple of the start and end line number in the markdown document of the
section.
:param last:
A dict containing information about the last section header seen.
Includes the keys "type_name", "identifier", "start_line".
:param last_class:
A unicode string of the name of the last class found - used when
processing methods and attributes.
:param total_lines:
An integer of the total number of lines in the markdown document -
used to work around a bug in the API of the Python port of CommonMark
"""
def child_walker(node):
for child, entering in node.walker():
if child == node:
continue
yield child, entering
for child, entering in child_walker(md_ast):
if child.t == 'heading':
start_line = child.sourcepos[0][0]
if child.level == 2:
if last:
sections[(last['type_name'], last['identifier'])] = (last['start_line'], start_line - 1)
last.clear()
if child.level in set([3, 5]):
heading_elements = []
for heading_child, _ in child_walker(child):
heading_elements.append(heading_child)
if len(heading_elements) != 2:
continue
first = heading_elements[0]
second = heading_elements[1]
if first.t != 'code':
continue
if second.t != 'text':
continue
type_name = second.literal.strip()
identifier = first.literal.strip().replace('()', '').lstrip('.')
if last:
sections[(last['type_name'], last['identifier'])] = (last['start_line'], start_line - 1)
last.clear()
if type_name == 'function':
if child.level != 3:
continue
if type_name == 'class':
if child.level != 3:
continue
last_class.append(identifier)
if type_name in set(['method', 'attribute']):
if child.level != 5:
continue
identifier = last_class[-1] + '.' + identifier
last.update({
'type_name': type_name,
'identifier': identifier,
'start_line': start_line,
})
elif child.t == 'block_quote':
find_sections(child, sections, last, last_class)
if last:
sections[(last['type_name'], last['identifier'])] = (last['start_line'], total_lines)
|
def function[_find_sections, parameter[md_ast, sections, last, last_class, total_lines]]:
constant[
Walks through a CommonMark AST to find section headers that delineate
content that should be updated by this script
:param md_ast:
The AST of the markdown document
:param sections:
A dict to store the start and end lines of a section. The key will be
a two-element tuple of the section type ("class", "function",
"method" or "attribute") and identifier. The values are a two-element
tuple of the start and end line number in the markdown document of the
section.
:param last:
A dict containing information about the last section header seen.
Includes the keys "type_name", "identifier", "start_line".
:param last_class:
A unicode string of the name of the last class found - used when
processing methods and attributes.
:param total_lines:
An integer of the total number of lines in the markdown document -
used to work around a bug in the API of the Python port of CommonMark
]
def function[child_walker, parameter[node]]:
for taget[tuple[[<ast.Name object at 0x7da1b009b430>, <ast.Name object at 0x7da1b009b460>]]] in starred[call[name[node].walker, parameter[]]] begin[:]
if compare[name[child] equal[==] name[node]] begin[:]
continue
<ast.Yield object at 0x7da1b0099ba0>
for taget[tuple[[<ast.Name object at 0x7da1b009bc40>, <ast.Name object at 0x7da1b0099990>]]] in starred[call[name[child_walker], parameter[name[md_ast]]]] begin[:]
if compare[name[child].t equal[==] constant[heading]] begin[:]
variable[start_line] assign[=] call[call[name[child].sourcepos][constant[0]]][constant[0]]
if compare[name[child].level equal[==] constant[2]] begin[:]
if name[last] begin[:]
call[name[sections]][tuple[[<ast.Subscript object at 0x7da1b0099810>, <ast.Subscript object at 0x7da1b009b4f0>]]] assign[=] tuple[[<ast.Subscript object at 0x7da1b009bbe0>, <ast.BinOp object at 0x7da1b0098dc0>]]
call[name[last].clear, parameter[]]
if compare[name[child].level in call[name[set], parameter[list[[<ast.Constant object at 0x7da1b009a230>, <ast.Constant object at 0x7da1b009bd30>]]]]] begin[:]
variable[heading_elements] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1b0098c10>, <ast.Name object at 0x7da1b009ab30>]]] in starred[call[name[child_walker], parameter[name[child]]]] begin[:]
call[name[heading_elements].append, parameter[name[heading_child]]]
if compare[call[name[len], parameter[name[heading_elements]]] not_equal[!=] constant[2]] begin[:]
continue
variable[first] assign[=] call[name[heading_elements]][constant[0]]
variable[second] assign[=] call[name[heading_elements]][constant[1]]
if compare[name[first].t not_equal[!=] constant[code]] begin[:]
continue
if compare[name[second].t not_equal[!=] constant[text]] begin[:]
continue
variable[type_name] assign[=] call[name[second].literal.strip, parameter[]]
variable[identifier] assign[=] call[call[call[name[first].literal.strip, parameter[]].replace, parameter[constant[()], constant[]]].lstrip, parameter[constant[.]]]
if name[last] begin[:]
call[name[sections]][tuple[[<ast.Subscript object at 0x7da1b00f4520>, <ast.Subscript object at 0x7da1b00f49a0>]]] assign[=] tuple[[<ast.Subscript object at 0x7da1b00f4970>, <ast.BinOp object at 0x7da1b00f4730>]]
call[name[last].clear, parameter[]]
if compare[name[type_name] equal[==] constant[function]] begin[:]
if compare[name[child].level not_equal[!=] constant[3]] begin[:]
continue
if compare[name[type_name] equal[==] constant[class]] begin[:]
if compare[name[child].level not_equal[!=] constant[3]] begin[:]
continue
call[name[last_class].append, parameter[name[identifier]]]
if compare[name[type_name] in call[name[set], parameter[list[[<ast.Constant object at 0x7da1b00f7d90>, <ast.Constant object at 0x7da1b00f4460>]]]]] begin[:]
if compare[name[child].level not_equal[!=] constant[5]] begin[:]
continue
variable[identifier] assign[=] binary_operation[binary_operation[call[name[last_class]][<ast.UnaryOp object at 0x7da1b00f4e20>] + constant[.]] + name[identifier]]
call[name[last].update, parameter[dictionary[[<ast.Constant object at 0x7da1b00f4a60>, <ast.Constant object at 0x7da1b00f5ab0>, <ast.Constant object at 0x7da1b00f6500>], [<ast.Name object at 0x7da1b00f5720>, <ast.Name object at 0x7da1b00f4ac0>, <ast.Name object at 0x7da1b00f56c0>]]]]
if name[last] begin[:]
call[name[sections]][tuple[[<ast.Subscript object at 0x7da1b00f6e00>, <ast.Subscript object at 0x7da1b00f52d0>]]] assign[=] tuple[[<ast.Subscript object at 0x7da1b00f55d0>, <ast.Name object at 0x7da1b00f6020>]]
|
keyword[def] identifier[_find_sections] ( identifier[md_ast] , identifier[sections] , identifier[last] , identifier[last_class] , identifier[total_lines] = keyword[None] ):
literal[string]
keyword[def] identifier[child_walker] ( identifier[node] ):
keyword[for] identifier[child] , identifier[entering] keyword[in] identifier[node] . identifier[walker] ():
keyword[if] identifier[child] == identifier[node] :
keyword[continue]
keyword[yield] identifier[child] , identifier[entering]
keyword[for] identifier[child] , identifier[entering] keyword[in] identifier[child_walker] ( identifier[md_ast] ):
keyword[if] identifier[child] . identifier[t] == literal[string] :
identifier[start_line] = identifier[child] . identifier[sourcepos] [ literal[int] ][ literal[int] ]
keyword[if] identifier[child] . identifier[level] == literal[int] :
keyword[if] identifier[last] :
identifier[sections] [( identifier[last] [ literal[string] ], identifier[last] [ literal[string] ])]=( identifier[last] [ literal[string] ], identifier[start_line] - literal[int] )
identifier[last] . identifier[clear] ()
keyword[if] identifier[child] . identifier[level] keyword[in] identifier[set] ([ literal[int] , literal[int] ]):
identifier[heading_elements] =[]
keyword[for] identifier[heading_child] , identifier[_] keyword[in] identifier[child_walker] ( identifier[child] ):
identifier[heading_elements] . identifier[append] ( identifier[heading_child] )
keyword[if] identifier[len] ( identifier[heading_elements] )!= literal[int] :
keyword[continue]
identifier[first] = identifier[heading_elements] [ literal[int] ]
identifier[second] = identifier[heading_elements] [ literal[int] ]
keyword[if] identifier[first] . identifier[t] != literal[string] :
keyword[continue]
keyword[if] identifier[second] . identifier[t] != literal[string] :
keyword[continue]
identifier[type_name] = identifier[second] . identifier[literal] . identifier[strip] ()
identifier[identifier] = identifier[first] . identifier[literal] . identifier[strip] (). identifier[replace] ( literal[string] , literal[string] ). identifier[lstrip] ( literal[string] )
keyword[if] identifier[last] :
identifier[sections] [( identifier[last] [ literal[string] ], identifier[last] [ literal[string] ])]=( identifier[last] [ literal[string] ], identifier[start_line] - literal[int] )
identifier[last] . identifier[clear] ()
keyword[if] identifier[type_name] == literal[string] :
keyword[if] identifier[child] . identifier[level] != literal[int] :
keyword[continue]
keyword[if] identifier[type_name] == literal[string] :
keyword[if] identifier[child] . identifier[level] != literal[int] :
keyword[continue]
identifier[last_class] . identifier[append] ( identifier[identifier] )
keyword[if] identifier[type_name] keyword[in] identifier[set] ([ literal[string] , literal[string] ]):
keyword[if] identifier[child] . identifier[level] != literal[int] :
keyword[continue]
identifier[identifier] = identifier[last_class] [- literal[int] ]+ literal[string] + identifier[identifier]
identifier[last] . identifier[update] ({
literal[string] : identifier[type_name] ,
literal[string] : identifier[identifier] ,
literal[string] : identifier[start_line] ,
})
keyword[elif] identifier[child] . identifier[t] == literal[string] :
identifier[find_sections] ( identifier[child] , identifier[sections] , identifier[last] , identifier[last_class] )
keyword[if] identifier[last] :
identifier[sections] [( identifier[last] [ literal[string] ], identifier[last] [ literal[string] ])]=( identifier[last] [ literal[string] ], identifier[total_lines] )
|
def _find_sections(md_ast, sections, last, last_class, total_lines=None):
"""
Walks through a CommonMark AST to find section headers that delineate
content that should be updated by this script
:param md_ast:
The AST of the markdown document
:param sections:
A dict to store the start and end lines of a section. The key will be
a two-element tuple of the section type ("class", "function",
"method" or "attribute") and identifier. The values are a two-element
tuple of the start and end line number in the markdown document of the
section.
:param last:
A dict containing information about the last section header seen.
Includes the keys "type_name", "identifier", "start_line".
:param last_class:
A unicode string of the name of the last class found - used when
processing methods and attributes.
:param total_lines:
An integer of the total number of lines in the markdown document -
used to work around a bug in the API of the Python port of CommonMark
"""
def child_walker(node):
for (child, entering) in node.walker():
if child == node:
continue # depends on [control=['if'], data=[]]
yield (child, entering) # depends on [control=['for'], data=[]]
for (child, entering) in child_walker(md_ast):
if child.t == 'heading':
start_line = child.sourcepos[0][0]
if child.level == 2:
if last:
sections[last['type_name'], last['identifier']] = (last['start_line'], start_line - 1)
last.clear() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if child.level in set([3, 5]):
heading_elements = []
for (heading_child, _) in child_walker(child):
heading_elements.append(heading_child) # depends on [control=['for'], data=[]]
if len(heading_elements) != 2:
continue # depends on [control=['if'], data=[]]
first = heading_elements[0]
second = heading_elements[1]
if first.t != 'code':
continue # depends on [control=['if'], data=[]]
if second.t != 'text':
continue # depends on [control=['if'], data=[]]
type_name = second.literal.strip()
identifier = first.literal.strip().replace('()', '').lstrip('.')
if last:
sections[last['type_name'], last['identifier']] = (last['start_line'], start_line - 1)
last.clear() # depends on [control=['if'], data=[]]
if type_name == 'function':
if child.level != 3:
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if type_name == 'class':
if child.level != 3:
continue # depends on [control=['if'], data=[]]
last_class.append(identifier) # depends on [control=['if'], data=[]]
if type_name in set(['method', 'attribute']):
if child.level != 5:
continue # depends on [control=['if'], data=[]]
identifier = last_class[-1] + '.' + identifier # depends on [control=['if'], data=[]]
last.update({'type_name': type_name, 'identifier': identifier, 'start_line': start_line}) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif child.t == 'block_quote':
find_sections(child, sections, last, last_class) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if last:
sections[last['type_name'], last['identifier']] = (last['start_line'], total_lines) # depends on [control=['if'], data=[]]
|
def cbrt (x):
"""Returns the cube root of x."""
if x >= 0:
return math.pow(x , 1.0 / 3.0)
else:
return - math.pow(abs(x), 1.0 / 3.0)
|
def function[cbrt, parameter[x]]:
constant[Returns the cube root of x.]
if compare[name[x] greater_or_equal[>=] constant[0]] begin[:]
return[call[name[math].pow, parameter[name[x], binary_operation[constant[1.0] / constant[3.0]]]]]
|
keyword[def] identifier[cbrt] ( identifier[x] ):
literal[string]
keyword[if] identifier[x] >= literal[int] :
keyword[return] identifier[math] . identifier[pow] ( identifier[x] , literal[int] / literal[int] )
keyword[else] :
keyword[return] - identifier[math] . identifier[pow] ( identifier[abs] ( identifier[x] ), literal[int] / literal[int] )
|
def cbrt(x):
"""Returns the cube root of x."""
if x >= 0:
return math.pow(x, 1.0 / 3.0) # depends on [control=['if'], data=['x']]
else:
return -math.pow(abs(x), 1.0 / 3.0)
|
def get_date_range_this_year(now=None):
"""Return the starting and ending date of the current school year."""
if now is None:
now = datetime.datetime.now().date()
if now.month <= settings.YEAR_TURNOVER_MONTH:
date_start = datetime.datetime(now.year - 1, 8, 1) # TODO; don't hardcode these values
date_end = datetime.datetime(now.year, 7, 1)
else:
date_start = datetime.datetime(now.year, 8, 1)
date_end = datetime.datetime(now.year + 1, 7, 1)
return timezone.make_aware(date_start), timezone.make_aware(date_end)
|
def function[get_date_range_this_year, parameter[now]]:
constant[Return the starting and ending date of the current school year.]
if compare[name[now] is constant[None]] begin[:]
variable[now] assign[=] call[call[name[datetime].datetime.now, parameter[]].date, parameter[]]
if compare[name[now].month less_or_equal[<=] name[settings].YEAR_TURNOVER_MONTH] begin[:]
variable[date_start] assign[=] call[name[datetime].datetime, parameter[binary_operation[name[now].year - constant[1]], constant[8], constant[1]]]
variable[date_end] assign[=] call[name[datetime].datetime, parameter[name[now].year, constant[7], constant[1]]]
return[tuple[[<ast.Call object at 0x7da1b02e7580>, <ast.Call object at 0x7da1b02e4e20>]]]
|
keyword[def] identifier[get_date_range_this_year] ( identifier[now] = keyword[None] ):
literal[string]
keyword[if] identifier[now] keyword[is] keyword[None] :
identifier[now] = identifier[datetime] . identifier[datetime] . identifier[now] (). identifier[date] ()
keyword[if] identifier[now] . identifier[month] <= identifier[settings] . identifier[YEAR_TURNOVER_MONTH] :
identifier[date_start] = identifier[datetime] . identifier[datetime] ( identifier[now] . identifier[year] - literal[int] , literal[int] , literal[int] )
identifier[date_end] = identifier[datetime] . identifier[datetime] ( identifier[now] . identifier[year] , literal[int] , literal[int] )
keyword[else] :
identifier[date_start] = identifier[datetime] . identifier[datetime] ( identifier[now] . identifier[year] , literal[int] , literal[int] )
identifier[date_end] = identifier[datetime] . identifier[datetime] ( identifier[now] . identifier[year] + literal[int] , literal[int] , literal[int] )
keyword[return] identifier[timezone] . identifier[make_aware] ( identifier[date_start] ), identifier[timezone] . identifier[make_aware] ( identifier[date_end] )
|
def get_date_range_this_year(now=None):
"""Return the starting and ending date of the current school year."""
if now is None:
now = datetime.datetime.now().date() # depends on [control=['if'], data=['now']]
if now.month <= settings.YEAR_TURNOVER_MONTH:
date_start = datetime.datetime(now.year - 1, 8, 1) # TODO; don't hardcode these values
date_end = datetime.datetime(now.year, 7, 1) # depends on [control=['if'], data=[]]
else:
date_start = datetime.datetime(now.year, 8, 1)
date_end = datetime.datetime(now.year + 1, 7, 1)
return (timezone.make_aware(date_start), timezone.make_aware(date_end))
|
def load_config(self, namespace=None, rcfile=None):
""" Load file given in "rcfile".
"""
if namespace is None:
namespace = config
if namespace.scgi_url:
return # already have the connection to rTorrent
# Get and check config file name
if not rcfile:
rcfile = getattr(config, "rtorrent_rc", None)
if not rcfile:
raise error.UserError("No 'rtorrent_rc' path defined in configuration!")
if not os.path.isfile(rcfile):
raise error.UserError("Config file %r doesn't exist!" % (rcfile,))
# Parse the file
self.LOG.debug("Loading rtorrent config from %r" % (rcfile,))
rc_vals = Bunch(scgi_local='', scgi_port = '')
with open(rcfile) as handle:
continued = False
for line in handle.readlines():
# Skip comments, continuations, and empty lines
line = line.strip()
continued, was_continued = line.endswith('\\'), continued
if not line or was_continued or line.startswith("#"):
continue
# Be lenient about errors, after all it's not our own config file
try:
key, val = line.split("=", 1)
except ValueError:
self.LOG.warning("Ignored invalid line %r in %r!" % (line, rcfile))
continue
key, val = key.strip(), val.strip()
key = self.RTORRENT_RC_ALIASES.get(key, key).replace('.', '_')
# Copy values we're interested in
if key in self.RTORRENT_RC_KEYS:
self.LOG.debug("rtorrent.rc: %s = %s" % (key, val))
rc_vals[key] = val
# Validate fields
if rc_vals.scgi_local:
rc_vals.scgi_local = os.path.expanduser(rc_vals.scgi_local)
if rc_vals.scgi_local.startswith('/'):
rc_vals.scgi_local = "scgi://" + rc_vals.scgi_local
if rc_vals.scgi_port and not rc_vals.scgi_port.startswith("scgi://"):
rc_vals.scgi_port = "scgi://" + rc_vals.scgi_port
# Prefer UNIX domain sockets over TCP sockets
namespace.scgi_url = rc_vals.scgi_local or rc_vals.scgi_port
|
def function[load_config, parameter[self, namespace, rcfile]]:
constant[ Load file given in "rcfile".
]
if compare[name[namespace] is constant[None]] begin[:]
variable[namespace] assign[=] name[config]
if name[namespace].scgi_url begin[:]
return[None]
if <ast.UnaryOp object at 0x7da18bc73d90> begin[:]
variable[rcfile] assign[=] call[name[getattr], parameter[name[config], constant[rtorrent_rc], constant[None]]]
if <ast.UnaryOp object at 0x7da18bc736d0> begin[:]
<ast.Raise object at 0x7da18bc734c0>
if <ast.UnaryOp object at 0x7da18bc71ba0> begin[:]
<ast.Raise object at 0x7da18bc72ec0>
call[name[self].LOG.debug, parameter[binary_operation[constant[Loading rtorrent config from %r] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18bc73a90>]]]]]
variable[rc_vals] assign[=] call[name[Bunch], parameter[]]
with call[name[open], parameter[name[rcfile]]] begin[:]
variable[continued] assign[=] constant[False]
for taget[name[line]] in starred[call[name[handle].readlines, parameter[]]] begin[:]
variable[line] assign[=] call[name[line].strip, parameter[]]
<ast.Tuple object at 0x7da18bc72590> assign[=] tuple[[<ast.Call object at 0x7da18bc72170>, <ast.Name object at 0x7da18bc72da0>]]
if <ast.BoolOp object at 0x7da18bc72b60> begin[:]
continue
<ast.Try object at 0x7da18bc71d20>
<ast.Tuple object at 0x7da18bc72770> assign[=] tuple[[<ast.Call object at 0x7da18bc73e20>, <ast.Call object at 0x7da18bc73520>]]
variable[key] assign[=] call[call[name[self].RTORRENT_RC_ALIASES.get, parameter[name[key], name[key]]].replace, parameter[constant[.], constant[_]]]
if compare[name[key] in name[self].RTORRENT_RC_KEYS] begin[:]
call[name[self].LOG.debug, parameter[binary_operation[constant[rtorrent.rc: %s = %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18bc735b0>, <ast.Name object at 0x7da18bc721a0>]]]]]
call[name[rc_vals]][name[key]] assign[=] name[val]
if name[rc_vals].scgi_local begin[:]
name[rc_vals].scgi_local assign[=] call[name[os].path.expanduser, parameter[name[rc_vals].scgi_local]]
if call[name[rc_vals].scgi_local.startswith, parameter[constant[/]]] begin[:]
name[rc_vals].scgi_local assign[=] binary_operation[constant[scgi://] + name[rc_vals].scgi_local]
if <ast.BoolOp object at 0x7da18bc737f0> begin[:]
name[rc_vals].scgi_port assign[=] binary_operation[constant[scgi://] + name[rc_vals].scgi_port]
name[namespace].scgi_url assign[=] <ast.BoolOp object at 0x7da18bc73640>
|
keyword[def] identifier[load_config] ( identifier[self] , identifier[namespace] = keyword[None] , identifier[rcfile] = keyword[None] ):
literal[string]
keyword[if] identifier[namespace] keyword[is] keyword[None] :
identifier[namespace] = identifier[config]
keyword[if] identifier[namespace] . identifier[scgi_url] :
keyword[return]
keyword[if] keyword[not] identifier[rcfile] :
identifier[rcfile] = identifier[getattr] ( identifier[config] , literal[string] , keyword[None] )
keyword[if] keyword[not] identifier[rcfile] :
keyword[raise] identifier[error] . identifier[UserError] ( literal[string] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[rcfile] ):
keyword[raise] identifier[error] . identifier[UserError] ( literal[string] %( identifier[rcfile] ,))
identifier[self] . identifier[LOG] . identifier[debug] ( literal[string] %( identifier[rcfile] ,))
identifier[rc_vals] = identifier[Bunch] ( identifier[scgi_local] = literal[string] , identifier[scgi_port] = literal[string] )
keyword[with] identifier[open] ( identifier[rcfile] ) keyword[as] identifier[handle] :
identifier[continued] = keyword[False]
keyword[for] identifier[line] keyword[in] identifier[handle] . identifier[readlines] ():
identifier[line] = identifier[line] . identifier[strip] ()
identifier[continued] , identifier[was_continued] = identifier[line] . identifier[endswith] ( literal[string] ), identifier[continued]
keyword[if] keyword[not] identifier[line] keyword[or] identifier[was_continued] keyword[or] identifier[line] . identifier[startswith] ( literal[string] ):
keyword[continue]
keyword[try] :
identifier[key] , identifier[val] = identifier[line] . identifier[split] ( literal[string] , literal[int] )
keyword[except] identifier[ValueError] :
identifier[self] . identifier[LOG] . identifier[warning] ( literal[string] %( identifier[line] , identifier[rcfile] ))
keyword[continue]
identifier[key] , identifier[val] = identifier[key] . identifier[strip] (), identifier[val] . identifier[strip] ()
identifier[key] = identifier[self] . identifier[RTORRENT_RC_ALIASES] . identifier[get] ( identifier[key] , identifier[key] ). identifier[replace] ( literal[string] , literal[string] )
keyword[if] identifier[key] keyword[in] identifier[self] . identifier[RTORRENT_RC_KEYS] :
identifier[self] . identifier[LOG] . identifier[debug] ( literal[string] %( identifier[key] , identifier[val] ))
identifier[rc_vals] [ identifier[key] ]= identifier[val]
keyword[if] identifier[rc_vals] . identifier[scgi_local] :
identifier[rc_vals] . identifier[scgi_local] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[rc_vals] . identifier[scgi_local] )
keyword[if] identifier[rc_vals] . identifier[scgi_local] . identifier[startswith] ( literal[string] ):
identifier[rc_vals] . identifier[scgi_local] = literal[string] + identifier[rc_vals] . identifier[scgi_local]
keyword[if] identifier[rc_vals] . identifier[scgi_port] keyword[and] keyword[not] identifier[rc_vals] . identifier[scgi_port] . identifier[startswith] ( literal[string] ):
identifier[rc_vals] . identifier[scgi_port] = literal[string] + identifier[rc_vals] . identifier[scgi_port]
identifier[namespace] . identifier[scgi_url] = identifier[rc_vals] . identifier[scgi_local] keyword[or] identifier[rc_vals] . identifier[scgi_port]
|
def load_config(self, namespace=None, rcfile=None):
""" Load file given in "rcfile".
"""
if namespace is None:
namespace = config # depends on [control=['if'], data=['namespace']]
if namespace.scgi_url:
return # already have the connection to rTorrent # depends on [control=['if'], data=[]]
# Get and check config file name
if not rcfile:
rcfile = getattr(config, 'rtorrent_rc', None) # depends on [control=['if'], data=[]]
if not rcfile:
raise error.UserError("No 'rtorrent_rc' path defined in configuration!") # depends on [control=['if'], data=[]]
if not os.path.isfile(rcfile):
raise error.UserError("Config file %r doesn't exist!" % (rcfile,)) # depends on [control=['if'], data=[]]
# Parse the file
self.LOG.debug('Loading rtorrent config from %r' % (rcfile,))
rc_vals = Bunch(scgi_local='', scgi_port='')
with open(rcfile) as handle:
continued = False
for line in handle.readlines():
# Skip comments, continuations, and empty lines
line = line.strip()
(continued, was_continued) = (line.endswith('\\'), continued)
if not line or was_continued or line.startswith('#'):
continue # depends on [control=['if'], data=[]]
# Be lenient about errors, after all it's not our own config file
try:
(key, val) = line.split('=', 1) # depends on [control=['try'], data=[]]
except ValueError:
self.LOG.warning('Ignored invalid line %r in %r!' % (line, rcfile))
continue # depends on [control=['except'], data=[]]
(key, val) = (key.strip(), val.strip())
key = self.RTORRENT_RC_ALIASES.get(key, key).replace('.', '_')
# Copy values we're interested in
if key in self.RTORRENT_RC_KEYS:
self.LOG.debug('rtorrent.rc: %s = %s' % (key, val))
rc_vals[key] = val # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['handle']]
# Validate fields
if rc_vals.scgi_local:
rc_vals.scgi_local = os.path.expanduser(rc_vals.scgi_local)
if rc_vals.scgi_local.startswith('/'):
rc_vals.scgi_local = 'scgi://' + rc_vals.scgi_local # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if rc_vals.scgi_port and (not rc_vals.scgi_port.startswith('scgi://')):
rc_vals.scgi_port = 'scgi://' + rc_vals.scgi_port # depends on [control=['if'], data=[]]
# Prefer UNIX domain sockets over TCP sockets
namespace.scgi_url = rc_vals.scgi_local or rc_vals.scgi_port
|
def rename(
df,
values: Dict[str, Dict[str, str]] = None,
columns: Dict[str, Dict[str, str]] = None,
locale: str = None
):
"""
Replaces data values and column names according to the locale
---
### Parameters
- `values` (optional: dict):
- key: term to be replaced
- value:
- key: the locale e.g. 'en' or 'fr'
- value: term's translation
- `columns` (optional: dict):
- key: columns name to be replaced
- value:
- key: the locale e.g. 'en' or 'fr'
- value: column name's translation
- `locale` (optional: str): the locale you want to use.
By default the client locale is used.
---
### Example
**Input**
| label | value |
|:----------------:|:-----:|
| France | 100 |
| Europe wo France | 500 |
```cson
rename:
values:
'Europe wo France':
'en': 'Europe excl. France'
'fr': 'Europe excl. France'
columns:
'value':
'en': 'revenue'
'fr': 'revenue'
```
**Output**
| label | revenue |
|:-------------------:|:-------:|
| France | 100 |
| Europe excl. France | 500 |
"""
if values:
to_replace = list(values.keys())
value = [values[term][locale] for term in values]
df = df.replace(to_replace=to_replace, value=value)
if columns:
_keys = list(columns.keys())
_values = [column[locale] for column in columns.values()]
columns = dict(list(zip(_keys, _values)))
df = df.rename(columns=columns)
return df
|
def function[rename, parameter[df, values, columns, locale]]:
constant[
Replaces data values and column names according to the locale
---
### Parameters
- `values` (optional: dict):
- key: term to be replaced
- value:
- key: the locale e.g. 'en' or 'fr'
- value: term's translation
- `columns` (optional: dict):
- key: columns name to be replaced
- value:
- key: the locale e.g. 'en' or 'fr'
- value: column name's translation
- `locale` (optional: str): the locale you want to use.
By default the client locale is used.
---
### Example
**Input**
| label | value |
|:----------------:|:-----:|
| France | 100 |
| Europe wo France | 500 |
```cson
rename:
values:
'Europe wo France':
'en': 'Europe excl. France'
'fr': 'Europe excl. France'
columns:
'value':
'en': 'revenue'
'fr': 'revenue'
```
**Output**
| label | revenue |
|:-------------------:|:-------:|
| France | 100 |
| Europe excl. France | 500 |
]
if name[values] begin[:]
variable[to_replace] assign[=] call[name[list], parameter[call[name[values].keys, parameter[]]]]
variable[value] assign[=] <ast.ListComp object at 0x7da18c4ce2f0>
variable[df] assign[=] call[name[df].replace, parameter[]]
if name[columns] begin[:]
variable[_keys] assign[=] call[name[list], parameter[call[name[columns].keys, parameter[]]]]
variable[_values] assign[=] <ast.ListComp object at 0x7da18c4ce950>
variable[columns] assign[=] call[name[dict], parameter[call[name[list], parameter[call[name[zip], parameter[name[_keys], name[_values]]]]]]]
variable[df] assign[=] call[name[df].rename, parameter[]]
return[name[df]]
|
keyword[def] identifier[rename] (
identifier[df] ,
identifier[values] : identifier[Dict] [ identifier[str] , identifier[Dict] [ identifier[str] , identifier[str] ]]= keyword[None] ,
identifier[columns] : identifier[Dict] [ identifier[str] , identifier[Dict] [ identifier[str] , identifier[str] ]]= keyword[None] ,
identifier[locale] : identifier[str] = keyword[None]
):
literal[string]
keyword[if] identifier[values] :
identifier[to_replace] = identifier[list] ( identifier[values] . identifier[keys] ())
identifier[value] =[ identifier[values] [ identifier[term] ][ identifier[locale] ] keyword[for] identifier[term] keyword[in] identifier[values] ]
identifier[df] = identifier[df] . identifier[replace] ( identifier[to_replace] = identifier[to_replace] , identifier[value] = identifier[value] )
keyword[if] identifier[columns] :
identifier[_keys] = identifier[list] ( identifier[columns] . identifier[keys] ())
identifier[_values] =[ identifier[column] [ identifier[locale] ] keyword[for] identifier[column] keyword[in] identifier[columns] . identifier[values] ()]
identifier[columns] = identifier[dict] ( identifier[list] ( identifier[zip] ( identifier[_keys] , identifier[_values] )))
identifier[df] = identifier[df] . identifier[rename] ( identifier[columns] = identifier[columns] )
keyword[return] identifier[df]
|
def rename(df, values: Dict[str, Dict[str, str]]=None, columns: Dict[str, Dict[str, str]]=None, locale: str=None):
"""
Replaces data values and column names according to the locale
---
### Parameters
- `values` (optional: dict):
- key: term to be replaced
- value:
- key: the locale e.g. 'en' or 'fr'
- value: term's translation
- `columns` (optional: dict):
- key: columns name to be replaced
- value:
- key: the locale e.g. 'en' or 'fr'
- value: column name's translation
- `locale` (optional: str): the locale you want to use.
By default the client locale is used.
---
### Example
**Input**
| label | value |
|:----------------:|:-----:|
| France | 100 |
| Europe wo France | 500 |
```cson
rename:
values:
'Europe wo France':
'en': 'Europe excl. France'
'fr': 'Europe excl. France'
columns:
'value':
'en': 'revenue'
'fr': 'revenue'
```
**Output**
| label | revenue |
|:-------------------:|:-------:|
| France | 100 |
| Europe excl. France | 500 |
"""
if values:
to_replace = list(values.keys())
value = [values[term][locale] for term in values]
df = df.replace(to_replace=to_replace, value=value) # depends on [control=['if'], data=[]]
if columns:
_keys = list(columns.keys())
_values = [column[locale] for column in columns.values()]
columns = dict(list(zip(_keys, _values)))
df = df.rename(columns=columns) # depends on [control=['if'], data=[]]
return df
|
def _write_hex_long(self, data, pos, value):
"""
Writes an unsigned long value across a byte array.
:param data: the buffer to write the value to
:type data: bytearray
:param pos: the starting position
:type pos: int
:param value: the value to write
:type value: unsigned long
"""
self._write_hex_byte(data, pos + 0, (value >> 56) & 0xff)
self._write_hex_byte(data, pos + 2, (value >> 48) & 0xff)
self._write_hex_byte(data, pos + 4, (value >> 40) & 0xff)
self._write_hex_byte(data, pos + 6, (value >> 32) & 0xff)
self._write_hex_byte(data, pos + 8, (value >> 24) & 0xff)
self._write_hex_byte(data, pos + 10, (value >> 16) & 0xff)
self._write_hex_byte(data, pos + 12, (value >> 8) & 0xff)
self._write_hex_byte(data, pos + 14, (value & 0xff))
|
def function[_write_hex_long, parameter[self, data, pos, value]]:
constant[
Writes an unsigned long value across a byte array.
:param data: the buffer to write the value to
:type data: bytearray
:param pos: the starting position
:type pos: int
:param value: the value to write
:type value: unsigned long
]
call[name[self]._write_hex_byte, parameter[name[data], binary_operation[name[pos] + constant[0]], binary_operation[binary_operation[name[value] <ast.RShift object at 0x7da2590d6a40> constant[56]] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]]]
call[name[self]._write_hex_byte, parameter[name[data], binary_operation[name[pos] + constant[2]], binary_operation[binary_operation[name[value] <ast.RShift object at 0x7da2590d6a40> constant[48]] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]]]
call[name[self]._write_hex_byte, parameter[name[data], binary_operation[name[pos] + constant[4]], binary_operation[binary_operation[name[value] <ast.RShift object at 0x7da2590d6a40> constant[40]] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]]]
call[name[self]._write_hex_byte, parameter[name[data], binary_operation[name[pos] + constant[6]], binary_operation[binary_operation[name[value] <ast.RShift object at 0x7da2590d6a40> constant[32]] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]]]
call[name[self]._write_hex_byte, parameter[name[data], binary_operation[name[pos] + constant[8]], binary_operation[binary_operation[name[value] <ast.RShift object at 0x7da2590d6a40> constant[24]] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]]]
call[name[self]._write_hex_byte, parameter[name[data], binary_operation[name[pos] + constant[10]], binary_operation[binary_operation[name[value] <ast.RShift object at 0x7da2590d6a40> constant[16]] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]]]
call[name[self]._write_hex_byte, parameter[name[data], binary_operation[name[pos] + constant[12]], binary_operation[binary_operation[name[value] <ast.RShift object at 0x7da2590d6a40> constant[8]] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]]]
call[name[self]._write_hex_byte, parameter[name[data], binary_operation[name[pos] + constant[14]], binary_operation[name[value] <ast.BitAnd object at 0x7da2590d6b60> constant[255]]]]
|
keyword[def] identifier[_write_hex_long] ( identifier[self] , identifier[data] , identifier[pos] , identifier[value] ):
literal[string]
identifier[self] . identifier[_write_hex_byte] ( identifier[data] , identifier[pos] + literal[int] ,( identifier[value] >> literal[int] )& literal[int] )
identifier[self] . identifier[_write_hex_byte] ( identifier[data] , identifier[pos] + literal[int] ,( identifier[value] >> literal[int] )& literal[int] )
identifier[self] . identifier[_write_hex_byte] ( identifier[data] , identifier[pos] + literal[int] ,( identifier[value] >> literal[int] )& literal[int] )
identifier[self] . identifier[_write_hex_byte] ( identifier[data] , identifier[pos] + literal[int] ,( identifier[value] >> literal[int] )& literal[int] )
identifier[self] . identifier[_write_hex_byte] ( identifier[data] , identifier[pos] + literal[int] ,( identifier[value] >> literal[int] )& literal[int] )
identifier[self] . identifier[_write_hex_byte] ( identifier[data] , identifier[pos] + literal[int] ,( identifier[value] >> literal[int] )& literal[int] )
identifier[self] . identifier[_write_hex_byte] ( identifier[data] , identifier[pos] + literal[int] ,( identifier[value] >> literal[int] )& literal[int] )
identifier[self] . identifier[_write_hex_byte] ( identifier[data] , identifier[pos] + literal[int] ,( identifier[value] & literal[int] ))
|
def _write_hex_long(self, data, pos, value):
"""
Writes an unsigned long value across a byte array.
:param data: the buffer to write the value to
:type data: bytearray
:param pos: the starting position
:type pos: int
:param value: the value to write
:type value: unsigned long
"""
self._write_hex_byte(data, pos + 0, value >> 56 & 255)
self._write_hex_byte(data, pos + 2, value >> 48 & 255)
self._write_hex_byte(data, pos + 4, value >> 40 & 255)
self._write_hex_byte(data, pos + 6, value >> 32 & 255)
self._write_hex_byte(data, pos + 8, value >> 24 & 255)
self._write_hex_byte(data, pos + 10, value >> 16 & 255)
self._write_hex_byte(data, pos + 12, value >> 8 & 255)
self._write_hex_byte(data, pos + 14, value & 255)
|
async def _pb_request(self, endpoint, request_pb, response_pb):
"""Send a Protocol Buffer formatted chat API request.
Args:
endpoint (str): The chat API endpoint to use.
request_pb: The request body as a Protocol Buffer message.
response_pb: The response body as a Protocol Buffer message.
Raises:
NetworkError: If the request fails.
"""
logger.debug('Sending Protocol Buffer request %s:\n%s', endpoint,
request_pb)
res = await self._base_request(
'https://clients6.google.com/chat/v1/{}'.format(endpoint),
'application/x-protobuf', # Request body is Protocol Buffer.
'proto', # Response body is Protocol Buffer.
request_pb.SerializeToString()
)
try:
response_pb.ParseFromString(base64.b64decode(res.body))
except binascii.Error as e:
raise exceptions.NetworkError(
'Failed to decode base64 response: {}'.format(e)
)
except google.protobuf.message.DecodeError as e:
raise exceptions.NetworkError(
'Failed to decode Protocol Buffer response: {}'.format(e)
)
logger.debug('Received Protocol Buffer response:\n%s', response_pb)
status = response_pb.response_header.status
if status != hangouts_pb2.RESPONSE_STATUS_OK:
description = response_pb.response_header.error_description
raise exceptions.NetworkError(
'Request failed with status {}: \'{}\''
.format(status, description)
)
|
<ast.AsyncFunctionDef object at 0x7da207f99600>
|
keyword[async] keyword[def] identifier[_pb_request] ( identifier[self] , identifier[endpoint] , identifier[request_pb] , identifier[response_pb] ):
literal[string]
identifier[logger] . identifier[debug] ( literal[string] , identifier[endpoint] ,
identifier[request_pb] )
identifier[res] = keyword[await] identifier[self] . identifier[_base_request] (
literal[string] . identifier[format] ( identifier[endpoint] ),
literal[string] ,
literal[string] ,
identifier[request_pb] . identifier[SerializeToString] ()
)
keyword[try] :
identifier[response_pb] . identifier[ParseFromString] ( identifier[base64] . identifier[b64decode] ( identifier[res] . identifier[body] ))
keyword[except] identifier[binascii] . identifier[Error] keyword[as] identifier[e] :
keyword[raise] identifier[exceptions] . identifier[NetworkError] (
literal[string] . identifier[format] ( identifier[e] )
)
keyword[except] identifier[google] . identifier[protobuf] . identifier[message] . identifier[DecodeError] keyword[as] identifier[e] :
keyword[raise] identifier[exceptions] . identifier[NetworkError] (
literal[string] . identifier[format] ( identifier[e] )
)
identifier[logger] . identifier[debug] ( literal[string] , identifier[response_pb] )
identifier[status] = identifier[response_pb] . identifier[response_header] . identifier[status]
keyword[if] identifier[status] != identifier[hangouts_pb2] . identifier[RESPONSE_STATUS_OK] :
identifier[description] = identifier[response_pb] . identifier[response_header] . identifier[error_description]
keyword[raise] identifier[exceptions] . identifier[NetworkError] (
literal[string]
. identifier[format] ( identifier[status] , identifier[description] )
)
|
async def _pb_request(self, endpoint, request_pb, response_pb):
"""Send a Protocol Buffer formatted chat API request.
Args:
endpoint (str): The chat API endpoint to use.
request_pb: The request body as a Protocol Buffer message.
response_pb: The response body as a Protocol Buffer message.
Raises:
NetworkError: If the request fails.
"""
logger.debug('Sending Protocol Buffer request %s:\n%s', endpoint, request_pb) # Request body is Protocol Buffer.
# Response body is Protocol Buffer.
res = await self._base_request('https://clients6.google.com/chat/v1/{}'.format(endpoint), 'application/x-protobuf', 'proto', request_pb.SerializeToString())
try:
response_pb.ParseFromString(base64.b64decode(res.body)) # depends on [control=['try'], data=[]]
except binascii.Error as e:
raise exceptions.NetworkError('Failed to decode base64 response: {}'.format(e)) # depends on [control=['except'], data=['e']]
except google.protobuf.message.DecodeError as e:
raise exceptions.NetworkError('Failed to decode Protocol Buffer response: {}'.format(e)) # depends on [control=['except'], data=['e']]
logger.debug('Received Protocol Buffer response:\n%s', response_pb)
status = response_pb.response_header.status
if status != hangouts_pb2.RESPONSE_STATUS_OK:
description = response_pb.response_header.error_description
raise exceptions.NetworkError("Request failed with status {}: '{}'".format(status, description)) # depends on [control=['if'], data=['status']]
|
def close_drivers(cls, scope, test_name, test_passed=True, context=None):
"""Stop all drivers, capture screenshots, copy webdriver and GGR logs and download saved videos
:param scope: execution scope (function, module, class or session)
:param test_name: executed test name
:param test_passed: True if the test has passed
:param context: behave context
"""
if scope == 'function':
# Capture screenshot on error
if not test_passed:
cls.capture_screenshots(test_name)
# Execute behave dynamic environment
if context and hasattr(context, 'dyn_env'):
context.dyn_env.execute_after_scenario_steps(context)
# Save webdriver logs on error or if it is enabled
cls.save_all_webdriver_logs(test_name, test_passed)
# Close browser and stop driver if it must not be reused
reuse_driver = cls.get_default_wrapper().should_reuse_driver(scope, test_passed, context)
cls.stop_drivers(reuse_driver)
cls.download_videos(test_name, test_passed, reuse_driver)
cls.save_all_ggr_logs(test_name, test_passed)
cls.remove_drivers(reuse_driver)
|
def function[close_drivers, parameter[cls, scope, test_name, test_passed, context]]:
constant[Stop all drivers, capture screenshots, copy webdriver and GGR logs and download saved videos
:param scope: execution scope (function, module, class or session)
:param test_name: executed test name
:param test_passed: True if the test has passed
:param context: behave context
]
if compare[name[scope] equal[==] constant[function]] begin[:]
if <ast.UnaryOp object at 0x7da1b23d0460> begin[:]
call[name[cls].capture_screenshots, parameter[name[test_name]]]
if <ast.BoolOp object at 0x7da1b23d0640> begin[:]
call[name[context].dyn_env.execute_after_scenario_steps, parameter[name[context]]]
call[name[cls].save_all_webdriver_logs, parameter[name[test_name], name[test_passed]]]
variable[reuse_driver] assign[=] call[call[name[cls].get_default_wrapper, parameter[]].should_reuse_driver, parameter[name[scope], name[test_passed], name[context]]]
call[name[cls].stop_drivers, parameter[name[reuse_driver]]]
call[name[cls].download_videos, parameter[name[test_name], name[test_passed], name[reuse_driver]]]
call[name[cls].save_all_ggr_logs, parameter[name[test_name], name[test_passed]]]
call[name[cls].remove_drivers, parameter[name[reuse_driver]]]
|
keyword[def] identifier[close_drivers] ( identifier[cls] , identifier[scope] , identifier[test_name] , identifier[test_passed] = keyword[True] , identifier[context] = keyword[None] ):
literal[string]
keyword[if] identifier[scope] == literal[string] :
keyword[if] keyword[not] identifier[test_passed] :
identifier[cls] . identifier[capture_screenshots] ( identifier[test_name] )
keyword[if] identifier[context] keyword[and] identifier[hasattr] ( identifier[context] , literal[string] ):
identifier[context] . identifier[dyn_env] . identifier[execute_after_scenario_steps] ( identifier[context] )
identifier[cls] . identifier[save_all_webdriver_logs] ( identifier[test_name] , identifier[test_passed] )
identifier[reuse_driver] = identifier[cls] . identifier[get_default_wrapper] (). identifier[should_reuse_driver] ( identifier[scope] , identifier[test_passed] , identifier[context] )
identifier[cls] . identifier[stop_drivers] ( identifier[reuse_driver] )
identifier[cls] . identifier[download_videos] ( identifier[test_name] , identifier[test_passed] , identifier[reuse_driver] )
identifier[cls] . identifier[save_all_ggr_logs] ( identifier[test_name] , identifier[test_passed] )
identifier[cls] . identifier[remove_drivers] ( identifier[reuse_driver] )
|
def close_drivers(cls, scope, test_name, test_passed=True, context=None):
"""Stop all drivers, capture screenshots, copy webdriver and GGR logs and download saved videos
:param scope: execution scope (function, module, class or session)
:param test_name: executed test name
:param test_passed: True if the test has passed
:param context: behave context
"""
if scope == 'function':
# Capture screenshot on error
if not test_passed:
cls.capture_screenshots(test_name) # depends on [control=['if'], data=[]]
# Execute behave dynamic environment
if context and hasattr(context, 'dyn_env'):
context.dyn_env.execute_after_scenario_steps(context) # depends on [control=['if'], data=[]]
# Save webdriver logs on error or if it is enabled
cls.save_all_webdriver_logs(test_name, test_passed) # depends on [control=['if'], data=[]]
# Close browser and stop driver if it must not be reused
reuse_driver = cls.get_default_wrapper().should_reuse_driver(scope, test_passed, context)
cls.stop_drivers(reuse_driver)
cls.download_videos(test_name, test_passed, reuse_driver)
cls.save_all_ggr_logs(test_name, test_passed)
cls.remove_drivers(reuse_driver)
|
def get_login_password(site_name="github.com",
netrc_file="~/.netrc",
git_credential_file="~/.git-credentials"):
"""Read a .netrc file and return login/password for LWN."""
try:
n = netrc.netrc(os.path.expanduser(netrc_file))
except OSError:
pass
else:
if site_name in n.hosts:
return n.hosts[site_name][0], n.hosts[site_name][2]
try:
with open(os.path.expanduser(git_credential_file)) as f:
for line in f:
parsed = parse.urlparse(line.strip())
if parsed.hostname == site_name:
return (parse.unquote(parsed.username),
parse.unquote(parsed.password))
except OSError:
pass
return None, None
|
def function[get_login_password, parameter[site_name, netrc_file, git_credential_file]]:
constant[Read a .netrc file and return login/password for LWN.]
<ast.Try object at 0x7da18c4cd720>
<ast.Try object at 0x7da1b152ad10>
return[tuple[[<ast.Constant object at 0x7da1b1529900>, <ast.Constant object at 0x7da1b1529840>]]]
|
keyword[def] identifier[get_login_password] ( identifier[site_name] = literal[string] ,
identifier[netrc_file] = literal[string] ,
identifier[git_credential_file] = literal[string] ):
literal[string]
keyword[try] :
identifier[n] = identifier[netrc] . identifier[netrc] ( identifier[os] . identifier[path] . identifier[expanduser] ( identifier[netrc_file] ))
keyword[except] identifier[OSError] :
keyword[pass]
keyword[else] :
keyword[if] identifier[site_name] keyword[in] identifier[n] . identifier[hosts] :
keyword[return] identifier[n] . identifier[hosts] [ identifier[site_name] ][ literal[int] ], identifier[n] . identifier[hosts] [ identifier[site_name] ][ literal[int] ]
keyword[try] :
keyword[with] identifier[open] ( identifier[os] . identifier[path] . identifier[expanduser] ( identifier[git_credential_file] )) keyword[as] identifier[f] :
keyword[for] identifier[line] keyword[in] identifier[f] :
identifier[parsed] = identifier[parse] . identifier[urlparse] ( identifier[line] . identifier[strip] ())
keyword[if] identifier[parsed] . identifier[hostname] == identifier[site_name] :
keyword[return] ( identifier[parse] . identifier[unquote] ( identifier[parsed] . identifier[username] ),
identifier[parse] . identifier[unquote] ( identifier[parsed] . identifier[password] ))
keyword[except] identifier[OSError] :
keyword[pass]
keyword[return] keyword[None] , keyword[None]
|
def get_login_password(site_name='github.com', netrc_file='~/.netrc', git_credential_file='~/.git-credentials'):
"""Read a .netrc file and return login/password for LWN."""
try:
n = netrc.netrc(os.path.expanduser(netrc_file)) # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]]
else:
if site_name in n.hosts:
return (n.hosts[site_name][0], n.hosts[site_name][2]) # depends on [control=['if'], data=['site_name']]
try:
with open(os.path.expanduser(git_credential_file)) as f:
for line in f:
parsed = parse.urlparse(line.strip())
if parsed.hostname == site_name:
return (parse.unquote(parsed.username), parse.unquote(parsed.password)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['f']] # depends on [control=['try'], data=[]]
except OSError:
pass # depends on [control=['except'], data=[]]
return (None, None)
|
def readline(self, size=-1):
'''This reads and returns one entire line. The newline at the end of
line is returned as part of the string, unless the file ends without a
newline. An empty string is returned if EOF is encountered immediately.
This looks for a newline as a CR/LF pair (\\r\\n) even on UNIX because
this is what the pseudotty device returns. So contrary to what you may
expect you will receive newlines as \\r\\n.
If the size argument is 0 then an empty string is returned. In all
other cases the size argument is ignored, which is not standard
behavior for a file-like object. '''
if size == 0:
return self.string_type()
# delimiter default is EOF
index = self.expect([self.crlf, self.delimiter])
if index == 0:
return self.before + self.crlf
else:
return self.before
|
def function[readline, parameter[self, size]]:
constant[This reads and returns one entire line. The newline at the end of
line is returned as part of the string, unless the file ends without a
newline. An empty string is returned if EOF is encountered immediately.
This looks for a newline as a CR/LF pair (\r\n) even on UNIX because
this is what the pseudotty device returns. So contrary to what you may
expect you will receive newlines as \r\n.
If the size argument is 0 then an empty string is returned. In all
other cases the size argument is ignored, which is not standard
behavior for a file-like object. ]
if compare[name[size] equal[==] constant[0]] begin[:]
return[call[name[self].string_type, parameter[]]]
variable[index] assign[=] call[name[self].expect, parameter[list[[<ast.Attribute object at 0x7da2054a7fd0>, <ast.Attribute object at 0x7da2054a7280>]]]]
if compare[name[index] equal[==] constant[0]] begin[:]
return[binary_operation[name[self].before + name[self].crlf]]
|
keyword[def] identifier[readline] ( identifier[self] , identifier[size] =- literal[int] ):
literal[string]
keyword[if] identifier[size] == literal[int] :
keyword[return] identifier[self] . identifier[string_type] ()
identifier[index] = identifier[self] . identifier[expect] ([ identifier[self] . identifier[crlf] , identifier[self] . identifier[delimiter] ])
keyword[if] identifier[index] == literal[int] :
keyword[return] identifier[self] . identifier[before] + identifier[self] . identifier[crlf]
keyword[else] :
keyword[return] identifier[self] . identifier[before]
|
def readline(self, size=-1):
"""This reads and returns one entire line. The newline at the end of
line is returned as part of the string, unless the file ends without a
newline. An empty string is returned if EOF is encountered immediately.
This looks for a newline as a CR/LF pair (\\r\\n) even on UNIX because
this is what the pseudotty device returns. So contrary to what you may
expect you will receive newlines as \\r\\n.
If the size argument is 0 then an empty string is returned. In all
other cases the size argument is ignored, which is not standard
behavior for a file-like object. """
if size == 0:
return self.string_type() # depends on [control=['if'], data=[]]
# delimiter default is EOF
index = self.expect([self.crlf, self.delimiter])
if index == 0:
return self.before + self.crlf # depends on [control=['if'], data=[]]
else:
return self.before
|
def get_tails(chains):
"""
Args:
An ordered collection of block generators.
Returns
A dictionary of lists of blocks for all chains where:
1. The first block in all the lists has the same block number
2. Each list has all blocks from the common block to the current
block in increasing order
3. The dictionary key is the index of the chain in `chains` that
the list was generated from
A list of indexes of the chains that had communication problems.
"""
def get_num_of_oldest(blocks):
return blocks[0].num
# Get the first block from every chain
tails = {}
bad_chains = []
for i, chain in chains.items():
try:
tails[i] = [next(chain)]
except StopIteration:
bad_chains.append(i)
# Find the minimum block number between all chains
min_block_num = min(map(get_num_of_oldest, tails.values()))
# Walk all chains back to the minimum block number, adding blocks to the
# chain lists as we go
for i, chain in chains.items():
if i not in bad_chains:
tail = tails[i]
while get_num_of_oldest(tail) > min_block_num:
try:
block = next(chain)
except StopIteration:
bad_chains.append(i)
break
tail.insert(0, block)
return tails, bad_chains
|
def function[get_tails, parameter[chains]]:
constant[
Args:
An ordered collection of block generators.
Returns
A dictionary of lists of blocks for all chains where:
1. The first block in all the lists has the same block number
2. Each list has all blocks from the common block to the current
block in increasing order
3. The dictionary key is the index of the chain in `chains` that
the list was generated from
A list of indexes of the chains that had communication problems.
]
def function[get_num_of_oldest, parameter[blocks]]:
return[call[name[blocks]][constant[0]].num]
variable[tails] assign[=] dictionary[[], []]
variable[bad_chains] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da18bc725c0>, <ast.Name object at 0x7da18bc70760>]]] in starred[call[name[chains].items, parameter[]]] begin[:]
<ast.Try object at 0x7da18bc70d90>
variable[min_block_num] assign[=] call[name[min], parameter[call[name[map], parameter[name[get_num_of_oldest], call[name[tails].values, parameter[]]]]]]
for taget[tuple[[<ast.Name object at 0x7da18bc706d0>, <ast.Name object at 0x7da18bc709a0>]]] in starred[call[name[chains].items, parameter[]]] begin[:]
if compare[name[i] <ast.NotIn object at 0x7da2590d7190> name[bad_chains]] begin[:]
variable[tail] assign[=] call[name[tails]][name[i]]
while compare[call[name[get_num_of_oldest], parameter[name[tail]]] greater[>] name[min_block_num]] begin[:]
<ast.Try object at 0x7da18bc71d50>
call[name[tail].insert, parameter[constant[0], name[block]]]
return[tuple[[<ast.Name object at 0x7da18f00fa30>, <ast.Name object at 0x7da18f00fc70>]]]
|
keyword[def] identifier[get_tails] ( identifier[chains] ):
literal[string]
keyword[def] identifier[get_num_of_oldest] ( identifier[blocks] ):
keyword[return] identifier[blocks] [ literal[int] ]. identifier[num]
identifier[tails] ={}
identifier[bad_chains] =[]
keyword[for] identifier[i] , identifier[chain] keyword[in] identifier[chains] . identifier[items] ():
keyword[try] :
identifier[tails] [ identifier[i] ]=[ identifier[next] ( identifier[chain] )]
keyword[except] identifier[StopIteration] :
identifier[bad_chains] . identifier[append] ( identifier[i] )
identifier[min_block_num] = identifier[min] ( identifier[map] ( identifier[get_num_of_oldest] , identifier[tails] . identifier[values] ()))
keyword[for] identifier[i] , identifier[chain] keyword[in] identifier[chains] . identifier[items] ():
keyword[if] identifier[i] keyword[not] keyword[in] identifier[bad_chains] :
identifier[tail] = identifier[tails] [ identifier[i] ]
keyword[while] identifier[get_num_of_oldest] ( identifier[tail] )> identifier[min_block_num] :
keyword[try] :
identifier[block] = identifier[next] ( identifier[chain] )
keyword[except] identifier[StopIteration] :
identifier[bad_chains] . identifier[append] ( identifier[i] )
keyword[break]
identifier[tail] . identifier[insert] ( literal[int] , identifier[block] )
keyword[return] identifier[tails] , identifier[bad_chains]
|
def get_tails(chains):
"""
Args:
An ordered collection of block generators.
Returns
A dictionary of lists of blocks for all chains where:
1. The first block in all the lists has the same block number
2. Each list has all blocks from the common block to the current
block in increasing order
3. The dictionary key is the index of the chain in `chains` that
the list was generated from
A list of indexes of the chains that had communication problems.
"""
def get_num_of_oldest(blocks):
return blocks[0].num
# Get the first block from every chain
tails = {}
bad_chains = []
for (i, chain) in chains.items():
try:
tails[i] = [next(chain)] # depends on [control=['try'], data=[]]
except StopIteration:
bad_chains.append(i) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=[]]
# Find the minimum block number between all chains
min_block_num = min(map(get_num_of_oldest, tails.values()))
# Walk all chains back to the minimum block number, adding blocks to the
# chain lists as we go
for (i, chain) in chains.items():
if i not in bad_chains:
tail = tails[i]
while get_num_of_oldest(tail) > min_block_num:
try:
block = next(chain) # depends on [control=['try'], data=[]]
except StopIteration:
bad_chains.append(i)
break # depends on [control=['except'], data=[]]
tail.insert(0, block) # depends on [control=['while'], data=[]] # depends on [control=['if'], data=['i', 'bad_chains']] # depends on [control=['for'], data=[]]
return (tails, bad_chains)
|
def tag(version, params):
"""Build and return full command to use with subprocess.Popen for 'git tag' command
:param version:
:param params:
:return: list
"""
cmd = ['git', 'tag', '-a', '-m', 'v%s' % version, str(version)]
if params:
cmd.extend(params)
return cmd
|
def function[tag, parameter[version, params]]:
constant[Build and return full command to use with subprocess.Popen for 'git tag' command
:param version:
:param params:
:return: list
]
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da1b087a0b0>, <ast.Constant object at 0x7da1b087a9e0>, <ast.Constant object at 0x7da1b087a170>, <ast.Constant object at 0x7da1b087b460>, <ast.BinOp object at 0x7da1b087ba00>, <ast.Call object at 0x7da1b087a590>]]
if name[params] begin[:]
call[name[cmd].extend, parameter[name[params]]]
return[name[cmd]]
|
keyword[def] identifier[tag] ( identifier[version] , identifier[params] ):
literal[string]
identifier[cmd] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] % identifier[version] , identifier[str] ( identifier[version] )]
keyword[if] identifier[params] :
identifier[cmd] . identifier[extend] ( identifier[params] )
keyword[return] identifier[cmd]
|
def tag(version, params):
"""Build and return full command to use with subprocess.Popen for 'git tag' command
:param version:
:param params:
:return: list
"""
cmd = ['git', 'tag', '-a', '-m', 'v%s' % version, str(version)]
if params:
cmd.extend(params) # depends on [control=['if'], data=[]]
return cmd
|
def get_abs_template_path(template_name, directory, extension):
""" Given a template name, a directory, and an extension, return the
absolute path to the template. """
# Get the relative path
relative_path = join(directory, template_name)
file_with_ext = template_name
if extension:
# If there is a default extension, but no file extension, then add it
file_name, file_ext = splitext(file_with_ext)
if not file_ext:
file_with_ext = extsep.join(
(file_name, extension.replace(extsep, '')))
# Rebuild the relative path
relative_path = join(directory, file_with_ext)
return abspath(relative_path)
|
def function[get_abs_template_path, parameter[template_name, directory, extension]]:
constant[ Given a template name, a directory, and an extension, return the
absolute path to the template. ]
variable[relative_path] assign[=] call[name[join], parameter[name[directory], name[template_name]]]
variable[file_with_ext] assign[=] name[template_name]
if name[extension] begin[:]
<ast.Tuple object at 0x7da204621e70> assign[=] call[name[splitext], parameter[name[file_with_ext]]]
if <ast.UnaryOp object at 0x7da204621300> begin[:]
variable[file_with_ext] assign[=] call[name[extsep].join, parameter[tuple[[<ast.Name object at 0x7da204622e30>, <ast.Call object at 0x7da204621c30>]]]]
variable[relative_path] assign[=] call[name[join], parameter[name[directory], name[file_with_ext]]]
return[call[name[abspath], parameter[name[relative_path]]]]
|
keyword[def] identifier[get_abs_template_path] ( identifier[template_name] , identifier[directory] , identifier[extension] ):
literal[string]
identifier[relative_path] = identifier[join] ( identifier[directory] , identifier[template_name] )
identifier[file_with_ext] = identifier[template_name]
keyword[if] identifier[extension] :
identifier[file_name] , identifier[file_ext] = identifier[splitext] ( identifier[file_with_ext] )
keyword[if] keyword[not] identifier[file_ext] :
identifier[file_with_ext] = identifier[extsep] . identifier[join] (
( identifier[file_name] , identifier[extension] . identifier[replace] ( identifier[extsep] , literal[string] )))
identifier[relative_path] = identifier[join] ( identifier[directory] , identifier[file_with_ext] )
keyword[return] identifier[abspath] ( identifier[relative_path] )
|
def get_abs_template_path(template_name, directory, extension):
""" Given a template name, a directory, and an extension, return the
absolute path to the template. """
# Get the relative path
relative_path = join(directory, template_name)
file_with_ext = template_name
if extension:
# If there is a default extension, but no file extension, then add it
(file_name, file_ext) = splitext(file_with_ext)
if not file_ext:
file_with_ext = extsep.join((file_name, extension.replace(extsep, '')))
# Rebuild the relative path
relative_path = join(directory, file_with_ext) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
return abspath(relative_path)
|
def ctypes2buffer(cptr, length):
"""Convert ctypes pointer to buffer type."""
if not isinstance(cptr, ctypes.POINTER(ctypes.c_char)):
raise RuntimeError('expected char pointer')
res = bytearray(length)
rptr = (ctypes.c_char * length).from_buffer(res)
if not ctypes.memmove(rptr, cptr, length):
raise RuntimeError('memmove failed')
return res
|
def function[ctypes2buffer, parameter[cptr, length]]:
constant[Convert ctypes pointer to buffer type.]
if <ast.UnaryOp object at 0x7da1b209b430> begin[:]
<ast.Raise object at 0x7da1b209a080>
variable[res] assign[=] call[name[bytearray], parameter[name[length]]]
variable[rptr] assign[=] call[binary_operation[name[ctypes].c_char * name[length]].from_buffer, parameter[name[res]]]
if <ast.UnaryOp object at 0x7da1b209b760> begin[:]
<ast.Raise object at 0x7da1b209abc0>
return[name[res]]
|
keyword[def] identifier[ctypes2buffer] ( identifier[cptr] , identifier[length] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[cptr] , identifier[ctypes] . identifier[POINTER] ( identifier[ctypes] . identifier[c_char] )):
keyword[raise] identifier[RuntimeError] ( literal[string] )
identifier[res] = identifier[bytearray] ( identifier[length] )
identifier[rptr] =( identifier[ctypes] . identifier[c_char] * identifier[length] ). identifier[from_buffer] ( identifier[res] )
keyword[if] keyword[not] identifier[ctypes] . identifier[memmove] ( identifier[rptr] , identifier[cptr] , identifier[length] ):
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[return] identifier[res]
|
def ctypes2buffer(cptr, length):
"""Convert ctypes pointer to buffer type."""
if not isinstance(cptr, ctypes.POINTER(ctypes.c_char)):
raise RuntimeError('expected char pointer') # depends on [control=['if'], data=[]]
res = bytearray(length)
rptr = (ctypes.c_char * length).from_buffer(res)
if not ctypes.memmove(rptr, cptr, length):
raise RuntimeError('memmove failed') # depends on [control=['if'], data=[]]
return res
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.