code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def tnet_to_nx(df, t=None):
"""
Creates undirected networkx object
"""
if t is not None:
df = get_network_when(df, t=t)
if 'weight' in df.columns:
nxobj = nx.from_pandas_edgelist(
df, source='i', target='j', edge_attr='weight')
else:
nxobj = nx.from_pandas_edgelist(df, source='i', target='j')
return nxobj | def function[tnet_to_nx, parameter[df, t]]:
constant[
Creates undirected networkx object
]
if compare[name[t] is_not constant[None]] begin[:]
variable[df] assign[=] call[name[get_network_when], parameter[name[df]]]
if compare[constant[weight] in name[df].columns] begin[:]
variable[nxobj] assign[=] call[name[nx].from_pandas_edgelist, parameter[name[df]]]
return[name[nxobj]] | keyword[def] identifier[tnet_to_nx] ( identifier[df] , identifier[t] = keyword[None] ):
literal[string]
keyword[if] identifier[t] keyword[is] keyword[not] keyword[None] :
identifier[df] = identifier[get_network_when] ( identifier[df] , identifier[t] = identifier[t] )
keyword[if] literal[string] keyword[in] identifier[df] . identifier[columns] :
identifier[nxobj] = identifier[nx] . identifier[from_pandas_edgelist] (
identifier[df] , identifier[source] = literal[string] , identifier[target] = literal[string] , identifier[edge_attr] = literal[string] )
keyword[else] :
identifier[nxobj] = identifier[nx] . identifier[from_pandas_edgelist] ( identifier[df] , identifier[source] = literal[string] , identifier[target] = literal[string] )
keyword[return] identifier[nxobj] | def tnet_to_nx(df, t=None):
"""
Creates undirected networkx object
"""
if t is not None:
df = get_network_when(df, t=t) # depends on [control=['if'], data=['t']]
if 'weight' in df.columns:
nxobj = nx.from_pandas_edgelist(df, source='i', target='j', edge_attr='weight') # depends on [control=['if'], data=[]]
else:
nxobj = nx.from_pandas_edgelist(df, source='i', target='j')
return nxobj |
def getPublicKeys(self, current=False):
""" Return all installed public keys
:param bool current: If true, returns only keys for currently
connected blockchain
"""
pubkeys = self.store.getPublicKeys()
if not current:
return pubkeys
pubs = []
for pubkey in pubkeys:
# Filter those keys not for our network
if pubkey[: len(self.prefix)] == self.prefix:
pubs.append(pubkey)
return pubs | def function[getPublicKeys, parameter[self, current]]:
constant[ Return all installed public keys
:param bool current: If true, returns only keys for currently
connected blockchain
]
variable[pubkeys] assign[=] call[name[self].store.getPublicKeys, parameter[]]
if <ast.UnaryOp object at 0x7da1b0108a60> begin[:]
return[name[pubkeys]]
variable[pubs] assign[=] list[[]]
for taget[name[pubkey]] in starred[name[pubkeys]] begin[:]
if compare[call[name[pubkey]][<ast.Slice object at 0x7da1b0108ac0>] equal[==] name[self].prefix] begin[:]
call[name[pubs].append, parameter[name[pubkey]]]
return[name[pubs]] | keyword[def] identifier[getPublicKeys] ( identifier[self] , identifier[current] = keyword[False] ):
literal[string]
identifier[pubkeys] = identifier[self] . identifier[store] . identifier[getPublicKeys] ()
keyword[if] keyword[not] identifier[current] :
keyword[return] identifier[pubkeys]
identifier[pubs] =[]
keyword[for] identifier[pubkey] keyword[in] identifier[pubkeys] :
keyword[if] identifier[pubkey] [: identifier[len] ( identifier[self] . identifier[prefix] )]== identifier[self] . identifier[prefix] :
identifier[pubs] . identifier[append] ( identifier[pubkey] )
keyword[return] identifier[pubs] | def getPublicKeys(self, current=False):
""" Return all installed public keys
:param bool current: If true, returns only keys for currently
connected blockchain
"""
pubkeys = self.store.getPublicKeys()
if not current:
return pubkeys # depends on [control=['if'], data=[]]
pubs = []
for pubkey in pubkeys:
# Filter those keys not for our network
if pubkey[:len(self.prefix)] == self.prefix:
pubs.append(pubkey) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pubkey']]
return pubs |
def generateIntegers(api_key=None,
api_version=None,
**kwargs):
'''
Generate random integers
:param api_key: The Random.org api key.
:param api_version: The Random.org api version.
:param number: The number of integers to generate
:param minimum: The lower boundary for the range from which the
random numbers will be picked. Must be within
the [-1e9,1e9] range.
:param maximum: The upper boundary for the range from which the
random numbers will be picked. Must be within
the [-1e9,1e9] range.
:param replacement: Specifies whether the random numbers should
be picked with replacement. The default (true)
will cause the numbers to be picked with replacement,
i.e., the resulting numbers may contain duplicate
values (like a series of dice rolls). If you want the
numbers picked to be unique (like raffle tickets drawn
from a container), set this value to false.
:param base: Specifies the base that will be used to display the numbers.
Values allowed are 2, 8, 10 and 16. This affects the JSON
types and formatting of the resulting data as discussed below.
:return: A list of integers.
CLI Example:
.. code-block:: bash
salt '*' random_org.generateIntegers number=5 minimum=1 maximum=6
salt '*' random_org.generateIntegers number=5 minimum=2 maximum=255 base=2
'''
ret = {'res': True}
if not api_key or not api_version:
try:
options = __salt__['config.option']('random_org')
if not api_key:
api_key = options.get('api_key')
if not api_version:
api_version = options.get('api_version')
except (NameError, KeyError, AttributeError):
log.error('No Random.org api key found.')
ret['message'] = 'No Random.org api key or api version found.'
ret['res'] = False
return ret
for item in ['number', 'minimum', 'maximum']:
if item not in kwargs:
ret['res'] = False
ret['message'] = 'Rquired argument, {0} is missing.'.format(item)
return ret
if not _numeric(kwargs['number']) or not 1 <= kwargs['number'] <= 10000:
ret['res'] = False
ret['message'] = 'Number of integers must be between 1 and 10000'
return ret
if not _numeric(kwargs['minimum']) or not -1000000000 <= kwargs['minimum'] <= 1000000000:
ret['res'] = False
ret['message'] = 'Minimum argument must be between -1,000,000,000 and 1,000,000,000'
return ret
if not _numeric(kwargs['maximum']) or not -1000000000 <= kwargs['maximum'] <= 1000000000:
ret['res'] = False
ret['message'] = 'Maximum argument must be between -1,000,000,000 and 1,000,000,000'
return ret
if 'base' in kwargs:
base = kwargs['base']
if base not in [2, 8, 10, 16]:
ret['res'] = False
ret['message'] = 'Base must be either 2, 8, 10 or 16.'
return ret
else:
base = 10
if 'replacement' not in kwargs:
replacement = True
else:
replacement = kwargs['replacement']
if isinstance(api_version, int):
api_version = six.text_type(api_version)
_function = RANDOM_ORG_FUNCTIONS.get(api_version).get('generateIntegers').get('method')
data = {}
data['id'] = 1911220
data['jsonrpc'] = '2.0'
data['method'] = _function
data['params'] = {'apiKey': api_key,
'n': kwargs['number'],
'min': kwargs['minimum'],
'max': kwargs['maximum'],
'replacement': replacement,
'base': base
}
result = _query(api_version=api_version, data=data)
log.debug('result %s', result)
if result:
if 'random' in result:
random_data = result.get('random').get('data')
ret['data'] = random_data
else:
ret['res'] = False
ret['message'] = result['message']
else:
ret['res'] = False
ret['message'] = result['message']
return ret | def function[generateIntegers, parameter[api_key, api_version]]:
constant[
Generate random integers
:param api_key: The Random.org api key.
:param api_version: The Random.org api version.
:param number: The number of integers to generate
:param minimum: The lower boundary for the range from which the
random numbers will be picked. Must be within
the [-1e9,1e9] range.
:param maximum: The upper boundary for the range from which the
random numbers will be picked. Must be within
the [-1e9,1e9] range.
:param replacement: Specifies whether the random numbers should
be picked with replacement. The default (true)
will cause the numbers to be picked with replacement,
i.e., the resulting numbers may contain duplicate
values (like a series of dice rolls). If you want the
numbers picked to be unique (like raffle tickets drawn
from a container), set this value to false.
:param base: Specifies the base that will be used to display the numbers.
Values allowed are 2, 8, 10 and 16. This affects the JSON
types and formatting of the resulting data as discussed below.
:return: A list of integers.
CLI Example:
.. code-block:: bash
salt '*' random_org.generateIntegers number=5 minimum=1 maximum=6
salt '*' random_org.generateIntegers number=5 minimum=2 maximum=255 base=2
]
variable[ret] assign[=] dictionary[[<ast.Constant object at 0x7da18bcc9bd0>], [<ast.Constant object at 0x7da18bccabc0>]]
if <ast.BoolOp object at 0x7da18bcc9b10> begin[:]
<ast.Try object at 0x7da18bccaa10>
for taget[name[item]] in starred[list[[<ast.Constant object at 0x7da18bcca5c0>, <ast.Constant object at 0x7da18bccb5b0>, <ast.Constant object at 0x7da18bcc9270>]]] begin[:]
if compare[name[item] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
call[name[ret]][constant[res]] assign[=] constant[False]
call[name[ret]][constant[message]] assign[=] call[constant[Rquired argument, {0} is missing.].format, parameter[name[item]]]
return[name[ret]]
if <ast.BoolOp object at 0x7da18bccbbe0> begin[:]
call[name[ret]][constant[res]] assign[=] constant[False]
call[name[ret]][constant[message]] assign[=] constant[Number of integers must be between 1 and 10000]
return[name[ret]]
if <ast.BoolOp object at 0x7da18bcca620> begin[:]
call[name[ret]][constant[res]] assign[=] constant[False]
call[name[ret]][constant[message]] assign[=] constant[Minimum argument must be between -1,000,000,000 and 1,000,000,000]
return[name[ret]]
if <ast.BoolOp object at 0x7da18bcc9180> begin[:]
call[name[ret]][constant[res]] assign[=] constant[False]
call[name[ret]][constant[message]] assign[=] constant[Maximum argument must be between -1,000,000,000 and 1,000,000,000]
return[name[ret]]
if compare[constant[base] in name[kwargs]] begin[:]
variable[base] assign[=] call[name[kwargs]][constant[base]]
if compare[name[base] <ast.NotIn object at 0x7da2590d7190> list[[<ast.Constant object at 0x7da18bccaad0>, <ast.Constant object at 0x7da18bcc8040>, <ast.Constant object at 0x7da18bcc8e20>, <ast.Constant object at 0x7da18bccab90>]]] begin[:]
call[name[ret]][constant[res]] assign[=] constant[False]
call[name[ret]][constant[message]] assign[=] constant[Base must be either 2, 8, 10 or 16.]
return[name[ret]]
if compare[constant[replacement] <ast.NotIn object at 0x7da2590d7190> name[kwargs]] begin[:]
variable[replacement] assign[=] constant[True]
if call[name[isinstance], parameter[name[api_version], name[int]]] begin[:]
variable[api_version] assign[=] call[name[six].text_type, parameter[name[api_version]]]
variable[_function] assign[=] call[call[call[name[RANDOM_ORG_FUNCTIONS].get, parameter[name[api_version]]].get, parameter[constant[generateIntegers]]].get, parameter[constant[method]]]
variable[data] assign[=] dictionary[[], []]
call[name[data]][constant[id]] assign[=] constant[1911220]
call[name[data]][constant[jsonrpc]] assign[=] constant[2.0]
call[name[data]][constant[method]] assign[=] name[_function]
call[name[data]][constant[params]] assign[=] dictionary[[<ast.Constant object at 0x7da2041dac50>, <ast.Constant object at 0x7da2041d8c10>, <ast.Constant object at 0x7da2041da320>, <ast.Constant object at 0x7da2041d80a0>, <ast.Constant object at 0x7da2041da1d0>, <ast.Constant object at 0x7da2041dbd30>], [<ast.Name object at 0x7da2041dbd90>, <ast.Subscript object at 0x7da2041d84f0>, <ast.Subscript object at 0x7da2041d97b0>, <ast.Subscript object at 0x7da2041da950>, <ast.Name object at 0x7da2041db280>, <ast.Name object at 0x7da2041d8040>]]
variable[result] assign[=] call[name[_query], parameter[]]
call[name[log].debug, parameter[constant[result %s], name[result]]]
if name[result] begin[:]
if compare[constant[random] in name[result]] begin[:]
variable[random_data] assign[=] call[call[name[result].get, parameter[constant[random]]].get, parameter[constant[data]]]
call[name[ret]][constant[data]] assign[=] name[random_data]
return[name[ret]] | keyword[def] identifier[generateIntegers] ( identifier[api_key] = keyword[None] ,
identifier[api_version] = keyword[None] ,
** identifier[kwargs] ):
literal[string]
identifier[ret] ={ literal[string] : keyword[True] }
keyword[if] keyword[not] identifier[api_key] keyword[or] keyword[not] identifier[api_version] :
keyword[try] :
identifier[options] = identifier[__salt__] [ literal[string] ]( literal[string] )
keyword[if] keyword[not] identifier[api_key] :
identifier[api_key] = identifier[options] . identifier[get] ( literal[string] )
keyword[if] keyword[not] identifier[api_version] :
identifier[api_version] = identifier[options] . identifier[get] ( literal[string] )
keyword[except] ( identifier[NameError] , identifier[KeyError] , identifier[AttributeError] ):
identifier[log] . identifier[error] ( literal[string] )
identifier[ret] [ literal[string] ]= literal[string]
identifier[ret] [ literal[string] ]= keyword[False]
keyword[return] identifier[ret]
keyword[for] identifier[item] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
keyword[if] identifier[item] keyword[not] keyword[in] identifier[kwargs] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string] . identifier[format] ( identifier[item] )
keyword[return] identifier[ret]
keyword[if] keyword[not] identifier[_numeric] ( identifier[kwargs] [ literal[string] ]) keyword[or] keyword[not] literal[int] <= identifier[kwargs] [ literal[string] ]<= literal[int] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
keyword[if] keyword[not] identifier[_numeric] ( identifier[kwargs] [ literal[string] ]) keyword[or] keyword[not] - literal[int] <= identifier[kwargs] [ literal[string] ]<= literal[int] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
keyword[if] keyword[not] identifier[_numeric] ( identifier[kwargs] [ literal[string] ]) keyword[or] keyword[not] - literal[int] <= identifier[kwargs] [ literal[string] ]<= literal[int] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[base] = identifier[kwargs] [ literal[string] ]
keyword[if] identifier[base] keyword[not] keyword[in] [ literal[int] , literal[int] , literal[int] , literal[int] ]:
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= literal[string]
keyword[return] identifier[ret]
keyword[else] :
identifier[base] = literal[int]
keyword[if] literal[string] keyword[not] keyword[in] identifier[kwargs] :
identifier[replacement] = keyword[True]
keyword[else] :
identifier[replacement] = identifier[kwargs] [ literal[string] ]
keyword[if] identifier[isinstance] ( identifier[api_version] , identifier[int] ):
identifier[api_version] = identifier[six] . identifier[text_type] ( identifier[api_version] )
identifier[_function] = identifier[RANDOM_ORG_FUNCTIONS] . identifier[get] ( identifier[api_version] ). identifier[get] ( literal[string] ). identifier[get] ( literal[string] )
identifier[data] ={}
identifier[data] [ literal[string] ]= literal[int]
identifier[data] [ literal[string] ]= literal[string]
identifier[data] [ literal[string] ]= identifier[_function]
identifier[data] [ literal[string] ]={ literal[string] : identifier[api_key] ,
literal[string] : identifier[kwargs] [ literal[string] ],
literal[string] : identifier[kwargs] [ literal[string] ],
literal[string] : identifier[kwargs] [ literal[string] ],
literal[string] : identifier[replacement] ,
literal[string] : identifier[base]
}
identifier[result] = identifier[_query] ( identifier[api_version] = identifier[api_version] , identifier[data] = identifier[data] )
identifier[log] . identifier[debug] ( literal[string] , identifier[result] )
keyword[if] identifier[result] :
keyword[if] literal[string] keyword[in] identifier[result] :
identifier[random_data] = identifier[result] . identifier[get] ( literal[string] ). identifier[get] ( literal[string] )
identifier[ret] [ literal[string] ]= identifier[random_data]
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= identifier[result] [ literal[string] ]
keyword[else] :
identifier[ret] [ literal[string] ]= keyword[False]
identifier[ret] [ literal[string] ]= identifier[result] [ literal[string] ]
keyword[return] identifier[ret] | def generateIntegers(api_key=None, api_version=None, **kwargs):
"""
Generate random integers
:param api_key: The Random.org api key.
:param api_version: The Random.org api version.
:param number: The number of integers to generate
:param minimum: The lower boundary for the range from which the
random numbers will be picked. Must be within
the [-1e9,1e9] range.
:param maximum: The upper boundary for the range from which the
random numbers will be picked. Must be within
the [-1e9,1e9] range.
:param replacement: Specifies whether the random numbers should
be picked with replacement. The default (true)
will cause the numbers to be picked with replacement,
i.e., the resulting numbers may contain duplicate
values (like a series of dice rolls). If you want the
numbers picked to be unique (like raffle tickets drawn
from a container), set this value to false.
:param base: Specifies the base that will be used to display the numbers.
Values allowed are 2, 8, 10 and 16. This affects the JSON
types and formatting of the resulting data as discussed below.
:return: A list of integers.
CLI Example:
.. code-block:: bash
salt '*' random_org.generateIntegers number=5 minimum=1 maximum=6
salt '*' random_org.generateIntegers number=5 minimum=2 maximum=255 base=2
"""
ret = {'res': True}
if not api_key or not api_version:
try:
options = __salt__['config.option']('random_org')
if not api_key:
api_key = options.get('api_key') # depends on [control=['if'], data=[]]
if not api_version:
api_version = options.get('api_version') # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except (NameError, KeyError, AttributeError):
log.error('No Random.org api key found.')
ret['message'] = 'No Random.org api key or api version found.'
ret['res'] = False
return ret # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
for item in ['number', 'minimum', 'maximum']:
if item not in kwargs:
ret['res'] = False
ret['message'] = 'Rquired argument, {0} is missing.'.format(item)
return ret # depends on [control=['if'], data=['item']] # depends on [control=['for'], data=['item']]
if not _numeric(kwargs['number']) or not 1 <= kwargs['number'] <= 10000:
ret['res'] = False
ret['message'] = 'Number of integers must be between 1 and 10000'
return ret # depends on [control=['if'], data=[]]
if not _numeric(kwargs['minimum']) or not -1000000000 <= kwargs['minimum'] <= 1000000000:
ret['res'] = False
ret['message'] = 'Minimum argument must be between -1,000,000,000 and 1,000,000,000'
return ret # depends on [control=['if'], data=[]]
if not _numeric(kwargs['maximum']) or not -1000000000 <= kwargs['maximum'] <= 1000000000:
ret['res'] = False
ret['message'] = 'Maximum argument must be between -1,000,000,000 and 1,000,000,000'
return ret # depends on [control=['if'], data=[]]
if 'base' in kwargs:
base = kwargs['base']
if base not in [2, 8, 10, 16]:
ret['res'] = False
ret['message'] = 'Base must be either 2, 8, 10 or 16.'
return ret # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['kwargs']]
else:
base = 10
if 'replacement' not in kwargs:
replacement = True # depends on [control=['if'], data=[]]
else:
replacement = kwargs['replacement']
if isinstance(api_version, int):
api_version = six.text_type(api_version) # depends on [control=['if'], data=[]]
_function = RANDOM_ORG_FUNCTIONS.get(api_version).get('generateIntegers').get('method')
data = {}
data['id'] = 1911220
data['jsonrpc'] = '2.0'
data['method'] = _function
data['params'] = {'apiKey': api_key, 'n': kwargs['number'], 'min': kwargs['minimum'], 'max': kwargs['maximum'], 'replacement': replacement, 'base': base}
result = _query(api_version=api_version, data=data)
log.debug('result %s', result)
if result:
if 'random' in result:
random_data = result.get('random').get('data')
ret['data'] = random_data # depends on [control=['if'], data=['result']]
else:
ret['res'] = False
ret['message'] = result['message'] # depends on [control=['if'], data=[]]
else:
ret['res'] = False
ret['message'] = result['message']
return ret |
def record_file_factory(pid, record, filename):
"""Get file from a record.
:param pid: Not used. It keeps the function signature.
:param record: Record which contains the files.
:param filename: Name of the file to be returned.
:returns: File object or ``None`` if not found.
"""
try:
if not (hasattr(record, 'files') and record.files):
return None
except MissingModelError:
return None
try:
return record.files[filename]
except KeyError:
return None | def function[record_file_factory, parameter[pid, record, filename]]:
constant[Get file from a record.
:param pid: Not used. It keeps the function signature.
:param record: Record which contains the files.
:param filename: Name of the file to be returned.
:returns: File object or ``None`` if not found.
]
<ast.Try object at 0x7da1b19b4eb0>
<ast.Try object at 0x7da1b19b7340> | keyword[def] identifier[record_file_factory] ( identifier[pid] , identifier[record] , identifier[filename] ):
literal[string]
keyword[try] :
keyword[if] keyword[not] ( identifier[hasattr] ( identifier[record] , literal[string] ) keyword[and] identifier[record] . identifier[files] ):
keyword[return] keyword[None]
keyword[except] identifier[MissingModelError] :
keyword[return] keyword[None]
keyword[try] :
keyword[return] identifier[record] . identifier[files] [ identifier[filename] ]
keyword[except] identifier[KeyError] :
keyword[return] keyword[None] | def record_file_factory(pid, record, filename):
"""Get file from a record.
:param pid: Not used. It keeps the function signature.
:param record: Record which contains the files.
:param filename: Name of the file to be returned.
:returns: File object or ``None`` if not found.
"""
try:
if not (hasattr(record, 'files') and record.files):
return None # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except MissingModelError:
return None # depends on [control=['except'], data=[]]
try:
return record.files[filename] # depends on [control=['try'], data=[]]
except KeyError:
return None # depends on [control=['except'], data=[]] |
def get_unique_token(self):
"""
Get a unique token for usage in differentiating test runs that need to
run in parallel.
"""
if self._unique_token is None:
self._unique_token = self._random_token()
return self._unique_token | def function[get_unique_token, parameter[self]]:
constant[
Get a unique token for usage in differentiating test runs that need to
run in parallel.
]
if compare[name[self]._unique_token is constant[None]] begin[:]
name[self]._unique_token assign[=] call[name[self]._random_token, parameter[]]
return[name[self]._unique_token] | keyword[def] identifier[get_unique_token] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_unique_token] keyword[is] keyword[None] :
identifier[self] . identifier[_unique_token] = identifier[self] . identifier[_random_token] ()
keyword[return] identifier[self] . identifier[_unique_token] | def get_unique_token(self):
"""
Get a unique token for usage in differentiating test runs that need to
run in parallel.
"""
if self._unique_token is None:
self._unique_token = self._random_token() # depends on [control=['if'], data=[]]
return self._unique_token |
def decrypt_file(file_path, recipient_key, *, base64=False):
"Returns (filename, file_contents) if successful"
crypto.assert_type_and_length('recipient_key', recipient_key, crypto.UserLock)
with open(file_path, "rb") as I:
contents = I.read()
if base64:
contents = crypto.b64decode(contents)
crypted = crypto.MiniLockFile(contents)
return crypted.decrypt(recipient_key) | def function[decrypt_file, parameter[file_path, recipient_key]]:
constant[Returns (filename, file_contents) if successful]
call[name[crypto].assert_type_and_length, parameter[constant[recipient_key], name[recipient_key], name[crypto].UserLock]]
with call[name[open], parameter[name[file_path], constant[rb]]] begin[:]
variable[contents] assign[=] call[name[I].read, parameter[]]
if name[base64] begin[:]
variable[contents] assign[=] call[name[crypto].b64decode, parameter[name[contents]]]
variable[crypted] assign[=] call[name[crypto].MiniLockFile, parameter[name[contents]]]
return[call[name[crypted].decrypt, parameter[name[recipient_key]]]] | keyword[def] identifier[decrypt_file] ( identifier[file_path] , identifier[recipient_key] ,*, identifier[base64] = keyword[False] ):
literal[string]
identifier[crypto] . identifier[assert_type_and_length] ( literal[string] , identifier[recipient_key] , identifier[crypto] . identifier[UserLock] )
keyword[with] identifier[open] ( identifier[file_path] , literal[string] ) keyword[as] identifier[I] :
identifier[contents] = identifier[I] . identifier[read] ()
keyword[if] identifier[base64] :
identifier[contents] = identifier[crypto] . identifier[b64decode] ( identifier[contents] )
identifier[crypted] = identifier[crypto] . identifier[MiniLockFile] ( identifier[contents] )
keyword[return] identifier[crypted] . identifier[decrypt] ( identifier[recipient_key] ) | def decrypt_file(file_path, recipient_key, *, base64=False):
"""Returns (filename, file_contents) if successful"""
crypto.assert_type_and_length('recipient_key', recipient_key, crypto.UserLock)
with open(file_path, 'rb') as I:
contents = I.read()
if base64:
contents = crypto.b64decode(contents) # depends on [control=['if'], data=[]]
crypted = crypto.MiniLockFile(contents) # depends on [control=['with'], data=['I']]
return crypted.decrypt(recipient_key) |
def log_text(self, text, client=None, **kw):
"""API call: log a text message via a POST request
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/write
:type text: str
:param text: the log message.
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current logger.
:type kw: dict
:param kw: (optional) additional keyword arguments for the entry.
See :class:`~google.cloud.logging.entries.LogEntry`.
"""
self._do_log(client, TextEntry, text, **kw) | def function[log_text, parameter[self, text, client]]:
constant[API call: log a text message via a POST request
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/write
:type text: str
:param text: the log message.
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current logger.
:type kw: dict
:param kw: (optional) additional keyword arguments for the entry.
See :class:`~google.cloud.logging.entries.LogEntry`.
]
call[name[self]._do_log, parameter[name[client], name[TextEntry], name[text]]] | keyword[def] identifier[log_text] ( identifier[self] , identifier[text] , identifier[client] = keyword[None] ,** identifier[kw] ):
literal[string]
identifier[self] . identifier[_do_log] ( identifier[client] , identifier[TextEntry] , identifier[text] ,** identifier[kw] ) | def log_text(self, text, client=None, **kw):
"""API call: log a text message via a POST request
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/entries/write
:type text: str
:param text: the log message.
:type client: :class:`~google.cloud.logging.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current logger.
:type kw: dict
:param kw: (optional) additional keyword arguments for the entry.
See :class:`~google.cloud.logging.entries.LogEntry`.
"""
self._do_log(client, TextEntry, text, **kw) |
def run_as_root(command, *args, **kwargs):
"""
Run a remote command as the root user.
When connecting as root to the remote system, this will use Fabric's
``run`` function. In other cases, it will use ``sudo``.
"""
from burlap.common import run_or_dryrun, sudo_or_dryrun
if env.user == 'root':
func = run_or_dryrun
else:
func = sudo_or_dryrun
return func(command, *args, **kwargs) | def function[run_as_root, parameter[command]]:
constant[
Run a remote command as the root user.
When connecting as root to the remote system, this will use Fabric's
``run`` function. In other cases, it will use ``sudo``.
]
from relative_module[burlap.common] import module[run_or_dryrun], module[sudo_or_dryrun]
if compare[name[env].user equal[==] constant[root]] begin[:]
variable[func] assign[=] name[run_or_dryrun]
return[call[name[func], parameter[name[command], <ast.Starred object at 0x7da1b00e3d90>]]] | keyword[def] identifier[run_as_root] ( identifier[command] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
keyword[from] identifier[burlap] . identifier[common] keyword[import] identifier[run_or_dryrun] , identifier[sudo_or_dryrun]
keyword[if] identifier[env] . identifier[user] == literal[string] :
identifier[func] = identifier[run_or_dryrun]
keyword[else] :
identifier[func] = identifier[sudo_or_dryrun]
keyword[return] identifier[func] ( identifier[command] ,* identifier[args] ,** identifier[kwargs] ) | def run_as_root(command, *args, **kwargs):
"""
Run a remote command as the root user.
When connecting as root to the remote system, this will use Fabric's
``run`` function. In other cases, it will use ``sudo``.
"""
from burlap.common import run_or_dryrun, sudo_or_dryrun
if env.user == 'root':
func = run_or_dryrun # depends on [control=['if'], data=[]]
else:
func = sudo_or_dryrun
return func(command, *args, **kwargs) |
def _ParseOriginalFilename(self, file_object, format_version):
"""Parses the original filename.
Args:
file_object (FileIO): file-like object.
format_version (int): format version.
Returns:
str: filename or None on error.
Raises:
ParseError: if the original filename cannot be read.
"""
file_offset = file_object.tell()
if format_version == 1:
data_type_map = self._GetDataTypeMap(
'recycle_bin_metadata_utf16le_string')
else:
data_type_map = self._GetDataTypeMap(
'recycle_bin_metadata_utf16le_string_with_size')
try:
original_filename, _ = self._ReadStructureFromFileObject(
file_object, file_offset, data_type_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError(
'Unable to parse original filename with error: {0!s}'.format(
exception))
if format_version == 1:
return original_filename.rstrip('\x00')
return original_filename.string.rstrip('\x00') | def function[_ParseOriginalFilename, parameter[self, file_object, format_version]]:
constant[Parses the original filename.
Args:
file_object (FileIO): file-like object.
format_version (int): format version.
Returns:
str: filename or None on error.
Raises:
ParseError: if the original filename cannot be read.
]
variable[file_offset] assign[=] call[name[file_object].tell, parameter[]]
if compare[name[format_version] equal[==] constant[1]] begin[:]
variable[data_type_map] assign[=] call[name[self]._GetDataTypeMap, parameter[constant[recycle_bin_metadata_utf16le_string]]]
<ast.Try object at 0x7da20cabfe50>
if compare[name[format_version] equal[==] constant[1]] begin[:]
return[call[name[original_filename].rstrip, parameter[constant[ ]]]]
return[call[name[original_filename].string.rstrip, parameter[constant[ ]]]] | keyword[def] identifier[_ParseOriginalFilename] ( identifier[self] , identifier[file_object] , identifier[format_version] ):
literal[string]
identifier[file_offset] = identifier[file_object] . identifier[tell] ()
keyword[if] identifier[format_version] == literal[int] :
identifier[data_type_map] = identifier[self] . identifier[_GetDataTypeMap] (
literal[string] )
keyword[else] :
identifier[data_type_map] = identifier[self] . identifier[_GetDataTypeMap] (
literal[string] )
keyword[try] :
identifier[original_filename] , identifier[_] = identifier[self] . identifier[_ReadStructureFromFileObject] (
identifier[file_object] , identifier[file_offset] , identifier[data_type_map] )
keyword[except] ( identifier[ValueError] , identifier[errors] . identifier[ParseError] ) keyword[as] identifier[exception] :
keyword[raise] identifier[errors] . identifier[ParseError] (
literal[string] . identifier[format] (
identifier[exception] ))
keyword[if] identifier[format_version] == literal[int] :
keyword[return] identifier[original_filename] . identifier[rstrip] ( literal[string] )
keyword[return] identifier[original_filename] . identifier[string] . identifier[rstrip] ( literal[string] ) | def _ParseOriginalFilename(self, file_object, format_version):
"""Parses the original filename.
Args:
file_object (FileIO): file-like object.
format_version (int): format version.
Returns:
str: filename or None on error.
Raises:
ParseError: if the original filename cannot be read.
"""
file_offset = file_object.tell()
if format_version == 1:
data_type_map = self._GetDataTypeMap('recycle_bin_metadata_utf16le_string') # depends on [control=['if'], data=[]]
else:
data_type_map = self._GetDataTypeMap('recycle_bin_metadata_utf16le_string_with_size')
try:
(original_filename, _) = self._ReadStructureFromFileObject(file_object, file_offset, data_type_map) # depends on [control=['try'], data=[]]
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError('Unable to parse original filename with error: {0!s}'.format(exception)) # depends on [control=['except'], data=['exception']]
if format_version == 1:
return original_filename.rstrip('\x00') # depends on [control=['if'], data=[]]
return original_filename.string.rstrip('\x00') |
def options(self, section):
""" Returns a list of options for a section """
if self.config.has_section(section):
return (True, self.config.options(section))
return (False, 'Section: ' + section + ' does not exist') | def function[options, parameter[self, section]]:
constant[ Returns a list of options for a section ]
if call[name[self].config.has_section, parameter[name[section]]] begin[:]
return[tuple[[<ast.Constant object at 0x7da2041dae30>, <ast.Call object at 0x7da2041dadd0>]]]
return[tuple[[<ast.Constant object at 0x7da2041daa40>, <ast.BinOp object at 0x7da2041d8730>]]] | keyword[def] identifier[options] ( identifier[self] , identifier[section] ):
literal[string]
keyword[if] identifier[self] . identifier[config] . identifier[has_section] ( identifier[section] ):
keyword[return] ( keyword[True] , identifier[self] . identifier[config] . identifier[options] ( identifier[section] ))
keyword[return] ( keyword[False] , literal[string] + identifier[section] + literal[string] ) | def options(self, section):
""" Returns a list of options for a section """
if self.config.has_section(section):
return (True, self.config.options(section)) # depends on [control=['if'], data=[]]
return (False, 'Section: ' + section + ' does not exist') |
def yearInfo2yearDay(yearInfo):
'''calculate the days in a lunar year from the lunar year's info
>>> yearInfo2yearDay(0) # no leap month, and every month has 29 days.
348
>>> yearInfo2yearDay(1) # 1 leap month, and every month has 29 days.
377
>>> yearInfo2yearDay((2**12-1)*16) # no leap month, and every month has 30 days.
360
>>> yearInfo2yearDay((2**13-1)*16+1) # 1 leap month, and every month has 30 days.
390
>>> # 1 leap month, and every normal month has 30 days, and leap month has 29 days.
>>> yearInfo2yearDay((2**12-1)*16+1)
389
'''
yearInfo = int(yearInfo)
res = 29 * 12
leap = False
if yearInfo % 16 != 0:
leap = True
res += 29
yearInfo //= 16
for i in range(12 + leap):
if yearInfo % 2 == 1:
res += 1
yearInfo //= 2
return res | def function[yearInfo2yearDay, parameter[yearInfo]]:
constant[calculate the days in a lunar year from the lunar year's info
>>> yearInfo2yearDay(0) # no leap month, and every month has 29 days.
348
>>> yearInfo2yearDay(1) # 1 leap month, and every month has 29 days.
377
>>> yearInfo2yearDay((2**12-1)*16) # no leap month, and every month has 30 days.
360
>>> yearInfo2yearDay((2**13-1)*16+1) # 1 leap month, and every month has 30 days.
390
>>> # 1 leap month, and every normal month has 30 days, and leap month has 29 days.
>>> yearInfo2yearDay((2**12-1)*16+1)
389
]
variable[yearInfo] assign[=] call[name[int], parameter[name[yearInfo]]]
variable[res] assign[=] binary_operation[constant[29] * constant[12]]
variable[leap] assign[=] constant[False]
if compare[binary_operation[name[yearInfo] <ast.Mod object at 0x7da2590d6920> constant[16]] not_equal[!=] constant[0]] begin[:]
variable[leap] assign[=] constant[True]
<ast.AugAssign object at 0x7da2054a40a0>
<ast.AugAssign object at 0x7da2054a7940>
for taget[name[i]] in starred[call[name[range], parameter[binary_operation[constant[12] + name[leap]]]]] begin[:]
if compare[binary_operation[name[yearInfo] <ast.Mod object at 0x7da2590d6920> constant[2]] equal[==] constant[1]] begin[:]
<ast.AugAssign object at 0x7da2054a4760>
<ast.AugAssign object at 0x7da2054a4eb0>
return[name[res]] | keyword[def] identifier[yearInfo2yearDay] ( identifier[yearInfo] ):
literal[string]
identifier[yearInfo] = identifier[int] ( identifier[yearInfo] )
identifier[res] = literal[int] * literal[int]
identifier[leap] = keyword[False]
keyword[if] identifier[yearInfo] % literal[int] != literal[int] :
identifier[leap] = keyword[True]
identifier[res] += literal[int]
identifier[yearInfo] //= literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] + identifier[leap] ):
keyword[if] identifier[yearInfo] % literal[int] == literal[int] :
identifier[res] += literal[int]
identifier[yearInfo] //= literal[int]
keyword[return] identifier[res] | def yearInfo2yearDay(yearInfo):
"""calculate the days in a lunar year from the lunar year's info
>>> yearInfo2yearDay(0) # no leap month, and every month has 29 days.
348
>>> yearInfo2yearDay(1) # 1 leap month, and every month has 29 days.
377
>>> yearInfo2yearDay((2**12-1)*16) # no leap month, and every month has 30 days.
360
>>> yearInfo2yearDay((2**13-1)*16+1) # 1 leap month, and every month has 30 days.
390
>>> # 1 leap month, and every normal month has 30 days, and leap month has 29 days.
>>> yearInfo2yearDay((2**12-1)*16+1)
389
"""
yearInfo = int(yearInfo)
res = 29 * 12
leap = False
if yearInfo % 16 != 0:
leap = True
res += 29 # depends on [control=['if'], data=[]]
yearInfo //= 16
for i in range(12 + leap):
if yearInfo % 2 == 1:
res += 1 # depends on [control=['if'], data=[]]
yearInfo //= 2 # depends on [control=['for'], data=[]]
return res |
def get_function(self, name: str) -> AbiFunction or None:
"""
This interface is used to get an AbiFunction object from AbiInfo object by given function name.
:param name: the function name in abi file
:return: if succeed, an AbiFunction will constructed based on given function name
"""
for func in self.functions:
if func['name'] == name:
return AbiFunction(func['name'], func['parameters'], func.get('returntype', ''))
return None | def function[get_function, parameter[self, name]]:
constant[
This interface is used to get an AbiFunction object from AbiInfo object by given function name.
:param name: the function name in abi file
:return: if succeed, an AbiFunction will constructed based on given function name
]
for taget[name[func]] in starred[name[self].functions] begin[:]
if compare[call[name[func]][constant[name]] equal[==] name[name]] begin[:]
return[call[name[AbiFunction], parameter[call[name[func]][constant[name]], call[name[func]][constant[parameters]], call[name[func].get, parameter[constant[returntype], constant[]]]]]]
return[constant[None]] | keyword[def] identifier[get_function] ( identifier[self] , identifier[name] : identifier[str] )-> identifier[AbiFunction] keyword[or] keyword[None] :
literal[string]
keyword[for] identifier[func] keyword[in] identifier[self] . identifier[functions] :
keyword[if] identifier[func] [ literal[string] ]== identifier[name] :
keyword[return] identifier[AbiFunction] ( identifier[func] [ literal[string] ], identifier[func] [ literal[string] ], identifier[func] . identifier[get] ( literal[string] , literal[string] ))
keyword[return] keyword[None] | def get_function(self, name: str) -> AbiFunction or None:
"""
This interface is used to get an AbiFunction object from AbiInfo object by given function name.
:param name: the function name in abi file
:return: if succeed, an AbiFunction will constructed based on given function name
"""
for func in self.functions:
if func['name'] == name:
return AbiFunction(func['name'], func['parameters'], func.get('returntype', '')) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['func']]
return None |
def _find_node_by_indices(self, point):
""""Find the GSNode that is refered to by the given indices.
See GSNode::_indices()
"""
path_index, node_index = point
path = self.paths[int(path_index)]
node = path.nodes[int(node_index)]
return node | def function[_find_node_by_indices, parameter[self, point]]:
constant["Find the GSNode that is refered to by the given indices.
See GSNode::_indices()
]
<ast.Tuple object at 0x7da20c993190> assign[=] name[point]
variable[path] assign[=] call[name[self].paths][call[name[int], parameter[name[path_index]]]]
variable[node] assign[=] call[name[path].nodes][call[name[int], parameter[name[node_index]]]]
return[name[node]] | keyword[def] identifier[_find_node_by_indices] ( identifier[self] , identifier[point] ):
literal[string]
identifier[path_index] , identifier[node_index] = identifier[point]
identifier[path] = identifier[self] . identifier[paths] [ identifier[int] ( identifier[path_index] )]
identifier[node] = identifier[path] . identifier[nodes] [ identifier[int] ( identifier[node_index] )]
keyword[return] identifier[node] | def _find_node_by_indices(self, point):
""""Find the GSNode that is refered to by the given indices.
See GSNode::_indices()
"""
(path_index, node_index) = point
path = self.paths[int(path_index)]
node = path.nodes[int(node_index)]
return node |
def condor_submit(submit_file):
"""
Submit a condor job described by the given file. Parse an external id for
the submission or return None and a reason for the failure.
"""
external_id = None
try:
submit = Popen(('condor_submit', submit_file), stdout=PIPE, stderr=STDOUT)
message, _ = submit.communicate()
if submit.returncode == 0:
external_id = parse_external_id(message, type='condor')
else:
message = PROBLEM_PARSING_EXTERNAL_ID
except Exception as e:
message = str(e)
return external_id, message | def function[condor_submit, parameter[submit_file]]:
constant[
Submit a condor job described by the given file. Parse an external id for
the submission or return None and a reason for the failure.
]
variable[external_id] assign[=] constant[None]
<ast.Try object at 0x7da1b053b880>
return[tuple[[<ast.Name object at 0x7da1b05beaa0>, <ast.Name object at 0x7da1b054a620>]]] | keyword[def] identifier[condor_submit] ( identifier[submit_file] ):
literal[string]
identifier[external_id] = keyword[None]
keyword[try] :
identifier[submit] = identifier[Popen] (( literal[string] , identifier[submit_file] ), identifier[stdout] = identifier[PIPE] , identifier[stderr] = identifier[STDOUT] )
identifier[message] , identifier[_] = identifier[submit] . identifier[communicate] ()
keyword[if] identifier[submit] . identifier[returncode] == literal[int] :
identifier[external_id] = identifier[parse_external_id] ( identifier[message] , identifier[type] = literal[string] )
keyword[else] :
identifier[message] = identifier[PROBLEM_PARSING_EXTERNAL_ID]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[message] = identifier[str] ( identifier[e] )
keyword[return] identifier[external_id] , identifier[message] | def condor_submit(submit_file):
"""
Submit a condor job described by the given file. Parse an external id for
the submission or return None and a reason for the failure.
"""
external_id = None
try:
submit = Popen(('condor_submit', submit_file), stdout=PIPE, stderr=STDOUT)
(message, _) = submit.communicate()
if submit.returncode == 0:
external_id = parse_external_id(message, type='condor') # depends on [control=['if'], data=[]]
else:
message = PROBLEM_PARSING_EXTERNAL_ID # depends on [control=['try'], data=[]]
except Exception as e:
message = str(e) # depends on [control=['except'], data=['e']]
return (external_id, message) |
async def listener(self):
"""
Listener task for receiving ops from Lavalink.
"""
while self._ws.open and self._is_shutdown is False:
try:
data = json.loads(await self._ws.recv())
except websockets.ConnectionClosed:
break
raw_op = data.get("op")
try:
op = LavalinkIncomingOp(raw_op)
except ValueError:
socket_log.debug("Received unknown op: %s", data)
else:
socket_log.debug("Received known op: %s", data)
self.loop.create_task(self._handle_op(op, data))
self.ready.clear()
log.debug("Listener exited: ws %s SHUTDOWN %s.", self._ws.open, self._is_shutdown)
self.loop.create_task(self._reconnect()) | <ast.AsyncFunctionDef object at 0x7da1b1c7fe80> | keyword[async] keyword[def] identifier[listener] ( identifier[self] ):
literal[string]
keyword[while] identifier[self] . identifier[_ws] . identifier[open] keyword[and] identifier[self] . identifier[_is_shutdown] keyword[is] keyword[False] :
keyword[try] :
identifier[data] = identifier[json] . identifier[loads] ( keyword[await] identifier[self] . identifier[_ws] . identifier[recv] ())
keyword[except] identifier[websockets] . identifier[ConnectionClosed] :
keyword[break]
identifier[raw_op] = identifier[data] . identifier[get] ( literal[string] )
keyword[try] :
identifier[op] = identifier[LavalinkIncomingOp] ( identifier[raw_op] )
keyword[except] identifier[ValueError] :
identifier[socket_log] . identifier[debug] ( literal[string] , identifier[data] )
keyword[else] :
identifier[socket_log] . identifier[debug] ( literal[string] , identifier[data] )
identifier[self] . identifier[loop] . identifier[create_task] ( identifier[self] . identifier[_handle_op] ( identifier[op] , identifier[data] ))
identifier[self] . identifier[ready] . identifier[clear] ()
identifier[log] . identifier[debug] ( literal[string] , identifier[self] . identifier[_ws] . identifier[open] , identifier[self] . identifier[_is_shutdown] )
identifier[self] . identifier[loop] . identifier[create_task] ( identifier[self] . identifier[_reconnect] ()) | async def listener(self):
"""
Listener task for receiving ops from Lavalink.
"""
while self._ws.open and self._is_shutdown is False:
try:
data = json.loads(await self._ws.recv()) # depends on [control=['try'], data=[]]
except websockets.ConnectionClosed:
break # depends on [control=['except'], data=[]]
raw_op = data.get('op')
try:
op = LavalinkIncomingOp(raw_op) # depends on [control=['try'], data=[]]
except ValueError:
socket_log.debug('Received unknown op: %s', data) # depends on [control=['except'], data=[]]
else:
socket_log.debug('Received known op: %s', data)
self.loop.create_task(self._handle_op(op, data)) # depends on [control=['while'], data=[]]
self.ready.clear()
log.debug('Listener exited: ws %s SHUTDOWN %s.', self._ws.open, self._is_shutdown)
self.loop.create_task(self._reconnect()) |
def clientConnectionFailed(self, err, address: Address):
"""
Called when we fail to connect to an endpoint
Args:
err: Twisted Failure instance
address: the address we failed to connect to
"""
if type(err.value) == error.TimeoutError:
logger.debug(f"Failed connecting to {address} connection timed out")
elif type(err.value) == error.ConnectError:
ce = err.value
if len(ce.args) > 0:
logger.debug(f"Failed connecting to {address} {ce.args[0].value}")
else:
logger.debug(f"Failed connecting to {address}")
else:
logger.debug(f"Failed connecting to {address} {err.value}")
self.peers_connecting -= 1
self.RemoveKnownAddress(address)
self.RemoveFromQueue(address)
# if we failed to connect to new addresses, we should always add them to the DEAD_ADDRS list
self.AddDeadAddress(address)
# for testing
return err.type | def function[clientConnectionFailed, parameter[self, err, address]]:
constant[
Called when we fail to connect to an endpoint
Args:
err: Twisted Failure instance
address: the address we failed to connect to
]
if compare[call[name[type], parameter[name[err].value]] equal[==] name[error].TimeoutError] begin[:]
call[name[logger].debug, parameter[<ast.JoinedStr object at 0x7da18bc700d0>]]
<ast.AugAssign object at 0x7da2041dadd0>
call[name[self].RemoveKnownAddress, parameter[name[address]]]
call[name[self].RemoveFromQueue, parameter[name[address]]]
call[name[self].AddDeadAddress, parameter[name[address]]]
return[name[err].type] | keyword[def] identifier[clientConnectionFailed] ( identifier[self] , identifier[err] , identifier[address] : identifier[Address] ):
literal[string]
keyword[if] identifier[type] ( identifier[err] . identifier[value] )== identifier[error] . identifier[TimeoutError] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[elif] identifier[type] ( identifier[err] . identifier[value] )== identifier[error] . identifier[ConnectError] :
identifier[ce] = identifier[err] . identifier[value]
keyword[if] identifier[len] ( identifier[ce] . identifier[args] )> literal[int] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[else] :
identifier[logger] . identifier[debug] ( literal[string] )
keyword[else] :
identifier[logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[peers_connecting] -= literal[int]
identifier[self] . identifier[RemoveKnownAddress] ( identifier[address] )
identifier[self] . identifier[RemoveFromQueue] ( identifier[address] )
identifier[self] . identifier[AddDeadAddress] ( identifier[address] )
keyword[return] identifier[err] . identifier[type] | def clientConnectionFailed(self, err, address: Address):
"""
Called when we fail to connect to an endpoint
Args:
err: Twisted Failure instance
address: the address we failed to connect to
"""
if type(err.value) == error.TimeoutError:
logger.debug(f'Failed connecting to {address} connection timed out') # depends on [control=['if'], data=[]]
elif type(err.value) == error.ConnectError:
ce = err.value
if len(ce.args) > 0:
logger.debug(f'Failed connecting to {address} {ce.args[0].value}') # depends on [control=['if'], data=[]]
else:
logger.debug(f'Failed connecting to {address}') # depends on [control=['if'], data=[]]
else:
logger.debug(f'Failed connecting to {address} {err.value}')
self.peers_connecting -= 1
self.RemoveKnownAddress(address)
self.RemoveFromQueue(address)
# if we failed to connect to new addresses, we should always add them to the DEAD_ADDRS list
self.AddDeadAddress(address)
# for testing
return err.type |
def add_edge_attributes(self, edge, attrs):
"""
Append a sequence of attributes to the given edge
@type edge: edge
@param edge: One edge.
@type attrs: tuple
@param attrs: Node attributes specified as a sequence of tuples in the form (attribute, value).
"""
for attr in attrs:
self.add_edge_attribute(edge, attr) | def function[add_edge_attributes, parameter[self, edge, attrs]]:
constant[
Append a sequence of attributes to the given edge
@type edge: edge
@param edge: One edge.
@type attrs: tuple
@param attrs: Node attributes specified as a sequence of tuples in the form (attribute, value).
]
for taget[name[attr]] in starred[name[attrs]] begin[:]
call[name[self].add_edge_attribute, parameter[name[edge], name[attr]]] | keyword[def] identifier[add_edge_attributes] ( identifier[self] , identifier[edge] , identifier[attrs] ):
literal[string]
keyword[for] identifier[attr] keyword[in] identifier[attrs] :
identifier[self] . identifier[add_edge_attribute] ( identifier[edge] , identifier[attr] ) | def add_edge_attributes(self, edge, attrs):
"""
Append a sequence of attributes to the given edge
@type edge: edge
@param edge: One edge.
@type attrs: tuple
@param attrs: Node attributes specified as a sequence of tuples in the form (attribute, value).
"""
for attr in attrs:
self.add_edge_attribute(edge, attr) # depends on [control=['for'], data=['attr']] |
def evaluate_objective(self):
"""
Evaluates the objective
"""
self.Y_new, cost_new = self.objective.evaluate(self.suggested_sample)
self.cost.update_cost_model(self.suggested_sample, cost_new)
self.Y = np.vstack((self.Y,self.Y_new)) | def function[evaluate_objective, parameter[self]]:
constant[
Evaluates the objective
]
<ast.Tuple object at 0x7da1b2345120> assign[=] call[name[self].objective.evaluate, parameter[name[self].suggested_sample]]
call[name[self].cost.update_cost_model, parameter[name[self].suggested_sample, name[cost_new]]]
name[self].Y assign[=] call[name[np].vstack, parameter[tuple[[<ast.Attribute object at 0x7da1b2345c30>, <ast.Attribute object at 0x7da1b2346c50>]]]] | keyword[def] identifier[evaluate_objective] ( identifier[self] ):
literal[string]
identifier[self] . identifier[Y_new] , identifier[cost_new] = identifier[self] . identifier[objective] . identifier[evaluate] ( identifier[self] . identifier[suggested_sample] )
identifier[self] . identifier[cost] . identifier[update_cost_model] ( identifier[self] . identifier[suggested_sample] , identifier[cost_new] )
identifier[self] . identifier[Y] = identifier[np] . identifier[vstack] (( identifier[self] . identifier[Y] , identifier[self] . identifier[Y_new] )) | def evaluate_objective(self):
"""
Evaluates the objective
"""
(self.Y_new, cost_new) = self.objective.evaluate(self.suggested_sample)
self.cost.update_cost_model(self.suggested_sample, cost_new)
self.Y = np.vstack((self.Y, self.Y_new)) |
def generate_documentation(schema):
"""
Generates reStructuredText documentation from a Confirm file.
:param schema: Dictionary representing the Confirm schema.
:returns: String representing the reStructuredText documentation.
"""
documentation_title = "Configuration documentation"
documentation = documentation_title + "\n"
documentation += "=" * len(documentation_title) + '\n'
for section_name in schema:
section_created = False
for option_name in schema[section_name]:
option = schema[section_name][option_name]
if not section_created:
documentation += '\n'
documentation += section_name + '\n'
documentation += '-' * len(section_name) + '\n'
section_created = True
documentation += '\n'
documentation += option_name + '\n'
documentation += '~' * len(option_name) + '\n'
if option.get('required'):
documentation += "** This option is required! **\n"
if option.get('type'):
documentation += '*Type : %s.*\n' % option.get('type')
if option.get('description'):
documentation += option.get('description') + '\n'
if option.get('default'):
documentation += 'The default value is %s.\n' % option.get('default')
if option.get('deprecated'):
documentation += "** This option is deprecated! **\n"
return documentation | def function[generate_documentation, parameter[schema]]:
constant[
Generates reStructuredText documentation from a Confirm file.
:param schema: Dictionary representing the Confirm schema.
:returns: String representing the reStructuredText documentation.
]
variable[documentation_title] assign[=] constant[Configuration documentation]
variable[documentation] assign[=] binary_operation[name[documentation_title] + constant[
]]
<ast.AugAssign object at 0x7da1b1f29180>
for taget[name[section_name]] in starred[name[schema]] begin[:]
variable[section_created] assign[=] constant[False]
for taget[name[option_name]] in starred[call[name[schema]][name[section_name]]] begin[:]
variable[option] assign[=] call[call[name[schema]][name[section_name]]][name[option_name]]
if <ast.UnaryOp object at 0x7da1b20d5990> begin[:]
<ast.AugAssign object at 0x7da1b20d6890>
<ast.AugAssign object at 0x7da1b20d5300>
<ast.AugAssign object at 0x7da1b20d5690>
variable[section_created] assign[=] constant[True]
<ast.AugAssign object at 0x7da1b20d4550>
<ast.AugAssign object at 0x7da1b20d4ee0>
<ast.AugAssign object at 0x7da1b20d6500>
if call[name[option].get, parameter[constant[required]]] begin[:]
<ast.AugAssign object at 0x7da18fe92260>
if call[name[option].get, parameter[constant[type]]] begin[:]
<ast.AugAssign object at 0x7da18fe901c0>
if call[name[option].get, parameter[constant[description]]] begin[:]
<ast.AugAssign object at 0x7da18fe906d0>
if call[name[option].get, parameter[constant[default]]] begin[:]
<ast.AugAssign object at 0x7da1b1f285e0>
if call[name[option].get, parameter[constant[deprecated]]] begin[:]
<ast.AugAssign object at 0x7da1b1f288b0>
return[name[documentation]] | keyword[def] identifier[generate_documentation] ( identifier[schema] ):
literal[string]
identifier[documentation_title] = literal[string]
identifier[documentation] = identifier[documentation_title] + literal[string]
identifier[documentation] += literal[string] * identifier[len] ( identifier[documentation_title] )+ literal[string]
keyword[for] identifier[section_name] keyword[in] identifier[schema] :
identifier[section_created] = keyword[False]
keyword[for] identifier[option_name] keyword[in] identifier[schema] [ identifier[section_name] ]:
identifier[option] = identifier[schema] [ identifier[section_name] ][ identifier[option_name] ]
keyword[if] keyword[not] identifier[section_created] :
identifier[documentation] += literal[string]
identifier[documentation] += identifier[section_name] + literal[string]
identifier[documentation] += literal[string] * identifier[len] ( identifier[section_name] )+ literal[string]
identifier[section_created] = keyword[True]
identifier[documentation] += literal[string]
identifier[documentation] += identifier[option_name] + literal[string]
identifier[documentation] += literal[string] * identifier[len] ( identifier[option_name] )+ literal[string]
keyword[if] identifier[option] . identifier[get] ( literal[string] ):
identifier[documentation] += literal[string]
keyword[if] identifier[option] . identifier[get] ( literal[string] ):
identifier[documentation] += literal[string] % identifier[option] . identifier[get] ( literal[string] )
keyword[if] identifier[option] . identifier[get] ( literal[string] ):
identifier[documentation] += identifier[option] . identifier[get] ( literal[string] )+ literal[string]
keyword[if] identifier[option] . identifier[get] ( literal[string] ):
identifier[documentation] += literal[string] % identifier[option] . identifier[get] ( literal[string] )
keyword[if] identifier[option] . identifier[get] ( literal[string] ):
identifier[documentation] += literal[string]
keyword[return] identifier[documentation] | def generate_documentation(schema):
"""
Generates reStructuredText documentation from a Confirm file.
:param schema: Dictionary representing the Confirm schema.
:returns: String representing the reStructuredText documentation.
"""
documentation_title = 'Configuration documentation'
documentation = documentation_title + '\n'
documentation += '=' * len(documentation_title) + '\n'
for section_name in schema:
section_created = False
for option_name in schema[section_name]:
option = schema[section_name][option_name]
if not section_created:
documentation += '\n'
documentation += section_name + '\n'
documentation += '-' * len(section_name) + '\n'
section_created = True # depends on [control=['if'], data=[]]
documentation += '\n'
documentation += option_name + '\n'
documentation += '~' * len(option_name) + '\n'
if option.get('required'):
documentation += '** This option is required! **\n' # depends on [control=['if'], data=[]]
if option.get('type'):
documentation += '*Type : %s.*\n' % option.get('type') # depends on [control=['if'], data=[]]
if option.get('description'):
documentation += option.get('description') + '\n' # depends on [control=['if'], data=[]]
if option.get('default'):
documentation += 'The default value is %s.\n' % option.get('default') # depends on [control=['if'], data=[]]
if option.get('deprecated'):
documentation += '** This option is deprecated! **\n' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['option_name']] # depends on [control=['for'], data=['section_name']]
return documentation |
def insert_characters(self, count=None):
"""Insert the indicated # of blank characters at the cursor
position. The cursor does not move and remains at the beginning
of the inserted blank characters. Data on the line is shifted
forward.
:param int count: number of characters to insert.
"""
self.dirty.add(self.cursor.y)
count = count or 1
line = self.buffer[self.cursor.y]
for x in range(self.columns, self.cursor.x - 1, -1):
if x + count <= self.columns:
line[x + count] = line[x]
line.pop(x, None) | def function[insert_characters, parameter[self, count]]:
constant[Insert the indicated # of blank characters at the cursor
position. The cursor does not move and remains at the beginning
of the inserted blank characters. Data on the line is shifted
forward.
:param int count: number of characters to insert.
]
call[name[self].dirty.add, parameter[name[self].cursor.y]]
variable[count] assign[=] <ast.BoolOp object at 0x7da1b07bb6a0>
variable[line] assign[=] call[name[self].buffer][name[self].cursor.y]
for taget[name[x]] in starred[call[name[range], parameter[name[self].columns, binary_operation[name[self].cursor.x - constant[1]], <ast.UnaryOp object at 0x7da1b07aec20>]]] begin[:]
if compare[binary_operation[name[x] + name[count]] less_or_equal[<=] name[self].columns] begin[:]
call[name[line]][binary_operation[name[x] + name[count]]] assign[=] call[name[line]][name[x]]
call[name[line].pop, parameter[name[x], constant[None]]] | keyword[def] identifier[insert_characters] ( identifier[self] , identifier[count] = keyword[None] ):
literal[string]
identifier[self] . identifier[dirty] . identifier[add] ( identifier[self] . identifier[cursor] . identifier[y] )
identifier[count] = identifier[count] keyword[or] literal[int]
identifier[line] = identifier[self] . identifier[buffer] [ identifier[self] . identifier[cursor] . identifier[y] ]
keyword[for] identifier[x] keyword[in] identifier[range] ( identifier[self] . identifier[columns] , identifier[self] . identifier[cursor] . identifier[x] - literal[int] ,- literal[int] ):
keyword[if] identifier[x] + identifier[count] <= identifier[self] . identifier[columns] :
identifier[line] [ identifier[x] + identifier[count] ]= identifier[line] [ identifier[x] ]
identifier[line] . identifier[pop] ( identifier[x] , keyword[None] ) | def insert_characters(self, count=None):
"""Insert the indicated # of blank characters at the cursor
position. The cursor does not move and remains at the beginning
of the inserted blank characters. Data on the line is shifted
forward.
:param int count: number of characters to insert.
"""
self.dirty.add(self.cursor.y)
count = count or 1
line = self.buffer[self.cursor.y]
for x in range(self.columns, self.cursor.x - 1, -1):
if x + count <= self.columns:
line[x + count] = line[x] # depends on [control=['if'], data=[]]
line.pop(x, None) # depends on [control=['for'], data=['x']] |
def _format_envvars(ctx):
"""Format all envvars for a `click.Command`."""
params = [x for x in ctx.command.params if getattr(x, 'envvar')]
for param in params:
yield '.. _{command_name}-{param_name}-{envvar}:'.format(
command_name=ctx.command_path.replace(' ', '-'),
param_name=param.name,
envvar=param.envvar,
)
yield ''
for line in _format_envvar(param):
yield line
yield '' | def function[_format_envvars, parameter[ctx]]:
constant[Format all envvars for a `click.Command`.]
variable[params] assign[=] <ast.ListComp object at 0x7da1b0d0ebc0>
for taget[name[param]] in starred[name[params]] begin[:]
<ast.Yield object at 0x7da20e955c30>
<ast.Yield object at 0x7da1b0e16e30>
for taget[name[line]] in starred[call[name[_format_envvar], parameter[name[param]]]] begin[:]
<ast.Yield object at 0x7da1b0e15810>
<ast.Yield object at 0x7da1b0e16a10> | keyword[def] identifier[_format_envvars] ( identifier[ctx] ):
literal[string]
identifier[params] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[ctx] . identifier[command] . identifier[params] keyword[if] identifier[getattr] ( identifier[x] , literal[string] )]
keyword[for] identifier[param] keyword[in] identifier[params] :
keyword[yield] literal[string] . identifier[format] (
identifier[command_name] = identifier[ctx] . identifier[command_path] . identifier[replace] ( literal[string] , literal[string] ),
identifier[param_name] = identifier[param] . identifier[name] ,
identifier[envvar] = identifier[param] . identifier[envvar] ,
)
keyword[yield] literal[string]
keyword[for] identifier[line] keyword[in] identifier[_format_envvar] ( identifier[param] ):
keyword[yield] identifier[line]
keyword[yield] literal[string] | def _format_envvars(ctx):
"""Format all envvars for a `click.Command`."""
params = [x for x in ctx.command.params if getattr(x, 'envvar')]
for param in params:
yield '.. _{command_name}-{param_name}-{envvar}:'.format(command_name=ctx.command_path.replace(' ', '-'), param_name=param.name, envvar=param.envvar)
yield ''
for line in _format_envvar(param):
yield line # depends on [control=['for'], data=['line']]
yield '' # depends on [control=['for'], data=['param']] |
def make_serviceitem_servicedll(servicedll, condition='contains', negate=False, preserve_case=False):
"""
Create a node for ServiceItem/serviceDLL
:return: A IndicatorItem represented as an Element node
"""
document = 'ServiceItem'
search = 'ServiceItem/serviceDLL'
content_type = 'string'
content = servicedll
ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content,
negate=negate, preserve_case=preserve_case)
return ii_node | def function[make_serviceitem_servicedll, parameter[servicedll, condition, negate, preserve_case]]:
constant[
Create a node for ServiceItem/serviceDLL
:return: A IndicatorItem represented as an Element node
]
variable[document] assign[=] constant[ServiceItem]
variable[search] assign[=] constant[ServiceItem/serviceDLL]
variable[content_type] assign[=] constant[string]
variable[content] assign[=] name[servicedll]
variable[ii_node] assign[=] call[name[ioc_api].make_indicatoritem_node, parameter[name[condition], name[document], name[search], name[content_type], name[content]]]
return[name[ii_node]] | keyword[def] identifier[make_serviceitem_servicedll] ( identifier[servicedll] , identifier[condition] = literal[string] , identifier[negate] = keyword[False] , identifier[preserve_case] = keyword[False] ):
literal[string]
identifier[document] = literal[string]
identifier[search] = literal[string]
identifier[content_type] = literal[string]
identifier[content] = identifier[servicedll]
identifier[ii_node] = identifier[ioc_api] . identifier[make_indicatoritem_node] ( identifier[condition] , identifier[document] , identifier[search] , identifier[content_type] , identifier[content] ,
identifier[negate] = identifier[negate] , identifier[preserve_case] = identifier[preserve_case] )
keyword[return] identifier[ii_node] | def make_serviceitem_servicedll(servicedll, condition='contains', negate=False, preserve_case=False):
"""
Create a node for ServiceItem/serviceDLL
:return: A IndicatorItem represented as an Element node
"""
document = 'ServiceItem'
search = 'ServiceItem/serviceDLL'
content_type = 'string'
content = servicedll
ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content, negate=negate, preserve_case=preserve_case)
return ii_node |
def make_color_legend_rects(colors, labels=None):
"""
Make list of rectangles and labels for making legends.
Parameters
----------
colors : pandas.Series or list
Pandas series whose values are colors and index is labels.
Alternatively, you can provide a list with colors and provide the labels
as a list.
labels : list
If colors is a list, this should be the list of corresponding labels.
Returns
-------
out : pd.Series
Pandas series whose values are matplotlib rectangles and whose index are
the legend labels for those rectangles. You can add each of these
rectangles to your axis using ax.add_patch(r) for r in out then create a
legend whose labels are out.values and whose labels are
legend_rects.index:
for r in legend_rects:
ax.add_patch(r)
lgd = ax.legend(legend_rects.values, labels=legend_rects.index)
"""
from matplotlib.pyplot import Rectangle
if labels:
d = dict(zip(labels, colors))
se = pd.Series(d)
else:
se = colors
rects = []
for i in se.index:
r = Rectangle((0, 0), 0, 0, fc=se[i])
rects.append(r)
out = pd.Series(rects, index=se.index)
return out | def function[make_color_legend_rects, parameter[colors, labels]]:
constant[
Make list of rectangles and labels for making legends.
Parameters
----------
colors : pandas.Series or list
Pandas series whose values are colors and index is labels.
Alternatively, you can provide a list with colors and provide the labels
as a list.
labels : list
If colors is a list, this should be the list of corresponding labels.
Returns
-------
out : pd.Series
Pandas series whose values are matplotlib rectangles and whose index are
the legend labels for those rectangles. You can add each of these
rectangles to your axis using ax.add_patch(r) for r in out then create a
legend whose labels are out.values and whose labels are
legend_rects.index:
for r in legend_rects:
ax.add_patch(r)
lgd = ax.legend(legend_rects.values, labels=legend_rects.index)
]
from relative_module[matplotlib.pyplot] import module[Rectangle]
if name[labels] begin[:]
variable[d] assign[=] call[name[dict], parameter[call[name[zip], parameter[name[labels], name[colors]]]]]
variable[se] assign[=] call[name[pd].Series, parameter[name[d]]]
variable[rects] assign[=] list[[]]
for taget[name[i]] in starred[name[se].index] begin[:]
variable[r] assign[=] call[name[Rectangle], parameter[tuple[[<ast.Constant object at 0x7da1b1415b40>, <ast.Constant object at 0x7da1b14164a0>]], constant[0], constant[0]]]
call[name[rects].append, parameter[name[r]]]
variable[out] assign[=] call[name[pd].Series, parameter[name[rects]]]
return[name[out]] | keyword[def] identifier[make_color_legend_rects] ( identifier[colors] , identifier[labels] = keyword[None] ):
literal[string]
keyword[from] identifier[matplotlib] . identifier[pyplot] keyword[import] identifier[Rectangle]
keyword[if] identifier[labels] :
identifier[d] = identifier[dict] ( identifier[zip] ( identifier[labels] , identifier[colors] ))
identifier[se] = identifier[pd] . identifier[Series] ( identifier[d] )
keyword[else] :
identifier[se] = identifier[colors]
identifier[rects] =[]
keyword[for] identifier[i] keyword[in] identifier[se] . identifier[index] :
identifier[r] = identifier[Rectangle] (( literal[int] , literal[int] ), literal[int] , literal[int] , identifier[fc] = identifier[se] [ identifier[i] ])
identifier[rects] . identifier[append] ( identifier[r] )
identifier[out] = identifier[pd] . identifier[Series] ( identifier[rects] , identifier[index] = identifier[se] . identifier[index] )
keyword[return] identifier[out] | def make_color_legend_rects(colors, labels=None):
"""
Make list of rectangles and labels for making legends.
Parameters
----------
colors : pandas.Series or list
Pandas series whose values are colors and index is labels.
Alternatively, you can provide a list with colors and provide the labels
as a list.
labels : list
If colors is a list, this should be the list of corresponding labels.
Returns
-------
out : pd.Series
Pandas series whose values are matplotlib rectangles and whose index are
the legend labels for those rectangles. You can add each of these
rectangles to your axis using ax.add_patch(r) for r in out then create a
legend whose labels are out.values and whose labels are
legend_rects.index:
for r in legend_rects:
ax.add_patch(r)
lgd = ax.legend(legend_rects.values, labels=legend_rects.index)
"""
from matplotlib.pyplot import Rectangle
if labels:
d = dict(zip(labels, colors))
se = pd.Series(d) # depends on [control=['if'], data=[]]
else:
se = colors
rects = []
for i in se.index:
r = Rectangle((0, 0), 0, 0, fc=se[i])
rects.append(r) # depends on [control=['for'], data=['i']]
out = pd.Series(rects, index=se.index)
return out |
def main():
"""Main entry point for CLI commands."""
options = docopt(__doc__, version=__version__)
if options['segment']:
segment(
options['<file>'],
options['--output'],
options['--target-duration'],
options['--mpegts'],
) | def function[main, parameter[]]:
constant[Main entry point for CLI commands.]
variable[options] assign[=] call[name[docopt], parameter[name[__doc__]]]
if call[name[options]][constant[segment]] begin[:]
call[name[segment], parameter[call[name[options]][constant[<file>]], call[name[options]][constant[--output]], call[name[options]][constant[--target-duration]], call[name[options]][constant[--mpegts]]]] | keyword[def] identifier[main] ():
literal[string]
identifier[options] = identifier[docopt] ( identifier[__doc__] , identifier[version] = identifier[__version__] )
keyword[if] identifier[options] [ literal[string] ]:
identifier[segment] (
identifier[options] [ literal[string] ],
identifier[options] [ literal[string] ],
identifier[options] [ literal[string] ],
identifier[options] [ literal[string] ],
) | def main():
"""Main entry point for CLI commands."""
options = docopt(__doc__, version=__version__)
if options['segment']:
segment(options['<file>'], options['--output'], options['--target-duration'], options['--mpegts']) # depends on [control=['if'], data=[]] |
def any_slug_field(field, **kwargs):
"""
Return random value for SlugField
>>> result = any_field(models.SlugField())
>>> type(result)
<type 'str'>
>>> from django.core.validators import slug_re
>>> re.match(slug_re, result) is not None
True
"""
letters = ascii_letters + digits + '_-'
return xunit.any_string(letters = letters, max_length = field.max_length) | def function[any_slug_field, parameter[field]]:
constant[
Return random value for SlugField
>>> result = any_field(models.SlugField())
>>> type(result)
<type 'str'>
>>> from django.core.validators import slug_re
>>> re.match(slug_re, result) is not None
True
]
variable[letters] assign[=] binary_operation[binary_operation[name[ascii_letters] + name[digits]] + constant[_-]]
return[call[name[xunit].any_string, parameter[]]] | keyword[def] identifier[any_slug_field] ( identifier[field] ,** identifier[kwargs] ):
literal[string]
identifier[letters] = identifier[ascii_letters] + identifier[digits] + literal[string]
keyword[return] identifier[xunit] . identifier[any_string] ( identifier[letters] = identifier[letters] , identifier[max_length] = identifier[field] . identifier[max_length] ) | def any_slug_field(field, **kwargs):
"""
Return random value for SlugField
>>> result = any_field(models.SlugField())
>>> type(result)
<type 'str'>
>>> from django.core.validators import slug_re
>>> re.match(slug_re, result) is not None
True
"""
letters = ascii_letters + digits + '_-'
return xunit.any_string(letters=letters, max_length=field.max_length) |
def items(self, founditems=[]): #pylint: disable=dangerous-default-value
"""Returns a depth-first flat list of *all* items below this element (not limited to AbstractElement)"""
l = []
for e in self.data:
if e not in founditems: #prevent going in recursive loops
l.append(e)
if isinstance(e, AbstractElement):
l += e.items(l)
return l | def function[items, parameter[self, founditems]]:
constant[Returns a depth-first flat list of *all* items below this element (not limited to AbstractElement)]
variable[l] assign[=] list[[]]
for taget[name[e]] in starred[name[self].data] begin[:]
if compare[name[e] <ast.NotIn object at 0x7da2590d7190> name[founditems]] begin[:]
call[name[l].append, parameter[name[e]]]
if call[name[isinstance], parameter[name[e], name[AbstractElement]]] begin[:]
<ast.AugAssign object at 0x7da204567190>
return[name[l]] | keyword[def] identifier[items] ( identifier[self] , identifier[founditems] =[]):
literal[string]
identifier[l] =[]
keyword[for] identifier[e] keyword[in] identifier[self] . identifier[data] :
keyword[if] identifier[e] keyword[not] keyword[in] identifier[founditems] :
identifier[l] . identifier[append] ( identifier[e] )
keyword[if] identifier[isinstance] ( identifier[e] , identifier[AbstractElement] ):
identifier[l] += identifier[e] . identifier[items] ( identifier[l] )
keyword[return] identifier[l] | def items(self, founditems=[]): #pylint: disable=dangerous-default-value
'Returns a depth-first flat list of *all* items below this element (not limited to AbstractElement)'
l = []
for e in self.data:
if e not in founditems: #prevent going in recursive loops
l.append(e)
if isinstance(e, AbstractElement):
l += e.items(l) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['e']] # depends on [control=['for'], data=['e']]
return l |
def pack_bot_file_id(file):
"""
Inverse operation for `resolve_bot_file_id`.
The only parameters this method will accept are :tl:`Document` and
:tl:`Photo`, and it will return a variable-length ``file_id`` string.
If an invalid parameter is given, it will ``return None``.
"""
if isinstance(file, types.MessageMediaDocument):
file = file.document
elif isinstance(file, types.MessageMediaPhoto):
file = file.photo
if isinstance(file, types.Document):
file_type = 5
for attribute in file.attributes:
if isinstance(attribute, types.DocumentAttributeAudio):
file_type = 3 if attribute.voice else 9
elif isinstance(attribute, types.DocumentAttributeVideo):
file_type = 13 if attribute.round_message else 4
elif isinstance(attribute, types.DocumentAttributeSticker):
file_type = 8
elif isinstance(attribute, types.DocumentAttributeAnimated):
file_type = 10
else:
continue
break
return _encode_telegram_base64(_rle_encode(struct.pack(
'<iiqqb', file_type, file.dc_id, file.id, file.access_hash, 2)))
elif isinstance(file, types.Photo):
size = next((x for x in reversed(file.sizes) if isinstance(
x, (types.PhotoSize, types.PhotoCachedSize))), None)
if not size:
return None
size = size.location
return _encode_telegram_base64(_rle_encode(struct.pack(
'<iiqqqqib', 2, file.dc_id, file.id, file.access_hash,
size.volume_id, 0, size.local_id, 2 # 0 = old `secret`
)))
else:
return None | def function[pack_bot_file_id, parameter[file]]:
constant[
Inverse operation for `resolve_bot_file_id`.
The only parameters this method will accept are :tl:`Document` and
:tl:`Photo`, and it will return a variable-length ``file_id`` string.
If an invalid parameter is given, it will ``return None``.
]
if call[name[isinstance], parameter[name[file], name[types].MessageMediaDocument]] begin[:]
variable[file] assign[=] name[file].document
if call[name[isinstance], parameter[name[file], name[types].Document]] begin[:]
variable[file_type] assign[=] constant[5]
for taget[name[attribute]] in starred[name[file].attributes] begin[:]
if call[name[isinstance], parameter[name[attribute], name[types].DocumentAttributeAudio]] begin[:]
variable[file_type] assign[=] <ast.IfExp object at 0x7da207f02950>
break
return[call[name[_encode_telegram_base64], parameter[call[name[_rle_encode], parameter[call[name[struct].pack, parameter[constant[<iiqqb], name[file_type], name[file].dc_id, name[file].id, name[file].access_hash, constant[2]]]]]]]] | keyword[def] identifier[pack_bot_file_id] ( identifier[file] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[file] , identifier[types] . identifier[MessageMediaDocument] ):
identifier[file] = identifier[file] . identifier[document]
keyword[elif] identifier[isinstance] ( identifier[file] , identifier[types] . identifier[MessageMediaPhoto] ):
identifier[file] = identifier[file] . identifier[photo]
keyword[if] identifier[isinstance] ( identifier[file] , identifier[types] . identifier[Document] ):
identifier[file_type] = literal[int]
keyword[for] identifier[attribute] keyword[in] identifier[file] . identifier[attributes] :
keyword[if] identifier[isinstance] ( identifier[attribute] , identifier[types] . identifier[DocumentAttributeAudio] ):
identifier[file_type] = literal[int] keyword[if] identifier[attribute] . identifier[voice] keyword[else] literal[int]
keyword[elif] identifier[isinstance] ( identifier[attribute] , identifier[types] . identifier[DocumentAttributeVideo] ):
identifier[file_type] = literal[int] keyword[if] identifier[attribute] . identifier[round_message] keyword[else] literal[int]
keyword[elif] identifier[isinstance] ( identifier[attribute] , identifier[types] . identifier[DocumentAttributeSticker] ):
identifier[file_type] = literal[int]
keyword[elif] identifier[isinstance] ( identifier[attribute] , identifier[types] . identifier[DocumentAttributeAnimated] ):
identifier[file_type] = literal[int]
keyword[else] :
keyword[continue]
keyword[break]
keyword[return] identifier[_encode_telegram_base64] ( identifier[_rle_encode] ( identifier[struct] . identifier[pack] (
literal[string] , identifier[file_type] , identifier[file] . identifier[dc_id] , identifier[file] . identifier[id] , identifier[file] . identifier[access_hash] , literal[int] )))
keyword[elif] identifier[isinstance] ( identifier[file] , identifier[types] . identifier[Photo] ):
identifier[size] = identifier[next] (( identifier[x] keyword[for] identifier[x] keyword[in] identifier[reversed] ( identifier[file] . identifier[sizes] ) keyword[if] identifier[isinstance] (
identifier[x] ,( identifier[types] . identifier[PhotoSize] , identifier[types] . identifier[PhotoCachedSize] ))), keyword[None] )
keyword[if] keyword[not] identifier[size] :
keyword[return] keyword[None]
identifier[size] = identifier[size] . identifier[location]
keyword[return] identifier[_encode_telegram_base64] ( identifier[_rle_encode] ( identifier[struct] . identifier[pack] (
literal[string] , literal[int] , identifier[file] . identifier[dc_id] , identifier[file] . identifier[id] , identifier[file] . identifier[access_hash] ,
identifier[size] . identifier[volume_id] , literal[int] , identifier[size] . identifier[local_id] , literal[int]
)))
keyword[else] :
keyword[return] keyword[None] | def pack_bot_file_id(file):
"""
Inverse operation for `resolve_bot_file_id`.
The only parameters this method will accept are :tl:`Document` and
:tl:`Photo`, and it will return a variable-length ``file_id`` string.
If an invalid parameter is given, it will ``return None``.
"""
if isinstance(file, types.MessageMediaDocument):
file = file.document # depends on [control=['if'], data=[]]
elif isinstance(file, types.MessageMediaPhoto):
file = file.photo # depends on [control=['if'], data=[]]
if isinstance(file, types.Document):
file_type = 5
for attribute in file.attributes:
if isinstance(attribute, types.DocumentAttributeAudio):
file_type = 3 if attribute.voice else 9 # depends on [control=['if'], data=[]]
elif isinstance(attribute, types.DocumentAttributeVideo):
file_type = 13 if attribute.round_message else 4 # depends on [control=['if'], data=[]]
elif isinstance(attribute, types.DocumentAttributeSticker):
file_type = 8 # depends on [control=['if'], data=[]]
elif isinstance(attribute, types.DocumentAttributeAnimated):
file_type = 10 # depends on [control=['if'], data=[]]
else:
continue
break # depends on [control=['for'], data=['attribute']]
return _encode_telegram_base64(_rle_encode(struct.pack('<iiqqb', file_type, file.dc_id, file.id, file.access_hash, 2))) # depends on [control=['if'], data=[]]
elif isinstance(file, types.Photo):
size = next((x for x in reversed(file.sizes) if isinstance(x, (types.PhotoSize, types.PhotoCachedSize))), None)
if not size:
return None # depends on [control=['if'], data=[]]
size = size.location # 0 = old `secret`
return _encode_telegram_base64(_rle_encode(struct.pack('<iiqqqqib', 2, file.dc_id, file.id, file.access_hash, size.volume_id, 0, size.local_id, 2))) # depends on [control=['if'], data=[]]
else:
return None |
def _set_police_priority_map(self, v, load=False):
"""
Setter method for police_priority_map, mapped from YANG variable /police_priority_map (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_police_priority_map is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_police_priority_map() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name",police_priority_map.police_priority_map, yang_name="police-priority-map", rest_name="police-priority-map", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Policer Priority Map Configuration', u'sort-priority': u'69', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'policer-priority-map', u'cli-mode-name': u'config-policepmap'}}), is_container='list', yang_name="police-priority-map", rest_name="police-priority-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Policer Priority Map Configuration', u'sort-priority': u'69', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'policer-priority-map', u'cli-mode-name': u'config-policepmap'}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """police_priority_map must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("name",police_priority_map.police_priority_map, yang_name="police-priority-map", rest_name="police-priority-map", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Policer Priority Map Configuration', u'sort-priority': u'69', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'policer-priority-map', u'cli-mode-name': u'config-policepmap'}}), is_container='list', yang_name="police-priority-map", rest_name="police-priority-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Policer Priority Map Configuration', u'sort-priority': u'69', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'policer-priority-map', u'cli-mode-name': u'config-policepmap'}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='list', is_config=True)""",
})
self.__police_priority_map = t
if hasattr(self, '_set'):
self._set() | def function[_set_police_priority_map, parameter[self, v, load]]:
constant[
Setter method for police_priority_map, mapped from YANG variable /police_priority_map (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_police_priority_map is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_police_priority_map() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da2041db4f0>
name[self].__police_priority_map assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_police_priority_map] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[YANGListType] ( literal[string] , identifier[police_priority_map] . identifier[police_priority_map] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[is_container] = literal[string] , identifier[user_ordered] = keyword[False] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[yang_keys] = literal[string] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : literal[string] , literal[string] : literal[string] }}), identifier[is_container] = literal[string] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : keyword[None] , literal[string] : keyword[None] , literal[string] : literal[string] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__police_priority_map] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_police_priority_map(self, v, load=False):
"""
Setter method for police_priority_map, mapped from YANG variable /police_priority_map (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_police_priority_map is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_police_priority_map() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=YANGListType('name', police_priority_map.police_priority_map, yang_name='police-priority-map', rest_name='police-priority-map', parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Policer Priority Map Configuration', u'sort-priority': u'69', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'policer-priority-map', u'cli-mode-name': u'config-policepmap'}}), is_container='list', yang_name='police-priority-map', rest_name='police-priority-map', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Policer Priority Map Configuration', u'sort-priority': u'69', u'cli-suppress-list-no': None, u'cli-full-command': None, u'callpoint': u'policer-priority-map', u'cli-mode-name': u'config-policepmap'}}, namespace='urn:brocade.com:mgmt:brocade-policer', defining_module='brocade-policer', yang_type='list', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'police_priority_map must be of a type compatible with list', 'defined-type': 'list', 'generated-type': 'YANGDynClass(base=YANGListType("name",police_priority_map.police_priority_map, yang_name="police-priority-map", rest_name="police-priority-map", parent=self, is_container=\'list\', user_ordered=False, path_helper=self._path_helper, yang_keys=\'name\', extensions={u\'tailf-common\': {u\'info\': u\'Policer Priority Map Configuration\', u\'sort-priority\': u\'69\', u\'cli-suppress-list-no\': None, u\'cli-full-command\': None, u\'callpoint\': u\'policer-priority-map\', u\'cli-mode-name\': u\'config-policepmap\'}}), is_container=\'list\', yang_name="police-priority-map", rest_name="police-priority-map", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'info\': u\'Policer Priority Map Configuration\', u\'sort-priority\': u\'69\', u\'cli-suppress-list-no\': None, u\'cli-full-command\': None, u\'callpoint\': u\'policer-priority-map\', u\'cli-mode-name\': u\'config-policepmap\'}}, namespace=\'urn:brocade.com:mgmt:brocade-policer\', defining_module=\'brocade-policer\', yang_type=\'list\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__police_priority_map = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def get_client_ip(request):
"""
Naively yank the first IP address in an X-Forwarded-For header
and assume this is correct.
Note: Don't use this in security sensitive situations since this
value may be forged from a client.
"""
try:
return request.META['HTTP_X_FORWARDED_FOR'].split(',')[0].strip()
except (KeyError, IndexError):
return request.META.get('REMOTE_ADDR') | def function[get_client_ip, parameter[request]]:
constant[
Naively yank the first IP address in an X-Forwarded-For header
and assume this is correct.
Note: Don't use this in security sensitive situations since this
value may be forged from a client.
]
<ast.Try object at 0x7da207f9a410> | keyword[def] identifier[get_client_ip] ( identifier[request] ):
literal[string]
keyword[try] :
keyword[return] identifier[request] . identifier[META] [ literal[string] ]. identifier[split] ( literal[string] )[ literal[int] ]. identifier[strip] ()
keyword[except] ( identifier[KeyError] , identifier[IndexError] ):
keyword[return] identifier[request] . identifier[META] . identifier[get] ( literal[string] ) | def get_client_ip(request):
"""
Naively yank the first IP address in an X-Forwarded-For header
and assume this is correct.
Note: Don't use this in security sensitive situations since this
value may be forged from a client.
"""
try:
return request.META['HTTP_X_FORWARDED_FOR'].split(',')[0].strip() # depends on [control=['try'], data=[]]
except (KeyError, IndexError):
return request.META.get('REMOTE_ADDR') # depends on [control=['except'], data=[]] |
def copy_all_files_and_subfolders(src, dest, base_path_ignore, xtn_list):
"""
file_tools.copy_all_files_and_subfolders(src, dest, backup_path, ['*.*'])
gets list of all subfolders and copies each file to
its own folder in 'dest' folder
paths, xtn, excluded, output_file_name = 'my_files.csv')
"""
ensure_dir(dest)
fl = mod_fl.FileList([src], xtn_list, exclude_folders, '')
all_paths = fl.get_list_of_paths()
fl.save_filelist(os.path.join(dest,'files_backed_up.csv'), ["name", "path", "size", "date"])
for p in all_paths:
dest_folder = os.path.join(dest, p[len(base_path_ignore):])
ensure_dir(dest_folder)
#print('copying ' + p)
copy_files_to_folder(p, dest_folder, xtn='*') | def function[copy_all_files_and_subfolders, parameter[src, dest, base_path_ignore, xtn_list]]:
constant[
file_tools.copy_all_files_and_subfolders(src, dest, backup_path, ['*.*'])
gets list of all subfolders and copies each file to
its own folder in 'dest' folder
paths, xtn, excluded, output_file_name = 'my_files.csv')
]
call[name[ensure_dir], parameter[name[dest]]]
variable[fl] assign[=] call[name[mod_fl].FileList, parameter[list[[<ast.Name object at 0x7da18f58c5e0>]], name[xtn_list], name[exclude_folders], constant[]]]
variable[all_paths] assign[=] call[name[fl].get_list_of_paths, parameter[]]
call[name[fl].save_filelist, parameter[call[name[os].path.join, parameter[name[dest], constant[files_backed_up.csv]]], list[[<ast.Constant object at 0x7da18f58eec0>, <ast.Constant object at 0x7da18f58e500>, <ast.Constant object at 0x7da18f58d0f0>, <ast.Constant object at 0x7da18f58f040>]]]]
for taget[name[p]] in starred[name[all_paths]] begin[:]
variable[dest_folder] assign[=] call[name[os].path.join, parameter[name[dest], call[name[p]][<ast.Slice object at 0x7da18f58d480>]]]
call[name[ensure_dir], parameter[name[dest_folder]]]
call[name[copy_files_to_folder], parameter[name[p], name[dest_folder]]] | keyword[def] identifier[copy_all_files_and_subfolders] ( identifier[src] , identifier[dest] , identifier[base_path_ignore] , identifier[xtn_list] ):
literal[string]
identifier[ensure_dir] ( identifier[dest] )
identifier[fl] = identifier[mod_fl] . identifier[FileList] ([ identifier[src] ], identifier[xtn_list] , identifier[exclude_folders] , literal[string] )
identifier[all_paths] = identifier[fl] . identifier[get_list_of_paths] ()
identifier[fl] . identifier[save_filelist] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dest] , literal[string] ),[ literal[string] , literal[string] , literal[string] , literal[string] ])
keyword[for] identifier[p] keyword[in] identifier[all_paths] :
identifier[dest_folder] = identifier[os] . identifier[path] . identifier[join] ( identifier[dest] , identifier[p] [ identifier[len] ( identifier[base_path_ignore] ):])
identifier[ensure_dir] ( identifier[dest_folder] )
identifier[copy_files_to_folder] ( identifier[p] , identifier[dest_folder] , identifier[xtn] = literal[string] ) | def copy_all_files_and_subfolders(src, dest, base_path_ignore, xtn_list):
"""
file_tools.copy_all_files_and_subfolders(src, dest, backup_path, ['*.*'])
gets list of all subfolders and copies each file to
its own folder in 'dest' folder
paths, xtn, excluded, output_file_name = 'my_files.csv')
"""
ensure_dir(dest)
fl = mod_fl.FileList([src], xtn_list, exclude_folders, '')
all_paths = fl.get_list_of_paths()
fl.save_filelist(os.path.join(dest, 'files_backed_up.csv'), ['name', 'path', 'size', 'date'])
for p in all_paths:
dest_folder = os.path.join(dest, p[len(base_path_ignore):])
ensure_dir(dest_folder) #print('copying ' + p)
copy_files_to_folder(p, dest_folder, xtn='*') # depends on [control=['for'], data=['p']] |
def is_catchup_needed_during_view_change(self) -> bool:
"""
Check if received a quorum of view change done messages and if yes
check if caught up till the
Check if all requests ordered till last prepared certificate
Check if last catchup resulted in no txns
"""
if self.caught_up_for_current_view():
logger.info('{} is caught up for the current view {}'.format(self, self.viewNo))
return False
logger.info('{} is not caught up for the current view {}'.format(self, self.viewNo))
if self.num_txns_caught_up_in_last_catchup() == 0:
if self.has_ordered_till_last_prepared_certificate():
logger.info('{} ordered till last prepared certificate'.format(self))
return False
if self.is_catch_up_limit(self.config.MIN_TIMEOUT_CATCHUPS_DONE_DURING_VIEW_CHANGE):
# No more 3PC messages will be processed since maximum catchup
# rounds have been done
self.master_replica.last_prepared_before_view_change = None
return False
return True | def function[is_catchup_needed_during_view_change, parameter[self]]:
constant[
Check if received a quorum of view change done messages and if yes
check if caught up till the
Check if all requests ordered till last prepared certificate
Check if last catchup resulted in no txns
]
if call[name[self].caught_up_for_current_view, parameter[]] begin[:]
call[name[logger].info, parameter[call[constant[{} is caught up for the current view {}].format, parameter[name[self], name[self].viewNo]]]]
return[constant[False]]
call[name[logger].info, parameter[call[constant[{} is not caught up for the current view {}].format, parameter[name[self], name[self].viewNo]]]]
if compare[call[name[self].num_txns_caught_up_in_last_catchup, parameter[]] equal[==] constant[0]] begin[:]
if call[name[self].has_ordered_till_last_prepared_certificate, parameter[]] begin[:]
call[name[logger].info, parameter[call[constant[{} ordered till last prepared certificate].format, parameter[name[self]]]]]
return[constant[False]]
if call[name[self].is_catch_up_limit, parameter[name[self].config.MIN_TIMEOUT_CATCHUPS_DONE_DURING_VIEW_CHANGE]] begin[:]
name[self].master_replica.last_prepared_before_view_change assign[=] constant[None]
return[constant[False]]
return[constant[True]] | keyword[def] identifier[is_catchup_needed_during_view_change] ( identifier[self] )-> identifier[bool] :
literal[string]
keyword[if] identifier[self] . identifier[caught_up_for_current_view] ():
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] , identifier[self] . identifier[viewNo] ))
keyword[return] keyword[False]
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] , identifier[self] . identifier[viewNo] ))
keyword[if] identifier[self] . identifier[num_txns_caught_up_in_last_catchup] ()== literal[int] :
keyword[if] identifier[self] . identifier[has_ordered_till_last_prepared_certificate] ():
identifier[logger] . identifier[info] ( literal[string] . identifier[format] ( identifier[self] ))
keyword[return] keyword[False]
keyword[if] identifier[self] . identifier[is_catch_up_limit] ( identifier[self] . identifier[config] . identifier[MIN_TIMEOUT_CATCHUPS_DONE_DURING_VIEW_CHANGE] ):
identifier[self] . identifier[master_replica] . identifier[last_prepared_before_view_change] = keyword[None]
keyword[return] keyword[False]
keyword[return] keyword[True] | def is_catchup_needed_during_view_change(self) -> bool:
"""
Check if received a quorum of view change done messages and if yes
check if caught up till the
Check if all requests ordered till last prepared certificate
Check if last catchup resulted in no txns
"""
if self.caught_up_for_current_view():
logger.info('{} is caught up for the current view {}'.format(self, self.viewNo))
return False # depends on [control=['if'], data=[]]
logger.info('{} is not caught up for the current view {}'.format(self, self.viewNo))
if self.num_txns_caught_up_in_last_catchup() == 0:
if self.has_ordered_till_last_prepared_certificate():
logger.info('{} ordered till last prepared certificate'.format(self))
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if self.is_catch_up_limit(self.config.MIN_TIMEOUT_CATCHUPS_DONE_DURING_VIEW_CHANGE):
# No more 3PC messages will be processed since maximum catchup
# rounds have been done
self.master_replica.last_prepared_before_view_change = None
return False # depends on [control=['if'], data=[]]
return True |
def parse_args(args):
"""Uses python argparse to collect positional args"""
Log.info("Input args: %r" % args)
parser = argparse.ArgumentParser()
parser.add_argument("--shard", type=int, required=True)
parser.add_argument("--topology-name", required=True)
parser.add_argument("--topology-id", required=True)
parser.add_argument("--topology-defn-file", required=True)
parser.add_argument("--state-manager-connection", required=True)
parser.add_argument("--state-manager-root", required=True)
parser.add_argument("--state-manager-config-file", required=True)
parser.add_argument("--tmaster-binary", required=True)
parser.add_argument("--stmgr-binary", required=True)
parser.add_argument("--metrics-manager-classpath", required=True)
parser.add_argument("--instance-jvm-opts", required=True)
parser.add_argument("--classpath", required=True)
parser.add_argument("--master-port", required=True)
parser.add_argument("--tmaster-controller-port", required=True)
parser.add_argument("--tmaster-stats-port", required=True)
parser.add_argument("--heron-internals-config-file", required=True)
parser.add_argument("--override-config-file", required=True)
parser.add_argument("--component-ram-map", required=True)
parser.add_argument("--component-jvm-opts", required=True)
parser.add_argument("--pkg-type", required=True)
parser.add_argument("--topology-binary-file", required=True)
parser.add_argument("--heron-java-home", required=True)
parser.add_argument("--shell-port", required=True)
parser.add_argument("--heron-shell-binary", required=True)
parser.add_argument("--metrics-manager-port", required=True)
parser.add_argument("--cluster", required=True)
parser.add_argument("--role", required=True)
parser.add_argument("--environment", required=True)
parser.add_argument("--instance-classpath", required=True)
parser.add_argument("--metrics-sinks-config-file", required=True)
parser.add_argument("--scheduler-classpath", required=True)
parser.add_argument("--scheduler-port", required=True)
parser.add_argument("--python-instance-binary", required=True)
parser.add_argument("--cpp-instance-binary", required=True)
parser.add_argument("--metricscache-manager-classpath", required=True)
parser.add_argument("--metricscache-manager-master-port", required=True)
parser.add_argument("--metricscache-manager-stats-port", required=True)
parser.add_argument("--metricscache-manager-mode", required=False)
parser.add_argument("--is-stateful", required=True)
parser.add_argument("--checkpoint-manager-classpath", required=True)
parser.add_argument("--checkpoint-manager-port", required=True)
parser.add_argument("--checkpoint-manager-ram", type=long, required=True)
parser.add_argument("--stateful-config-file", required=True)
parser.add_argument("--health-manager-mode", required=True)
parser.add_argument("--health-manager-classpath", required=True)
parser.add_argument("--jvm-remote-debugger-ports", required=False,
help="ports to be used by a remote debugger for JVM instances")
parsed_args, unknown_args = parser.parse_known_args(args[1:])
if unknown_args:
Log.error('Unknown argument: %s' % unknown_args[0])
parser.print_help()
sys.exit(1)
return parsed_args | def function[parse_args, parameter[args]]:
constant[Uses python argparse to collect positional args]
call[name[Log].info, parameter[binary_operation[constant[Input args: %r] <ast.Mod object at 0x7da2590d6920> name[args]]]]
variable[parser] assign[=] call[name[argparse].ArgumentParser, parameter[]]
call[name[parser].add_argument, parameter[constant[--shard]]]
call[name[parser].add_argument, parameter[constant[--topology-name]]]
call[name[parser].add_argument, parameter[constant[--topology-id]]]
call[name[parser].add_argument, parameter[constant[--topology-defn-file]]]
call[name[parser].add_argument, parameter[constant[--state-manager-connection]]]
call[name[parser].add_argument, parameter[constant[--state-manager-root]]]
call[name[parser].add_argument, parameter[constant[--state-manager-config-file]]]
call[name[parser].add_argument, parameter[constant[--tmaster-binary]]]
call[name[parser].add_argument, parameter[constant[--stmgr-binary]]]
call[name[parser].add_argument, parameter[constant[--metrics-manager-classpath]]]
call[name[parser].add_argument, parameter[constant[--instance-jvm-opts]]]
call[name[parser].add_argument, parameter[constant[--classpath]]]
call[name[parser].add_argument, parameter[constant[--master-port]]]
call[name[parser].add_argument, parameter[constant[--tmaster-controller-port]]]
call[name[parser].add_argument, parameter[constant[--tmaster-stats-port]]]
call[name[parser].add_argument, parameter[constant[--heron-internals-config-file]]]
call[name[parser].add_argument, parameter[constant[--override-config-file]]]
call[name[parser].add_argument, parameter[constant[--component-ram-map]]]
call[name[parser].add_argument, parameter[constant[--component-jvm-opts]]]
call[name[parser].add_argument, parameter[constant[--pkg-type]]]
call[name[parser].add_argument, parameter[constant[--topology-binary-file]]]
call[name[parser].add_argument, parameter[constant[--heron-java-home]]]
call[name[parser].add_argument, parameter[constant[--shell-port]]]
call[name[parser].add_argument, parameter[constant[--heron-shell-binary]]]
call[name[parser].add_argument, parameter[constant[--metrics-manager-port]]]
call[name[parser].add_argument, parameter[constant[--cluster]]]
call[name[parser].add_argument, parameter[constant[--role]]]
call[name[parser].add_argument, parameter[constant[--environment]]]
call[name[parser].add_argument, parameter[constant[--instance-classpath]]]
call[name[parser].add_argument, parameter[constant[--metrics-sinks-config-file]]]
call[name[parser].add_argument, parameter[constant[--scheduler-classpath]]]
call[name[parser].add_argument, parameter[constant[--scheduler-port]]]
call[name[parser].add_argument, parameter[constant[--python-instance-binary]]]
call[name[parser].add_argument, parameter[constant[--cpp-instance-binary]]]
call[name[parser].add_argument, parameter[constant[--metricscache-manager-classpath]]]
call[name[parser].add_argument, parameter[constant[--metricscache-manager-master-port]]]
call[name[parser].add_argument, parameter[constant[--metricscache-manager-stats-port]]]
call[name[parser].add_argument, parameter[constant[--metricscache-manager-mode]]]
call[name[parser].add_argument, parameter[constant[--is-stateful]]]
call[name[parser].add_argument, parameter[constant[--checkpoint-manager-classpath]]]
call[name[parser].add_argument, parameter[constant[--checkpoint-manager-port]]]
call[name[parser].add_argument, parameter[constant[--checkpoint-manager-ram]]]
call[name[parser].add_argument, parameter[constant[--stateful-config-file]]]
call[name[parser].add_argument, parameter[constant[--health-manager-mode]]]
call[name[parser].add_argument, parameter[constant[--health-manager-classpath]]]
call[name[parser].add_argument, parameter[constant[--jvm-remote-debugger-ports]]]
<ast.Tuple object at 0x7da2054a7d30> assign[=] call[name[parser].parse_known_args, parameter[call[name[args]][<ast.Slice object at 0x7da2054a5cc0>]]]
if name[unknown_args] begin[:]
call[name[Log].error, parameter[binary_operation[constant[Unknown argument: %s] <ast.Mod object at 0x7da2590d6920> call[name[unknown_args]][constant[0]]]]]
call[name[parser].print_help, parameter[]]
call[name[sys].exit, parameter[constant[1]]]
return[name[parsed_args]] | keyword[def] identifier[parse_args] ( identifier[args] ):
literal[string]
identifier[Log] . identifier[info] ( literal[string] % identifier[args] )
identifier[parser] = identifier[argparse] . identifier[ArgumentParser] ()
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[int] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[False] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[type] = identifier[long] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[True] )
identifier[parser] . identifier[add_argument] ( literal[string] , identifier[required] = keyword[False] ,
identifier[help] = literal[string] )
identifier[parsed_args] , identifier[unknown_args] = identifier[parser] . identifier[parse_known_args] ( identifier[args] [ literal[int] :])
keyword[if] identifier[unknown_args] :
identifier[Log] . identifier[error] ( literal[string] % identifier[unknown_args] [ literal[int] ])
identifier[parser] . identifier[print_help] ()
identifier[sys] . identifier[exit] ( literal[int] )
keyword[return] identifier[parsed_args] | def parse_args(args):
"""Uses python argparse to collect positional args"""
Log.info('Input args: %r' % args)
parser = argparse.ArgumentParser()
parser.add_argument('--shard', type=int, required=True)
parser.add_argument('--topology-name', required=True)
parser.add_argument('--topology-id', required=True)
parser.add_argument('--topology-defn-file', required=True)
parser.add_argument('--state-manager-connection', required=True)
parser.add_argument('--state-manager-root', required=True)
parser.add_argument('--state-manager-config-file', required=True)
parser.add_argument('--tmaster-binary', required=True)
parser.add_argument('--stmgr-binary', required=True)
parser.add_argument('--metrics-manager-classpath', required=True)
parser.add_argument('--instance-jvm-opts', required=True)
parser.add_argument('--classpath', required=True)
parser.add_argument('--master-port', required=True)
parser.add_argument('--tmaster-controller-port', required=True)
parser.add_argument('--tmaster-stats-port', required=True)
parser.add_argument('--heron-internals-config-file', required=True)
parser.add_argument('--override-config-file', required=True)
parser.add_argument('--component-ram-map', required=True)
parser.add_argument('--component-jvm-opts', required=True)
parser.add_argument('--pkg-type', required=True)
parser.add_argument('--topology-binary-file', required=True)
parser.add_argument('--heron-java-home', required=True)
parser.add_argument('--shell-port', required=True)
parser.add_argument('--heron-shell-binary', required=True)
parser.add_argument('--metrics-manager-port', required=True)
parser.add_argument('--cluster', required=True)
parser.add_argument('--role', required=True)
parser.add_argument('--environment', required=True)
parser.add_argument('--instance-classpath', required=True)
parser.add_argument('--metrics-sinks-config-file', required=True)
parser.add_argument('--scheduler-classpath', required=True)
parser.add_argument('--scheduler-port', required=True)
parser.add_argument('--python-instance-binary', required=True)
parser.add_argument('--cpp-instance-binary', required=True)
parser.add_argument('--metricscache-manager-classpath', required=True)
parser.add_argument('--metricscache-manager-master-port', required=True)
parser.add_argument('--metricscache-manager-stats-port', required=True)
parser.add_argument('--metricscache-manager-mode', required=False)
parser.add_argument('--is-stateful', required=True)
parser.add_argument('--checkpoint-manager-classpath', required=True)
parser.add_argument('--checkpoint-manager-port', required=True)
parser.add_argument('--checkpoint-manager-ram', type=long, required=True)
parser.add_argument('--stateful-config-file', required=True)
parser.add_argument('--health-manager-mode', required=True)
parser.add_argument('--health-manager-classpath', required=True)
parser.add_argument('--jvm-remote-debugger-ports', required=False, help='ports to be used by a remote debugger for JVM instances')
(parsed_args, unknown_args) = parser.parse_known_args(args[1:])
if unknown_args:
Log.error('Unknown argument: %s' % unknown_args[0])
parser.print_help()
sys.exit(1) # depends on [control=['if'], data=[]]
return parsed_args |
def _get_or_update_parent(key, val, to_str, parent=None, **options):
"""
:param key: Key of current child (dict{,-like} object)
:param val: Value of current child (dict{,-like} object or [dict{,...}])
:param to_str: Callable to convert value to string
:param parent: XML ElementTree parent node object or None
:param options: Keyword options, see :func:`container_to_etree`
"""
elem = ET.Element(key)
vals = val if anyconfig.utils.is_iterable(val) else [val]
for val_ in vals:
container_to_etree(val_, parent=elem, to_str=to_str, **options)
if parent is None: # 'elem' is the top level etree.
return elem
parent.append(elem)
return parent | def function[_get_or_update_parent, parameter[key, val, to_str, parent]]:
constant[
:param key: Key of current child (dict{,-like} object)
:param val: Value of current child (dict{,-like} object or [dict{,...}])
:param to_str: Callable to convert value to string
:param parent: XML ElementTree parent node object or None
:param options: Keyword options, see :func:`container_to_etree`
]
variable[elem] assign[=] call[name[ET].Element, parameter[name[key]]]
variable[vals] assign[=] <ast.IfExp object at 0x7da18fe92d40>
for taget[name[val_]] in starred[name[vals]] begin[:]
call[name[container_to_etree], parameter[name[val_]]]
if compare[name[parent] is constant[None]] begin[:]
return[name[elem]]
call[name[parent].append, parameter[name[elem]]]
return[name[parent]] | keyword[def] identifier[_get_or_update_parent] ( identifier[key] , identifier[val] , identifier[to_str] , identifier[parent] = keyword[None] ,** identifier[options] ):
literal[string]
identifier[elem] = identifier[ET] . identifier[Element] ( identifier[key] )
identifier[vals] = identifier[val] keyword[if] identifier[anyconfig] . identifier[utils] . identifier[is_iterable] ( identifier[val] ) keyword[else] [ identifier[val] ]
keyword[for] identifier[val_] keyword[in] identifier[vals] :
identifier[container_to_etree] ( identifier[val_] , identifier[parent] = identifier[elem] , identifier[to_str] = identifier[to_str] ,** identifier[options] )
keyword[if] identifier[parent] keyword[is] keyword[None] :
keyword[return] identifier[elem]
identifier[parent] . identifier[append] ( identifier[elem] )
keyword[return] identifier[parent] | def _get_or_update_parent(key, val, to_str, parent=None, **options):
"""
:param key: Key of current child (dict{,-like} object)
:param val: Value of current child (dict{,-like} object or [dict{,...}])
:param to_str: Callable to convert value to string
:param parent: XML ElementTree parent node object or None
:param options: Keyword options, see :func:`container_to_etree`
"""
elem = ET.Element(key)
vals = val if anyconfig.utils.is_iterable(val) else [val]
for val_ in vals:
container_to_etree(val_, parent=elem, to_str=to_str, **options) # depends on [control=['for'], data=['val_']]
if parent is None: # 'elem' is the top level etree.
return elem # depends on [control=['if'], data=[]]
parent.append(elem)
return parent |
def entry_point(self, entry_point):
"""
Provide an entry point for element types to search.
:param str entry_point: valid entry point. Use `~object_types()`
to find all available entry points.
"""
if len(entry_point.split(',')) == 1:
self._params.update(
href=self._resource.get(entry_point))
return self
else:
self._params.update(
filter_context=entry_point)
return self | def function[entry_point, parameter[self, entry_point]]:
constant[
Provide an entry point for element types to search.
:param str entry_point: valid entry point. Use `~object_types()`
to find all available entry points.
]
if compare[call[name[len], parameter[call[name[entry_point].split, parameter[constant[,]]]]] equal[==] constant[1]] begin[:]
call[name[self]._params.update, parameter[]]
return[name[self]] | keyword[def] identifier[entry_point] ( identifier[self] , identifier[entry_point] ):
literal[string]
keyword[if] identifier[len] ( identifier[entry_point] . identifier[split] ( literal[string] ))== literal[int] :
identifier[self] . identifier[_params] . identifier[update] (
identifier[href] = identifier[self] . identifier[_resource] . identifier[get] ( identifier[entry_point] ))
keyword[return] identifier[self]
keyword[else] :
identifier[self] . identifier[_params] . identifier[update] (
identifier[filter_context] = identifier[entry_point] )
keyword[return] identifier[self] | def entry_point(self, entry_point):
"""
Provide an entry point for element types to search.
:param str entry_point: valid entry point. Use `~object_types()`
to find all available entry points.
"""
if len(entry_point.split(',')) == 1:
self._params.update(href=self._resource.get(entry_point))
return self # depends on [control=['if'], data=[]]
else:
self._params.update(filter_context=entry_point)
return self |
def _compile_proto(full_path, dest):
'Helper to compile protobuf files'
proto_path = os.path.dirname(full_path)
protoc_args = [find_protoc(),
'--python_out={}'.format(dest),
'--proto_path={}'.format(proto_path),
full_path]
proc = subprocess.Popen(protoc_args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
outs, errs = proc.communicate(timeout=5)
except subprocess.TimeoutExpired:
proc.kill()
outs, errs = proc.communicate()
return False
if proc.returncode != 0:
msg = 'Failed compiling "{}": \n\nstderr: {}\nstdout: {}'.format(
full_path, errs.decode('utf-8'), outs.decode('utf-8'))
raise BadProtobuf(msg)
return True | def function[_compile_proto, parameter[full_path, dest]]:
constant[Helper to compile protobuf files]
variable[proto_path] assign[=] call[name[os].path.dirname, parameter[name[full_path]]]
variable[protoc_args] assign[=] list[[<ast.Call object at 0x7da1b16be6b0>, <ast.Call object at 0x7da1b16bf460>, <ast.Call object at 0x7da1b16bd5a0>, <ast.Name object at 0x7da1b16be860>]]
variable[proc] assign[=] call[name[subprocess].Popen, parameter[name[protoc_args]]]
<ast.Try object at 0x7da1b16bc670>
if compare[name[proc].returncode not_equal[!=] constant[0]] begin[:]
variable[msg] assign[=] call[constant[Failed compiling "{}":
stderr: {}
stdout: {}].format, parameter[name[full_path], call[name[errs].decode, parameter[constant[utf-8]]], call[name[outs].decode, parameter[constant[utf-8]]]]]
<ast.Raise object at 0x7da1b16bf910>
return[constant[True]] | keyword[def] identifier[_compile_proto] ( identifier[full_path] , identifier[dest] ):
literal[string]
identifier[proto_path] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[full_path] )
identifier[protoc_args] =[ identifier[find_protoc] (),
literal[string] . identifier[format] ( identifier[dest] ),
literal[string] . identifier[format] ( identifier[proto_path] ),
identifier[full_path] ]
identifier[proc] = identifier[subprocess] . identifier[Popen] ( identifier[protoc_args] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] ,
identifier[stderr] = identifier[subprocess] . identifier[PIPE] )
keyword[try] :
identifier[outs] , identifier[errs] = identifier[proc] . identifier[communicate] ( identifier[timeout] = literal[int] )
keyword[except] identifier[subprocess] . identifier[TimeoutExpired] :
identifier[proc] . identifier[kill] ()
identifier[outs] , identifier[errs] = identifier[proc] . identifier[communicate] ()
keyword[return] keyword[False]
keyword[if] identifier[proc] . identifier[returncode] != literal[int] :
identifier[msg] = literal[string] . identifier[format] (
identifier[full_path] , identifier[errs] . identifier[decode] ( literal[string] ), identifier[outs] . identifier[decode] ( literal[string] ))
keyword[raise] identifier[BadProtobuf] ( identifier[msg] )
keyword[return] keyword[True] | def _compile_proto(full_path, dest):
"""Helper to compile protobuf files"""
proto_path = os.path.dirname(full_path)
protoc_args = [find_protoc(), '--python_out={}'.format(dest), '--proto_path={}'.format(proto_path), full_path]
proc = subprocess.Popen(protoc_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
(outs, errs) = proc.communicate(timeout=5) # depends on [control=['try'], data=[]]
except subprocess.TimeoutExpired:
proc.kill()
(outs, errs) = proc.communicate()
return False # depends on [control=['except'], data=[]]
if proc.returncode != 0:
msg = 'Failed compiling "{}": \n\nstderr: {}\nstdout: {}'.format(full_path, errs.decode('utf-8'), outs.decode('utf-8'))
raise BadProtobuf(msg) # depends on [control=['if'], data=[]]
return True |
async def _send_sysex(self, sysex_command, sysex_data=None):
"""
This is a private utility method.
This method sends a sysex command to Firmata.
:param sysex_command: sysex command
:param sysex_data: data for command
:returns : No return value.
"""
if not sysex_data:
sysex_data = []
# convert the message command and data to characters
sysex_message = chr(PrivateConstants.START_SYSEX)
sysex_message += chr(sysex_command)
if len(sysex_data):
for d in sysex_data:
sysex_message += chr(d)
sysex_message += chr(PrivateConstants.END_SYSEX)
for data in sysex_message:
await self.write(data) | <ast.AsyncFunctionDef object at 0x7da18eb57fd0> | keyword[async] keyword[def] identifier[_send_sysex] ( identifier[self] , identifier[sysex_command] , identifier[sysex_data] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[sysex_data] :
identifier[sysex_data] =[]
identifier[sysex_message] = identifier[chr] ( identifier[PrivateConstants] . identifier[START_SYSEX] )
identifier[sysex_message] += identifier[chr] ( identifier[sysex_command] )
keyword[if] identifier[len] ( identifier[sysex_data] ):
keyword[for] identifier[d] keyword[in] identifier[sysex_data] :
identifier[sysex_message] += identifier[chr] ( identifier[d] )
identifier[sysex_message] += identifier[chr] ( identifier[PrivateConstants] . identifier[END_SYSEX] )
keyword[for] identifier[data] keyword[in] identifier[sysex_message] :
keyword[await] identifier[self] . identifier[write] ( identifier[data] ) | async def _send_sysex(self, sysex_command, sysex_data=None):
"""
This is a private utility method.
This method sends a sysex command to Firmata.
:param sysex_command: sysex command
:param sysex_data: data for command
:returns : No return value.
"""
if not sysex_data:
sysex_data = [] # depends on [control=['if'], data=[]]
# convert the message command and data to characters
sysex_message = chr(PrivateConstants.START_SYSEX)
sysex_message += chr(sysex_command)
if len(sysex_data):
for d in sysex_data:
sysex_message += chr(d) # depends on [control=['for'], data=['d']] # depends on [control=['if'], data=[]]
sysex_message += chr(PrivateConstants.END_SYSEX)
for data in sysex_message:
await self.write(data) # depends on [control=['for'], data=['data']] |
def wait_for_state(vmid, state, timeout=300):
'''
Wait until a specific state has been reached on a node
'''
start_time = time.time()
node = get_vm_status(vmid=vmid)
if not node:
log.error('wait_for_state: No VM retrieved based on given criteria.')
raise SaltCloudExecutionFailure
while True:
if node['status'] == state:
log.debug('Host %s is now in "%s" state!', node['name'], state)
return True
time.sleep(1)
if time.time() - start_time > timeout:
log.debug('Timeout reached while waiting for %s to become %s',
node['name'], state)
return False
node = get_vm_status(vmid=vmid)
log.debug('State for %s is: "%s" instead of "%s"',
node['name'], node['status'], state) | def function[wait_for_state, parameter[vmid, state, timeout]]:
constant[
Wait until a specific state has been reached on a node
]
variable[start_time] assign[=] call[name[time].time, parameter[]]
variable[node] assign[=] call[name[get_vm_status], parameter[]]
if <ast.UnaryOp object at 0x7da20c76d990> begin[:]
call[name[log].error, parameter[constant[wait_for_state: No VM retrieved based on given criteria.]]]
<ast.Raise object at 0x7da20c76c490>
while constant[True] begin[:]
if compare[call[name[node]][constant[status]] equal[==] name[state]] begin[:]
call[name[log].debug, parameter[constant[Host %s is now in "%s" state!], call[name[node]][constant[name]], name[state]]]
return[constant[True]]
call[name[time].sleep, parameter[constant[1]]]
if compare[binary_operation[call[name[time].time, parameter[]] - name[start_time]] greater[>] name[timeout]] begin[:]
call[name[log].debug, parameter[constant[Timeout reached while waiting for %s to become %s], call[name[node]][constant[name]], name[state]]]
return[constant[False]]
variable[node] assign[=] call[name[get_vm_status], parameter[]]
call[name[log].debug, parameter[constant[State for %s is: "%s" instead of "%s"], call[name[node]][constant[name]], call[name[node]][constant[status]], name[state]]] | keyword[def] identifier[wait_for_state] ( identifier[vmid] , identifier[state] , identifier[timeout] = literal[int] ):
literal[string]
identifier[start_time] = identifier[time] . identifier[time] ()
identifier[node] = identifier[get_vm_status] ( identifier[vmid] = identifier[vmid] )
keyword[if] keyword[not] identifier[node] :
identifier[log] . identifier[error] ( literal[string] )
keyword[raise] identifier[SaltCloudExecutionFailure]
keyword[while] keyword[True] :
keyword[if] identifier[node] [ literal[string] ]== identifier[state] :
identifier[log] . identifier[debug] ( literal[string] , identifier[node] [ literal[string] ], identifier[state] )
keyword[return] keyword[True]
identifier[time] . identifier[sleep] ( literal[int] )
keyword[if] identifier[time] . identifier[time] ()- identifier[start_time] > identifier[timeout] :
identifier[log] . identifier[debug] ( literal[string] ,
identifier[node] [ literal[string] ], identifier[state] )
keyword[return] keyword[False]
identifier[node] = identifier[get_vm_status] ( identifier[vmid] = identifier[vmid] )
identifier[log] . identifier[debug] ( literal[string] ,
identifier[node] [ literal[string] ], identifier[node] [ literal[string] ], identifier[state] ) | def wait_for_state(vmid, state, timeout=300):
"""
Wait until a specific state has been reached on a node
"""
start_time = time.time()
node = get_vm_status(vmid=vmid)
if not node:
log.error('wait_for_state: No VM retrieved based on given criteria.')
raise SaltCloudExecutionFailure # depends on [control=['if'], data=[]]
while True:
if node['status'] == state:
log.debug('Host %s is now in "%s" state!', node['name'], state)
return True # depends on [control=['if'], data=['state']]
time.sleep(1)
if time.time() - start_time > timeout:
log.debug('Timeout reached while waiting for %s to become %s', node['name'], state)
return False # depends on [control=['if'], data=[]]
node = get_vm_status(vmid=vmid)
log.debug('State for %s is: "%s" instead of "%s"', node['name'], node['status'], state) # depends on [control=['while'], data=[]] |
def set_passwd_cb(self, callback, userdata=None):
"""
Set the passphrase callback. This function will be called
when a private key with a passphrase is loaded.
:param callback: The Python callback to use. This must accept three
positional arguments. First, an integer giving the maximum length
of the passphrase it may return. If the returned passphrase is
longer than this, it will be truncated. Second, a boolean value
which will be true if the user should be prompted for the
passphrase twice and the callback should verify that the two values
supplied are equal. Third, the value given as the *userdata*
parameter to :meth:`set_passwd_cb`. The *callback* must return
a byte string. If an error occurs, *callback* should return a false
value (e.g. an empty string).
:param userdata: (optional) A Python object which will be given as
argument to the callback
:return: None
"""
if not callable(callback):
raise TypeError("callback must be callable")
self._passphrase_helper = self._wrap_callback(callback)
self._passphrase_callback = self._passphrase_helper.callback
_lib.SSL_CTX_set_default_passwd_cb(
self._context, self._passphrase_callback)
self._passphrase_userdata = userdata | def function[set_passwd_cb, parameter[self, callback, userdata]]:
constant[
Set the passphrase callback. This function will be called
when a private key with a passphrase is loaded.
:param callback: The Python callback to use. This must accept three
positional arguments. First, an integer giving the maximum length
of the passphrase it may return. If the returned passphrase is
longer than this, it will be truncated. Second, a boolean value
which will be true if the user should be prompted for the
passphrase twice and the callback should verify that the two values
supplied are equal. Third, the value given as the *userdata*
parameter to :meth:`set_passwd_cb`. The *callback* must return
a byte string. If an error occurs, *callback* should return a false
value (e.g. an empty string).
:param userdata: (optional) A Python object which will be given as
argument to the callback
:return: None
]
if <ast.UnaryOp object at 0x7da1b03a6080> begin[:]
<ast.Raise object at 0x7da1b03a4c10>
name[self]._passphrase_helper assign[=] call[name[self]._wrap_callback, parameter[name[callback]]]
name[self]._passphrase_callback assign[=] name[self]._passphrase_helper.callback
call[name[_lib].SSL_CTX_set_default_passwd_cb, parameter[name[self]._context, name[self]._passphrase_callback]]
name[self]._passphrase_userdata assign[=] name[userdata] | keyword[def] identifier[set_passwd_cb] ( identifier[self] , identifier[callback] , identifier[userdata] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[callable] ( identifier[callback] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[self] . identifier[_passphrase_helper] = identifier[self] . identifier[_wrap_callback] ( identifier[callback] )
identifier[self] . identifier[_passphrase_callback] = identifier[self] . identifier[_passphrase_helper] . identifier[callback]
identifier[_lib] . identifier[SSL_CTX_set_default_passwd_cb] (
identifier[self] . identifier[_context] , identifier[self] . identifier[_passphrase_callback] )
identifier[self] . identifier[_passphrase_userdata] = identifier[userdata] | def set_passwd_cb(self, callback, userdata=None):
"""
Set the passphrase callback. This function will be called
when a private key with a passphrase is loaded.
:param callback: The Python callback to use. This must accept three
positional arguments. First, an integer giving the maximum length
of the passphrase it may return. If the returned passphrase is
longer than this, it will be truncated. Second, a boolean value
which will be true if the user should be prompted for the
passphrase twice and the callback should verify that the two values
supplied are equal. Third, the value given as the *userdata*
parameter to :meth:`set_passwd_cb`. The *callback* must return
a byte string. If an error occurs, *callback* should return a false
value (e.g. an empty string).
:param userdata: (optional) A Python object which will be given as
argument to the callback
:return: None
"""
if not callable(callback):
raise TypeError('callback must be callable') # depends on [control=['if'], data=[]]
self._passphrase_helper = self._wrap_callback(callback)
self._passphrase_callback = self._passphrase_helper.callback
_lib.SSL_CTX_set_default_passwd_cb(self._context, self._passphrase_callback)
self._passphrase_userdata = userdata |
def decipher_atom_key(atom_key, forcefield):
"""
Return element for deciphered atom key.
This functions checks if the forcfield specified by user is supported
and passes the atom key to the appropriate function for deciphering.
Parameters
----------
atom_key : str
The atom key which is to be deciphered.
forcefield : str
The forcefield to which the atom key belongs to.
Returns
-------
str
A string that is the periodic table element equvalent of forcefield
atom key.
"""
load_funcs = {
'DLF': dlf_notation,
'DL_F': dlf_notation,
'OPLS': opls_notation,
'OPLSAA': opls_notation,
'OPLS2005': opls_notation,
'OPLS3': opls_notation,
}
if forcefield.upper() in load_funcs.keys():
return load_funcs[forcefield.upper()](atom_key)
else:
raise _ForceFieldError(
("Unfortunetely, '{0}' forcefield is not supported by pyWINDOW."
" For list of supported forcefields see User's Manual or "
"MolecularSystem._decipher_atom_keys() function doc string."
).format(forcefield)) | def function[decipher_atom_key, parameter[atom_key, forcefield]]:
constant[
Return element for deciphered atom key.
This functions checks if the forcfield specified by user is supported
and passes the atom key to the appropriate function for deciphering.
Parameters
----------
atom_key : str
The atom key which is to be deciphered.
forcefield : str
The forcefield to which the atom key belongs to.
Returns
-------
str
A string that is the periodic table element equvalent of forcefield
atom key.
]
variable[load_funcs] assign[=] dictionary[[<ast.Constant object at 0x7da2047eb2b0>, <ast.Constant object at 0x7da2047e9ea0>, <ast.Constant object at 0x7da2047ebb20>, <ast.Constant object at 0x7da2047eb760>, <ast.Constant object at 0x7da2047e97b0>, <ast.Constant object at 0x7da2047e9120>], [<ast.Name object at 0x7da2047ead40>, <ast.Name object at 0x7da2047ea650>, <ast.Name object at 0x7da2047eb160>, <ast.Name object at 0x7da2047eaad0>, <ast.Name object at 0x7da2047e84c0>, <ast.Name object at 0x7da2047e9e70>]]
if compare[call[name[forcefield].upper, parameter[]] in call[name[load_funcs].keys, parameter[]]] begin[:]
return[call[call[name[load_funcs]][call[name[forcefield].upper, parameter[]]], parameter[name[atom_key]]]] | keyword[def] identifier[decipher_atom_key] ( identifier[atom_key] , identifier[forcefield] ):
literal[string]
identifier[load_funcs] ={
literal[string] : identifier[dlf_notation] ,
literal[string] : identifier[dlf_notation] ,
literal[string] : identifier[opls_notation] ,
literal[string] : identifier[opls_notation] ,
literal[string] : identifier[opls_notation] ,
literal[string] : identifier[opls_notation] ,
}
keyword[if] identifier[forcefield] . identifier[upper] () keyword[in] identifier[load_funcs] . identifier[keys] ():
keyword[return] identifier[load_funcs] [ identifier[forcefield] . identifier[upper] ()]( identifier[atom_key] )
keyword[else] :
keyword[raise] identifier[_ForceFieldError] (
( literal[string]
literal[string]
literal[string]
). identifier[format] ( identifier[forcefield] )) | def decipher_atom_key(atom_key, forcefield):
"""
Return element for deciphered atom key.
This functions checks if the forcfield specified by user is supported
and passes the atom key to the appropriate function for deciphering.
Parameters
----------
atom_key : str
The atom key which is to be deciphered.
forcefield : str
The forcefield to which the atom key belongs to.
Returns
-------
str
A string that is the periodic table element equvalent of forcefield
atom key.
"""
load_funcs = {'DLF': dlf_notation, 'DL_F': dlf_notation, 'OPLS': opls_notation, 'OPLSAA': opls_notation, 'OPLS2005': opls_notation, 'OPLS3': opls_notation}
if forcefield.upper() in load_funcs.keys():
return load_funcs[forcefield.upper()](atom_key) # depends on [control=['if'], data=[]]
else:
raise _ForceFieldError("Unfortunetely, '{0}' forcefield is not supported by pyWINDOW. For list of supported forcefields see User's Manual or MolecularSystem._decipher_atom_keys() function doc string.".format(forcefield)) |
def serialize_duration(attr, **kwargs):
"""Serialize TimeDelta object into ISO-8601 formatted string.
:param TimeDelta attr: Object to be serialized.
:rtype: str
"""
if isinstance(attr, str):
attr = isodate.parse_duration(attr)
return isodate.duration_isoformat(attr) | def function[serialize_duration, parameter[attr]]:
constant[Serialize TimeDelta object into ISO-8601 formatted string.
:param TimeDelta attr: Object to be serialized.
:rtype: str
]
if call[name[isinstance], parameter[name[attr], name[str]]] begin[:]
variable[attr] assign[=] call[name[isodate].parse_duration, parameter[name[attr]]]
return[call[name[isodate].duration_isoformat, parameter[name[attr]]]] | keyword[def] identifier[serialize_duration] ( identifier[attr] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[attr] , identifier[str] ):
identifier[attr] = identifier[isodate] . identifier[parse_duration] ( identifier[attr] )
keyword[return] identifier[isodate] . identifier[duration_isoformat] ( identifier[attr] ) | def serialize_duration(attr, **kwargs):
"""Serialize TimeDelta object into ISO-8601 formatted string.
:param TimeDelta attr: Object to be serialized.
:rtype: str
"""
if isinstance(attr, str):
attr = isodate.parse_duration(attr) # depends on [control=['if'], data=[]]
return isodate.duration_isoformat(attr) |
def print_contexts(self, names=False):
"""Print users"""
contexts = self.get_contexts()
if names:
contexts = [context['name'] for context in contexts]
pprint.pprint(contexts) | def function[print_contexts, parameter[self, names]]:
constant[Print users]
variable[contexts] assign[=] call[name[self].get_contexts, parameter[]]
if name[names] begin[:]
variable[contexts] assign[=] <ast.ListComp object at 0x7da18c4cdde0>
call[name[pprint].pprint, parameter[name[contexts]]] | keyword[def] identifier[print_contexts] ( identifier[self] , identifier[names] = keyword[False] ):
literal[string]
identifier[contexts] = identifier[self] . identifier[get_contexts] ()
keyword[if] identifier[names] :
identifier[contexts] =[ identifier[context] [ literal[string] ] keyword[for] identifier[context] keyword[in] identifier[contexts] ]
identifier[pprint] . identifier[pprint] ( identifier[contexts] ) | def print_contexts(self, names=False):
"""Print users"""
contexts = self.get_contexts()
if names:
contexts = [context['name'] for context in contexts] # depends on [control=['if'], data=[]]
pprint.pprint(contexts) |
def add_asset(self, filename, asset_type, display_name,
encoding_rate=None, frame_width=None, frame_height=None,
encode_to=None, encode_multiple=False,
h264_preserve_as_rendition=False, h264_no_processing=False):
"""
Add an asset to the Video object.
"""
m = hashlib.md5()
fp = file(filename, 'rb')
bits = fp.read(262144) ## 256KB
while bits:
m.update(bits)
bits = fp.read(262144)
fp.close()
hash_code = m.hexdigest()
refid = "%s-%s" % (os.path.basename(filename), hash_code)
asset = {
'filename': filename,
'type': asset_type,
'size': os.path.getsize(filename),
'refid': refid,
'hash-code': hash_code}
if encoding_rate:
asset.update({'encoding-rate': encoding_rate})
if frame_width:
asset.update({'frame-width': frame_width})
if frame_height:
asset.update({'frame-height': frame_height})
if display_name:
asset.update({'display-name': display_name})
if encode_to:
asset.update({'encode-to': encode_to})
asset.update({'encode-multiple': encode_multiple})
if encode_multiple and h264_preserve_as_rendition:
asset.update({
'h264-preserve-as-rendition': h264_preserve_as_rendition})
else:
if h264_no_processing:
asset.update({'h264-no-processing': h264_no_processing})
self.assets.append(asset) | def function[add_asset, parameter[self, filename, asset_type, display_name, encoding_rate, frame_width, frame_height, encode_to, encode_multiple, h264_preserve_as_rendition, h264_no_processing]]:
constant[
Add an asset to the Video object.
]
variable[m] assign[=] call[name[hashlib].md5, parameter[]]
variable[fp] assign[=] call[name[file], parameter[name[filename], constant[rb]]]
variable[bits] assign[=] call[name[fp].read, parameter[constant[262144]]]
while name[bits] begin[:]
call[name[m].update, parameter[name[bits]]]
variable[bits] assign[=] call[name[fp].read, parameter[constant[262144]]]
call[name[fp].close, parameter[]]
variable[hash_code] assign[=] call[name[m].hexdigest, parameter[]]
variable[refid] assign[=] binary_operation[constant[%s-%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da207f00550>, <ast.Name object at 0x7da207f00520>]]]
variable[asset] assign[=] dictionary[[<ast.Constant object at 0x7da207f016f0>, <ast.Constant object at 0x7da207f02bf0>, <ast.Constant object at 0x7da207f02710>, <ast.Constant object at 0x7da207f00d90>, <ast.Constant object at 0x7da207f031c0>], [<ast.Name object at 0x7da207f03eb0>, <ast.Name object at 0x7da207f01bd0>, <ast.Call object at 0x7da207f01de0>, <ast.Name object at 0x7da207f03010>, <ast.Name object at 0x7da207f01f00>]]
if name[encoding_rate] begin[:]
call[name[asset].update, parameter[dictionary[[<ast.Constant object at 0x7da207f03d00>], [<ast.Name object at 0x7da207f01c30>]]]]
if name[frame_width] begin[:]
call[name[asset].update, parameter[dictionary[[<ast.Constant object at 0x7da207f03f10>], [<ast.Name object at 0x7da207f01270>]]]]
if name[frame_height] begin[:]
call[name[asset].update, parameter[dictionary[[<ast.Constant object at 0x7da207f02b30>], [<ast.Name object at 0x7da207f01960>]]]]
if name[display_name] begin[:]
call[name[asset].update, parameter[dictionary[[<ast.Constant object at 0x7da207f02cb0>], [<ast.Name object at 0x7da207f02770>]]]]
if name[encode_to] begin[:]
call[name[asset].update, parameter[dictionary[[<ast.Constant object at 0x7da207f00e20>], [<ast.Name object at 0x7da207f01900>]]]]
call[name[asset].update, parameter[dictionary[[<ast.Constant object at 0x7da207f00700>], [<ast.Name object at 0x7da207f00130>]]]]
if <ast.BoolOp object at 0x7da207f00a90> begin[:]
call[name[asset].update, parameter[dictionary[[<ast.Constant object at 0x7da207f02a10>], [<ast.Name object at 0x7da207f007f0>]]]]
call[name[self].assets.append, parameter[name[asset]]] | keyword[def] identifier[add_asset] ( identifier[self] , identifier[filename] , identifier[asset_type] , identifier[display_name] ,
identifier[encoding_rate] = keyword[None] , identifier[frame_width] = keyword[None] , identifier[frame_height] = keyword[None] ,
identifier[encode_to] = keyword[None] , identifier[encode_multiple] = keyword[False] ,
identifier[h264_preserve_as_rendition] = keyword[False] , identifier[h264_no_processing] = keyword[False] ):
literal[string]
identifier[m] = identifier[hashlib] . identifier[md5] ()
identifier[fp] = identifier[file] ( identifier[filename] , literal[string] )
identifier[bits] = identifier[fp] . identifier[read] ( literal[int] )
keyword[while] identifier[bits] :
identifier[m] . identifier[update] ( identifier[bits] )
identifier[bits] = identifier[fp] . identifier[read] ( literal[int] )
identifier[fp] . identifier[close] ()
identifier[hash_code] = identifier[m] . identifier[hexdigest] ()
identifier[refid] = literal[string] %( identifier[os] . identifier[path] . identifier[basename] ( identifier[filename] ), identifier[hash_code] )
identifier[asset] ={
literal[string] : identifier[filename] ,
literal[string] : identifier[asset_type] ,
literal[string] : identifier[os] . identifier[path] . identifier[getsize] ( identifier[filename] ),
literal[string] : identifier[refid] ,
literal[string] : identifier[hash_code] }
keyword[if] identifier[encoding_rate] :
identifier[asset] . identifier[update] ({ literal[string] : identifier[encoding_rate] })
keyword[if] identifier[frame_width] :
identifier[asset] . identifier[update] ({ literal[string] : identifier[frame_width] })
keyword[if] identifier[frame_height] :
identifier[asset] . identifier[update] ({ literal[string] : identifier[frame_height] })
keyword[if] identifier[display_name] :
identifier[asset] . identifier[update] ({ literal[string] : identifier[display_name] })
keyword[if] identifier[encode_to] :
identifier[asset] . identifier[update] ({ literal[string] : identifier[encode_to] })
identifier[asset] . identifier[update] ({ literal[string] : identifier[encode_multiple] })
keyword[if] identifier[encode_multiple] keyword[and] identifier[h264_preserve_as_rendition] :
identifier[asset] . identifier[update] ({
literal[string] : identifier[h264_preserve_as_rendition] })
keyword[else] :
keyword[if] identifier[h264_no_processing] :
identifier[asset] . identifier[update] ({ literal[string] : identifier[h264_no_processing] })
identifier[self] . identifier[assets] . identifier[append] ( identifier[asset] ) | def add_asset(self, filename, asset_type, display_name, encoding_rate=None, frame_width=None, frame_height=None, encode_to=None, encode_multiple=False, h264_preserve_as_rendition=False, h264_no_processing=False):
"""
Add an asset to the Video object.
"""
m = hashlib.md5()
fp = file(filename, 'rb')
bits = fp.read(262144) ## 256KB
while bits:
m.update(bits)
bits = fp.read(262144) # depends on [control=['while'], data=[]]
fp.close()
hash_code = m.hexdigest()
refid = '%s-%s' % (os.path.basename(filename), hash_code)
asset = {'filename': filename, 'type': asset_type, 'size': os.path.getsize(filename), 'refid': refid, 'hash-code': hash_code}
if encoding_rate:
asset.update({'encoding-rate': encoding_rate}) # depends on [control=['if'], data=[]]
if frame_width:
asset.update({'frame-width': frame_width}) # depends on [control=['if'], data=[]]
if frame_height:
asset.update({'frame-height': frame_height}) # depends on [control=['if'], data=[]]
if display_name:
asset.update({'display-name': display_name}) # depends on [control=['if'], data=[]]
if encode_to:
asset.update({'encode-to': encode_to})
asset.update({'encode-multiple': encode_multiple})
if encode_multiple and h264_preserve_as_rendition:
asset.update({'h264-preserve-as-rendition': h264_preserve_as_rendition}) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif h264_no_processing:
asset.update({'h264-no-processing': h264_no_processing}) # depends on [control=['if'], data=[]]
self.assets.append(asset) |
def make_unix_filename(fname):
"""
:param fname: the basename of a file (e.g., xxx in /zzz/yyy/xxx).
:returns: a valid unix filename
:rtype: string
:raises: DXError if the filename is invalid on a Unix system
The problem being solved here is that *fname* is a python string, it
may contain characters that are invalid for a file name. We replace all the slashes with %2F.
Another issue, is that the user may choose an invalid name. Since we focus
on Unix systems, the only possibilies are "." and "..".
"""
# sanity check for filenames
bad_filenames = [".", ".."]
if fname in bad_filenames:
raise DXError("Invalid filename {}".format(fname))
return fname.replace('/', '%2F') | def function[make_unix_filename, parameter[fname]]:
constant[
:param fname: the basename of a file (e.g., xxx in /zzz/yyy/xxx).
:returns: a valid unix filename
:rtype: string
:raises: DXError if the filename is invalid on a Unix system
The problem being solved here is that *fname* is a python string, it
may contain characters that are invalid for a file name. We replace all the slashes with %2F.
Another issue, is that the user may choose an invalid name. Since we focus
on Unix systems, the only possibilies are "." and "..".
]
variable[bad_filenames] assign[=] list[[<ast.Constant object at 0x7da18bc73640>, <ast.Constant object at 0x7da18bc71f00>]]
if compare[name[fname] in name[bad_filenames]] begin[:]
<ast.Raise object at 0x7da18bc70700>
return[call[name[fname].replace, parameter[constant[/], constant[%2F]]]] | keyword[def] identifier[make_unix_filename] ( identifier[fname] ):
literal[string]
identifier[bad_filenames] =[ literal[string] , literal[string] ]
keyword[if] identifier[fname] keyword[in] identifier[bad_filenames] :
keyword[raise] identifier[DXError] ( literal[string] . identifier[format] ( identifier[fname] ))
keyword[return] identifier[fname] . identifier[replace] ( literal[string] , literal[string] ) | def make_unix_filename(fname):
"""
:param fname: the basename of a file (e.g., xxx in /zzz/yyy/xxx).
:returns: a valid unix filename
:rtype: string
:raises: DXError if the filename is invalid on a Unix system
The problem being solved here is that *fname* is a python string, it
may contain characters that are invalid for a file name. We replace all the slashes with %2F.
Another issue, is that the user may choose an invalid name. Since we focus
on Unix systems, the only possibilies are "." and "..".
"""
# sanity check for filenames
bad_filenames = ['.', '..']
if fname in bad_filenames:
raise DXError('Invalid filename {}'.format(fname)) # depends on [control=['if'], data=['fname']]
return fname.replace('/', '%2F') |
def run_command(self, scan_id, host, cmd):
"""
Run a single command via SSH and return the content of stdout or
None in case of an Error. A scan error is issued in the latter
case.
For logging into 'host', the scan options 'port', 'username',
'password' and 'ssh_timeout' are used.
"""
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
options = self.get_scan_options(scan_id)
port = int(options['port'])
timeout = int(options['ssh_timeout'])
# For backward compatibility, consider the legacy mode to get
# credentials as scan_option.
# First and second modes should be removed in future releases.
# On the third case it receives the credentials as a subelement of
# the <target>.
credentials = self.get_scan_credentials(scan_id, host)
if ('username_password' in options and
':' in options['username_password']):
username, password = options['username_password'].split(':', 1)
elif 'username' in options and options['username']:
username = options['username']
password = options['password']
elif credentials:
cred_params = credentials.get('ssh')
username = cred_params.get('username', '')
password = cred_params.get('password', '')
else:
self.add_scan_error(scan_id, host=host,
value='Erroneous username_password value')
raise ValueError('Erroneous username_password value')
try:
ssh.connect(hostname=host, username=username, password=password,
timeout=timeout, port=port)
except (paramiko.ssh_exception.AuthenticationException,
socket.error) as err:
# Errors: No route to host, connection timeout, authentication
# failure etc,.
self.add_scan_error(scan_id, host=host, value=str(err))
return None
_, stdout, _ = ssh.exec_command(cmd)
result = stdout.readlines()
ssh.close()
return result | def function[run_command, parameter[self, scan_id, host, cmd]]:
constant[
Run a single command via SSH and return the content of stdout or
None in case of an Error. A scan error is issued in the latter
case.
For logging into 'host', the scan options 'port', 'username',
'password' and 'ssh_timeout' are used.
]
variable[ssh] assign[=] call[name[paramiko].SSHClient, parameter[]]
call[name[ssh].set_missing_host_key_policy, parameter[call[name[paramiko].AutoAddPolicy, parameter[]]]]
variable[options] assign[=] call[name[self].get_scan_options, parameter[name[scan_id]]]
variable[port] assign[=] call[name[int], parameter[call[name[options]][constant[port]]]]
variable[timeout] assign[=] call[name[int], parameter[call[name[options]][constant[ssh_timeout]]]]
variable[credentials] assign[=] call[name[self].get_scan_credentials, parameter[name[scan_id], name[host]]]
if <ast.BoolOp object at 0x7da20e955f00> begin[:]
<ast.Tuple object at 0x7da20e9560e0> assign[=] call[call[name[options]][constant[username_password]].split, parameter[constant[:], constant[1]]]
<ast.Try object at 0x7da20e957340>
<ast.Tuple object at 0x7da20e954670> assign[=] call[name[ssh].exec_command, parameter[name[cmd]]]
variable[result] assign[=] call[name[stdout].readlines, parameter[]]
call[name[ssh].close, parameter[]]
return[name[result]] | keyword[def] identifier[run_command] ( identifier[self] , identifier[scan_id] , identifier[host] , identifier[cmd] ):
literal[string]
identifier[ssh] = identifier[paramiko] . identifier[SSHClient] ()
identifier[ssh] . identifier[set_missing_host_key_policy] ( identifier[paramiko] . identifier[AutoAddPolicy] ())
identifier[options] = identifier[self] . identifier[get_scan_options] ( identifier[scan_id] )
identifier[port] = identifier[int] ( identifier[options] [ literal[string] ])
identifier[timeout] = identifier[int] ( identifier[options] [ literal[string] ])
identifier[credentials] = identifier[self] . identifier[get_scan_credentials] ( identifier[scan_id] , identifier[host] )
keyword[if] ( literal[string] keyword[in] identifier[options] keyword[and]
literal[string] keyword[in] identifier[options] [ literal[string] ]):
identifier[username] , identifier[password] = identifier[options] [ literal[string] ]. identifier[split] ( literal[string] , literal[int] )
keyword[elif] literal[string] keyword[in] identifier[options] keyword[and] identifier[options] [ literal[string] ]:
identifier[username] = identifier[options] [ literal[string] ]
identifier[password] = identifier[options] [ literal[string] ]
keyword[elif] identifier[credentials] :
identifier[cred_params] = identifier[credentials] . identifier[get] ( literal[string] )
identifier[username] = identifier[cred_params] . identifier[get] ( literal[string] , literal[string] )
identifier[password] = identifier[cred_params] . identifier[get] ( literal[string] , literal[string] )
keyword[else] :
identifier[self] . identifier[add_scan_error] ( identifier[scan_id] , identifier[host] = identifier[host] ,
identifier[value] = literal[string] )
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[try] :
identifier[ssh] . identifier[connect] ( identifier[hostname] = identifier[host] , identifier[username] = identifier[username] , identifier[password] = identifier[password] ,
identifier[timeout] = identifier[timeout] , identifier[port] = identifier[port] )
keyword[except] ( identifier[paramiko] . identifier[ssh_exception] . identifier[AuthenticationException] ,
identifier[socket] . identifier[error] ) keyword[as] identifier[err] :
identifier[self] . identifier[add_scan_error] ( identifier[scan_id] , identifier[host] = identifier[host] , identifier[value] = identifier[str] ( identifier[err] ))
keyword[return] keyword[None]
identifier[_] , identifier[stdout] , identifier[_] = identifier[ssh] . identifier[exec_command] ( identifier[cmd] )
identifier[result] = identifier[stdout] . identifier[readlines] ()
identifier[ssh] . identifier[close] ()
keyword[return] identifier[result] | def run_command(self, scan_id, host, cmd):
"""
Run a single command via SSH and return the content of stdout or
None in case of an Error. A scan error is issued in the latter
case.
For logging into 'host', the scan options 'port', 'username',
'password' and 'ssh_timeout' are used.
"""
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
options = self.get_scan_options(scan_id)
port = int(options['port'])
timeout = int(options['ssh_timeout'])
# For backward compatibility, consider the legacy mode to get
# credentials as scan_option.
# First and second modes should be removed in future releases.
# On the third case it receives the credentials as a subelement of
# the <target>.
credentials = self.get_scan_credentials(scan_id, host)
if 'username_password' in options and ':' in options['username_password']:
(username, password) = options['username_password'].split(':', 1) # depends on [control=['if'], data=[]]
elif 'username' in options and options['username']:
username = options['username']
password = options['password'] # depends on [control=['if'], data=[]]
elif credentials:
cred_params = credentials.get('ssh')
username = cred_params.get('username', '')
password = cred_params.get('password', '') # depends on [control=['if'], data=[]]
else:
self.add_scan_error(scan_id, host=host, value='Erroneous username_password value')
raise ValueError('Erroneous username_password value')
try:
ssh.connect(hostname=host, username=username, password=password, timeout=timeout, port=port) # depends on [control=['try'], data=[]]
except (paramiko.ssh_exception.AuthenticationException, socket.error) as err:
# Errors: No route to host, connection timeout, authentication
# failure etc,.
self.add_scan_error(scan_id, host=host, value=str(err))
return None # depends on [control=['except'], data=['err']]
(_, stdout, _) = ssh.exec_command(cmd)
result = stdout.readlines()
ssh.close()
return result |
def set_dev_name(self, devname, callback=None):
'''
Set camera name
'''
params = {'devName': devname.encode('gbk')}
return self.execute_command('setDevName', params, callback=callback) | def function[set_dev_name, parameter[self, devname, callback]]:
constant[
Set camera name
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b1ad1630>], [<ast.Call object at 0x7da1b1ad1420>]]
return[call[name[self].execute_command, parameter[constant[setDevName], name[params]]]] | keyword[def] identifier[set_dev_name] ( identifier[self] , identifier[devname] , identifier[callback] = keyword[None] ):
literal[string]
identifier[params] ={ literal[string] : identifier[devname] . identifier[encode] ( literal[string] )}
keyword[return] identifier[self] . identifier[execute_command] ( literal[string] , identifier[params] , identifier[callback] = identifier[callback] ) | def set_dev_name(self, devname, callback=None):
"""
Set camera name
"""
params = {'devName': devname.encode('gbk')}
return self.execute_command('setDevName', params, callback=callback) |
def _write_directory_records(self, vd, outfp, progress):
# type: (headervd.PrimaryOrSupplementaryVD, BinaryIO, PyCdlib._Progress) -> None
'''
An internal method to write out the directory records from a particular
Volume Descriptor.
Parameters:
vd - The Volume Descriptor to write the Directory Records from.
outfp - The file object to write data to.
progress - The _Progress object to use for outputting progress.
Returns:
Nothing.
'''
log_block_size = vd.logical_block_size()
le_ptr_offset = 0
be_ptr_offset = 0
dirs = collections.deque([vd.root_directory_record()])
while dirs:
curr = dirs.popleft()
curr_dirrecord_offset = 0
if curr.is_dir():
if curr.ptr is None:
raise pycdlibexception.PyCdlibInternalError('Directory has no Path Table Record')
# Little Endian PTR
outfp.seek(vd.path_table_location_le * log_block_size + le_ptr_offset)
ret = curr.ptr.record_little_endian()
self._outfp_write_with_check(outfp, ret)
le_ptr_offset += len(ret)
# Big Endian PTR
outfp.seek(vd.path_table_location_be * log_block_size + be_ptr_offset)
ret = curr.ptr.record_big_endian()
self._outfp_write_with_check(outfp, ret)
be_ptr_offset += len(ret)
progress.call(curr.get_data_length())
dir_extent = curr.extent_location()
for child in curr.children:
# No matter what type the child is, we need to first write
# out the directory record entry.
recstr = child.record()
if (curr_dirrecord_offset + len(recstr)) > log_block_size:
dir_extent += 1
curr_dirrecord_offset = 0
outfp.seek(dir_extent * log_block_size + curr_dirrecord_offset)
# Now write out the child
self._outfp_write_with_check(outfp, recstr)
curr_dirrecord_offset += len(recstr)
if child.rock_ridge is not None:
if child.rock_ridge.dr_entries.ce_record is not None:
# The child has a continue block, so write it out here.
ce_rec = child.rock_ridge.dr_entries.ce_record
outfp.seek(ce_rec.bl_cont_area * self.pvd.logical_block_size() + ce_rec.offset_cont_area)
rec = child.rock_ridge.record_ce_entries()
self._outfp_write_with_check(outfp, rec)
progress.call(len(rec))
if child.rock_ridge.child_link_record_exists():
continue
if child.is_dir():
# If the child is a directory, and is not dot or dotdot,
# we want to descend into it to look at the children.
if not child.is_dot() and not child.is_dotdot():
dirs.append(child) | def function[_write_directory_records, parameter[self, vd, outfp, progress]]:
constant[
An internal method to write out the directory records from a particular
Volume Descriptor.
Parameters:
vd - The Volume Descriptor to write the Directory Records from.
outfp - The file object to write data to.
progress - The _Progress object to use for outputting progress.
Returns:
Nothing.
]
variable[log_block_size] assign[=] call[name[vd].logical_block_size, parameter[]]
variable[le_ptr_offset] assign[=] constant[0]
variable[be_ptr_offset] assign[=] constant[0]
variable[dirs] assign[=] call[name[collections].deque, parameter[list[[<ast.Call object at 0x7da1b0d0f670>]]]]
while name[dirs] begin[:]
variable[curr] assign[=] call[name[dirs].popleft, parameter[]]
variable[curr_dirrecord_offset] assign[=] constant[0]
if call[name[curr].is_dir, parameter[]] begin[:]
if compare[name[curr].ptr is constant[None]] begin[:]
<ast.Raise object at 0x7da1b0d0dbd0>
call[name[outfp].seek, parameter[binary_operation[binary_operation[name[vd].path_table_location_le * name[log_block_size]] + name[le_ptr_offset]]]]
variable[ret] assign[=] call[name[curr].ptr.record_little_endian, parameter[]]
call[name[self]._outfp_write_with_check, parameter[name[outfp], name[ret]]]
<ast.AugAssign object at 0x7da1b0d0c760>
call[name[outfp].seek, parameter[binary_operation[binary_operation[name[vd].path_table_location_be * name[log_block_size]] + name[be_ptr_offset]]]]
variable[ret] assign[=] call[name[curr].ptr.record_big_endian, parameter[]]
call[name[self]._outfp_write_with_check, parameter[name[outfp], name[ret]]]
<ast.AugAssign object at 0x7da1b0d0f640>
call[name[progress].call, parameter[call[name[curr].get_data_length, parameter[]]]]
variable[dir_extent] assign[=] call[name[curr].extent_location, parameter[]]
for taget[name[child]] in starred[name[curr].children] begin[:]
variable[recstr] assign[=] call[name[child].record, parameter[]]
if compare[binary_operation[name[curr_dirrecord_offset] + call[name[len], parameter[name[recstr]]]] greater[>] name[log_block_size]] begin[:]
<ast.AugAssign object at 0x7da1b0d0cfd0>
variable[curr_dirrecord_offset] assign[=] constant[0]
call[name[outfp].seek, parameter[binary_operation[binary_operation[name[dir_extent] * name[log_block_size]] + name[curr_dirrecord_offset]]]]
call[name[self]._outfp_write_with_check, parameter[name[outfp], name[recstr]]]
<ast.AugAssign object at 0x7da1b0d0e1a0>
if compare[name[child].rock_ridge is_not constant[None]] begin[:]
if compare[name[child].rock_ridge.dr_entries.ce_record is_not constant[None]] begin[:]
variable[ce_rec] assign[=] name[child].rock_ridge.dr_entries.ce_record
call[name[outfp].seek, parameter[binary_operation[binary_operation[name[ce_rec].bl_cont_area * call[name[self].pvd.logical_block_size, parameter[]]] + name[ce_rec].offset_cont_area]]]
variable[rec] assign[=] call[name[child].rock_ridge.record_ce_entries, parameter[]]
call[name[self]._outfp_write_with_check, parameter[name[outfp], name[rec]]]
call[name[progress].call, parameter[call[name[len], parameter[name[rec]]]]]
if call[name[child].rock_ridge.child_link_record_exists, parameter[]] begin[:]
continue
if call[name[child].is_dir, parameter[]] begin[:]
if <ast.BoolOp object at 0x7da1b0ff01c0> begin[:]
call[name[dirs].append, parameter[name[child]]] | keyword[def] identifier[_write_directory_records] ( identifier[self] , identifier[vd] , identifier[outfp] , identifier[progress] ):
literal[string]
identifier[log_block_size] = identifier[vd] . identifier[logical_block_size] ()
identifier[le_ptr_offset] = literal[int]
identifier[be_ptr_offset] = literal[int]
identifier[dirs] = identifier[collections] . identifier[deque] ([ identifier[vd] . identifier[root_directory_record] ()])
keyword[while] identifier[dirs] :
identifier[curr] = identifier[dirs] . identifier[popleft] ()
identifier[curr_dirrecord_offset] = literal[int]
keyword[if] identifier[curr] . identifier[is_dir] ():
keyword[if] identifier[curr] . identifier[ptr] keyword[is] keyword[None] :
keyword[raise] identifier[pycdlibexception] . identifier[PyCdlibInternalError] ( literal[string] )
identifier[outfp] . identifier[seek] ( identifier[vd] . identifier[path_table_location_le] * identifier[log_block_size] + identifier[le_ptr_offset] )
identifier[ret] = identifier[curr] . identifier[ptr] . identifier[record_little_endian] ()
identifier[self] . identifier[_outfp_write_with_check] ( identifier[outfp] , identifier[ret] )
identifier[le_ptr_offset] += identifier[len] ( identifier[ret] )
identifier[outfp] . identifier[seek] ( identifier[vd] . identifier[path_table_location_be] * identifier[log_block_size] + identifier[be_ptr_offset] )
identifier[ret] = identifier[curr] . identifier[ptr] . identifier[record_big_endian] ()
identifier[self] . identifier[_outfp_write_with_check] ( identifier[outfp] , identifier[ret] )
identifier[be_ptr_offset] += identifier[len] ( identifier[ret] )
identifier[progress] . identifier[call] ( identifier[curr] . identifier[get_data_length] ())
identifier[dir_extent] = identifier[curr] . identifier[extent_location] ()
keyword[for] identifier[child] keyword[in] identifier[curr] . identifier[children] :
identifier[recstr] = identifier[child] . identifier[record] ()
keyword[if] ( identifier[curr_dirrecord_offset] + identifier[len] ( identifier[recstr] ))> identifier[log_block_size] :
identifier[dir_extent] += literal[int]
identifier[curr_dirrecord_offset] = literal[int]
identifier[outfp] . identifier[seek] ( identifier[dir_extent] * identifier[log_block_size] + identifier[curr_dirrecord_offset] )
identifier[self] . identifier[_outfp_write_with_check] ( identifier[outfp] , identifier[recstr] )
identifier[curr_dirrecord_offset] += identifier[len] ( identifier[recstr] )
keyword[if] identifier[child] . identifier[rock_ridge] keyword[is] keyword[not] keyword[None] :
keyword[if] identifier[child] . identifier[rock_ridge] . identifier[dr_entries] . identifier[ce_record] keyword[is] keyword[not] keyword[None] :
identifier[ce_rec] = identifier[child] . identifier[rock_ridge] . identifier[dr_entries] . identifier[ce_record]
identifier[outfp] . identifier[seek] ( identifier[ce_rec] . identifier[bl_cont_area] * identifier[self] . identifier[pvd] . identifier[logical_block_size] ()+ identifier[ce_rec] . identifier[offset_cont_area] )
identifier[rec] = identifier[child] . identifier[rock_ridge] . identifier[record_ce_entries] ()
identifier[self] . identifier[_outfp_write_with_check] ( identifier[outfp] , identifier[rec] )
identifier[progress] . identifier[call] ( identifier[len] ( identifier[rec] ))
keyword[if] identifier[child] . identifier[rock_ridge] . identifier[child_link_record_exists] ():
keyword[continue]
keyword[if] identifier[child] . identifier[is_dir] ():
keyword[if] keyword[not] identifier[child] . identifier[is_dot] () keyword[and] keyword[not] identifier[child] . identifier[is_dotdot] ():
identifier[dirs] . identifier[append] ( identifier[child] ) | def _write_directory_records(self, vd, outfp, progress):
# type: (headervd.PrimaryOrSupplementaryVD, BinaryIO, PyCdlib._Progress) -> None
'\n An internal method to write out the directory records from a particular\n Volume Descriptor.\n\n Parameters:\n vd - The Volume Descriptor to write the Directory Records from.\n outfp - The file object to write data to.\n progress - The _Progress object to use for outputting progress.\n Returns:\n Nothing.\n '
log_block_size = vd.logical_block_size()
le_ptr_offset = 0
be_ptr_offset = 0
dirs = collections.deque([vd.root_directory_record()])
while dirs:
curr = dirs.popleft()
curr_dirrecord_offset = 0
if curr.is_dir():
if curr.ptr is None:
raise pycdlibexception.PyCdlibInternalError('Directory has no Path Table Record') # depends on [control=['if'], data=[]]
# Little Endian PTR
outfp.seek(vd.path_table_location_le * log_block_size + le_ptr_offset)
ret = curr.ptr.record_little_endian()
self._outfp_write_with_check(outfp, ret)
le_ptr_offset += len(ret)
# Big Endian PTR
outfp.seek(vd.path_table_location_be * log_block_size + be_ptr_offset)
ret = curr.ptr.record_big_endian()
self._outfp_write_with_check(outfp, ret)
be_ptr_offset += len(ret)
progress.call(curr.get_data_length()) # depends on [control=['if'], data=[]]
dir_extent = curr.extent_location()
for child in curr.children:
# No matter what type the child is, we need to first write
# out the directory record entry.
recstr = child.record()
if curr_dirrecord_offset + len(recstr) > log_block_size:
dir_extent += 1
curr_dirrecord_offset = 0 # depends on [control=['if'], data=[]]
outfp.seek(dir_extent * log_block_size + curr_dirrecord_offset)
# Now write out the child
self._outfp_write_with_check(outfp, recstr)
curr_dirrecord_offset += len(recstr)
if child.rock_ridge is not None:
if child.rock_ridge.dr_entries.ce_record is not None:
# The child has a continue block, so write it out here.
ce_rec = child.rock_ridge.dr_entries.ce_record
outfp.seek(ce_rec.bl_cont_area * self.pvd.logical_block_size() + ce_rec.offset_cont_area)
rec = child.rock_ridge.record_ce_entries()
self._outfp_write_with_check(outfp, rec)
progress.call(len(rec)) # depends on [control=['if'], data=[]]
if child.rock_ridge.child_link_record_exists():
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if child.is_dir():
# If the child is a directory, and is not dot or dotdot,
# we want to descend into it to look at the children.
if not child.is_dot() and (not child.is_dotdot()):
dirs.append(child) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['child']] # depends on [control=['while'], data=[]] |
def serialize_data(data, compression=False, encryption=False, public_key=None):
"""Serializes normal Python datatypes into plaintext using json.
You may also choose to enable compression and encryption when serializing
data to send over the network. Enabling one or both of these options will
incur additional overhead.
Args:
data (dict): The data to convert into plain text using json.
compression (boolean): True or False value on whether or not to compress
the serialized data.
encryption (rsa.encryption): An encryption instance used to encrypt the
message if encryption is desired.
public_key (str): The public key to use to encrypt if encryption is
enabled.
Returns:
The string message serialized using json.
"""
message = json.dumps(data)
if compression:
message = zlib.compress(message)
message = binascii.b2a_base64(message)
if encryption and public_key:
message = encryption.encrypt(message, public_key)
encoded_message = str.encode(message)
return encoded_message | def function[serialize_data, parameter[data, compression, encryption, public_key]]:
constant[Serializes normal Python datatypes into plaintext using json.
You may also choose to enable compression and encryption when serializing
data to send over the network. Enabling one or both of these options will
incur additional overhead.
Args:
data (dict): The data to convert into plain text using json.
compression (boolean): True or False value on whether or not to compress
the serialized data.
encryption (rsa.encryption): An encryption instance used to encrypt the
message if encryption is desired.
public_key (str): The public key to use to encrypt if encryption is
enabled.
Returns:
The string message serialized using json.
]
variable[message] assign[=] call[name[json].dumps, parameter[name[data]]]
if name[compression] begin[:]
variable[message] assign[=] call[name[zlib].compress, parameter[name[message]]]
variable[message] assign[=] call[name[binascii].b2a_base64, parameter[name[message]]]
if <ast.BoolOp object at 0x7da1b2351720> begin[:]
variable[message] assign[=] call[name[encryption].encrypt, parameter[name[message], name[public_key]]]
variable[encoded_message] assign[=] call[name[str].encode, parameter[name[message]]]
return[name[encoded_message]] | keyword[def] identifier[serialize_data] ( identifier[data] , identifier[compression] = keyword[False] , identifier[encryption] = keyword[False] , identifier[public_key] = keyword[None] ):
literal[string]
identifier[message] = identifier[json] . identifier[dumps] ( identifier[data] )
keyword[if] identifier[compression] :
identifier[message] = identifier[zlib] . identifier[compress] ( identifier[message] )
identifier[message] = identifier[binascii] . identifier[b2a_base64] ( identifier[message] )
keyword[if] identifier[encryption] keyword[and] identifier[public_key] :
identifier[message] = identifier[encryption] . identifier[encrypt] ( identifier[message] , identifier[public_key] )
identifier[encoded_message] = identifier[str] . identifier[encode] ( identifier[message] )
keyword[return] identifier[encoded_message] | def serialize_data(data, compression=False, encryption=False, public_key=None):
"""Serializes normal Python datatypes into plaintext using json.
You may also choose to enable compression and encryption when serializing
data to send over the network. Enabling one or both of these options will
incur additional overhead.
Args:
data (dict): The data to convert into plain text using json.
compression (boolean): True or False value on whether or not to compress
the serialized data.
encryption (rsa.encryption): An encryption instance used to encrypt the
message if encryption is desired.
public_key (str): The public key to use to encrypt if encryption is
enabled.
Returns:
The string message serialized using json.
"""
message = json.dumps(data)
if compression:
message = zlib.compress(message)
message = binascii.b2a_base64(message) # depends on [control=['if'], data=[]]
if encryption and public_key:
message = encryption.encrypt(message, public_key) # depends on [control=['if'], data=[]]
encoded_message = str.encode(message)
return encoded_message |
def get_db(directory, engine=None):
"""Get a database
:param directory: The root data directory
:param engine: a pre-created SQLAlchemy engine (default: in-memory SQLite)
"""
if engine is None:
engine = create_engine('sqlite://')
tables.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
db = Session()
if directory is not None:
load_from_directory(db, directory)
return db | def function[get_db, parameter[directory, engine]]:
constant[Get a database
:param directory: The root data directory
:param engine: a pre-created SQLAlchemy engine (default: in-memory SQLite)
]
if compare[name[engine] is constant[None]] begin[:]
variable[engine] assign[=] call[name[create_engine], parameter[constant[sqlite://]]]
call[name[tables].metadata.create_all, parameter[name[engine]]]
variable[Session] assign[=] call[name[sessionmaker], parameter[]]
variable[db] assign[=] call[name[Session], parameter[]]
if compare[name[directory] is_not constant[None]] begin[:]
call[name[load_from_directory], parameter[name[db], name[directory]]]
return[name[db]] | keyword[def] identifier[get_db] ( identifier[directory] , identifier[engine] = keyword[None] ):
literal[string]
keyword[if] identifier[engine] keyword[is] keyword[None] :
identifier[engine] = identifier[create_engine] ( literal[string] )
identifier[tables] . identifier[metadata] . identifier[create_all] ( identifier[engine] )
identifier[Session] = identifier[sessionmaker] ( identifier[bind] = identifier[engine] )
identifier[db] = identifier[Session] ()
keyword[if] identifier[directory] keyword[is] keyword[not] keyword[None] :
identifier[load_from_directory] ( identifier[db] , identifier[directory] )
keyword[return] identifier[db] | def get_db(directory, engine=None):
"""Get a database
:param directory: The root data directory
:param engine: a pre-created SQLAlchemy engine (default: in-memory SQLite)
"""
if engine is None:
engine = create_engine('sqlite://') # depends on [control=['if'], data=['engine']]
tables.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
db = Session()
if directory is not None:
load_from_directory(db, directory) # depends on [control=['if'], data=['directory']]
return db |
def _no_access(basedir):
''' Return True if the given base dir is not accessible or writeable
'''
import os
return not os.access(basedir, os.W_OK | os.X_OK) | def function[_no_access, parameter[basedir]]:
constant[ Return True if the given base dir is not accessible or writeable
]
import module[os]
return[<ast.UnaryOp object at 0x7da18f8109d0>] | keyword[def] identifier[_no_access] ( identifier[basedir] ):
literal[string]
keyword[import] identifier[os]
keyword[return] keyword[not] identifier[os] . identifier[access] ( identifier[basedir] , identifier[os] . identifier[W_OK] | identifier[os] . identifier[X_OK] ) | def _no_access(basedir):
""" Return True if the given base dir is not accessible or writeable
"""
import os
return not os.access(basedir, os.W_OK | os.X_OK) |
def register_migration(self, migration: 'Migration'):
"""Register a migration.
You can only register migrations in order. For example, you can
register migrations from version 1 to 2, then 2 to 3, then 3 to
4. You cannot register 1 to 2 followed by 3 to 4.
"""
if migration.from_ver >= migration.to_ver:
raise ValueError('Migration cannot downgrade verson')
if migration.from_ver != self._final_ver:
raise ValueError('Cannot register disjoint migration')
self._migrations[migration.from_ver] = migration
self._final_ver = migration.to_ver | def function[register_migration, parameter[self, migration]]:
constant[Register a migration.
You can only register migrations in order. For example, you can
register migrations from version 1 to 2, then 2 to 3, then 3 to
4. You cannot register 1 to 2 followed by 3 to 4.
]
if compare[name[migration].from_ver greater_or_equal[>=] name[migration].to_ver] begin[:]
<ast.Raise object at 0x7da1b1352290>
if compare[name[migration].from_ver not_equal[!=] name[self]._final_ver] begin[:]
<ast.Raise object at 0x7da1b13529e0>
call[name[self]._migrations][name[migration].from_ver] assign[=] name[migration]
name[self]._final_ver assign[=] name[migration].to_ver | keyword[def] identifier[register_migration] ( identifier[self] , identifier[migration] : literal[string] ):
literal[string]
keyword[if] identifier[migration] . identifier[from_ver] >= identifier[migration] . identifier[to_ver] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[migration] . identifier[from_ver] != identifier[self] . identifier[_final_ver] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[self] . identifier[_migrations] [ identifier[migration] . identifier[from_ver] ]= identifier[migration]
identifier[self] . identifier[_final_ver] = identifier[migration] . identifier[to_ver] | def register_migration(self, migration: 'Migration'):
"""Register a migration.
You can only register migrations in order. For example, you can
register migrations from version 1 to 2, then 2 to 3, then 3 to
4. You cannot register 1 to 2 followed by 3 to 4.
"""
if migration.from_ver >= migration.to_ver:
raise ValueError('Migration cannot downgrade verson') # depends on [control=['if'], data=[]]
if migration.from_ver != self._final_ver:
raise ValueError('Cannot register disjoint migration') # depends on [control=['if'], data=[]]
self._migrations[migration.from_ver] = migration
self._final_ver = migration.to_ver |
def setVisible( self, state ):
"""
Overloads the setVisible method for the dialog to resize the contents \
of the splitter properly.
:param state | <bool>
"""
super(XWizardBrowserDialog, self).setVisible(state)
if ( state ):
mwidth = self.uiPluginTREE.minimumWidth()
self.uiMainSPLT.setSizes([mwidth,
self.uiMainSPLT.width() - mwidth]) | def function[setVisible, parameter[self, state]]:
constant[
Overloads the setVisible method for the dialog to resize the contents of the splitter properly.
:param state | <bool>
]
call[call[name[super], parameter[name[XWizardBrowserDialog], name[self]]].setVisible, parameter[name[state]]]
if name[state] begin[:]
variable[mwidth] assign[=] call[name[self].uiPluginTREE.minimumWidth, parameter[]]
call[name[self].uiMainSPLT.setSizes, parameter[list[[<ast.Name object at 0x7da18eb56860>, <ast.BinOp object at 0x7da18eb54910>]]]] | keyword[def] identifier[setVisible] ( identifier[self] , identifier[state] ):
literal[string]
identifier[super] ( identifier[XWizardBrowserDialog] , identifier[self] ). identifier[setVisible] ( identifier[state] )
keyword[if] ( identifier[state] ):
identifier[mwidth] = identifier[self] . identifier[uiPluginTREE] . identifier[minimumWidth] ()
identifier[self] . identifier[uiMainSPLT] . identifier[setSizes] ([ identifier[mwidth] ,
identifier[self] . identifier[uiMainSPLT] . identifier[width] ()- identifier[mwidth] ]) | def setVisible(self, state):
"""
Overloads the setVisible method for the dialog to resize the contents of the splitter properly.
:param state | <bool>
"""
super(XWizardBrowserDialog, self).setVisible(state)
if state:
mwidth = self.uiPluginTREE.minimumWidth()
self.uiMainSPLT.setSizes([mwidth, self.uiMainSPLT.width() - mwidth]) # depends on [control=['if'], data=[]] |
def _print_cline(self, buf, i, icol):
"""
Print clines after multirow-blocks are finished
"""
for cl in self.clinebuf:
if cl[0] == i:
buf.write('\\cline{{{cl:d}-{icol:d}}}\n'
.format(cl=cl[1], icol=icol))
# remove entries that have been written to buffer
self.clinebuf = [x for x in self.clinebuf if x[0] != i] | def function[_print_cline, parameter[self, buf, i, icol]]:
constant[
Print clines after multirow-blocks are finished
]
for taget[name[cl]] in starred[name[self].clinebuf] begin[:]
if compare[call[name[cl]][constant[0]] equal[==] name[i]] begin[:]
call[name[buf].write, parameter[call[constant[\cline{{{cl:d}-{icol:d}}}
].format, parameter[]]]]
name[self].clinebuf assign[=] <ast.ListComp object at 0x7da18dc04a30> | keyword[def] identifier[_print_cline] ( identifier[self] , identifier[buf] , identifier[i] , identifier[icol] ):
literal[string]
keyword[for] identifier[cl] keyword[in] identifier[self] . identifier[clinebuf] :
keyword[if] identifier[cl] [ literal[int] ]== identifier[i] :
identifier[buf] . identifier[write] ( literal[string]
. identifier[format] ( identifier[cl] = identifier[cl] [ literal[int] ], identifier[icol] = identifier[icol] ))
identifier[self] . identifier[clinebuf] =[ identifier[x] keyword[for] identifier[x] keyword[in] identifier[self] . identifier[clinebuf] keyword[if] identifier[x] [ literal[int] ]!= identifier[i] ] | def _print_cline(self, buf, i, icol):
"""
Print clines after multirow-blocks are finished
"""
for cl in self.clinebuf:
if cl[0] == i:
buf.write('\\cline{{{cl:d}-{icol:d}}}\n'.format(cl=cl[1], icol=icol)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['cl']]
# remove entries that have been written to buffer
self.clinebuf = [x for x in self.clinebuf if x[0] != i] |
def get_ground_diffuse(surface_tilt, ghi, albedo=.25, surface_type=None):
'''
Estimate diffuse irradiance from ground reflections given
irradiance, albedo, and surface tilt
Function to determine the portion of irradiance on a tilted surface
due to ground reflections. Any of the inputs may be DataFrames or
scalars.
Parameters
----------
surface_tilt : numeric
Surface tilt angles in decimal degrees. Tilt must be >=0 and
<=180. The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90).
ghi : numeric
Global horizontal irradiance in W/m^2.
albedo : numeric, default 0.25
Ground reflectance, typically 0.1-0.4 for surfaces on Earth
(land), may increase over snow, ice, etc. May also be known as
the reflection coefficient. Must be >=0 and <=1. Will be
overridden if surface_type is supplied.
surface_type: None or string, default None
If not None, overrides albedo. String can be one of 'urban',
'grass', 'fresh grass', 'snow', 'fresh snow', 'asphalt', 'concrete',
'aluminum', 'copper', 'fresh steel', 'dirty steel', 'sea'.
Returns
-------
grounddiffuse : numeric
Ground reflected irradiances in W/m^2.
References
----------
[1] Loutzenhiser P.G. et. al. "Empirical validation of models to compute
solar irradiance on inclined surfaces for building energy simulation"
2007, Solar Energy vol. 81. pp. 254-267.
The calculation is the last term of equations 3, 4, 7, 8, 10, 11, and 12.
[2] albedos from:
http://files.pvsyst.com/help/albedo.htm
and
http://en.wikipedia.org/wiki/Albedo
and
https://doi.org/10.1175/1520-0469(1972)029<0959:AOTSS>2.0.CO;2
'''
if surface_type is not None:
albedo = SURFACE_ALBEDOS[surface_type]
diffuse_irrad = ghi * albedo * (1 - np.cos(np.radians(surface_tilt))) * 0.5
try:
diffuse_irrad.name = 'diffuse_ground'
except AttributeError:
pass
return diffuse_irrad | def function[get_ground_diffuse, parameter[surface_tilt, ghi, albedo, surface_type]]:
constant[
Estimate diffuse irradiance from ground reflections given
irradiance, albedo, and surface tilt
Function to determine the portion of irradiance on a tilted surface
due to ground reflections. Any of the inputs may be DataFrames or
scalars.
Parameters
----------
surface_tilt : numeric
Surface tilt angles in decimal degrees. Tilt must be >=0 and
<=180. The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90).
ghi : numeric
Global horizontal irradiance in W/m^2.
albedo : numeric, default 0.25
Ground reflectance, typically 0.1-0.4 for surfaces on Earth
(land), may increase over snow, ice, etc. May also be known as
the reflection coefficient. Must be >=0 and <=1. Will be
overridden if surface_type is supplied.
surface_type: None or string, default None
If not None, overrides albedo. String can be one of 'urban',
'grass', 'fresh grass', 'snow', 'fresh snow', 'asphalt', 'concrete',
'aluminum', 'copper', 'fresh steel', 'dirty steel', 'sea'.
Returns
-------
grounddiffuse : numeric
Ground reflected irradiances in W/m^2.
References
----------
[1] Loutzenhiser P.G. et. al. "Empirical validation of models to compute
solar irradiance on inclined surfaces for building energy simulation"
2007, Solar Energy vol. 81. pp. 254-267.
The calculation is the last term of equations 3, 4, 7, 8, 10, 11, and 12.
[2] albedos from:
http://files.pvsyst.com/help/albedo.htm
and
http://en.wikipedia.org/wiki/Albedo
and
https://doi.org/10.1175/1520-0469(1972)029<0959:AOTSS>2.0.CO;2
]
if compare[name[surface_type] is_not constant[None]] begin[:]
variable[albedo] assign[=] call[name[SURFACE_ALBEDOS]][name[surface_type]]
variable[diffuse_irrad] assign[=] binary_operation[binary_operation[binary_operation[name[ghi] * name[albedo]] * binary_operation[constant[1] - call[name[np].cos, parameter[call[name[np].radians, parameter[name[surface_tilt]]]]]]] * constant[0.5]]
<ast.Try object at 0x7da1b1b0c9d0>
return[name[diffuse_irrad]] | keyword[def] identifier[get_ground_diffuse] ( identifier[surface_tilt] , identifier[ghi] , identifier[albedo] = literal[int] , identifier[surface_type] = keyword[None] ):
literal[string]
keyword[if] identifier[surface_type] keyword[is] keyword[not] keyword[None] :
identifier[albedo] = identifier[SURFACE_ALBEDOS] [ identifier[surface_type] ]
identifier[diffuse_irrad] = identifier[ghi] * identifier[albedo] *( literal[int] - identifier[np] . identifier[cos] ( identifier[np] . identifier[radians] ( identifier[surface_tilt] )))* literal[int]
keyword[try] :
identifier[diffuse_irrad] . identifier[name] = literal[string]
keyword[except] identifier[AttributeError] :
keyword[pass]
keyword[return] identifier[diffuse_irrad] | def get_ground_diffuse(surface_tilt, ghi, albedo=0.25, surface_type=None):
"""
Estimate diffuse irradiance from ground reflections given
irradiance, albedo, and surface tilt
Function to determine the portion of irradiance on a tilted surface
due to ground reflections. Any of the inputs may be DataFrames or
scalars.
Parameters
----------
surface_tilt : numeric
Surface tilt angles in decimal degrees. Tilt must be >=0 and
<=180. The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90).
ghi : numeric
Global horizontal irradiance in W/m^2.
albedo : numeric, default 0.25
Ground reflectance, typically 0.1-0.4 for surfaces on Earth
(land), may increase over snow, ice, etc. May also be known as
the reflection coefficient. Must be >=0 and <=1. Will be
overridden if surface_type is supplied.
surface_type: None or string, default None
If not None, overrides albedo. String can be one of 'urban',
'grass', 'fresh grass', 'snow', 'fresh snow', 'asphalt', 'concrete',
'aluminum', 'copper', 'fresh steel', 'dirty steel', 'sea'.
Returns
-------
grounddiffuse : numeric
Ground reflected irradiances in W/m^2.
References
----------
[1] Loutzenhiser P.G. et. al. "Empirical validation of models to compute
solar irradiance on inclined surfaces for building energy simulation"
2007, Solar Energy vol. 81. pp. 254-267.
The calculation is the last term of equations 3, 4, 7, 8, 10, 11, and 12.
[2] albedos from:
http://files.pvsyst.com/help/albedo.htm
and
http://en.wikipedia.org/wiki/Albedo
and
https://doi.org/10.1175/1520-0469(1972)029<0959:AOTSS>2.0.CO;2
"""
if surface_type is not None:
albedo = SURFACE_ALBEDOS[surface_type] # depends on [control=['if'], data=['surface_type']]
diffuse_irrad = ghi * albedo * (1 - np.cos(np.radians(surface_tilt))) * 0.5
try:
diffuse_irrad.name = 'diffuse_ground' # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
return diffuse_irrad |
def SetServerInformation(self, server, port):
"""Set the server information.
Args:
server (str): IP address or hostname of the server.
port (int): Port number of the server.
"""
self._host = server
self._port = port
logger.debug('Elasticsearch server: {0!s} port: {1:d}'.format(
server, port)) | def function[SetServerInformation, parameter[self, server, port]]:
constant[Set the server information.
Args:
server (str): IP address or hostname of the server.
port (int): Port number of the server.
]
name[self]._host assign[=] name[server]
name[self]._port assign[=] name[port]
call[name[logger].debug, parameter[call[constant[Elasticsearch server: {0!s} port: {1:d}].format, parameter[name[server], name[port]]]]] | keyword[def] identifier[SetServerInformation] ( identifier[self] , identifier[server] , identifier[port] ):
literal[string]
identifier[self] . identifier[_host] = identifier[server]
identifier[self] . identifier[_port] = identifier[port]
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] (
identifier[server] , identifier[port] )) | def SetServerInformation(self, server, port):
"""Set the server information.
Args:
server (str): IP address or hostname of the server.
port (int): Port number of the server.
"""
self._host = server
self._port = port
logger.debug('Elasticsearch server: {0!s} port: {1:d}'.format(server, port)) |
def remove_members_in_score_range_in(
self, leaderboard_name, min_score, max_score):
'''
Remove members from the named leaderboard in a given score range.
@param leaderboard_name [String] Name of the leaderboard.
@param min_score [float] Minimum score.
@param max_score [float] Maximum score.
'''
self.redis_connection.zremrangebyscore(
leaderboard_name,
min_score,
max_score) | def function[remove_members_in_score_range_in, parameter[self, leaderboard_name, min_score, max_score]]:
constant[
Remove members from the named leaderboard in a given score range.
@param leaderboard_name [String] Name of the leaderboard.
@param min_score [float] Minimum score.
@param max_score [float] Maximum score.
]
call[name[self].redis_connection.zremrangebyscore, parameter[name[leaderboard_name], name[min_score], name[max_score]]] | keyword[def] identifier[remove_members_in_score_range_in] (
identifier[self] , identifier[leaderboard_name] , identifier[min_score] , identifier[max_score] ):
literal[string]
identifier[self] . identifier[redis_connection] . identifier[zremrangebyscore] (
identifier[leaderboard_name] ,
identifier[min_score] ,
identifier[max_score] ) | def remove_members_in_score_range_in(self, leaderboard_name, min_score, max_score):
"""
Remove members from the named leaderboard in a given score range.
@param leaderboard_name [String] Name of the leaderboard.
@param min_score [float] Minimum score.
@param max_score [float] Maximum score.
"""
self.redis_connection.zremrangebyscore(leaderboard_name, min_score, max_score) |
def load_replacement_patterns(self):
"""Check for availability of the specified dictionary."""
filename = self.dictionary + '.py'
models = self.language + '_models_cltk'
rel_path = os.path.join('~/cltk_data',
self.language,
'model',
models,
'semantics',
filename)
path = os.path.expanduser(rel_path)
logger.info('Loading lemmata or synonyms. This may take a minute.')
loader = importlib.machinery.SourceFileLoader(filename, path)
module = types.ModuleType(loader.name)
loader.exec_module(module)
return module.DICTIONARY | def function[load_replacement_patterns, parameter[self]]:
constant[Check for availability of the specified dictionary.]
variable[filename] assign[=] binary_operation[name[self].dictionary + constant[.py]]
variable[models] assign[=] binary_operation[name[self].language + constant[_models_cltk]]
variable[rel_path] assign[=] call[name[os].path.join, parameter[constant[~/cltk_data], name[self].language, constant[model], name[models], constant[semantics], name[filename]]]
variable[path] assign[=] call[name[os].path.expanduser, parameter[name[rel_path]]]
call[name[logger].info, parameter[constant[Loading lemmata or synonyms. This may take a minute.]]]
variable[loader] assign[=] call[name[importlib].machinery.SourceFileLoader, parameter[name[filename], name[path]]]
variable[module] assign[=] call[name[types].ModuleType, parameter[name[loader].name]]
call[name[loader].exec_module, parameter[name[module]]]
return[name[module].DICTIONARY] | keyword[def] identifier[load_replacement_patterns] ( identifier[self] ):
literal[string]
identifier[filename] = identifier[self] . identifier[dictionary] + literal[string]
identifier[models] = identifier[self] . identifier[language] + literal[string]
identifier[rel_path] = identifier[os] . identifier[path] . identifier[join] ( literal[string] ,
identifier[self] . identifier[language] ,
literal[string] ,
identifier[models] ,
literal[string] ,
identifier[filename] )
identifier[path] = identifier[os] . identifier[path] . identifier[expanduser] ( identifier[rel_path] )
identifier[logger] . identifier[info] ( literal[string] )
identifier[loader] = identifier[importlib] . identifier[machinery] . identifier[SourceFileLoader] ( identifier[filename] , identifier[path] )
identifier[module] = identifier[types] . identifier[ModuleType] ( identifier[loader] . identifier[name] )
identifier[loader] . identifier[exec_module] ( identifier[module] )
keyword[return] identifier[module] . identifier[DICTIONARY] | def load_replacement_patterns(self):
"""Check for availability of the specified dictionary."""
filename = self.dictionary + '.py'
models = self.language + '_models_cltk'
rel_path = os.path.join('~/cltk_data', self.language, 'model', models, 'semantics', filename)
path = os.path.expanduser(rel_path)
logger.info('Loading lemmata or synonyms. This may take a minute.')
loader = importlib.machinery.SourceFileLoader(filename, path)
module = types.ModuleType(loader.name)
loader.exec_module(module)
return module.DICTIONARY |
def destroy_droplet(self, droplet_id, scrub_data=False):
"""
This method destroys one of your droplets - this is irreversible.
Required parameters:
droplet_id:
Numeric, this is the id of your droplet that you want to destroy
Optional parameters
scrub_data:
Boolean, this will strictly write 0s to your prior partition to
ensure that all data is completely erased
"""
params = {}
if scrub_data:
params['scrub_data'] = True
json = self.request('/droplets/%s/destroy' % droplet_id, method='GET',
params=params)
status = json.get('status')
if status == 'OK':
return json.get('event_id')
else:
message = json.get('message')
raise DOPException('[%s]: %s' % (status, message)) | def function[destroy_droplet, parameter[self, droplet_id, scrub_data]]:
constant[
This method destroys one of your droplets - this is irreversible.
Required parameters:
droplet_id:
Numeric, this is the id of your droplet that you want to destroy
Optional parameters
scrub_data:
Boolean, this will strictly write 0s to your prior partition to
ensure that all data is completely erased
]
variable[params] assign[=] dictionary[[], []]
if name[scrub_data] begin[:]
call[name[params]][constant[scrub_data]] assign[=] constant[True]
variable[json] assign[=] call[name[self].request, parameter[binary_operation[constant[/droplets/%s/destroy] <ast.Mod object at 0x7da2590d6920> name[droplet_id]]]]
variable[status] assign[=] call[name[json].get, parameter[constant[status]]]
if compare[name[status] equal[==] constant[OK]] begin[:]
return[call[name[json].get, parameter[constant[event_id]]]] | keyword[def] identifier[destroy_droplet] ( identifier[self] , identifier[droplet_id] , identifier[scrub_data] = keyword[False] ):
literal[string]
identifier[params] ={}
keyword[if] identifier[scrub_data] :
identifier[params] [ literal[string] ]= keyword[True]
identifier[json] = identifier[self] . identifier[request] ( literal[string] % identifier[droplet_id] , identifier[method] = literal[string] ,
identifier[params] = identifier[params] )
identifier[status] = identifier[json] . identifier[get] ( literal[string] )
keyword[if] identifier[status] == literal[string] :
keyword[return] identifier[json] . identifier[get] ( literal[string] )
keyword[else] :
identifier[message] = identifier[json] . identifier[get] ( literal[string] )
keyword[raise] identifier[DOPException] ( literal[string] %( identifier[status] , identifier[message] )) | def destroy_droplet(self, droplet_id, scrub_data=False):
"""
This method destroys one of your droplets - this is irreversible.
Required parameters:
droplet_id:
Numeric, this is the id of your droplet that you want to destroy
Optional parameters
scrub_data:
Boolean, this will strictly write 0s to your prior partition to
ensure that all data is completely erased
"""
params = {}
if scrub_data:
params['scrub_data'] = True # depends on [control=['if'], data=[]]
json = self.request('/droplets/%s/destroy' % droplet_id, method='GET', params=params)
status = json.get('status')
if status == 'OK':
return json.get('event_id') # depends on [control=['if'], data=[]]
else:
message = json.get('message')
raise DOPException('[%s]: %s' % (status, message)) |
def has_implicit_access_to_dashboard(user, obj): # pylint: disable=unused-argument
"""
Check that if request user has implicit access to `ENTERPRISE_DASHBOARD_ADMIN_ROLE` feature role.
Returns:
boolean: whether the request user has access or not
"""
request = get_request_or_stub()
decoded_jwt = get_decoded_jwt_from_request(request)
return request_user_has_implicit_access_via_jwt(decoded_jwt, ENTERPRISE_DASHBOARD_ADMIN_ROLE) | def function[has_implicit_access_to_dashboard, parameter[user, obj]]:
constant[
Check that if request user has implicit access to `ENTERPRISE_DASHBOARD_ADMIN_ROLE` feature role.
Returns:
boolean: whether the request user has access or not
]
variable[request] assign[=] call[name[get_request_or_stub], parameter[]]
variable[decoded_jwt] assign[=] call[name[get_decoded_jwt_from_request], parameter[name[request]]]
return[call[name[request_user_has_implicit_access_via_jwt], parameter[name[decoded_jwt], name[ENTERPRISE_DASHBOARD_ADMIN_ROLE]]]] | keyword[def] identifier[has_implicit_access_to_dashboard] ( identifier[user] , identifier[obj] ):
literal[string]
identifier[request] = identifier[get_request_or_stub] ()
identifier[decoded_jwt] = identifier[get_decoded_jwt_from_request] ( identifier[request] )
keyword[return] identifier[request_user_has_implicit_access_via_jwt] ( identifier[decoded_jwt] , identifier[ENTERPRISE_DASHBOARD_ADMIN_ROLE] ) | def has_implicit_access_to_dashboard(user, obj): # pylint: disable=unused-argument
'\n Check that if request user has implicit access to `ENTERPRISE_DASHBOARD_ADMIN_ROLE` feature role.\n\n Returns:\n boolean: whether the request user has access or not\n '
request = get_request_or_stub()
decoded_jwt = get_decoded_jwt_from_request(request)
return request_user_has_implicit_access_via_jwt(decoded_jwt, ENTERPRISE_DASHBOARD_ADMIN_ROLE) |
def _get_requirement_attr(self, attr, path):
"""
Gets the attribute for a given requirement file in path
:param attr: string, attribute
:param path: string, path
:return: The attribute for the requirement, or the global default
"""
for req_file in self.requirements:
if path.strip("/") == req_file.path.strip("/"):
return getattr(req_file, attr)
return getattr(self, attr) | def function[_get_requirement_attr, parameter[self, attr, path]]:
constant[
Gets the attribute for a given requirement file in path
:param attr: string, attribute
:param path: string, path
:return: The attribute for the requirement, or the global default
]
for taget[name[req_file]] in starred[name[self].requirements] begin[:]
if compare[call[name[path].strip, parameter[constant[/]]] equal[==] call[name[req_file].path.strip, parameter[constant[/]]]] begin[:]
return[call[name[getattr], parameter[name[req_file], name[attr]]]]
return[call[name[getattr], parameter[name[self], name[attr]]]] | keyword[def] identifier[_get_requirement_attr] ( identifier[self] , identifier[attr] , identifier[path] ):
literal[string]
keyword[for] identifier[req_file] keyword[in] identifier[self] . identifier[requirements] :
keyword[if] identifier[path] . identifier[strip] ( literal[string] )== identifier[req_file] . identifier[path] . identifier[strip] ( literal[string] ):
keyword[return] identifier[getattr] ( identifier[req_file] , identifier[attr] )
keyword[return] identifier[getattr] ( identifier[self] , identifier[attr] ) | def _get_requirement_attr(self, attr, path):
"""
Gets the attribute for a given requirement file in path
:param attr: string, attribute
:param path: string, path
:return: The attribute for the requirement, or the global default
"""
for req_file in self.requirements:
if path.strip('/') == req_file.path.strip('/'):
return getattr(req_file, attr) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['req_file']]
return getattr(self, attr) |
def update(self):
"""Update editor extra selections with added decorations.
NOTE: Update TextDecorations to use editor font, using a different
font family and point size could cause unwanted behaviors.
"""
font = self.editor.font()
for decoration in self._decorations:
try:
decoration.format.setFont(
font, QTextCharFormat.FontPropertiesSpecifiedOnly)
except (TypeError, AttributeError): # Qt < 5.3
decoration.format.setFontFamily(font.family())
decoration.format.setFontPointSize(font.pointSize())
self.editor.setExtraSelections(self._decorations) | def function[update, parameter[self]]:
constant[Update editor extra selections with added decorations.
NOTE: Update TextDecorations to use editor font, using a different
font family and point size could cause unwanted behaviors.
]
variable[font] assign[=] call[name[self].editor.font, parameter[]]
for taget[name[decoration]] in starred[name[self]._decorations] begin[:]
<ast.Try object at 0x7da1b26af940>
call[name[self].editor.setExtraSelections, parameter[name[self]._decorations]] | keyword[def] identifier[update] ( identifier[self] ):
literal[string]
identifier[font] = identifier[self] . identifier[editor] . identifier[font] ()
keyword[for] identifier[decoration] keyword[in] identifier[self] . identifier[_decorations] :
keyword[try] :
identifier[decoration] . identifier[format] . identifier[setFont] (
identifier[font] , identifier[QTextCharFormat] . identifier[FontPropertiesSpecifiedOnly] )
keyword[except] ( identifier[TypeError] , identifier[AttributeError] ):
identifier[decoration] . identifier[format] . identifier[setFontFamily] ( identifier[font] . identifier[family] ())
identifier[decoration] . identifier[format] . identifier[setFontPointSize] ( identifier[font] . identifier[pointSize] ())
identifier[self] . identifier[editor] . identifier[setExtraSelections] ( identifier[self] . identifier[_decorations] ) | def update(self):
"""Update editor extra selections with added decorations.
NOTE: Update TextDecorations to use editor font, using a different
font family and point size could cause unwanted behaviors.
"""
font = self.editor.font()
for decoration in self._decorations:
try:
decoration.format.setFont(font, QTextCharFormat.FontPropertiesSpecifiedOnly) # depends on [control=['try'], data=[]]
except (TypeError, AttributeError): # Qt < 5.3
decoration.format.setFontFamily(font.family())
decoration.format.setFontPointSize(font.pointSize()) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['decoration']]
self.editor.setExtraSelections(self._decorations) |
def prefetch_relations(weak_queryset):
"""
FROM: https://djangosnippets.org/snippets/2492/
Consider such a model class::
class Action(models.Model):
actor_content_type = models.ForeignKey(ContentType,related_name='actor')
actor_object_id = models.PositiveIntegerField()
actor = GenericForeignKey('actor_content_type','actor_object_id')
And dataset::
Action(actor=user1).save()
Action(actor=user2).save()
This will hit the user table once for each action::
[a.actor for a in Action.objects.all()]
Whereas this will hit the user table once::
[a.actor for a in prefetch_relations(Action.objects.all())]
Actually, the example above will hit the database N+1 times, where N is
the number of actions. But with prefetch_relations(), the database will be
hit N+1 times where N is the number of distinct content types.
Note that prefetch_relations() is recursive.
Here an example, making a list with prefetch_relations(), and then without
prefetch_relations(). See the number of database hits after each test.
In [1]: from django import db; from prefetch_relations import prefetch_relations
In [2]: db.reset_queries()
In [3]: x = [(a.actor, a.action_object, a.target) for a in prefetch_relations(Action.objects.all().order_by('-pk'))]
In [4]: print len(db.connection.queries)
34
In [5]: db.reset_queries()
In [6]: print len(db.connection.queries)
0
In [7]: x = [(a.actor, a.action_object, a.target) for a in Action.objects.all().order_by('-pk')]
In [8]: print len(db.connection.queries)
396
"""
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
# reverse model's generic foreign keys into a dict:
# { 'field_name': generic.GenericForeignKey instance, ... }
gfks = {}
for name, gfk in weak_queryset.model.__dict__.items():
if not isinstance(gfk, GenericForeignKey):
continue
gfks[name] = gfk
data = {}
for weak_model in weak_queryset:
for gfk_name, gfk_field in gfks.items():
related_content_type_id = getattr(
weak_model,
gfk_field.model._meta.get_field(gfk_field.ct_field).get_attname())
if not related_content_type_id:
continue
related_content_type = ContentType.objects.get_for_id(related_content_type_id)
related_object_id = int(getattr(weak_model, gfk_field.fk_field))
if related_content_type not in data.keys():
data[related_content_type] = []
data[related_content_type].append(related_object_id)
for content_type, object_ids in data.items():
model_class = content_type.model_class()
models = prefetch_relations(model_class.objects.filter(pk__in=object_ids).select_related())
for model in models:
for weak_model in weak_queryset:
for gfk_name, gfk_field in gfks.items():
related_content_type_id = getattr(
weak_model,
gfk_field.model._meta.get_field(gfk_field.ct_field).get_attname())
if not related_content_type_id:
continue
related_content_type = ContentType.objects.get_for_id(related_content_type_id)
related_object_id = int(getattr(weak_model, gfk_field.fk_field))
if related_object_id != model.pk:
continue
if related_content_type != content_type:
continue
setattr(weak_model, gfk_name, model)
return weak_queryset | def function[prefetch_relations, parameter[weak_queryset]]:
constant[
FROM: https://djangosnippets.org/snippets/2492/
Consider such a model class::
class Action(models.Model):
actor_content_type = models.ForeignKey(ContentType,related_name='actor')
actor_object_id = models.PositiveIntegerField()
actor = GenericForeignKey('actor_content_type','actor_object_id')
And dataset::
Action(actor=user1).save()
Action(actor=user2).save()
This will hit the user table once for each action::
[a.actor for a in Action.objects.all()]
Whereas this will hit the user table once::
[a.actor for a in prefetch_relations(Action.objects.all())]
Actually, the example above will hit the database N+1 times, where N is
the number of actions. But with prefetch_relations(), the database will be
hit N+1 times where N is the number of distinct content types.
Note that prefetch_relations() is recursive.
Here an example, making a list with prefetch_relations(), and then without
prefetch_relations(). See the number of database hits after each test.
In [1]: from django import db; from prefetch_relations import prefetch_relations
In [2]: db.reset_queries()
In [3]: x = [(a.actor, a.action_object, a.target) for a in prefetch_relations(Action.objects.all().order_by('-pk'))]
In [4]: print len(db.connection.queries)
34
In [5]: db.reset_queries()
In [6]: print len(db.connection.queries)
0
In [7]: x = [(a.actor, a.action_object, a.target) for a in Action.objects.all().order_by('-pk')]
In [8]: print len(db.connection.queries)
396
]
from relative_module[django.contrib.contenttypes.models] import module[ContentType]
from relative_module[django.contrib.contenttypes.fields] import module[GenericForeignKey]
variable[gfks] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b0d0c700>, <ast.Name object at 0x7da1b0d0ddb0>]]] in starred[call[name[weak_queryset].model.__dict__.items, parameter[]]] begin[:]
if <ast.UnaryOp object at 0x7da1b0d0f070> begin[:]
continue
call[name[gfks]][name[name]] assign[=] name[gfk]
variable[data] assign[=] dictionary[[], []]
for taget[name[weak_model]] in starred[name[weak_queryset]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0d0ec20>, <ast.Name object at 0x7da1b0d0d930>]]] in starred[call[name[gfks].items, parameter[]]] begin[:]
variable[related_content_type_id] assign[=] call[name[getattr], parameter[name[weak_model], call[call[name[gfk_field].model._meta.get_field, parameter[name[gfk_field].ct_field]].get_attname, parameter[]]]]
if <ast.UnaryOp object at 0x7da1b0d0c910> begin[:]
continue
variable[related_content_type] assign[=] call[name[ContentType].objects.get_for_id, parameter[name[related_content_type_id]]]
variable[related_object_id] assign[=] call[name[int], parameter[call[name[getattr], parameter[name[weak_model], name[gfk_field].fk_field]]]]
if compare[name[related_content_type] <ast.NotIn object at 0x7da2590d7190> call[name[data].keys, parameter[]]] begin[:]
call[name[data]][name[related_content_type]] assign[=] list[[]]
call[call[name[data]][name[related_content_type]].append, parameter[name[related_object_id]]]
for taget[tuple[[<ast.Name object at 0x7da1b0d223e0>, <ast.Name object at 0x7da1b0d21f30>]]] in starred[call[name[data].items, parameter[]]] begin[:]
variable[model_class] assign[=] call[name[content_type].model_class, parameter[]]
variable[models] assign[=] call[name[prefetch_relations], parameter[call[call[name[model_class].objects.filter, parameter[]].select_related, parameter[]]]]
for taget[name[model]] in starred[name[models]] begin[:]
for taget[name[weak_model]] in starred[name[weak_queryset]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0d0e110>, <ast.Name object at 0x7da1b0d0ff40>]]] in starred[call[name[gfks].items, parameter[]]] begin[:]
variable[related_content_type_id] assign[=] call[name[getattr], parameter[name[weak_model], call[call[name[gfk_field].model._meta.get_field, parameter[name[gfk_field].ct_field]].get_attname, parameter[]]]]
if <ast.UnaryOp object at 0x7da1b0d0e290> begin[:]
continue
variable[related_content_type] assign[=] call[name[ContentType].objects.get_for_id, parameter[name[related_content_type_id]]]
variable[related_object_id] assign[=] call[name[int], parameter[call[name[getattr], parameter[name[weak_model], name[gfk_field].fk_field]]]]
if compare[name[related_object_id] not_equal[!=] name[model].pk] begin[:]
continue
if compare[name[related_content_type] not_equal[!=] name[content_type]] begin[:]
continue
call[name[setattr], parameter[name[weak_model], name[gfk_name], name[model]]]
return[name[weak_queryset]] | keyword[def] identifier[prefetch_relations] ( identifier[weak_queryset] ):
literal[string]
keyword[from] identifier[django] . identifier[contrib] . identifier[contenttypes] . identifier[models] keyword[import] identifier[ContentType]
keyword[from] identifier[django] . identifier[contrib] . identifier[contenttypes] . identifier[fields] keyword[import] identifier[GenericForeignKey]
identifier[gfks] ={}
keyword[for] identifier[name] , identifier[gfk] keyword[in] identifier[weak_queryset] . identifier[model] . identifier[__dict__] . identifier[items] ():
keyword[if] keyword[not] identifier[isinstance] ( identifier[gfk] , identifier[GenericForeignKey] ):
keyword[continue]
identifier[gfks] [ identifier[name] ]= identifier[gfk]
identifier[data] ={}
keyword[for] identifier[weak_model] keyword[in] identifier[weak_queryset] :
keyword[for] identifier[gfk_name] , identifier[gfk_field] keyword[in] identifier[gfks] . identifier[items] ():
identifier[related_content_type_id] = identifier[getattr] (
identifier[weak_model] ,
identifier[gfk_field] . identifier[model] . identifier[_meta] . identifier[get_field] ( identifier[gfk_field] . identifier[ct_field] ). identifier[get_attname] ())
keyword[if] keyword[not] identifier[related_content_type_id] :
keyword[continue]
identifier[related_content_type] = identifier[ContentType] . identifier[objects] . identifier[get_for_id] ( identifier[related_content_type_id] )
identifier[related_object_id] = identifier[int] ( identifier[getattr] ( identifier[weak_model] , identifier[gfk_field] . identifier[fk_field] ))
keyword[if] identifier[related_content_type] keyword[not] keyword[in] identifier[data] . identifier[keys] ():
identifier[data] [ identifier[related_content_type] ]=[]
identifier[data] [ identifier[related_content_type] ]. identifier[append] ( identifier[related_object_id] )
keyword[for] identifier[content_type] , identifier[object_ids] keyword[in] identifier[data] . identifier[items] ():
identifier[model_class] = identifier[content_type] . identifier[model_class] ()
identifier[models] = identifier[prefetch_relations] ( identifier[model_class] . identifier[objects] . identifier[filter] ( identifier[pk__in] = identifier[object_ids] ). identifier[select_related] ())
keyword[for] identifier[model] keyword[in] identifier[models] :
keyword[for] identifier[weak_model] keyword[in] identifier[weak_queryset] :
keyword[for] identifier[gfk_name] , identifier[gfk_field] keyword[in] identifier[gfks] . identifier[items] ():
identifier[related_content_type_id] = identifier[getattr] (
identifier[weak_model] ,
identifier[gfk_field] . identifier[model] . identifier[_meta] . identifier[get_field] ( identifier[gfk_field] . identifier[ct_field] ). identifier[get_attname] ())
keyword[if] keyword[not] identifier[related_content_type_id] :
keyword[continue]
identifier[related_content_type] = identifier[ContentType] . identifier[objects] . identifier[get_for_id] ( identifier[related_content_type_id] )
identifier[related_object_id] = identifier[int] ( identifier[getattr] ( identifier[weak_model] , identifier[gfk_field] . identifier[fk_field] ))
keyword[if] identifier[related_object_id] != identifier[model] . identifier[pk] :
keyword[continue]
keyword[if] identifier[related_content_type] != identifier[content_type] :
keyword[continue]
identifier[setattr] ( identifier[weak_model] , identifier[gfk_name] , identifier[model] )
keyword[return] identifier[weak_queryset] | def prefetch_relations(weak_queryset):
"""
FROM: https://djangosnippets.org/snippets/2492/
Consider such a model class::
class Action(models.Model):
actor_content_type = models.ForeignKey(ContentType,related_name='actor')
actor_object_id = models.PositiveIntegerField()
actor = GenericForeignKey('actor_content_type','actor_object_id')
And dataset::
Action(actor=user1).save()
Action(actor=user2).save()
This will hit the user table once for each action::
[a.actor for a in Action.objects.all()]
Whereas this will hit the user table once::
[a.actor for a in prefetch_relations(Action.objects.all())]
Actually, the example above will hit the database N+1 times, where N is
the number of actions. But with prefetch_relations(), the database will be
hit N+1 times where N is the number of distinct content types.
Note that prefetch_relations() is recursive.
Here an example, making a list with prefetch_relations(), and then without
prefetch_relations(). See the number of database hits after each test.
In [1]: from django import db; from prefetch_relations import prefetch_relations
In [2]: db.reset_queries()
In [3]: x = [(a.actor, a.action_object, a.target) for a in prefetch_relations(Action.objects.all().order_by('-pk'))]
In [4]: print len(db.connection.queries)
34
In [5]: db.reset_queries()
In [6]: print len(db.connection.queries)
0
In [7]: x = [(a.actor, a.action_object, a.target) for a in Action.objects.all().order_by('-pk')]
In [8]: print len(db.connection.queries)
396
"""
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
# reverse model's generic foreign keys into a dict:
# { 'field_name': generic.GenericForeignKey instance, ... }
gfks = {}
for (name, gfk) in weak_queryset.model.__dict__.items():
if not isinstance(gfk, GenericForeignKey):
continue # depends on [control=['if'], data=[]]
gfks[name] = gfk # depends on [control=['for'], data=[]]
data = {}
for weak_model in weak_queryset:
for (gfk_name, gfk_field) in gfks.items():
related_content_type_id = getattr(weak_model, gfk_field.model._meta.get_field(gfk_field.ct_field).get_attname())
if not related_content_type_id:
continue # depends on [control=['if'], data=[]]
related_content_type = ContentType.objects.get_for_id(related_content_type_id)
related_object_id = int(getattr(weak_model, gfk_field.fk_field))
if related_content_type not in data.keys():
data[related_content_type] = [] # depends on [control=['if'], data=['related_content_type']]
data[related_content_type].append(related_object_id) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['weak_model']]
for (content_type, object_ids) in data.items():
model_class = content_type.model_class()
models = prefetch_relations(model_class.objects.filter(pk__in=object_ids).select_related())
for model in models:
for weak_model in weak_queryset:
for (gfk_name, gfk_field) in gfks.items():
related_content_type_id = getattr(weak_model, gfk_field.model._meta.get_field(gfk_field.ct_field).get_attname())
if not related_content_type_id:
continue # depends on [control=['if'], data=[]]
related_content_type = ContentType.objects.get_for_id(related_content_type_id)
related_object_id = int(getattr(weak_model, gfk_field.fk_field))
if related_object_id != model.pk:
continue # depends on [control=['if'], data=[]]
if related_content_type != content_type:
continue # depends on [control=['if'], data=[]]
setattr(weak_model, gfk_name, model) # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['weak_model']] # depends on [control=['for'], data=['model']] # depends on [control=['for'], data=[]]
return weak_queryset |
def get_image_files(directory, files):
"""Recursively iterate through directory tree and list all files that have a
valid image file suffix
Parameters
----------
directory : directory
Path to directory on disk
files : List(string)
List of file names
Returns
-------
List(string)
List of files that have a valid image suffix
"""
# For each file in the directory test if it is a valid image file or a
# sub-directory.
for f in os.listdir(directory):
abs_file = os.path.join(directory, f)
if os.path.isdir(abs_file):
# Recursively iterate through sub-directories
get_image_files(abs_file, files)
else:
# Add to file collection if has valid suffix
if '.' in f and '.' + f.rsplit('.', 1)[1] in VALID_IMGFILE_SUFFIXES:
files.append(abs_file)
return files | def function[get_image_files, parameter[directory, files]]:
constant[Recursively iterate through directory tree and list all files that have a
valid image file suffix
Parameters
----------
directory : directory
Path to directory on disk
files : List(string)
List of file names
Returns
-------
List(string)
List of files that have a valid image suffix
]
for taget[name[f]] in starred[call[name[os].listdir, parameter[name[directory]]]] begin[:]
variable[abs_file] assign[=] call[name[os].path.join, parameter[name[directory], name[f]]]
if call[name[os].path.isdir, parameter[name[abs_file]]] begin[:]
call[name[get_image_files], parameter[name[abs_file], name[files]]]
return[name[files]] | keyword[def] identifier[get_image_files] ( identifier[directory] , identifier[files] ):
literal[string]
keyword[for] identifier[f] keyword[in] identifier[os] . identifier[listdir] ( identifier[directory] ):
identifier[abs_file] = identifier[os] . identifier[path] . identifier[join] ( identifier[directory] , identifier[f] )
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[abs_file] ):
identifier[get_image_files] ( identifier[abs_file] , identifier[files] )
keyword[else] :
keyword[if] literal[string] keyword[in] identifier[f] keyword[and] literal[string] + identifier[f] . identifier[rsplit] ( literal[string] , literal[int] )[ literal[int] ] keyword[in] identifier[VALID_IMGFILE_SUFFIXES] :
identifier[files] . identifier[append] ( identifier[abs_file] )
keyword[return] identifier[files] | def get_image_files(directory, files):
"""Recursively iterate through directory tree and list all files that have a
valid image file suffix
Parameters
----------
directory : directory
Path to directory on disk
files : List(string)
List of file names
Returns
-------
List(string)
List of files that have a valid image suffix
"""
# For each file in the directory test if it is a valid image file or a
# sub-directory.
for f in os.listdir(directory):
abs_file = os.path.join(directory, f)
if os.path.isdir(abs_file):
# Recursively iterate through sub-directories
get_image_files(abs_file, files) # depends on [control=['if'], data=[]]
# Add to file collection if has valid suffix
elif '.' in f and '.' + f.rsplit('.', 1)[1] in VALID_IMGFILE_SUFFIXES:
files.append(abs_file) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']]
return files |
def install():
"""
Installs the base system and Python requirements for the entire server.
"""
# Install system requirements
sudo("apt-get update -y -q")
apt("nginx libjpeg-dev python-dev python-setuptools git-core "
"postgresql libpq-dev memcached supervisor python-pip")
run("mkdir -p /home/%s/logs" % env.user)
# Install Python requirements
sudo("pip install -U pip virtualenv virtualenvwrapper mercurial")
# Set up virtualenv
run("mkdir -p %s" % env.venv_home)
run("echo 'export WORKON_HOME=%s' >> /home/%s/.bashrc" % (env.venv_home,
env.user))
run("echo 'source /usr/local/bin/virtualenvwrapper.sh' >> "
"/home/%s/.bashrc" % env.user)
print(green("Successfully set up git, mercurial, pip, virtualenv, "
"supervisor, memcached.", bold=True)) | def function[install, parameter[]]:
constant[
Installs the base system and Python requirements for the entire server.
]
call[name[sudo], parameter[constant[apt-get update -y -q]]]
call[name[apt], parameter[constant[nginx libjpeg-dev python-dev python-setuptools git-core postgresql libpq-dev memcached supervisor python-pip]]]
call[name[run], parameter[binary_operation[constant[mkdir -p /home/%s/logs] <ast.Mod object at 0x7da2590d6920> name[env].user]]]
call[name[sudo], parameter[constant[pip install -U pip virtualenv virtualenvwrapper mercurial]]]
call[name[run], parameter[binary_operation[constant[mkdir -p %s] <ast.Mod object at 0x7da2590d6920> name[env].venv_home]]]
call[name[run], parameter[binary_operation[constant[echo 'export WORKON_HOME=%s' >> /home/%s/.bashrc] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da1b15b60b0>, <ast.Attribute object at 0x7da1b15b7fa0>]]]]]
call[name[run], parameter[binary_operation[constant[echo 'source /usr/local/bin/virtualenvwrapper.sh' >> /home/%s/.bashrc] <ast.Mod object at 0x7da2590d6920> name[env].user]]]
call[name[print], parameter[call[name[green], parameter[constant[Successfully set up git, mercurial, pip, virtualenv, supervisor, memcached.]]]]] | keyword[def] identifier[install] ():
literal[string]
identifier[sudo] ( literal[string] )
identifier[apt] ( literal[string]
literal[string] )
identifier[run] ( literal[string] % identifier[env] . identifier[user] )
identifier[sudo] ( literal[string] )
identifier[run] ( literal[string] % identifier[env] . identifier[venv_home] )
identifier[run] ( literal[string] %( identifier[env] . identifier[venv_home] ,
identifier[env] . identifier[user] ))
identifier[run] ( literal[string]
literal[string] % identifier[env] . identifier[user] )
identifier[print] ( identifier[green] ( literal[string]
literal[string] , identifier[bold] = keyword[True] )) | def install():
"""
Installs the base system and Python requirements for the entire server.
"""
# Install system requirements
sudo('apt-get update -y -q')
apt('nginx libjpeg-dev python-dev python-setuptools git-core postgresql libpq-dev memcached supervisor python-pip')
run('mkdir -p /home/%s/logs' % env.user)
# Install Python requirements
sudo('pip install -U pip virtualenv virtualenvwrapper mercurial')
# Set up virtualenv
run('mkdir -p %s' % env.venv_home)
run("echo 'export WORKON_HOME=%s' >> /home/%s/.bashrc" % (env.venv_home, env.user))
run("echo 'source /usr/local/bin/virtualenvwrapper.sh' >> /home/%s/.bashrc" % env.user)
print(green('Successfully set up git, mercurial, pip, virtualenv, supervisor, memcached.', bold=True)) |
def interp(self, coords=None, method='linear', assume_sorted=False,
kwargs={}, **coords_kwargs):
""" Multidimensional interpolation of Dataset.
Parameters
----------
coords : dict, optional
Mapping from dimension names to the new coordinates.
New coordinate can be a scalar, array-like or DataArray.
If DataArrays are passed as new coordates, their dimensions are
used for the broadcasting.
method: string, optional.
{'linear', 'nearest'} for multidimensional array,
{'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}
for 1-dimensional array. 'linear' is used by default.
assume_sorted: boolean, optional
If False, values of coordinates that are interpolated over can be
in any order and they are sorted first. If True, interpolated
coordinates are assumed to be an array of monotonically increasing
values.
kwargs: dictionary, optional
Additional keyword passed to scipy's interpolator.
**coords_kwarg : {dim: coordinate, ...}, optional
The keyword arguments form of ``coords``.
One of coords or coords_kwargs must be provided.
Returns
-------
interpolated: xr.Dataset
New dataset on the new coordinates.
Notes
-----
scipy is required.
See Also
--------
scipy.interpolate.interp1d
scipy.interpolate.interpn
"""
from . import missing
coords = either_dict_or_kwargs(coords, coords_kwargs, 'interp')
indexers = OrderedDict(self._validate_indexers(coords))
obj = self if assume_sorted else self.sortby([k for k in coords])
def maybe_variable(obj, k):
# workaround to get variable for dimension without coordinate.
try:
return obj._variables[k]
except KeyError:
return as_variable((k, range(obj.dims[k])))
def _validate_interp_indexer(x, new_x):
# In the case of datetimes, the restrictions placed on indexers
# used with interp are stronger than those which are placed on
# isel, so we need an additional check after _validate_indexers.
if (_contains_datetime_like_objects(x) and
not _contains_datetime_like_objects(new_x)):
raise TypeError('When interpolating over a datetime-like '
'coordinate, the coordinates to '
'interpolate to must be either datetime '
'strings or datetimes. '
'Instead got\n{}'.format(new_x))
else:
return (x, new_x)
variables = OrderedDict()
for name, var in obj._variables.items():
if name not in indexers:
if var.dtype.kind in 'uifc':
var_indexers = {
k: _validate_interp_indexer(maybe_variable(obj, k), v)
for k, v in indexers.items()
if k in var.dims
}
variables[name] = missing.interp(
var, var_indexers, method, **kwargs)
elif all(d not in indexers for d in var.dims):
# keep unrelated object array
variables[name] = var
coord_names = set(variables).intersection(obj._coord_names)
indexes = OrderedDict(
(k, v) for k, v in obj.indexes.items() if k not in indexers)
selected = self._replace_with_new_dims(
variables.copy(), coord_names, indexes=indexes)
# attach indexer as coordinate
variables.update(indexers)
indexes.update(
(k, v.to_index()) for k, v in indexers.items() if v.dims == (k,)
)
# Extract coordinates from indexers
coord_vars, new_indexes = (
selected._get_indexers_coords_and_indexes(coords))
variables.update(coord_vars)
indexes.update(new_indexes)
coord_names = (set(variables)
.intersection(obj._coord_names)
.union(coord_vars))
return self._replace_with_new_dims(
variables, coord_names, indexes=indexes) | def function[interp, parameter[self, coords, method, assume_sorted, kwargs]]:
constant[ Multidimensional interpolation of Dataset.
Parameters
----------
coords : dict, optional
Mapping from dimension names to the new coordinates.
New coordinate can be a scalar, array-like or DataArray.
If DataArrays are passed as new coordates, their dimensions are
used for the broadcasting.
method: string, optional.
{'linear', 'nearest'} for multidimensional array,
{'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}
for 1-dimensional array. 'linear' is used by default.
assume_sorted: boolean, optional
If False, values of coordinates that are interpolated over can be
in any order and they are sorted first. If True, interpolated
coordinates are assumed to be an array of monotonically increasing
values.
kwargs: dictionary, optional
Additional keyword passed to scipy's interpolator.
**coords_kwarg : {dim: coordinate, ...}, optional
The keyword arguments form of ``coords``.
One of coords or coords_kwargs must be provided.
Returns
-------
interpolated: xr.Dataset
New dataset on the new coordinates.
Notes
-----
scipy is required.
See Also
--------
scipy.interpolate.interp1d
scipy.interpolate.interpn
]
from relative_module[None] import module[missing]
variable[coords] assign[=] call[name[either_dict_or_kwargs], parameter[name[coords], name[coords_kwargs], constant[interp]]]
variable[indexers] assign[=] call[name[OrderedDict], parameter[call[name[self]._validate_indexers, parameter[name[coords]]]]]
variable[obj] assign[=] <ast.IfExp object at 0x7da18ede7940>
def function[maybe_variable, parameter[obj, k]]:
<ast.Try object at 0x7da18ede7fd0>
def function[_validate_interp_indexer, parameter[x, new_x]]:
if <ast.BoolOp object at 0x7da18ede5ed0> begin[:]
<ast.Raise object at 0x7da18ede7a30>
variable[variables] assign[=] call[name[OrderedDict], parameter[]]
for taget[tuple[[<ast.Name object at 0x7da18ede4f10>, <ast.Name object at 0x7da18ede6440>]]] in starred[call[name[obj]._variables.items, parameter[]]] begin[:]
if compare[name[name] <ast.NotIn object at 0x7da2590d7190> name[indexers]] begin[:]
if compare[name[var].dtype.kind in constant[uifc]] begin[:]
variable[var_indexers] assign[=] <ast.DictComp object at 0x7da204345d80>
call[name[variables]][name[name]] assign[=] call[name[missing].interp, parameter[name[var], name[var_indexers], name[method]]]
variable[coord_names] assign[=] call[call[name[set], parameter[name[variables]]].intersection, parameter[name[obj]._coord_names]]
variable[indexes] assign[=] call[name[OrderedDict], parameter[<ast.GeneratorExp object at 0x7da204344790>]]
variable[selected] assign[=] call[name[self]._replace_with_new_dims, parameter[call[name[variables].copy, parameter[]], name[coord_names]]]
call[name[variables].update, parameter[name[indexers]]]
call[name[indexes].update, parameter[<ast.GeneratorExp object at 0x7da204347670>]]
<ast.Tuple object at 0x7da204344370> assign[=] call[name[selected]._get_indexers_coords_and_indexes, parameter[name[coords]]]
call[name[variables].update, parameter[name[coord_vars]]]
call[name[indexes].update, parameter[name[new_indexes]]]
variable[coord_names] assign[=] call[call[call[name[set], parameter[name[variables]]].intersection, parameter[name[obj]._coord_names]].union, parameter[name[coord_vars]]]
return[call[name[self]._replace_with_new_dims, parameter[name[variables], name[coord_names]]]] | keyword[def] identifier[interp] ( identifier[self] , identifier[coords] = keyword[None] , identifier[method] = literal[string] , identifier[assume_sorted] = keyword[False] ,
identifier[kwargs] ={},** identifier[coords_kwargs] ):
literal[string]
keyword[from] . keyword[import] identifier[missing]
identifier[coords] = identifier[either_dict_or_kwargs] ( identifier[coords] , identifier[coords_kwargs] , literal[string] )
identifier[indexers] = identifier[OrderedDict] ( identifier[self] . identifier[_validate_indexers] ( identifier[coords] ))
identifier[obj] = identifier[self] keyword[if] identifier[assume_sorted] keyword[else] identifier[self] . identifier[sortby] ([ identifier[k] keyword[for] identifier[k] keyword[in] identifier[coords] ])
keyword[def] identifier[maybe_variable] ( identifier[obj] , identifier[k] ):
keyword[try] :
keyword[return] identifier[obj] . identifier[_variables] [ identifier[k] ]
keyword[except] identifier[KeyError] :
keyword[return] identifier[as_variable] (( identifier[k] , identifier[range] ( identifier[obj] . identifier[dims] [ identifier[k] ])))
keyword[def] identifier[_validate_interp_indexer] ( identifier[x] , identifier[new_x] ):
keyword[if] ( identifier[_contains_datetime_like_objects] ( identifier[x] ) keyword[and]
keyword[not] identifier[_contains_datetime_like_objects] ( identifier[new_x] )):
keyword[raise] identifier[TypeError] ( literal[string]
literal[string]
literal[string]
literal[string]
literal[string] . identifier[format] ( identifier[new_x] ))
keyword[else] :
keyword[return] ( identifier[x] , identifier[new_x] )
identifier[variables] = identifier[OrderedDict] ()
keyword[for] identifier[name] , identifier[var] keyword[in] identifier[obj] . identifier[_variables] . identifier[items] ():
keyword[if] identifier[name] keyword[not] keyword[in] identifier[indexers] :
keyword[if] identifier[var] . identifier[dtype] . identifier[kind] keyword[in] literal[string] :
identifier[var_indexers] ={
identifier[k] : identifier[_validate_interp_indexer] ( identifier[maybe_variable] ( identifier[obj] , identifier[k] ), identifier[v] )
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[indexers] . identifier[items] ()
keyword[if] identifier[k] keyword[in] identifier[var] . identifier[dims]
}
identifier[variables] [ identifier[name] ]= identifier[missing] . identifier[interp] (
identifier[var] , identifier[var_indexers] , identifier[method] ,** identifier[kwargs] )
keyword[elif] identifier[all] ( identifier[d] keyword[not] keyword[in] identifier[indexers] keyword[for] identifier[d] keyword[in] identifier[var] . identifier[dims] ):
identifier[variables] [ identifier[name] ]= identifier[var]
identifier[coord_names] = identifier[set] ( identifier[variables] ). identifier[intersection] ( identifier[obj] . identifier[_coord_names] )
identifier[indexes] = identifier[OrderedDict] (
( identifier[k] , identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[obj] . identifier[indexes] . identifier[items] () keyword[if] identifier[k] keyword[not] keyword[in] identifier[indexers] )
identifier[selected] = identifier[self] . identifier[_replace_with_new_dims] (
identifier[variables] . identifier[copy] (), identifier[coord_names] , identifier[indexes] = identifier[indexes] )
identifier[variables] . identifier[update] ( identifier[indexers] )
identifier[indexes] . identifier[update] (
( identifier[k] , identifier[v] . identifier[to_index] ()) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[indexers] . identifier[items] () keyword[if] identifier[v] . identifier[dims] ==( identifier[k] ,)
)
identifier[coord_vars] , identifier[new_indexes] =(
identifier[selected] . identifier[_get_indexers_coords_and_indexes] ( identifier[coords] ))
identifier[variables] . identifier[update] ( identifier[coord_vars] )
identifier[indexes] . identifier[update] ( identifier[new_indexes] )
identifier[coord_names] =( identifier[set] ( identifier[variables] )
. identifier[intersection] ( identifier[obj] . identifier[_coord_names] )
. identifier[union] ( identifier[coord_vars] ))
keyword[return] identifier[self] . identifier[_replace_with_new_dims] (
identifier[variables] , identifier[coord_names] , identifier[indexes] = identifier[indexes] ) | def interp(self, coords=None, method='linear', assume_sorted=False, kwargs={}, **coords_kwargs):
""" Multidimensional interpolation of Dataset.
Parameters
----------
coords : dict, optional
Mapping from dimension names to the new coordinates.
New coordinate can be a scalar, array-like or DataArray.
If DataArrays are passed as new coordates, their dimensions are
used for the broadcasting.
method: string, optional.
{'linear', 'nearest'} for multidimensional array,
{'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}
for 1-dimensional array. 'linear' is used by default.
assume_sorted: boolean, optional
If False, values of coordinates that are interpolated over can be
in any order and they are sorted first. If True, interpolated
coordinates are assumed to be an array of monotonically increasing
values.
kwargs: dictionary, optional
Additional keyword passed to scipy's interpolator.
**coords_kwarg : {dim: coordinate, ...}, optional
The keyword arguments form of ``coords``.
One of coords or coords_kwargs must be provided.
Returns
-------
interpolated: xr.Dataset
New dataset on the new coordinates.
Notes
-----
scipy is required.
See Also
--------
scipy.interpolate.interp1d
scipy.interpolate.interpn
"""
from . import missing
coords = either_dict_or_kwargs(coords, coords_kwargs, 'interp')
indexers = OrderedDict(self._validate_indexers(coords))
obj = self if assume_sorted else self.sortby([k for k in coords])
def maybe_variable(obj, k):
# workaround to get variable for dimension without coordinate.
try:
return obj._variables[k] # depends on [control=['try'], data=[]]
except KeyError:
return as_variable((k, range(obj.dims[k]))) # depends on [control=['except'], data=[]]
def _validate_interp_indexer(x, new_x):
# In the case of datetimes, the restrictions placed on indexers
# used with interp are stronger than those which are placed on
# isel, so we need an additional check after _validate_indexers.
if _contains_datetime_like_objects(x) and (not _contains_datetime_like_objects(new_x)):
raise TypeError('When interpolating over a datetime-like coordinate, the coordinates to interpolate to must be either datetime strings or datetimes. Instead got\n{}'.format(new_x)) # depends on [control=['if'], data=[]]
else:
return (x, new_x)
variables = OrderedDict()
for (name, var) in obj._variables.items():
if name not in indexers:
if var.dtype.kind in 'uifc':
var_indexers = {k: _validate_interp_indexer(maybe_variable(obj, k), v) for (k, v) in indexers.items() if k in var.dims}
variables[name] = missing.interp(var, var_indexers, method, **kwargs) # depends on [control=['if'], data=[]]
elif all((d not in indexers for d in var.dims)):
# keep unrelated object array
variables[name] = var # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['name', 'indexers']] # depends on [control=['for'], data=[]]
coord_names = set(variables).intersection(obj._coord_names)
indexes = OrderedDict(((k, v) for (k, v) in obj.indexes.items() if k not in indexers))
selected = self._replace_with_new_dims(variables.copy(), coord_names, indexes=indexes)
# attach indexer as coordinate
variables.update(indexers)
indexes.update(((k, v.to_index()) for (k, v) in indexers.items() if v.dims == (k,)))
# Extract coordinates from indexers
(coord_vars, new_indexes) = selected._get_indexers_coords_and_indexes(coords)
variables.update(coord_vars)
indexes.update(new_indexes)
coord_names = set(variables).intersection(obj._coord_names).union(coord_vars)
return self._replace_with_new_dims(variables, coord_names, indexes=indexes) |
def close(self):
"""Deletes temporary system objects/files. """
if self._tempDir is not None and os.path.isdir(self._tempDir):
self.logger.debug("Removing temporary directory %r", self._tempDir)
shutil.rmtree(self._tempDir)
self._tempDir = None
return | def function[close, parameter[self]]:
constant[Deletes temporary system objects/files. ]
if <ast.BoolOp object at 0x7da20c6c44c0> begin[:]
call[name[self].logger.debug, parameter[constant[Removing temporary directory %r], name[self]._tempDir]]
call[name[shutil].rmtree, parameter[name[self]._tempDir]]
name[self]._tempDir assign[=] constant[None]
return[None] | keyword[def] identifier[close] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_tempDir] keyword[is] keyword[not] keyword[None] keyword[and] identifier[os] . identifier[path] . identifier[isdir] ( identifier[self] . identifier[_tempDir] ):
identifier[self] . identifier[logger] . identifier[debug] ( literal[string] , identifier[self] . identifier[_tempDir] )
identifier[shutil] . identifier[rmtree] ( identifier[self] . identifier[_tempDir] )
identifier[self] . identifier[_tempDir] = keyword[None]
keyword[return] | def close(self):
"""Deletes temporary system objects/files. """
if self._tempDir is not None and os.path.isdir(self._tempDir):
self.logger.debug('Removing temporary directory %r', self._tempDir)
shutil.rmtree(self._tempDir)
self._tempDir = None # depends on [control=['if'], data=[]]
return |
def parse_specification(db_specification):
"""
Create a db specification derived from a
dataview of a db in which the byte layout
is specified
"""
parsed_db_specification = OrderedDict()
for line in db_specification.split('\n'):
if line and not line.startswith('#'):
row = line.split('#')[0] # remove trailing comment
index, var_name, _type = row.split()
parsed_db_specification[var_name] = (index, _type)
return parsed_db_specification | def function[parse_specification, parameter[db_specification]]:
constant[
Create a db specification derived from a
dataview of a db in which the byte layout
is specified
]
variable[parsed_db_specification] assign[=] call[name[OrderedDict], parameter[]]
for taget[name[line]] in starred[call[name[db_specification].split, parameter[constant[
]]]] begin[:]
if <ast.BoolOp object at 0x7da204565b70> begin[:]
variable[row] assign[=] call[call[name[line].split, parameter[constant[#]]]][constant[0]]
<ast.Tuple object at 0x7da2045648e0> assign[=] call[name[row].split, parameter[]]
call[name[parsed_db_specification]][name[var_name]] assign[=] tuple[[<ast.Name object at 0x7da2045648b0>, <ast.Name object at 0x7da204565540>]]
return[name[parsed_db_specification]] | keyword[def] identifier[parse_specification] ( identifier[db_specification] ):
literal[string]
identifier[parsed_db_specification] = identifier[OrderedDict] ()
keyword[for] identifier[line] keyword[in] identifier[db_specification] . identifier[split] ( literal[string] ):
keyword[if] identifier[line] keyword[and] keyword[not] identifier[line] . identifier[startswith] ( literal[string] ):
identifier[row] = identifier[line] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[index] , identifier[var_name] , identifier[_type] = identifier[row] . identifier[split] ()
identifier[parsed_db_specification] [ identifier[var_name] ]=( identifier[index] , identifier[_type] )
keyword[return] identifier[parsed_db_specification] | def parse_specification(db_specification):
"""
Create a db specification derived from a
dataview of a db in which the byte layout
is specified
"""
parsed_db_specification = OrderedDict()
for line in db_specification.split('\n'):
if line and (not line.startswith('#')):
row = line.split('#')[0] # remove trailing comment
(index, var_name, _type) = row.split()
parsed_db_specification[var_name] = (index, _type) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
return parsed_db_specification |
def set_exp(self, claim='exp', from_time=None, lifetime=None):
"""
Updates the expiration time of a token.
"""
if from_time is None:
from_time = self.current_time
if lifetime is None:
lifetime = self.lifetime
self.payload[claim] = datetime_to_epoch(from_time + lifetime) | def function[set_exp, parameter[self, claim, from_time, lifetime]]:
constant[
Updates the expiration time of a token.
]
if compare[name[from_time] is constant[None]] begin[:]
variable[from_time] assign[=] name[self].current_time
if compare[name[lifetime] is constant[None]] begin[:]
variable[lifetime] assign[=] name[self].lifetime
call[name[self].payload][name[claim]] assign[=] call[name[datetime_to_epoch], parameter[binary_operation[name[from_time] + name[lifetime]]]] | keyword[def] identifier[set_exp] ( identifier[self] , identifier[claim] = literal[string] , identifier[from_time] = keyword[None] , identifier[lifetime] = keyword[None] ):
literal[string]
keyword[if] identifier[from_time] keyword[is] keyword[None] :
identifier[from_time] = identifier[self] . identifier[current_time]
keyword[if] identifier[lifetime] keyword[is] keyword[None] :
identifier[lifetime] = identifier[self] . identifier[lifetime]
identifier[self] . identifier[payload] [ identifier[claim] ]= identifier[datetime_to_epoch] ( identifier[from_time] + identifier[lifetime] ) | def set_exp(self, claim='exp', from_time=None, lifetime=None):
"""
Updates the expiration time of a token.
"""
if from_time is None:
from_time = self.current_time # depends on [control=['if'], data=['from_time']]
if lifetime is None:
lifetime = self.lifetime # depends on [control=['if'], data=['lifetime']]
self.payload[claim] = datetime_to_epoch(from_time + lifetime) |
def convolve(data, h, res_g=None, sub_blocks=None):
"""
convolves 1d-3d data with kernel h
data and h can either be numpy arrays or gpu buffer objects (OCLArray,
which must be float32 then)
boundary conditions are clamping to zero at edge.
"""
if not len(data.shape) in [1, 2, 3]:
raise ValueError("dim = %s not supported" % (len(data.shape)))
if len(data.shape) != len(h.shape):
raise ValueError("dimemnsion of data (%s) and h (%s) are different" % (len(data.shape), len(h.shape)))
if isinstance(data, OCLArray) and isinstance(h, OCLArray):
return _convolve_buf(data, h, res_g)
elif isinstance(data, np.ndarray) and isinstance(h, np.ndarray):
if sub_blocks == (1,) * len(data.shape) or sub_blocks is None:
return _convolve_np(data, h)
else:
# cut the image into tile and operate on every of them
N_sub = [int(np.ceil(1. * n / s)) for n, s in zip(data.shape, sub_blocks)]
Npads = [int(s / 2) for s in h.shape]
res = np.empty(data.shape, np.float32)
for data_tile, data_s_src, data_s_dest \
in tile_iterator(data, blocksize=N_sub,
padsize=Npads,
mode="constant"):
res_tile = _convolve_np(data_tile.copy(),
h)
res[data_s_src] = res_tile[data_s_dest]
return res
else:
raise TypeError("unknown types (%s, %s)" % (type(data), type(h))) | def function[convolve, parameter[data, h, res_g, sub_blocks]]:
constant[
convolves 1d-3d data with kernel h
data and h can either be numpy arrays or gpu buffer objects (OCLArray,
which must be float32 then)
boundary conditions are clamping to zero at edge.
]
if <ast.UnaryOp object at 0x7da18f00dbd0> begin[:]
<ast.Raise object at 0x7da18f00c610>
if compare[call[name[len], parameter[name[data].shape]] not_equal[!=] call[name[len], parameter[name[h].shape]]] begin[:]
<ast.Raise object at 0x7da18f00feb0>
if <ast.BoolOp object at 0x7da18f00e8c0> begin[:]
return[call[name[_convolve_buf], parameter[name[data], name[h], name[res_g]]]] | keyword[def] identifier[convolve] ( identifier[data] , identifier[h] , identifier[res_g] = keyword[None] , identifier[sub_blocks] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[len] ( identifier[data] . identifier[shape] ) keyword[in] [ literal[int] , literal[int] , literal[int] ]:
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[len] ( identifier[data] . identifier[shape] )))
keyword[if] identifier[len] ( identifier[data] . identifier[shape] )!= identifier[len] ( identifier[h] . identifier[shape] ):
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[len] ( identifier[data] . identifier[shape] ), identifier[len] ( identifier[h] . identifier[shape] )))
keyword[if] identifier[isinstance] ( identifier[data] , identifier[OCLArray] ) keyword[and] identifier[isinstance] ( identifier[h] , identifier[OCLArray] ):
keyword[return] identifier[_convolve_buf] ( identifier[data] , identifier[h] , identifier[res_g] )
keyword[elif] identifier[isinstance] ( identifier[data] , identifier[np] . identifier[ndarray] ) keyword[and] identifier[isinstance] ( identifier[h] , identifier[np] . identifier[ndarray] ):
keyword[if] identifier[sub_blocks] ==( literal[int] ,)* identifier[len] ( identifier[data] . identifier[shape] ) keyword[or] identifier[sub_blocks] keyword[is] keyword[None] :
keyword[return] identifier[_convolve_np] ( identifier[data] , identifier[h] )
keyword[else] :
identifier[N_sub] =[ identifier[int] ( identifier[np] . identifier[ceil] ( literal[int] * identifier[n] / identifier[s] )) keyword[for] identifier[n] , identifier[s] keyword[in] identifier[zip] ( identifier[data] . identifier[shape] , identifier[sub_blocks] )]
identifier[Npads] =[ identifier[int] ( identifier[s] / literal[int] ) keyword[for] identifier[s] keyword[in] identifier[h] . identifier[shape] ]
identifier[res] = identifier[np] . identifier[empty] ( identifier[data] . identifier[shape] , identifier[np] . identifier[float32] )
keyword[for] identifier[data_tile] , identifier[data_s_src] , identifier[data_s_dest] keyword[in] identifier[tile_iterator] ( identifier[data] , identifier[blocksize] = identifier[N_sub] ,
identifier[padsize] = identifier[Npads] ,
identifier[mode] = literal[string] ):
identifier[res_tile] = identifier[_convolve_np] ( identifier[data_tile] . identifier[copy] (),
identifier[h] )
identifier[res] [ identifier[data_s_src] ]= identifier[res_tile] [ identifier[data_s_dest] ]
keyword[return] identifier[res]
keyword[else] :
keyword[raise] identifier[TypeError] ( literal[string] %( identifier[type] ( identifier[data] ), identifier[type] ( identifier[h] ))) | def convolve(data, h, res_g=None, sub_blocks=None):
"""
convolves 1d-3d data with kernel h
data and h can either be numpy arrays or gpu buffer objects (OCLArray,
which must be float32 then)
boundary conditions are clamping to zero at edge.
"""
if not len(data.shape) in [1, 2, 3]:
raise ValueError('dim = %s not supported' % len(data.shape)) # depends on [control=['if'], data=[]]
if len(data.shape) != len(h.shape):
raise ValueError('dimemnsion of data (%s) and h (%s) are different' % (len(data.shape), len(h.shape))) # depends on [control=['if'], data=[]]
if isinstance(data, OCLArray) and isinstance(h, OCLArray):
return _convolve_buf(data, h, res_g) # depends on [control=['if'], data=[]]
elif isinstance(data, np.ndarray) and isinstance(h, np.ndarray):
if sub_blocks == (1,) * len(data.shape) or sub_blocks is None:
return _convolve_np(data, h) # depends on [control=['if'], data=[]]
else:
# cut the image into tile and operate on every of them
N_sub = [int(np.ceil(1.0 * n / s)) for (n, s) in zip(data.shape, sub_blocks)]
Npads = [int(s / 2) for s in h.shape]
res = np.empty(data.shape, np.float32)
for (data_tile, data_s_src, data_s_dest) in tile_iterator(data, blocksize=N_sub, padsize=Npads, mode='constant'):
res_tile = _convolve_np(data_tile.copy(), h)
res[data_s_src] = res_tile[data_s_dest] # depends on [control=['for'], data=[]]
return res # depends on [control=['if'], data=[]]
else:
raise TypeError('unknown types (%s, %s)' % (type(data), type(h))) |
def join_left(self, right_table=None, fields=None, condition=None, join_type='LEFT JOIN',
schema=None, left_table=None, extract_fields=True, prefix_fields=False,
field_prefix=None, allow_duplicates=False):
"""
Wrapper for ``self.join`` with a default join of 'LEFT JOIN'
:type right_table: str or dict or :class:`Table <querybuilder.tables.Table>`
:param right_table: The table being joined with. This can be a string of the table
name, a dict of {'alias': table}, or a ``Table`` instance
:type fields: str or tuple or list or :class:`Field <querybuilder.fields.Field>`
:param fields: The fields to select from ``right_table``. Defaults to `None`. This can be
a single field, a tuple of fields, or a list of fields. Each field can be a string
or ``Field`` instance
:type condition: str
:param condition: The join condition specifying the fields being joined. If the two tables being
joined are instances of ``ModelTable`` then the condition should be created automatically.
:type join_type: str
:param join_type: The type of join (JOIN, LEFT JOIN, INNER JOIN, etc). Defaults to 'JOIN'
:type schema: str
:param schema: This is not implemented, but it will be a string of the db schema name
:type left_table: str or dict or :class:`Table <querybuilder.tables.Table>`
:param left_table: The left table being joined with. This can be a string of the table
name, a dict of {'alias': table}, or a ``Table`` instance. Defaults to the first table
in the query.
:type extract_fields: bool
:param extract_fields: If True and joining with a ``ModelTable``, then '*'
fields will be converted to individual fields for each column in the table. Defaults
to True.
:type prefix_fields: bool
:param prefix_fields: If True, then the joined table will have each of its field names
prefixed with the field_prefix. If not field_prefix is specified, a name will be
generated based on the join field name. This is usually used with nesting results
in order to create models in python or javascript. Defaults to True.
:type field_prefix: str
:param field_prefix: The field prefix to be used in front of each field name if prefix_fields
is set to True. If no field_prefix is set, one will be automatically created based on
the join field name.
:return: self
:rtype: :class:`Query <querybuilder.query.Query>`
"""
return self.join(
right_table=right_table,
fields=fields,
condition=condition,
join_type=join_type,
schema=schema,
left_table=left_table,
extract_fields=extract_fields,
prefix_fields=prefix_fields,
field_prefix=field_prefix,
allow_duplicates=allow_duplicates
) | def function[join_left, parameter[self, right_table, fields, condition, join_type, schema, left_table, extract_fields, prefix_fields, field_prefix, allow_duplicates]]:
constant[
Wrapper for ``self.join`` with a default join of 'LEFT JOIN'
:type right_table: str or dict or :class:`Table <querybuilder.tables.Table>`
:param right_table: The table being joined with. This can be a string of the table
name, a dict of {'alias': table}, or a ``Table`` instance
:type fields: str or tuple or list or :class:`Field <querybuilder.fields.Field>`
:param fields: The fields to select from ``right_table``. Defaults to `None`. This can be
a single field, a tuple of fields, or a list of fields. Each field can be a string
or ``Field`` instance
:type condition: str
:param condition: The join condition specifying the fields being joined. If the two tables being
joined are instances of ``ModelTable`` then the condition should be created automatically.
:type join_type: str
:param join_type: The type of join (JOIN, LEFT JOIN, INNER JOIN, etc). Defaults to 'JOIN'
:type schema: str
:param schema: This is not implemented, but it will be a string of the db schema name
:type left_table: str or dict or :class:`Table <querybuilder.tables.Table>`
:param left_table: The left table being joined with. This can be a string of the table
name, a dict of {'alias': table}, or a ``Table`` instance. Defaults to the first table
in the query.
:type extract_fields: bool
:param extract_fields: If True and joining with a ``ModelTable``, then '*'
fields will be converted to individual fields for each column in the table. Defaults
to True.
:type prefix_fields: bool
:param prefix_fields: If True, then the joined table will have each of its field names
prefixed with the field_prefix. If not field_prefix is specified, a name will be
generated based on the join field name. This is usually used with nesting results
in order to create models in python or javascript. Defaults to True.
:type field_prefix: str
:param field_prefix: The field prefix to be used in front of each field name if prefix_fields
is set to True. If no field_prefix is set, one will be automatically created based on
the join field name.
:return: self
:rtype: :class:`Query <querybuilder.query.Query>`
]
return[call[name[self].join, parameter[]]] | keyword[def] identifier[join_left] ( identifier[self] , identifier[right_table] = keyword[None] , identifier[fields] = keyword[None] , identifier[condition] = keyword[None] , identifier[join_type] = literal[string] ,
identifier[schema] = keyword[None] , identifier[left_table] = keyword[None] , identifier[extract_fields] = keyword[True] , identifier[prefix_fields] = keyword[False] ,
identifier[field_prefix] = keyword[None] , identifier[allow_duplicates] = keyword[False] ):
literal[string]
keyword[return] identifier[self] . identifier[join] (
identifier[right_table] = identifier[right_table] ,
identifier[fields] = identifier[fields] ,
identifier[condition] = identifier[condition] ,
identifier[join_type] = identifier[join_type] ,
identifier[schema] = identifier[schema] ,
identifier[left_table] = identifier[left_table] ,
identifier[extract_fields] = identifier[extract_fields] ,
identifier[prefix_fields] = identifier[prefix_fields] ,
identifier[field_prefix] = identifier[field_prefix] ,
identifier[allow_duplicates] = identifier[allow_duplicates]
) | def join_left(self, right_table=None, fields=None, condition=None, join_type='LEFT JOIN', schema=None, left_table=None, extract_fields=True, prefix_fields=False, field_prefix=None, allow_duplicates=False):
"""
Wrapper for ``self.join`` with a default join of 'LEFT JOIN'
:type right_table: str or dict or :class:`Table <querybuilder.tables.Table>`
:param right_table: The table being joined with. This can be a string of the table
name, a dict of {'alias': table}, or a ``Table`` instance
:type fields: str or tuple or list or :class:`Field <querybuilder.fields.Field>`
:param fields: The fields to select from ``right_table``. Defaults to `None`. This can be
a single field, a tuple of fields, or a list of fields. Each field can be a string
or ``Field`` instance
:type condition: str
:param condition: The join condition specifying the fields being joined. If the two tables being
joined are instances of ``ModelTable`` then the condition should be created automatically.
:type join_type: str
:param join_type: The type of join (JOIN, LEFT JOIN, INNER JOIN, etc). Defaults to 'JOIN'
:type schema: str
:param schema: This is not implemented, but it will be a string of the db schema name
:type left_table: str or dict or :class:`Table <querybuilder.tables.Table>`
:param left_table: The left table being joined with. This can be a string of the table
name, a dict of {'alias': table}, or a ``Table`` instance. Defaults to the first table
in the query.
:type extract_fields: bool
:param extract_fields: If True and joining with a ``ModelTable``, then '*'
fields will be converted to individual fields for each column in the table. Defaults
to True.
:type prefix_fields: bool
:param prefix_fields: If True, then the joined table will have each of its field names
prefixed with the field_prefix. If not field_prefix is specified, a name will be
generated based on the join field name. This is usually used with nesting results
in order to create models in python or javascript. Defaults to True.
:type field_prefix: str
:param field_prefix: The field prefix to be used in front of each field name if prefix_fields
is set to True. If no field_prefix is set, one will be automatically created based on
the join field name.
:return: self
:rtype: :class:`Query <querybuilder.query.Query>`
"""
return self.join(right_table=right_table, fields=fields, condition=condition, join_type=join_type, schema=schema, left_table=left_table, extract_fields=extract_fields, prefix_fields=prefix_fields, field_prefix=field_prefix, allow_duplicates=allow_duplicates) |
def _setSmsMemory(self, readDelete=None, write=None):
""" Set the current SMS memory to use for read/delete/write operations """
# Switch to the correct memory type if required
if write != None and write != self._smsMemWrite:
self.write()
readDel = readDelete or self._smsMemReadDelete
self.write('AT+CPMS="{0}","{1}"'.format(readDel, write))
self._smsMemReadDelete = readDel
self._smsMemWrite = write
elif readDelete != None and readDelete != self._smsMemReadDelete:
self.write('AT+CPMS="{0}"'.format(readDelete))
self._smsMemReadDelete = readDelete | def function[_setSmsMemory, parameter[self, readDelete, write]]:
constant[ Set the current SMS memory to use for read/delete/write operations ]
if <ast.BoolOp object at 0x7da207f9a800> begin[:]
call[name[self].write, parameter[]]
variable[readDel] assign[=] <ast.BoolOp object at 0x7da207f99120>
call[name[self].write, parameter[call[constant[AT+CPMS="{0}","{1}"].format, parameter[name[readDel], name[write]]]]]
name[self]._smsMemReadDelete assign[=] name[readDel]
name[self]._smsMemWrite assign[=] name[write] | keyword[def] identifier[_setSmsMemory] ( identifier[self] , identifier[readDelete] = keyword[None] , identifier[write] = keyword[None] ):
literal[string]
keyword[if] identifier[write] != keyword[None] keyword[and] identifier[write] != identifier[self] . identifier[_smsMemWrite] :
identifier[self] . identifier[write] ()
identifier[readDel] = identifier[readDelete] keyword[or] identifier[self] . identifier[_smsMemReadDelete]
identifier[self] . identifier[write] ( literal[string] . identifier[format] ( identifier[readDel] , identifier[write] ))
identifier[self] . identifier[_smsMemReadDelete] = identifier[readDel]
identifier[self] . identifier[_smsMemWrite] = identifier[write]
keyword[elif] identifier[readDelete] != keyword[None] keyword[and] identifier[readDelete] != identifier[self] . identifier[_smsMemReadDelete] :
identifier[self] . identifier[write] ( literal[string] . identifier[format] ( identifier[readDelete] ))
identifier[self] . identifier[_smsMemReadDelete] = identifier[readDelete] | def _setSmsMemory(self, readDelete=None, write=None):
""" Set the current SMS memory to use for read/delete/write operations """
# Switch to the correct memory type if required
if write != None and write != self._smsMemWrite:
self.write()
readDel = readDelete or self._smsMemReadDelete
self.write('AT+CPMS="{0}","{1}"'.format(readDel, write))
self._smsMemReadDelete = readDel
self._smsMemWrite = write # depends on [control=['if'], data=[]]
elif readDelete != None and readDelete != self._smsMemReadDelete:
self.write('AT+CPMS="{0}"'.format(readDelete))
self._smsMemReadDelete = readDelete # depends on [control=['if'], data=[]] |
def generate_catalogue(args, parser):
"""Generates and saves a catalogue file."""
catalogue = tacl.Catalogue()
catalogue.generate(args.corpus, args.label)
catalogue.save(args.catalogue) | def function[generate_catalogue, parameter[args, parser]]:
constant[Generates and saves a catalogue file.]
variable[catalogue] assign[=] call[name[tacl].Catalogue, parameter[]]
call[name[catalogue].generate, parameter[name[args].corpus, name[args].label]]
call[name[catalogue].save, parameter[name[args].catalogue]] | keyword[def] identifier[generate_catalogue] ( identifier[args] , identifier[parser] ):
literal[string]
identifier[catalogue] = identifier[tacl] . identifier[Catalogue] ()
identifier[catalogue] . identifier[generate] ( identifier[args] . identifier[corpus] , identifier[args] . identifier[label] )
identifier[catalogue] . identifier[save] ( identifier[args] . identifier[catalogue] ) | def generate_catalogue(args, parser):
"""Generates and saves a catalogue file."""
catalogue = tacl.Catalogue()
catalogue.generate(args.corpus, args.label)
catalogue.save(args.catalogue) |
def hist_path(self):
"""Absolute path of the HIST file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._hist_path
except AttributeError:
path = self.outdir.has_abiext("HIST")
if path: self._hist_path = path
return path | def function[hist_path, parameter[self]]:
constant[Absolute path of the HIST file. Empty string if file is not present.]
<ast.Try object at 0x7da20c6a95a0> | keyword[def] identifier[hist_path] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[_hist_path]
keyword[except] identifier[AttributeError] :
identifier[path] = identifier[self] . identifier[outdir] . identifier[has_abiext] ( literal[string] )
keyword[if] identifier[path] : identifier[self] . identifier[_hist_path] = identifier[path]
keyword[return] identifier[path] | def hist_path(self):
"""Absolute path of the HIST file. Empty string if file is not present."""
# Lazy property to avoid multiple calls to has_abiext.
try:
return self._hist_path # depends on [control=['try'], data=[]]
except AttributeError:
path = self.outdir.has_abiext('HIST')
if path:
self._hist_path = path # depends on [control=['if'], data=[]]
return path # depends on [control=['except'], data=[]] |
def is_possible_short_number_for_region(short_numobj, region_dialing_from):
"""Check whether a short number is a possible number when dialled from a
region. This provides a more lenient check than
is_valid_short_number_for_region.
Arguments:
short_numobj -- the short number to check as a PhoneNumber object.
region_dialing_from -- the region from which the number is dialed
Return whether the number is a possible short number.
"""
if not _region_dialing_from_matches_number(short_numobj, region_dialing_from):
return False
metadata = PhoneMetadata.short_metadata_for_region(region_dialing_from)
if metadata is None: # pragma no cover
return False
short_numlen = len(national_significant_number(short_numobj))
return (short_numlen in metadata.general_desc.possible_length) | def function[is_possible_short_number_for_region, parameter[short_numobj, region_dialing_from]]:
constant[Check whether a short number is a possible number when dialled from a
region. This provides a more lenient check than
is_valid_short_number_for_region.
Arguments:
short_numobj -- the short number to check as a PhoneNumber object.
region_dialing_from -- the region from which the number is dialed
Return whether the number is a possible short number.
]
if <ast.UnaryOp object at 0x7da1b188d570> begin[:]
return[constant[False]]
variable[metadata] assign[=] call[name[PhoneMetadata].short_metadata_for_region, parameter[name[region_dialing_from]]]
if compare[name[metadata] is constant[None]] begin[:]
return[constant[False]]
variable[short_numlen] assign[=] call[name[len], parameter[call[name[national_significant_number], parameter[name[short_numobj]]]]]
return[compare[name[short_numlen] in name[metadata].general_desc.possible_length]] | keyword[def] identifier[is_possible_short_number_for_region] ( identifier[short_numobj] , identifier[region_dialing_from] ):
literal[string]
keyword[if] keyword[not] identifier[_region_dialing_from_matches_number] ( identifier[short_numobj] , identifier[region_dialing_from] ):
keyword[return] keyword[False]
identifier[metadata] = identifier[PhoneMetadata] . identifier[short_metadata_for_region] ( identifier[region_dialing_from] )
keyword[if] identifier[metadata] keyword[is] keyword[None] :
keyword[return] keyword[False]
identifier[short_numlen] = identifier[len] ( identifier[national_significant_number] ( identifier[short_numobj] ))
keyword[return] ( identifier[short_numlen] keyword[in] identifier[metadata] . identifier[general_desc] . identifier[possible_length] ) | def is_possible_short_number_for_region(short_numobj, region_dialing_from):
"""Check whether a short number is a possible number when dialled from a
region. This provides a more lenient check than
is_valid_short_number_for_region.
Arguments:
short_numobj -- the short number to check as a PhoneNumber object.
region_dialing_from -- the region from which the number is dialed
Return whether the number is a possible short number.
"""
if not _region_dialing_from_matches_number(short_numobj, region_dialing_from):
return False # depends on [control=['if'], data=[]]
metadata = PhoneMetadata.short_metadata_for_region(region_dialing_from)
if metadata is None: # pragma no cover
return False # depends on [control=['if'], data=[]]
short_numlen = len(national_significant_number(short_numobj))
return short_numlen in metadata.general_desc.possible_length |
def _long_to_bytes(self, long_value):
"""
Turns a long value into its byte string equivalent.
:param long_value: the long value to be returned as a byte string
:return: a byte string equivalent of a long value
"""
_byte_string = b''
pack = struct.pack
while long_value > 0:
_byte_string = pack(b'>I', long_value & 0xffffffff) + _byte_string
long_value = long_value >> 32
for i in range(len(_byte_string)):
if _byte_string[i] != b'\000'[0]:
break
else:
_byte_string = b'\000'
i = 0
_byte_string = _byte_string[i:]
return _byte_string | def function[_long_to_bytes, parameter[self, long_value]]:
constant[
Turns a long value into its byte string equivalent.
:param long_value: the long value to be returned as a byte string
:return: a byte string equivalent of a long value
]
variable[_byte_string] assign[=] constant[b'']
variable[pack] assign[=] name[struct].pack
while compare[name[long_value] greater[>] constant[0]] begin[:]
variable[_byte_string] assign[=] binary_operation[call[name[pack], parameter[constant[b'>I'], binary_operation[name[long_value] <ast.BitAnd object at 0x7da2590d6b60> constant[4294967295]]]] + name[_byte_string]]
variable[long_value] assign[=] binary_operation[name[long_value] <ast.RShift object at 0x7da2590d6a40> constant[32]]
for taget[name[i]] in starred[call[name[range], parameter[call[name[len], parameter[name[_byte_string]]]]]] begin[:]
if compare[call[name[_byte_string]][name[i]] not_equal[!=] call[constant[b'\x00']][constant[0]]] begin[:]
break
variable[_byte_string] assign[=] call[name[_byte_string]][<ast.Slice object at 0x7da18bc72bf0>]
return[name[_byte_string]] | keyword[def] identifier[_long_to_bytes] ( identifier[self] , identifier[long_value] ):
literal[string]
identifier[_byte_string] = literal[string]
identifier[pack] = identifier[struct] . identifier[pack]
keyword[while] identifier[long_value] > literal[int] :
identifier[_byte_string] = identifier[pack] ( literal[string] , identifier[long_value] & literal[int] )+ identifier[_byte_string]
identifier[long_value] = identifier[long_value] >> literal[int]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[len] ( identifier[_byte_string] )):
keyword[if] identifier[_byte_string] [ identifier[i] ]!= literal[string] [ literal[int] ]:
keyword[break]
keyword[else] :
identifier[_byte_string] = literal[string]
identifier[i] = literal[int]
identifier[_byte_string] = identifier[_byte_string] [ identifier[i] :]
keyword[return] identifier[_byte_string] | def _long_to_bytes(self, long_value):
"""
Turns a long value into its byte string equivalent.
:param long_value: the long value to be returned as a byte string
:return: a byte string equivalent of a long value
"""
_byte_string = b''
pack = struct.pack
while long_value > 0:
_byte_string = pack(b'>I', long_value & 4294967295) + _byte_string
long_value = long_value >> 32 # depends on [control=['while'], data=['long_value']]
for i in range(len(_byte_string)):
if _byte_string[i] != b'\x00'[0]:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
else:
_byte_string = b'\x00'
i = 0
_byte_string = _byte_string[i:]
return _byte_string |
def translate(self, text: str, lang: str, form: FORMAT = FORMAT.PLAIN_TEXT):
"""
Translates a text
:param text:
text to translate, the maximum size of the text being passed is 10,000 characters
:param lang:
The translation direction. for example: "en-ru"
or target lang. for example, "ru"
:param form:
Possible values:
FORMAT.PLAIN_TEXT - Text without markup (default value).
FORMAT.HTML - Text in HTML format.
:return: str[]
"""
encodedtext = urllib.parse.quote(text)
args = "&text=" + encodedtext + "&lang=" + lang + "&format="
if form == FORMAT.PLAIN_TEXT:
args += "plain"
else:
args += "html"
r = self.yandex_translate_request("translate", args)
self.handle_errors(r)
return r.json()["text"] | def function[translate, parameter[self, text, lang, form]]:
constant[
Translates a text
:param text:
text to translate, the maximum size of the text being passed is 10,000 characters
:param lang:
The translation direction. for example: "en-ru"
or target lang. for example, "ru"
:param form:
Possible values:
FORMAT.PLAIN_TEXT - Text without markup (default value).
FORMAT.HTML - Text in HTML format.
:return: str[]
]
variable[encodedtext] assign[=] call[name[urllib].parse.quote, parameter[name[text]]]
variable[args] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[constant[&text=] + name[encodedtext]] + constant[&lang=]] + name[lang]] + constant[&format=]]
if compare[name[form] equal[==] name[FORMAT].PLAIN_TEXT] begin[:]
<ast.AugAssign object at 0x7da1b2653c10>
variable[r] assign[=] call[name[self].yandex_translate_request, parameter[constant[translate], name[args]]]
call[name[self].handle_errors, parameter[name[r]]]
return[call[call[name[r].json, parameter[]]][constant[text]]] | keyword[def] identifier[translate] ( identifier[self] , identifier[text] : identifier[str] , identifier[lang] : identifier[str] , identifier[form] : identifier[FORMAT] = identifier[FORMAT] . identifier[PLAIN_TEXT] ):
literal[string]
identifier[encodedtext] = identifier[urllib] . identifier[parse] . identifier[quote] ( identifier[text] )
identifier[args] = literal[string] + identifier[encodedtext] + literal[string] + identifier[lang] + literal[string]
keyword[if] identifier[form] == identifier[FORMAT] . identifier[PLAIN_TEXT] :
identifier[args] += literal[string]
keyword[else] :
identifier[args] += literal[string]
identifier[r] = identifier[self] . identifier[yandex_translate_request] ( literal[string] , identifier[args] )
identifier[self] . identifier[handle_errors] ( identifier[r] )
keyword[return] identifier[r] . identifier[json] ()[ literal[string] ] | def translate(self, text: str, lang: str, form: FORMAT=FORMAT.PLAIN_TEXT):
"""
Translates a text
:param text:
text to translate, the maximum size of the text being passed is 10,000 characters
:param lang:
The translation direction. for example: "en-ru"
or target lang. for example, "ru"
:param form:
Possible values:
FORMAT.PLAIN_TEXT - Text without markup (default value).
FORMAT.HTML - Text in HTML format.
:return: str[]
"""
encodedtext = urllib.parse.quote(text)
args = '&text=' + encodedtext + '&lang=' + lang + '&format='
if form == FORMAT.PLAIN_TEXT:
args += 'plain' # depends on [control=['if'], data=[]]
else:
args += 'html'
r = self.yandex_translate_request('translate', args)
self.handle_errors(r)
return r.json()['text'] |
def commit_log(self, log_json):
""" Commits a run log to the Mongo backend.
Due to limitations of maximum document size in Mongo,
stdout and stderr logs are truncated to a maximum size for
each task.
"""
log_json['_id'] = log_json['log_id']
append = {'save_date': datetime.utcnow()}
for task_name, values in log_json.get('tasks', {}).items():
for key, size in TRUNCATE_LOG_SIZES_CHAR.iteritems():
if isinstance(values.get(key, None), str):
if len(values[key]) > size:
values[key] = '\n'.join([values[key][:size/2],
'DAGOBAH STREAM SPLIT',
values[key][-1 * (size/2):]])
self.log_coll.save(dict(log_json.items() + append.items())) | def function[commit_log, parameter[self, log_json]]:
constant[ Commits a run log to the Mongo backend.
Due to limitations of maximum document size in Mongo,
stdout and stderr logs are truncated to a maximum size for
each task.
]
call[name[log_json]][constant[_id]] assign[=] call[name[log_json]][constant[log_id]]
variable[append] assign[=] dictionary[[<ast.Constant object at 0x7da1b0be2ce0>], [<ast.Call object at 0x7da1b0be17b0>]]
for taget[tuple[[<ast.Name object at 0x7da1b0be0e80>, <ast.Name object at 0x7da1b0be1660>]]] in starred[call[call[name[log_json].get, parameter[constant[tasks], dictionary[[], []]]].items, parameter[]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b0be2080>, <ast.Name object at 0x7da1b0be0df0>]]] in starred[call[name[TRUNCATE_LOG_SIZES_CHAR].iteritems, parameter[]]] begin[:]
if call[name[isinstance], parameter[call[name[values].get, parameter[name[key], constant[None]]], name[str]]] begin[:]
if compare[call[name[len], parameter[call[name[values]][name[key]]]] greater[>] name[size]] begin[:]
call[name[values]][name[key]] assign[=] call[constant[
].join, parameter[list[[<ast.Subscript object at 0x7da1b0be07c0>, <ast.Constant object at 0x7da1b0be0d00>, <ast.Subscript object at 0x7da1b0be1de0>]]]]
call[name[self].log_coll.save, parameter[call[name[dict], parameter[binary_operation[call[name[log_json].items, parameter[]] + call[name[append].items, parameter[]]]]]]] | keyword[def] identifier[commit_log] ( identifier[self] , identifier[log_json] ):
literal[string]
identifier[log_json] [ literal[string] ]= identifier[log_json] [ literal[string] ]
identifier[append] ={ literal[string] : identifier[datetime] . identifier[utcnow] ()}
keyword[for] identifier[task_name] , identifier[values] keyword[in] identifier[log_json] . identifier[get] ( literal[string] ,{}). identifier[items] ():
keyword[for] identifier[key] , identifier[size] keyword[in] identifier[TRUNCATE_LOG_SIZES_CHAR] . identifier[iteritems] ():
keyword[if] identifier[isinstance] ( identifier[values] . identifier[get] ( identifier[key] , keyword[None] ), identifier[str] ):
keyword[if] identifier[len] ( identifier[values] [ identifier[key] ])> identifier[size] :
identifier[values] [ identifier[key] ]= literal[string] . identifier[join] ([ identifier[values] [ identifier[key] ][: identifier[size] / literal[int] ],
literal[string] ,
identifier[values] [ identifier[key] ][- literal[int] *( identifier[size] / literal[int] ):]])
identifier[self] . identifier[log_coll] . identifier[save] ( identifier[dict] ( identifier[log_json] . identifier[items] ()+ identifier[append] . identifier[items] ())) | def commit_log(self, log_json):
""" Commits a run log to the Mongo backend.
Due to limitations of maximum document size in Mongo,
stdout and stderr logs are truncated to a maximum size for
each task.
"""
log_json['_id'] = log_json['log_id']
append = {'save_date': datetime.utcnow()}
for (task_name, values) in log_json.get('tasks', {}).items():
for (key, size) in TRUNCATE_LOG_SIZES_CHAR.iteritems():
if isinstance(values.get(key, None), str):
if len(values[key]) > size:
values[key] = '\n'.join([values[key][:size / 2], 'DAGOBAH STREAM SPLIT', values[key][-1 * (size / 2):]]) # depends on [control=['if'], data=['size']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
self.log_coll.save(dict(log_json.items() + append.items())) |
def convert_tree_to_newick(tree,
otu_group,
label_key,
leaf_labels,
needs_quotes_pattern=NEWICK_NEEDING_QUOTING,
subtree_id=None,
bracket_ingroup=False):
"""`label_key` is a string (a key in the otu object) or a callable that takes two arguments:
the node, and the otu (which may be None for an internal node)
If `leaf_labels` is not None, it shoulr be a (list, dict) pair which will be filled. The list will
hold the order encountered,
and the dict will map name to index in the list
"""
assert (not is_str_type(label_key)) or (label_key in PhyloSchema._NEWICK_PROP_VALS) # pylint: disable=W0212
ingroup_node_id = tree.get('^ot:inGroupClade')
if subtree_id:
if subtree_id == 'ingroup':
root_id = ingroup_node_id
ingroup_node_id = None # turns of the comment pre-ingroup-marker
else:
root_id = subtree_id
else:
root_id = tree['^ot:rootNodeId']
edges = tree['edgeBySourceId']
if root_id not in edges:
return None
nodes = tree['nodeById']
sio, out = get_utf_8_string_io_writer()
nexson_frag_write_newick(out,
edges,
nodes,
otu_group,
label_key,
leaf_labels,
root_id,
needs_quotes_pattern=needs_quotes_pattern,
ingroup_id=ingroup_node_id,
bracket_ingroup=bracket_ingroup)
flush_utf_8_writer(out)
return sio.getvalue() | def function[convert_tree_to_newick, parameter[tree, otu_group, label_key, leaf_labels, needs_quotes_pattern, subtree_id, bracket_ingroup]]:
constant[`label_key` is a string (a key in the otu object) or a callable that takes two arguments:
the node, and the otu (which may be None for an internal node)
If `leaf_labels` is not None, it shoulr be a (list, dict) pair which will be filled. The list will
hold the order encountered,
and the dict will map name to index in the list
]
assert[<ast.BoolOp object at 0x7da18ede4f70>]
variable[ingroup_node_id] assign[=] call[name[tree].get, parameter[constant[^ot:inGroupClade]]]
if name[subtree_id] begin[:]
if compare[name[subtree_id] equal[==] constant[ingroup]] begin[:]
variable[root_id] assign[=] name[ingroup_node_id]
variable[ingroup_node_id] assign[=] constant[None]
variable[edges] assign[=] call[name[tree]][constant[edgeBySourceId]]
if compare[name[root_id] <ast.NotIn object at 0x7da2590d7190> name[edges]] begin[:]
return[constant[None]]
variable[nodes] assign[=] call[name[tree]][constant[nodeById]]
<ast.Tuple object at 0x7da18ede4130> assign[=] call[name[get_utf_8_string_io_writer], parameter[]]
call[name[nexson_frag_write_newick], parameter[name[out], name[edges], name[nodes], name[otu_group], name[label_key], name[leaf_labels], name[root_id]]]
call[name[flush_utf_8_writer], parameter[name[out]]]
return[call[name[sio].getvalue, parameter[]]] | keyword[def] identifier[convert_tree_to_newick] ( identifier[tree] ,
identifier[otu_group] ,
identifier[label_key] ,
identifier[leaf_labels] ,
identifier[needs_quotes_pattern] = identifier[NEWICK_NEEDING_QUOTING] ,
identifier[subtree_id] = keyword[None] ,
identifier[bracket_ingroup] = keyword[False] ):
literal[string]
keyword[assert] ( keyword[not] identifier[is_str_type] ( identifier[label_key] )) keyword[or] ( identifier[label_key] keyword[in] identifier[PhyloSchema] . identifier[_NEWICK_PROP_VALS] )
identifier[ingroup_node_id] = identifier[tree] . identifier[get] ( literal[string] )
keyword[if] identifier[subtree_id] :
keyword[if] identifier[subtree_id] == literal[string] :
identifier[root_id] = identifier[ingroup_node_id]
identifier[ingroup_node_id] = keyword[None]
keyword[else] :
identifier[root_id] = identifier[subtree_id]
keyword[else] :
identifier[root_id] = identifier[tree] [ literal[string] ]
identifier[edges] = identifier[tree] [ literal[string] ]
keyword[if] identifier[root_id] keyword[not] keyword[in] identifier[edges] :
keyword[return] keyword[None]
identifier[nodes] = identifier[tree] [ literal[string] ]
identifier[sio] , identifier[out] = identifier[get_utf_8_string_io_writer] ()
identifier[nexson_frag_write_newick] ( identifier[out] ,
identifier[edges] ,
identifier[nodes] ,
identifier[otu_group] ,
identifier[label_key] ,
identifier[leaf_labels] ,
identifier[root_id] ,
identifier[needs_quotes_pattern] = identifier[needs_quotes_pattern] ,
identifier[ingroup_id] = identifier[ingroup_node_id] ,
identifier[bracket_ingroup] = identifier[bracket_ingroup] )
identifier[flush_utf_8_writer] ( identifier[out] )
keyword[return] identifier[sio] . identifier[getvalue] () | def convert_tree_to_newick(tree, otu_group, label_key, leaf_labels, needs_quotes_pattern=NEWICK_NEEDING_QUOTING, subtree_id=None, bracket_ingroup=False):
"""`label_key` is a string (a key in the otu object) or a callable that takes two arguments:
the node, and the otu (which may be None for an internal node)
If `leaf_labels` is not None, it shoulr be a (list, dict) pair which will be filled. The list will
hold the order encountered,
and the dict will map name to index in the list
"""
assert not is_str_type(label_key) or label_key in PhyloSchema._NEWICK_PROP_VALS # pylint: disable=W0212
ingroup_node_id = tree.get('^ot:inGroupClade')
if subtree_id:
if subtree_id == 'ingroup':
root_id = ingroup_node_id
ingroup_node_id = None # turns of the comment pre-ingroup-marker # depends on [control=['if'], data=[]]
else:
root_id = subtree_id # depends on [control=['if'], data=[]]
else:
root_id = tree['^ot:rootNodeId']
edges = tree['edgeBySourceId']
if root_id not in edges:
return None # depends on [control=['if'], data=[]]
nodes = tree['nodeById']
(sio, out) = get_utf_8_string_io_writer()
nexson_frag_write_newick(out, edges, nodes, otu_group, label_key, leaf_labels, root_id, needs_quotes_pattern=needs_quotes_pattern, ingroup_id=ingroup_node_id, bracket_ingroup=bracket_ingroup)
flush_utf_8_writer(out)
return sio.getvalue() |
def close(self):
"""
Close outputs of process.
"""
self.process.stdout.close()
self.process.stderr.close()
self.running = False | def function[close, parameter[self]]:
constant[
Close outputs of process.
]
call[name[self].process.stdout.close, parameter[]]
call[name[self].process.stderr.close, parameter[]]
name[self].running assign[=] constant[False] | keyword[def] identifier[close] ( identifier[self] ):
literal[string]
identifier[self] . identifier[process] . identifier[stdout] . identifier[close] ()
identifier[self] . identifier[process] . identifier[stderr] . identifier[close] ()
identifier[self] . identifier[running] = keyword[False] | def close(self):
"""
Close outputs of process.
"""
self.process.stdout.close()
self.process.stderr.close()
self.running = False |
def _apply_to_sets(self, func, operation, keys, *args):
"""Helper function for sdiff, sinter, and sunion"""
keys = self._list_or_args(keys, args)
if not keys:
raise TypeError("{} takes at least two arguments".format(operation.lower()))
left = self._get_set(keys[0], operation) or set()
for key in keys[1:]:
right = self._get_set(key, operation) or set()
left = func(left, right)
return left | def function[_apply_to_sets, parameter[self, func, operation, keys]]:
constant[Helper function for sdiff, sinter, and sunion]
variable[keys] assign[=] call[name[self]._list_or_args, parameter[name[keys], name[args]]]
if <ast.UnaryOp object at 0x7da2043474c0> begin[:]
<ast.Raise object at 0x7da2043457e0>
variable[left] assign[=] <ast.BoolOp object at 0x7da1b2347f70>
for taget[name[key]] in starred[call[name[keys]][<ast.Slice object at 0x7da1b2346dd0>]] begin[:]
variable[right] assign[=] <ast.BoolOp object at 0x7da1b23476a0>
variable[left] assign[=] call[name[func], parameter[name[left], name[right]]]
return[name[left]] | keyword[def] identifier[_apply_to_sets] ( identifier[self] , identifier[func] , identifier[operation] , identifier[keys] ,* identifier[args] ):
literal[string]
identifier[keys] = identifier[self] . identifier[_list_or_args] ( identifier[keys] , identifier[args] )
keyword[if] keyword[not] identifier[keys] :
keyword[raise] identifier[TypeError] ( literal[string] . identifier[format] ( identifier[operation] . identifier[lower] ()))
identifier[left] = identifier[self] . identifier[_get_set] ( identifier[keys] [ literal[int] ], identifier[operation] ) keyword[or] identifier[set] ()
keyword[for] identifier[key] keyword[in] identifier[keys] [ literal[int] :]:
identifier[right] = identifier[self] . identifier[_get_set] ( identifier[key] , identifier[operation] ) keyword[or] identifier[set] ()
identifier[left] = identifier[func] ( identifier[left] , identifier[right] )
keyword[return] identifier[left] | def _apply_to_sets(self, func, operation, keys, *args):
"""Helper function for sdiff, sinter, and sunion"""
keys = self._list_or_args(keys, args)
if not keys:
raise TypeError('{} takes at least two arguments'.format(operation.lower())) # depends on [control=['if'], data=[]]
left = self._get_set(keys[0], operation) or set()
for key in keys[1:]:
right = self._get_set(key, operation) or set()
left = func(left, right) # depends on [control=['for'], data=['key']]
return left |
def bed12(args):
"""
%prog bed12 gffile > bedfile
Produce bed12 file for coding features. The exons will be converted to blocks.
The CDS range will be shown between thickStart to thickEnd. For reference,
bed format consists of the following fields:
1. chrom
2. chromStart
3. chromEnd
4. name
5. score
6. strand
7. thickStart
8. thickEnd
9. itemRgb
10. blockCount
11. blockSizes
12. blockStarts
"""
p = OptionParser(bed12.__doc__)
p.add_option("--parent", default="mRNA",
help="Top feature type [default: %default]")
p.add_option("--block", default="exon",
help="Feature type for regular blocks [default: %default]")
p.add_option("--thick", default="CDS",
help="Feature type for thick blocks [default: %default]")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
gffile, = args
parent, block, thick = opts.parent, opts.block, opts.thick
outfile = opts.outfile
g = make_index(gffile)
fw = must_open(outfile, "w")
for f in g.features_of_type(parent):
chrom = f.chrom
chromStart = f.start - 1
chromEnd = f.stop
name = f.id
score = 0
strand = f.strand
thickStart = 1e15
thickEnd = 0
blocks = []
for c in g.children(name, 1):
cstart, cend = c.start - 1, c.stop
if c.featuretype == block:
blockStart = cstart - chromStart
blockSize = cend - cstart
blocks.append((blockStart, blockSize))
elif c.featuretype == thick:
thickStart = min(thickStart, cstart)
thickEnd = max(thickEnd, cend)
blocks.sort()
blockStarts, blockSizes = zip(*blocks)
blockCount = len(blocks)
blockSizes = ",".join(str(x) for x in blockSizes) + ","
blockStarts = ",".join(str(x) for x in blockStarts) + ","
itemRgb = 0
print("\t".join(str(x) for x in (chrom, chromStart, chromEnd, \
name, score, strand, thickStart, thickEnd, itemRgb,
blockCount, blockSizes, blockStarts)), file=fw) | def function[bed12, parameter[args]]:
constant[
%prog bed12 gffile > bedfile
Produce bed12 file for coding features. The exons will be converted to blocks.
The CDS range will be shown between thickStart to thickEnd. For reference,
bed format consists of the following fields:
1. chrom
2. chromStart
3. chromEnd
4. name
5. score
6. strand
7. thickStart
8. thickEnd
9. itemRgb
10. blockCount
11. blockSizes
12. blockStarts
]
variable[p] assign[=] call[name[OptionParser], parameter[name[bed12].__doc__]]
call[name[p].add_option, parameter[constant[--parent]]]
call[name[p].add_option, parameter[constant[--block]]]
call[name[p].add_option, parameter[constant[--thick]]]
call[name[p].set_outfile, parameter[]]
<ast.Tuple object at 0x7da1b076bca0> assign[=] call[name[p].parse_args, parameter[name[args]]]
if compare[call[name[len], parameter[name[args]]] not_equal[!=] constant[1]] begin[:]
call[name[sys].exit, parameter[<ast.UnaryOp object at 0x7da1b076b700>]]
<ast.Tuple object at 0x7da1b076b250> assign[=] name[args]
<ast.Tuple object at 0x7da1b076b640> assign[=] tuple[[<ast.Attribute object at 0x7da1b076ac50>, <ast.Attribute object at 0x7da1b076abf0>, <ast.Attribute object at 0x7da1b076ab90>]]
variable[outfile] assign[=] name[opts].outfile
variable[g] assign[=] call[name[make_index], parameter[name[gffile]]]
variable[fw] assign[=] call[name[must_open], parameter[name[outfile], constant[w]]]
for taget[name[f]] in starred[call[name[g].features_of_type, parameter[name[parent]]]] begin[:]
variable[chrom] assign[=] name[f].chrom
variable[chromStart] assign[=] binary_operation[name[f].start - constant[1]]
variable[chromEnd] assign[=] name[f].stop
variable[name] assign[=] name[f].id
variable[score] assign[=] constant[0]
variable[strand] assign[=] name[f].strand
variable[thickStart] assign[=] constant[1000000000000000.0]
variable[thickEnd] assign[=] constant[0]
variable[blocks] assign[=] list[[]]
for taget[name[c]] in starred[call[name[g].children, parameter[name[name], constant[1]]]] begin[:]
<ast.Tuple object at 0x7da1b0810a30> assign[=] tuple[[<ast.BinOp object at 0x7da1b0810d30>, <ast.Attribute object at 0x7da1b0796c20>]]
if compare[name[c].featuretype equal[==] name[block]] begin[:]
variable[blockStart] assign[=] binary_operation[name[cstart] - name[chromStart]]
variable[blockSize] assign[=] binary_operation[name[cend] - name[cstart]]
call[name[blocks].append, parameter[tuple[[<ast.Name object at 0x7da1b0796e30>, <ast.Name object at 0x7da1b0796e60>]]]]
call[name[blocks].sort, parameter[]]
<ast.Tuple object at 0x7da1b0797760> assign[=] call[name[zip], parameter[<ast.Starred object at 0x7da1b0796560>]]
variable[blockCount] assign[=] call[name[len], parameter[name[blocks]]]
variable[blockSizes] assign[=] binary_operation[call[constant[,].join, parameter[<ast.GeneratorExp object at 0x7da1b0797400>]] + constant[,]]
variable[blockStarts] assign[=] binary_operation[call[constant[,].join, parameter[<ast.GeneratorExp object at 0x7da1b0796b60>]] + constant[,]]
variable[itemRgb] assign[=] constant[0]
call[name[print], parameter[call[constant[ ].join, parameter[<ast.GeneratorExp object at 0x7da1b079df30>]]]] | keyword[def] identifier[bed12] ( identifier[args] ):
literal[string]
identifier[p] = identifier[OptionParser] ( identifier[bed12] . identifier[__doc__] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[add_option] ( literal[string] , identifier[default] = literal[string] ,
identifier[help] = literal[string] )
identifier[p] . identifier[set_outfile] ()
identifier[opts] , identifier[args] = identifier[p] . identifier[parse_args] ( identifier[args] )
keyword[if] identifier[len] ( identifier[args] )!= literal[int] :
identifier[sys] . identifier[exit] ( keyword[not] identifier[p] . identifier[print_help] ())
identifier[gffile] ,= identifier[args]
identifier[parent] , identifier[block] , identifier[thick] = identifier[opts] . identifier[parent] , identifier[opts] . identifier[block] , identifier[opts] . identifier[thick]
identifier[outfile] = identifier[opts] . identifier[outfile]
identifier[g] = identifier[make_index] ( identifier[gffile] )
identifier[fw] = identifier[must_open] ( identifier[outfile] , literal[string] )
keyword[for] identifier[f] keyword[in] identifier[g] . identifier[features_of_type] ( identifier[parent] ):
identifier[chrom] = identifier[f] . identifier[chrom]
identifier[chromStart] = identifier[f] . identifier[start] - literal[int]
identifier[chromEnd] = identifier[f] . identifier[stop]
identifier[name] = identifier[f] . identifier[id]
identifier[score] = literal[int]
identifier[strand] = identifier[f] . identifier[strand]
identifier[thickStart] = literal[int]
identifier[thickEnd] = literal[int]
identifier[blocks] =[]
keyword[for] identifier[c] keyword[in] identifier[g] . identifier[children] ( identifier[name] , literal[int] ):
identifier[cstart] , identifier[cend] = identifier[c] . identifier[start] - literal[int] , identifier[c] . identifier[stop]
keyword[if] identifier[c] . identifier[featuretype] == identifier[block] :
identifier[blockStart] = identifier[cstart] - identifier[chromStart]
identifier[blockSize] = identifier[cend] - identifier[cstart]
identifier[blocks] . identifier[append] (( identifier[blockStart] , identifier[blockSize] ))
keyword[elif] identifier[c] . identifier[featuretype] == identifier[thick] :
identifier[thickStart] = identifier[min] ( identifier[thickStart] , identifier[cstart] )
identifier[thickEnd] = identifier[max] ( identifier[thickEnd] , identifier[cend] )
identifier[blocks] . identifier[sort] ()
identifier[blockStarts] , identifier[blockSizes] = identifier[zip] (* identifier[blocks] )
identifier[blockCount] = identifier[len] ( identifier[blocks] )
identifier[blockSizes] = literal[string] . identifier[join] ( identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[blockSizes] )+ literal[string]
identifier[blockStarts] = literal[string] . identifier[join] ( identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[blockStarts] )+ literal[string]
identifier[itemRgb] = literal[int]
identifier[print] ( literal[string] . identifier[join] ( identifier[str] ( identifier[x] ) keyword[for] identifier[x] keyword[in] ( identifier[chrom] , identifier[chromStart] , identifier[chromEnd] , identifier[name] , identifier[score] , identifier[strand] , identifier[thickStart] , identifier[thickEnd] , identifier[itemRgb] ,
identifier[blockCount] , identifier[blockSizes] , identifier[blockStarts] )), identifier[file] = identifier[fw] ) | def bed12(args):
"""
%prog bed12 gffile > bedfile
Produce bed12 file for coding features. The exons will be converted to blocks.
The CDS range will be shown between thickStart to thickEnd. For reference,
bed format consists of the following fields:
1. chrom
2. chromStart
3. chromEnd
4. name
5. score
6. strand
7. thickStart
8. thickEnd
9. itemRgb
10. blockCount
11. blockSizes
12. blockStarts
"""
p = OptionParser(bed12.__doc__)
p.add_option('--parent', default='mRNA', help='Top feature type [default: %default]')
p.add_option('--block', default='exon', help='Feature type for regular blocks [default: %default]')
p.add_option('--thick', default='CDS', help='Feature type for thick blocks [default: %default]')
p.set_outfile()
(opts, args) = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help()) # depends on [control=['if'], data=[]]
(gffile,) = args
(parent, block, thick) = (opts.parent, opts.block, opts.thick)
outfile = opts.outfile
g = make_index(gffile)
fw = must_open(outfile, 'w')
for f in g.features_of_type(parent):
chrom = f.chrom
chromStart = f.start - 1
chromEnd = f.stop
name = f.id
score = 0
strand = f.strand
thickStart = 1000000000000000.0
thickEnd = 0
blocks = []
for c in g.children(name, 1):
(cstart, cend) = (c.start - 1, c.stop)
if c.featuretype == block:
blockStart = cstart - chromStart
blockSize = cend - cstart
blocks.append((blockStart, blockSize)) # depends on [control=['if'], data=[]]
elif c.featuretype == thick:
thickStart = min(thickStart, cstart)
thickEnd = max(thickEnd, cend) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['c']]
blocks.sort()
(blockStarts, blockSizes) = zip(*blocks)
blockCount = len(blocks)
blockSizes = ','.join((str(x) for x in blockSizes)) + ','
blockStarts = ','.join((str(x) for x in blockStarts)) + ','
itemRgb = 0
print('\t'.join((str(x) for x in (chrom, chromStart, chromEnd, name, score, strand, thickStart, thickEnd, itemRgb, blockCount, blockSizes, blockStarts))), file=fw) # depends on [control=['for'], data=['f']] |
def get_authors(context, template='zinnia/tags/authors.html'):
"""
Return the published authors.
"""
return {'template': template,
'authors': Author.published.all().annotate(
count_entries_published=Count('entries')),
'context_author': context.get('author')} | def function[get_authors, parameter[context, template]]:
constant[
Return the published authors.
]
return[dictionary[[<ast.Constant object at 0x7da1b1ddfe20>, <ast.Constant object at 0x7da1b1ddea40>, <ast.Constant object at 0x7da1b1dddb10>], [<ast.Name object at 0x7da1b1ddfb20>, <ast.Call object at 0x7da1b1ddf790>, <ast.Call object at 0x7da1b1ddde70>]]] | keyword[def] identifier[get_authors] ( identifier[context] , identifier[template] = literal[string] ):
literal[string]
keyword[return] { literal[string] : identifier[template] ,
literal[string] : identifier[Author] . identifier[published] . identifier[all] (). identifier[annotate] (
identifier[count_entries_published] = identifier[Count] ( literal[string] )),
literal[string] : identifier[context] . identifier[get] ( literal[string] )} | def get_authors(context, template='zinnia/tags/authors.html'):
"""
Return the published authors.
"""
return {'template': template, 'authors': Author.published.all().annotate(count_entries_published=Count('entries')), 'context_author': context.get('author')} |
def workflow_rename(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /workflow-xxxx/rename API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Name#API-method%3A-%2Fclass-xxxx%2Frename
"""
return DXHTTPRequest('/%s/rename' % object_id, input_params, always_retry=always_retry, **kwargs) | def function[workflow_rename, parameter[object_id, input_params, always_retry]]:
constant[
Invokes the /workflow-xxxx/rename API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Name#API-method%3A-%2Fclass-xxxx%2Frename
]
return[call[name[DXHTTPRequest], parameter[binary_operation[constant[/%s/rename] <ast.Mod object at 0x7da2590d6920> name[object_id]], name[input_params]]]] | keyword[def] identifier[workflow_rename] ( identifier[object_id] , identifier[input_params] ={}, identifier[always_retry] = keyword[True] ,** identifier[kwargs] ):
literal[string]
keyword[return] identifier[DXHTTPRequest] ( literal[string] % identifier[object_id] , identifier[input_params] , identifier[always_retry] = identifier[always_retry] ,** identifier[kwargs] ) | def workflow_rename(object_id, input_params={}, always_retry=True, **kwargs):
"""
Invokes the /workflow-xxxx/rename API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Name#API-method%3A-%2Fclass-xxxx%2Frename
"""
return DXHTTPRequest('/%s/rename' % object_id, input_params, always_retry=always_retry, **kwargs) |
def beholder(func):
"""[ClassMethod] Behold extraction procedure."""
@functools.wraps(func)
def behold(self, proto, length, *args, **kwargs):
seek_cur = self._file.tell()
try:
return func(proto, length, *args, **kwargs)
except Exception:
from pcapkit.protocols.raw import Raw
error = traceback.format_exc(limit=1).strip().split(os.linesep)[-1]
# error = traceback.format_exc()
self._file.seek(seek_cur, os.SEEK_SET)
next_ = Raw(io.BytesIO(self._read_fileng(length)), length, error=error)
return next_
return behold | def function[beholder, parameter[func]]:
constant[[ClassMethod] Behold extraction procedure.]
def function[behold, parameter[self, proto, length]]:
variable[seek_cur] assign[=] call[name[self]._file.tell, parameter[]]
<ast.Try object at 0x7da1b053b430>
return[name[behold]] | keyword[def] identifier[beholder] ( identifier[func] ):
literal[string]
@ identifier[functools] . identifier[wraps] ( identifier[func] )
keyword[def] identifier[behold] ( identifier[self] , identifier[proto] , identifier[length] ,* identifier[args] ,** identifier[kwargs] ):
identifier[seek_cur] = identifier[self] . identifier[_file] . identifier[tell] ()
keyword[try] :
keyword[return] identifier[func] ( identifier[proto] , identifier[length] ,* identifier[args] ,** identifier[kwargs] )
keyword[except] identifier[Exception] :
keyword[from] identifier[pcapkit] . identifier[protocols] . identifier[raw] keyword[import] identifier[Raw]
identifier[error] = identifier[traceback] . identifier[format_exc] ( identifier[limit] = literal[int] ). identifier[strip] (). identifier[split] ( identifier[os] . identifier[linesep] )[- literal[int] ]
identifier[self] . identifier[_file] . identifier[seek] ( identifier[seek_cur] , identifier[os] . identifier[SEEK_SET] )
identifier[next_] = identifier[Raw] ( identifier[io] . identifier[BytesIO] ( identifier[self] . identifier[_read_fileng] ( identifier[length] )), identifier[length] , identifier[error] = identifier[error] )
keyword[return] identifier[next_]
keyword[return] identifier[behold] | def beholder(func):
"""[ClassMethod] Behold extraction procedure."""
@functools.wraps(func)
def behold(self, proto, length, *args, **kwargs):
seek_cur = self._file.tell()
try:
return func(proto, length, *args, **kwargs) # depends on [control=['try'], data=[]]
except Exception:
from pcapkit.protocols.raw import Raw
error = traceback.format_exc(limit=1).strip().split(os.linesep)[-1]
# error = traceback.format_exc()
self._file.seek(seek_cur, os.SEEK_SET)
next_ = Raw(io.BytesIO(self._read_fileng(length)), length, error=error)
return next_ # depends on [control=['except'], data=[]]
return behold |
def _count_ned_sources_in_database_requiring_metadata(
self):
"""*Count the sources in the NED table requiring metadata*
**Return:**
- ``self.total``, ``self.batches`` -- total number of galaxies needing metadata & the number of batches required to be sent to NED
*Usage:*
.. code-block:: python
totalRemaining, numberOfBatches = stream._count_ned_sources_in_database_requiring_metadata()
"""
self.log.debug(
'starting the ``_count_ned_sources_in_database_requiring_metadata`` method')
tableName = self.dbTableName
sqlQuery = u"""
select count(*) as count from %(tableName)s where raDeg is null and (download_error != 1 or download_error is null)
""" % locals()
rows = readquery(
log=self.log,
sqlQuery=sqlQuery,
dbConn=self.cataloguesDbConn,
quiet=False
)
self.total = rows[0]["count"]
self.batches = int(self.total / 50000.) + 1
if self.total == 0:
self.batches = 0
self.log.debug(
'completed the ``_count_ned_sources_in_database_requiring_metadata`` method')
return self.total, self.batches | def function[_count_ned_sources_in_database_requiring_metadata, parameter[self]]:
constant[*Count the sources in the NED table requiring metadata*
**Return:**
- ``self.total``, ``self.batches`` -- total number of galaxies needing metadata & the number of batches required to be sent to NED
*Usage:*
.. code-block:: python
totalRemaining, numberOfBatches = stream._count_ned_sources_in_database_requiring_metadata()
]
call[name[self].log.debug, parameter[constant[starting the ``_count_ned_sources_in_database_requiring_metadata`` method]]]
variable[tableName] assign[=] name[self].dbTableName
variable[sqlQuery] assign[=] binary_operation[constant[
select count(*) as count from %(tableName)s where raDeg is null and (download_error != 1 or download_error is null)
] <ast.Mod object at 0x7da2590d6920> call[name[locals], parameter[]]]
variable[rows] assign[=] call[name[readquery], parameter[]]
name[self].total assign[=] call[call[name[rows]][constant[0]]][constant[count]]
name[self].batches assign[=] binary_operation[call[name[int], parameter[binary_operation[name[self].total / constant[50000.0]]]] + constant[1]]
if compare[name[self].total equal[==] constant[0]] begin[:]
name[self].batches assign[=] constant[0]
call[name[self].log.debug, parameter[constant[completed the ``_count_ned_sources_in_database_requiring_metadata`` method]]]
return[tuple[[<ast.Attribute object at 0x7da18bcc8d60>, <ast.Attribute object at 0x7da18bcca2c0>]]] | keyword[def] identifier[_count_ned_sources_in_database_requiring_metadata] (
identifier[self] ):
literal[string]
identifier[self] . identifier[log] . identifier[debug] (
literal[string] )
identifier[tableName] = identifier[self] . identifier[dbTableName]
identifier[sqlQuery] = literal[string] % identifier[locals] ()
identifier[rows] = identifier[readquery] (
identifier[log] = identifier[self] . identifier[log] ,
identifier[sqlQuery] = identifier[sqlQuery] ,
identifier[dbConn] = identifier[self] . identifier[cataloguesDbConn] ,
identifier[quiet] = keyword[False]
)
identifier[self] . identifier[total] = identifier[rows] [ literal[int] ][ literal[string] ]
identifier[self] . identifier[batches] = identifier[int] ( identifier[self] . identifier[total] / literal[int] )+ literal[int]
keyword[if] identifier[self] . identifier[total] == literal[int] :
identifier[self] . identifier[batches] = literal[int]
identifier[self] . identifier[log] . identifier[debug] (
literal[string] )
keyword[return] identifier[self] . identifier[total] , identifier[self] . identifier[batches] | def _count_ned_sources_in_database_requiring_metadata(self):
"""*Count the sources in the NED table requiring metadata*
**Return:**
- ``self.total``, ``self.batches`` -- total number of galaxies needing metadata & the number of batches required to be sent to NED
*Usage:*
.. code-block:: python
totalRemaining, numberOfBatches = stream._count_ned_sources_in_database_requiring_metadata()
"""
self.log.debug('starting the ``_count_ned_sources_in_database_requiring_metadata`` method')
tableName = self.dbTableName
sqlQuery = u'\n select count(*) as count from %(tableName)s where raDeg is null and (download_error != 1 or download_error is null)\n ' % locals()
rows = readquery(log=self.log, sqlQuery=sqlQuery, dbConn=self.cataloguesDbConn, quiet=False)
self.total = rows[0]['count']
self.batches = int(self.total / 50000.0) + 1
if self.total == 0:
self.batches = 0 # depends on [control=['if'], data=[]]
self.log.debug('completed the ``_count_ned_sources_in_database_requiring_metadata`` method')
return (self.total, self.batches) |
def message(self):
""" Convert the message to a mime compliant email string """
return '\n'.join(
[self.from_email, str(self.to), self.subject, self.body]) | def function[message, parameter[self]]:
constant[ Convert the message to a mime compliant email string ]
return[call[constant[
].join, parameter[list[[<ast.Attribute object at 0x7da18ede6770>, <ast.Call object at 0x7da18ede4160>, <ast.Attribute object at 0x7da18ede6e30>, <ast.Attribute object at 0x7da18ede71c0>]]]]] | keyword[def] identifier[message] ( identifier[self] ):
literal[string]
keyword[return] literal[string] . identifier[join] (
[ identifier[self] . identifier[from_email] , identifier[str] ( identifier[self] . identifier[to] ), identifier[self] . identifier[subject] , identifier[self] . identifier[body] ]) | def message(self):
""" Convert the message to a mime compliant email string """
return '\n'.join([self.from_email, str(self.to), self.subject, self.body]) |
def wrap_line(line, maxline=79, result=[], count=count):
""" We have a line that is too long,
so we're going to try to wrap it.
"""
# Extract the indentation
append = result.append
extend = result.extend
indentation = line[0]
lenfirst = len(indentation)
indent = lenfirst - len(indentation.lstrip())
assert indent in (0, lenfirst)
indentation = line.pop(0) if indent else ''
# Get splittable/non-splittable groups
dgroups = list(delimiter_groups(line))
unsplittable = dgroups[::2]
splittable = dgroups[1::2]
# If the largest non-splittable group won't fit
# on a line, try to add parentheses to the line.
if max(count(x) for x in unsplittable) > maxline - indent:
line = add_parens(line, maxline, indent)
dgroups = list(delimiter_groups(line))
unsplittable = dgroups[::2]
splittable = dgroups[1::2]
# Deal with the first (always unsplittable) group, and
# then set up to deal with the remainder in pairs.
first = unsplittable[0]
append(indentation)
extend(first)
if not splittable:
return result
pos = indent + count(first)
indentation += ' '
indent += 4
if indent >= maxline/2:
maxline = maxline/2 + indent
for sg, nsg in zip(splittable, unsplittable[1:]):
if sg:
# If we already have stuff on the line and even
# the very first item won't fit, start a new line
if pos > indent and pos + len(sg[0]) > maxline:
append('\n')
append(indentation)
pos = indent
# Dump lines out of the splittable group
# until the entire thing fits
csg = count(sg)
while pos + csg > maxline:
ready, sg = split_group(sg, pos, maxline)
if ready[-1].endswith(' '):
ready[-1] = ready[-1][:-1]
extend(ready)
append('\n')
append(indentation)
pos = indent
csg = count(sg)
# Dump the remainder of the splittable group
if sg:
extend(sg)
pos += csg
# Dump the unsplittable group, optionally
# preceded by a linefeed.
cnsg = count(nsg)
if pos > indent and pos + cnsg > maxline:
append('\n')
append(indentation)
pos = indent
extend(nsg)
pos += cnsg | def function[wrap_line, parameter[line, maxline, result, count]]:
constant[ We have a line that is too long,
so we're going to try to wrap it.
]
variable[append] assign[=] name[result].append
variable[extend] assign[=] name[result].extend
variable[indentation] assign[=] call[name[line]][constant[0]]
variable[lenfirst] assign[=] call[name[len], parameter[name[indentation]]]
variable[indent] assign[=] binary_operation[name[lenfirst] - call[name[len], parameter[call[name[indentation].lstrip, parameter[]]]]]
assert[compare[name[indent] in tuple[[<ast.Constant object at 0x7da1b1d50700>, <ast.Name object at 0x7da1b1d53070>]]]]
variable[indentation] assign[=] <ast.IfExp object at 0x7da1b1d527d0>
variable[dgroups] assign[=] call[name[list], parameter[call[name[delimiter_groups], parameter[name[line]]]]]
variable[unsplittable] assign[=] call[name[dgroups]][<ast.Slice object at 0x7da1b1d52380>]
variable[splittable] assign[=] call[name[dgroups]][<ast.Slice object at 0x7da1b1d50c40>]
if compare[call[name[max], parameter[<ast.GeneratorExp object at 0x7da1b1d52fb0>]] greater[>] binary_operation[name[maxline] - name[indent]]] begin[:]
variable[line] assign[=] call[name[add_parens], parameter[name[line], name[maxline], name[indent]]]
variable[dgroups] assign[=] call[name[list], parameter[call[name[delimiter_groups], parameter[name[line]]]]]
variable[unsplittable] assign[=] call[name[dgroups]][<ast.Slice object at 0x7da1b1d50580>]
variable[splittable] assign[=] call[name[dgroups]][<ast.Slice object at 0x7da1b1d50850>]
variable[first] assign[=] call[name[unsplittable]][constant[0]]
call[name[append], parameter[name[indentation]]]
call[name[extend], parameter[name[first]]]
if <ast.UnaryOp object at 0x7da1b1d536a0> begin[:]
return[name[result]]
variable[pos] assign[=] binary_operation[name[indent] + call[name[count], parameter[name[first]]]]
<ast.AugAssign object at 0x7da1b1d515a0>
<ast.AugAssign object at 0x7da1b1d53010>
if compare[name[indent] greater_or_equal[>=] binary_operation[name[maxline] / constant[2]]] begin[:]
variable[maxline] assign[=] binary_operation[binary_operation[name[maxline] / constant[2]] + name[indent]]
for taget[tuple[[<ast.Name object at 0x7da1b1d51b10>, <ast.Name object at 0x7da1b1d51630>]]] in starred[call[name[zip], parameter[name[splittable], call[name[unsplittable]][<ast.Slice object at 0x7da1b1d50a60>]]]] begin[:]
if name[sg] begin[:]
if <ast.BoolOp object at 0x7da1b1ed6bf0> begin[:]
call[name[append], parameter[constant[
]]]
call[name[append], parameter[name[indentation]]]
variable[pos] assign[=] name[indent]
variable[csg] assign[=] call[name[count], parameter[name[sg]]]
while compare[binary_operation[name[pos] + name[csg]] greater[>] name[maxline]] begin[:]
<ast.Tuple object at 0x7da1b1ed52a0> assign[=] call[name[split_group], parameter[name[sg], name[pos], name[maxline]]]
if call[call[name[ready]][<ast.UnaryOp object at 0x7da1b1ed6f50>].endswith, parameter[constant[ ]]] begin[:]
call[name[ready]][<ast.UnaryOp object at 0x7da1b1ed7220>] assign[=] call[call[name[ready]][<ast.UnaryOp object at 0x7da1b1ed5030>]][<ast.Slice object at 0x7da1b1ed59c0>]
call[name[extend], parameter[name[ready]]]
call[name[append], parameter[constant[
]]]
call[name[append], parameter[name[indentation]]]
variable[pos] assign[=] name[indent]
variable[csg] assign[=] call[name[count], parameter[name[sg]]]
if name[sg] begin[:]
call[name[extend], parameter[name[sg]]]
<ast.AugAssign object at 0x7da1b1e8cf70>
variable[cnsg] assign[=] call[name[count], parameter[name[nsg]]]
if <ast.BoolOp object at 0x7da1b1e8d2a0> begin[:]
call[name[append], parameter[constant[
]]]
call[name[append], parameter[name[indentation]]]
variable[pos] assign[=] name[indent]
call[name[extend], parameter[name[nsg]]]
<ast.AugAssign object at 0x7da1b1e8c610> | keyword[def] identifier[wrap_line] ( identifier[line] , identifier[maxline] = literal[int] , identifier[result] =[], identifier[count] = identifier[count] ):
literal[string]
identifier[append] = identifier[result] . identifier[append]
identifier[extend] = identifier[result] . identifier[extend]
identifier[indentation] = identifier[line] [ literal[int] ]
identifier[lenfirst] = identifier[len] ( identifier[indentation] )
identifier[indent] = identifier[lenfirst] - identifier[len] ( identifier[indentation] . identifier[lstrip] ())
keyword[assert] identifier[indent] keyword[in] ( literal[int] , identifier[lenfirst] )
identifier[indentation] = identifier[line] . identifier[pop] ( literal[int] ) keyword[if] identifier[indent] keyword[else] literal[string]
identifier[dgroups] = identifier[list] ( identifier[delimiter_groups] ( identifier[line] ))
identifier[unsplittable] = identifier[dgroups] [:: literal[int] ]
identifier[splittable] = identifier[dgroups] [ literal[int] :: literal[int] ]
keyword[if] identifier[max] ( identifier[count] ( identifier[x] ) keyword[for] identifier[x] keyword[in] identifier[unsplittable] )> identifier[maxline] - identifier[indent] :
identifier[line] = identifier[add_parens] ( identifier[line] , identifier[maxline] , identifier[indent] )
identifier[dgroups] = identifier[list] ( identifier[delimiter_groups] ( identifier[line] ))
identifier[unsplittable] = identifier[dgroups] [:: literal[int] ]
identifier[splittable] = identifier[dgroups] [ literal[int] :: literal[int] ]
identifier[first] = identifier[unsplittable] [ literal[int] ]
identifier[append] ( identifier[indentation] )
identifier[extend] ( identifier[first] )
keyword[if] keyword[not] identifier[splittable] :
keyword[return] identifier[result]
identifier[pos] = identifier[indent] + identifier[count] ( identifier[first] )
identifier[indentation] += literal[string]
identifier[indent] += literal[int]
keyword[if] identifier[indent] >= identifier[maxline] / literal[int] :
identifier[maxline] = identifier[maxline] / literal[int] + identifier[indent]
keyword[for] identifier[sg] , identifier[nsg] keyword[in] identifier[zip] ( identifier[splittable] , identifier[unsplittable] [ literal[int] :]):
keyword[if] identifier[sg] :
keyword[if] identifier[pos] > identifier[indent] keyword[and] identifier[pos] + identifier[len] ( identifier[sg] [ literal[int] ])> identifier[maxline] :
identifier[append] ( literal[string] )
identifier[append] ( identifier[indentation] )
identifier[pos] = identifier[indent]
identifier[csg] = identifier[count] ( identifier[sg] )
keyword[while] identifier[pos] + identifier[csg] > identifier[maxline] :
identifier[ready] , identifier[sg] = identifier[split_group] ( identifier[sg] , identifier[pos] , identifier[maxline] )
keyword[if] identifier[ready] [- literal[int] ]. identifier[endswith] ( literal[string] ):
identifier[ready] [- literal[int] ]= identifier[ready] [- literal[int] ][:- literal[int] ]
identifier[extend] ( identifier[ready] )
identifier[append] ( literal[string] )
identifier[append] ( identifier[indentation] )
identifier[pos] = identifier[indent]
identifier[csg] = identifier[count] ( identifier[sg] )
keyword[if] identifier[sg] :
identifier[extend] ( identifier[sg] )
identifier[pos] += identifier[csg]
identifier[cnsg] = identifier[count] ( identifier[nsg] )
keyword[if] identifier[pos] > identifier[indent] keyword[and] identifier[pos] + identifier[cnsg] > identifier[maxline] :
identifier[append] ( literal[string] )
identifier[append] ( identifier[indentation] )
identifier[pos] = identifier[indent]
identifier[extend] ( identifier[nsg] )
identifier[pos] += identifier[cnsg] | def wrap_line(line, maxline=79, result=[], count=count):
""" We have a line that is too long,
so we're going to try to wrap it.
"""
# Extract the indentation
append = result.append
extend = result.extend
indentation = line[0]
lenfirst = len(indentation)
indent = lenfirst - len(indentation.lstrip())
assert indent in (0, lenfirst)
indentation = line.pop(0) if indent else ''
# Get splittable/non-splittable groups
dgroups = list(delimiter_groups(line))
unsplittable = dgroups[::2]
splittable = dgroups[1::2]
# If the largest non-splittable group won't fit
# on a line, try to add parentheses to the line.
if max((count(x) for x in unsplittable)) > maxline - indent:
line = add_parens(line, maxline, indent)
dgroups = list(delimiter_groups(line))
unsplittable = dgroups[::2]
splittable = dgroups[1::2] # depends on [control=['if'], data=[]]
# Deal with the first (always unsplittable) group, and
# then set up to deal with the remainder in pairs.
first = unsplittable[0]
append(indentation)
extend(first)
if not splittable:
return result # depends on [control=['if'], data=[]]
pos = indent + count(first)
indentation += ' '
indent += 4
if indent >= maxline / 2:
maxline = maxline / 2 + indent # depends on [control=['if'], data=['indent']]
for (sg, nsg) in zip(splittable, unsplittable[1:]):
if sg:
# If we already have stuff on the line and even
# the very first item won't fit, start a new line
if pos > indent and pos + len(sg[0]) > maxline:
append('\n')
append(indentation)
pos = indent # depends on [control=['if'], data=[]]
# Dump lines out of the splittable group
# until the entire thing fits
csg = count(sg)
while pos + csg > maxline:
(ready, sg) = split_group(sg, pos, maxline)
if ready[-1].endswith(' '):
ready[-1] = ready[-1][:-1] # depends on [control=['if'], data=[]]
extend(ready)
append('\n')
append(indentation)
pos = indent
csg = count(sg) # depends on [control=['while'], data=['maxline']]
# Dump the remainder of the splittable group
if sg:
extend(sg)
pos += csg # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# Dump the unsplittable group, optionally
# preceded by a linefeed.
cnsg = count(nsg)
if pos > indent and pos + cnsg > maxline:
append('\n')
append(indentation)
pos = indent # depends on [control=['if'], data=[]]
extend(nsg)
pos += cnsg # depends on [control=['for'], data=[]] |
def size_container_folding(value):
"""
Convert value to ast expression if size is not too big.
Converter for sized container.
"""
if len(value) < MAX_LEN:
if isinstance(value, list):
return ast.List([to_ast(elt) for elt in value], ast.Load())
elif isinstance(value, tuple):
return ast.Tuple([to_ast(elt) for elt in value], ast.Load())
elif isinstance(value, set):
return ast.Set([to_ast(elt) for elt in value])
elif isinstance(value, dict):
keys = [to_ast(elt) for elt in value.keys()]
values = [to_ast(elt) for elt in value.values()]
return ast.Dict(keys, values)
elif isinstance(value, np.ndarray):
return ast.Call(func=ast.Attribute(
ast.Name(mangle('numpy'), ast.Load(), None),
'array',
ast.Load()),
args=[to_ast(totuple(value.tolist())),
ast.Attribute(
ast.Name(mangle('numpy'), ast.Load(), None),
value.dtype.name,
ast.Load())],
keywords=[])
else:
raise ConversionError()
else:
raise ToNotEval() | def function[size_container_folding, parameter[value]]:
constant[
Convert value to ast expression if size is not too big.
Converter for sized container.
]
if compare[call[name[len], parameter[name[value]]] less[<] name[MAX_LEN]] begin[:]
if call[name[isinstance], parameter[name[value], name[list]]] begin[:]
return[call[name[ast].List, parameter[<ast.ListComp object at 0x7da20c6c6e90>, call[name[ast].Load, parameter[]]]]] | keyword[def] identifier[size_container_folding] ( identifier[value] ):
literal[string]
keyword[if] identifier[len] ( identifier[value] )< identifier[MAX_LEN] :
keyword[if] identifier[isinstance] ( identifier[value] , identifier[list] ):
keyword[return] identifier[ast] . identifier[List] ([ identifier[to_ast] ( identifier[elt] ) keyword[for] identifier[elt] keyword[in] identifier[value] ], identifier[ast] . identifier[Load] ())
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[tuple] ):
keyword[return] identifier[ast] . identifier[Tuple] ([ identifier[to_ast] ( identifier[elt] ) keyword[for] identifier[elt] keyword[in] identifier[value] ], identifier[ast] . identifier[Load] ())
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[set] ):
keyword[return] identifier[ast] . identifier[Set] ([ identifier[to_ast] ( identifier[elt] ) keyword[for] identifier[elt] keyword[in] identifier[value] ])
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[dict] ):
identifier[keys] =[ identifier[to_ast] ( identifier[elt] ) keyword[for] identifier[elt] keyword[in] identifier[value] . identifier[keys] ()]
identifier[values] =[ identifier[to_ast] ( identifier[elt] ) keyword[for] identifier[elt] keyword[in] identifier[value] . identifier[values] ()]
keyword[return] identifier[ast] . identifier[Dict] ( identifier[keys] , identifier[values] )
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[np] . identifier[ndarray] ):
keyword[return] identifier[ast] . identifier[Call] ( identifier[func] = identifier[ast] . identifier[Attribute] (
identifier[ast] . identifier[Name] ( identifier[mangle] ( literal[string] ), identifier[ast] . identifier[Load] (), keyword[None] ),
literal[string] ,
identifier[ast] . identifier[Load] ()),
identifier[args] =[ identifier[to_ast] ( identifier[totuple] ( identifier[value] . identifier[tolist] ())),
identifier[ast] . identifier[Attribute] (
identifier[ast] . identifier[Name] ( identifier[mangle] ( literal[string] ), identifier[ast] . identifier[Load] (), keyword[None] ),
identifier[value] . identifier[dtype] . identifier[name] ,
identifier[ast] . identifier[Load] ())],
identifier[keywords] =[])
keyword[else] :
keyword[raise] identifier[ConversionError] ()
keyword[else] :
keyword[raise] identifier[ToNotEval] () | def size_container_folding(value):
"""
Convert value to ast expression if size is not too big.
Converter for sized container.
"""
if len(value) < MAX_LEN:
if isinstance(value, list):
return ast.List([to_ast(elt) for elt in value], ast.Load()) # depends on [control=['if'], data=[]]
elif isinstance(value, tuple):
return ast.Tuple([to_ast(elt) for elt in value], ast.Load()) # depends on [control=['if'], data=[]]
elif isinstance(value, set):
return ast.Set([to_ast(elt) for elt in value]) # depends on [control=['if'], data=[]]
elif isinstance(value, dict):
keys = [to_ast(elt) for elt in value.keys()]
values = [to_ast(elt) for elt in value.values()]
return ast.Dict(keys, values) # depends on [control=['if'], data=[]]
elif isinstance(value, np.ndarray):
return ast.Call(func=ast.Attribute(ast.Name(mangle('numpy'), ast.Load(), None), 'array', ast.Load()), args=[to_ast(totuple(value.tolist())), ast.Attribute(ast.Name(mangle('numpy'), ast.Load(), None), value.dtype.name, ast.Load())], keywords=[]) # depends on [control=['if'], data=[]]
else:
raise ConversionError() # depends on [control=['if'], data=[]]
else:
raise ToNotEval() |
def set_censor(self, character):
"""Replaces the original censor character '*' with ``character``."""
# TODO: what if character isn't str()-able?
if isinstance(character, int):
character = str(character)
self._censor_char = character | def function[set_censor, parameter[self, character]]:
constant[Replaces the original censor character '*' with ``character``.]
if call[name[isinstance], parameter[name[character], name[int]]] begin[:]
variable[character] assign[=] call[name[str], parameter[name[character]]]
name[self]._censor_char assign[=] name[character] | keyword[def] identifier[set_censor] ( identifier[self] , identifier[character] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[character] , identifier[int] ):
identifier[character] = identifier[str] ( identifier[character] )
identifier[self] . identifier[_censor_char] = identifier[character] | def set_censor(self, character):
"""Replaces the original censor character '*' with ``character``."""
# TODO: what if character isn't str()-able?
if isinstance(character, int):
character = str(character) # depends on [control=['if'], data=[]]
self._censor_char = character |
def get_release_revision(self, project, release_id, definition_snapshot_revision, **kwargs):
"""GetReleaseRevision.
Get release for a given revision number.
:param str project: Project ID or project name
:param int release_id: Id of the release.
:param int definition_snapshot_revision: Definition snapshot revision number.
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if release_id is not None:
route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
query_parameters = {}
if definition_snapshot_revision is not None:
query_parameters['definitionSnapshotRevision'] = self._serialize.query('definition_snapshot_revision', definition_snapshot_revision, 'int')
response = self._send(http_method='GET',
location_id='a166fde7-27ad-408e-ba75-703c2cc9d500',
version='5.0',
route_values=route_values,
query_parameters=query_parameters,
accept_media_type='text/plain')
if "callback" in kwargs:
callback = kwargs["callback"]
else:
callback = None
return self._client.stream_download(response, callback=callback) | def function[get_release_revision, parameter[self, project, release_id, definition_snapshot_revision]]:
constant[GetReleaseRevision.
Get release for a given revision number.
:param str project: Project ID or project name
:param int release_id: Id of the release.
:param int definition_snapshot_revision: Definition snapshot revision number.
:rtype: object
]
variable[route_values] assign[=] dictionary[[], []]
if compare[name[project] is_not constant[None]] begin[:]
call[name[route_values]][constant[project]] assign[=] call[name[self]._serialize.url, parameter[constant[project], name[project], constant[str]]]
if compare[name[release_id] is_not constant[None]] begin[:]
call[name[route_values]][constant[releaseId]] assign[=] call[name[self]._serialize.url, parameter[constant[release_id], name[release_id], constant[int]]]
variable[query_parameters] assign[=] dictionary[[], []]
if compare[name[definition_snapshot_revision] is_not constant[None]] begin[:]
call[name[query_parameters]][constant[definitionSnapshotRevision]] assign[=] call[name[self]._serialize.query, parameter[constant[definition_snapshot_revision], name[definition_snapshot_revision], constant[int]]]
variable[response] assign[=] call[name[self]._send, parameter[]]
if compare[constant[callback] in name[kwargs]] begin[:]
variable[callback] assign[=] call[name[kwargs]][constant[callback]]
return[call[name[self]._client.stream_download, parameter[name[response]]]] | keyword[def] identifier[get_release_revision] ( identifier[self] , identifier[project] , identifier[release_id] , identifier[definition_snapshot_revision] ,** identifier[kwargs] ):
literal[string]
identifier[route_values] ={}
keyword[if] identifier[project] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[project] , literal[string] )
keyword[if] identifier[release_id] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[release_id] , literal[string] )
identifier[query_parameters] ={}
keyword[if] identifier[definition_snapshot_revision] keyword[is] keyword[not] keyword[None] :
identifier[query_parameters] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[query] ( literal[string] , identifier[definition_snapshot_revision] , literal[string] )
identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[route_values] = identifier[route_values] ,
identifier[query_parameters] = identifier[query_parameters] ,
identifier[accept_media_type] = literal[string] )
keyword[if] literal[string] keyword[in] identifier[kwargs] :
identifier[callback] = identifier[kwargs] [ literal[string] ]
keyword[else] :
identifier[callback] = keyword[None]
keyword[return] identifier[self] . identifier[_client] . identifier[stream_download] ( identifier[response] , identifier[callback] = identifier[callback] ) | def get_release_revision(self, project, release_id, definition_snapshot_revision, **kwargs):
"""GetReleaseRevision.
Get release for a given revision number.
:param str project: Project ID or project name
:param int release_id: Id of the release.
:param int definition_snapshot_revision: Definition snapshot revision number.
:rtype: object
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str') # depends on [control=['if'], data=['project']]
if release_id is not None:
route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int') # depends on [control=['if'], data=['release_id']]
query_parameters = {}
if definition_snapshot_revision is not None:
query_parameters['definitionSnapshotRevision'] = self._serialize.query('definition_snapshot_revision', definition_snapshot_revision, 'int') # depends on [control=['if'], data=['definition_snapshot_revision']]
response = self._send(http_method='GET', location_id='a166fde7-27ad-408e-ba75-703c2cc9d500', version='5.0', route_values=route_values, query_parameters=query_parameters, accept_media_type='text/plain')
if 'callback' in kwargs:
callback = kwargs['callback'] # depends on [control=['if'], data=['kwargs']]
else:
callback = None
return self._client.stream_download(response, callback=callback) |
def get_all_synDelays(self):
"""
Create and load arrays of connection delays per connection on this rank
Get random normally distributed synaptic delays,
returns dict of nested list of same shape as SpCells.
Delays are rounded to dt.
This function takes no kwargs.
Parameters
----------
None
Returns
-------
dict
output[cellindex][populationname][layerindex]`, np.array of
delays per connection.
See also
--------
numpy.random.normal
"""
tic = time()
#ok then, we will draw random numbers across ranks, which have to
#be unique per cell. Now, we simply record the random state,
#change the seed per cell, and put the original state back below.
randomstate = np.random.get_state()
#container
delays = {}
for cellindex in self.RANK_CELLINDICES:
#set the random seed on for each cellindex
np.random.seed(self.POPULATIONSEED + cellindex + 2*self.POPULATION_SIZE)
delays[cellindex] = {}
for j, X in enumerate(self.X):
delays[cellindex][X] = []
for i in self.k_yXL[:, j]:
loc = self.synDelayLoc[j]
loc /= self.dt
scale = self.synDelayScale[j]
if scale is not None:
scale /= self.dt
delay = np.random.normal(loc, scale, i).astype(int)
while np.any(delay < 1):
inds = delay < 1
delay[inds] = np.random.normal(loc, scale,
inds.sum()).astype(int)
delay = delay.astype(float)
delay *= self.dt
else:
delay = np.zeros(i) + self.synDelayLoc[j]
delays[cellindex][X].append(delay)
#reset the random number generator
np.random.set_state(randomstate)
if RANK == 0:
print('found delays in %.2f seconds' % (time()-tic))
return delays | def function[get_all_synDelays, parameter[self]]:
constant[
Create and load arrays of connection delays per connection on this rank
Get random normally distributed synaptic delays,
returns dict of nested list of same shape as SpCells.
Delays are rounded to dt.
This function takes no kwargs.
Parameters
----------
None
Returns
-------
dict
output[cellindex][populationname][layerindex]`, np.array of
delays per connection.
See also
--------
numpy.random.normal
]
variable[tic] assign[=] call[name[time], parameter[]]
variable[randomstate] assign[=] call[name[np].random.get_state, parameter[]]
variable[delays] assign[=] dictionary[[], []]
for taget[name[cellindex]] in starred[name[self].RANK_CELLINDICES] begin[:]
call[name[np].random.seed, parameter[binary_operation[binary_operation[name[self].POPULATIONSEED + name[cellindex]] + binary_operation[constant[2] * name[self].POPULATION_SIZE]]]]
call[name[delays]][name[cellindex]] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da1b0c956c0>, <ast.Name object at 0x7da1b0c960b0>]]] in starred[call[name[enumerate], parameter[name[self].X]]] begin[:]
call[call[name[delays]][name[cellindex]]][name[X]] assign[=] list[[]]
for taget[name[i]] in starred[call[name[self].k_yXL][tuple[[<ast.Slice object at 0x7da1b0c97e20>, <ast.Name object at 0x7da1b0c955d0>]]]] begin[:]
variable[loc] assign[=] call[name[self].synDelayLoc][name[j]]
<ast.AugAssign object at 0x7da1b0c97f10>
variable[scale] assign[=] call[name[self].synDelayScale][name[j]]
if compare[name[scale] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b0c946a0>
variable[delay] assign[=] call[call[name[np].random.normal, parameter[name[loc], name[scale], name[i]]].astype, parameter[name[int]]]
while call[name[np].any, parameter[compare[name[delay] less[<] constant[1]]]] begin[:]
variable[inds] assign[=] compare[name[delay] less[<] constant[1]]
call[name[delay]][name[inds]] assign[=] call[call[name[np].random.normal, parameter[name[loc], name[scale], call[name[inds].sum, parameter[]]]].astype, parameter[name[int]]]
variable[delay] assign[=] call[name[delay].astype, parameter[name[float]]]
<ast.AugAssign object at 0x7da1b0ce65c0>
call[call[call[name[delays]][name[cellindex]]][name[X]].append, parameter[name[delay]]]
call[name[np].random.set_state, parameter[name[randomstate]]]
if compare[name[RANK] equal[==] constant[0]] begin[:]
call[name[print], parameter[binary_operation[constant[found delays in %.2f seconds] <ast.Mod object at 0x7da2590d6920> binary_operation[call[name[time], parameter[]] - name[tic]]]]]
return[name[delays]] | keyword[def] identifier[get_all_synDelays] ( identifier[self] ):
literal[string]
identifier[tic] = identifier[time] ()
identifier[randomstate] = identifier[np] . identifier[random] . identifier[get_state] ()
identifier[delays] ={}
keyword[for] identifier[cellindex] keyword[in] identifier[self] . identifier[RANK_CELLINDICES] :
identifier[np] . identifier[random] . identifier[seed] ( identifier[self] . identifier[POPULATIONSEED] + identifier[cellindex] + literal[int] * identifier[self] . identifier[POPULATION_SIZE] )
identifier[delays] [ identifier[cellindex] ]={}
keyword[for] identifier[j] , identifier[X] keyword[in] identifier[enumerate] ( identifier[self] . identifier[X] ):
identifier[delays] [ identifier[cellindex] ][ identifier[X] ]=[]
keyword[for] identifier[i] keyword[in] identifier[self] . identifier[k_yXL] [:, identifier[j] ]:
identifier[loc] = identifier[self] . identifier[synDelayLoc] [ identifier[j] ]
identifier[loc] /= identifier[self] . identifier[dt]
identifier[scale] = identifier[self] . identifier[synDelayScale] [ identifier[j] ]
keyword[if] identifier[scale] keyword[is] keyword[not] keyword[None] :
identifier[scale] /= identifier[self] . identifier[dt]
identifier[delay] = identifier[np] . identifier[random] . identifier[normal] ( identifier[loc] , identifier[scale] , identifier[i] ). identifier[astype] ( identifier[int] )
keyword[while] identifier[np] . identifier[any] ( identifier[delay] < literal[int] ):
identifier[inds] = identifier[delay] < literal[int]
identifier[delay] [ identifier[inds] ]= identifier[np] . identifier[random] . identifier[normal] ( identifier[loc] , identifier[scale] ,
identifier[inds] . identifier[sum] ()). identifier[astype] ( identifier[int] )
identifier[delay] = identifier[delay] . identifier[astype] ( identifier[float] )
identifier[delay] *= identifier[self] . identifier[dt]
keyword[else] :
identifier[delay] = identifier[np] . identifier[zeros] ( identifier[i] )+ identifier[self] . identifier[synDelayLoc] [ identifier[j] ]
identifier[delays] [ identifier[cellindex] ][ identifier[X] ]. identifier[append] ( identifier[delay] )
identifier[np] . identifier[random] . identifier[set_state] ( identifier[randomstate] )
keyword[if] identifier[RANK] == literal[int] :
identifier[print] ( literal[string] %( identifier[time] ()- identifier[tic] ))
keyword[return] identifier[delays] | def get_all_synDelays(self):
"""
Create and load arrays of connection delays per connection on this rank
Get random normally distributed synaptic delays,
returns dict of nested list of same shape as SpCells.
Delays are rounded to dt.
This function takes no kwargs.
Parameters
----------
None
Returns
-------
dict
output[cellindex][populationname][layerindex]`, np.array of
delays per connection.
See also
--------
numpy.random.normal
"""
tic = time()
#ok then, we will draw random numbers across ranks, which have to
#be unique per cell. Now, we simply record the random state,
#change the seed per cell, and put the original state back below.
randomstate = np.random.get_state()
#container
delays = {}
for cellindex in self.RANK_CELLINDICES:
#set the random seed on for each cellindex
np.random.seed(self.POPULATIONSEED + cellindex + 2 * self.POPULATION_SIZE)
delays[cellindex] = {}
for (j, X) in enumerate(self.X):
delays[cellindex][X] = []
for i in self.k_yXL[:, j]:
loc = self.synDelayLoc[j]
loc /= self.dt
scale = self.synDelayScale[j]
if scale is not None:
scale /= self.dt
delay = np.random.normal(loc, scale, i).astype(int)
while np.any(delay < 1):
inds = delay < 1
delay[inds] = np.random.normal(loc, scale, inds.sum()).astype(int) # depends on [control=['while'], data=[]]
delay = delay.astype(float)
delay *= self.dt # depends on [control=['if'], data=['scale']]
else:
delay = np.zeros(i) + self.synDelayLoc[j]
delays[cellindex][X].append(delay) # depends on [control=['for'], data=['i']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['cellindex']]
#reset the random number generator
np.random.set_state(randomstate)
if RANK == 0:
print('found delays in %.2f seconds' % (time() - tic)) # depends on [control=['if'], data=[]]
return delays |
def is_lower(self):
"""Asserts that val is non-empty string and all characters are lowercase."""
if not isinstance(self.val, str_types):
raise TypeError('val is not a string')
if len(self.val) == 0:
raise ValueError('val is empty')
if self.val != self.val.lower():
self._err('Expected <%s> to contain only lowercase chars, but did not.' % self.val)
return self | def function[is_lower, parameter[self]]:
constant[Asserts that val is non-empty string and all characters are lowercase.]
if <ast.UnaryOp object at 0x7da1b0125a80> begin[:]
<ast.Raise object at 0x7da1b0127370>
if compare[call[name[len], parameter[name[self].val]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da1b0126230>
if compare[name[self].val not_equal[!=] call[name[self].val.lower, parameter[]]] begin[:]
call[name[self]._err, parameter[binary_operation[constant[Expected <%s> to contain only lowercase chars, but did not.] <ast.Mod object at 0x7da2590d6920> name[self].val]]]
return[name[self]] | keyword[def] identifier[is_lower] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[self] . identifier[val] , identifier[str_types] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[len] ( identifier[self] . identifier[val] )== literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[self] . identifier[val] != identifier[self] . identifier[val] . identifier[lower] ():
identifier[self] . identifier[_err] ( literal[string] % identifier[self] . identifier[val] )
keyword[return] identifier[self] | def is_lower(self):
"""Asserts that val is non-empty string and all characters are lowercase."""
if not isinstance(self.val, str_types):
raise TypeError('val is not a string') # depends on [control=['if'], data=[]]
if len(self.val) == 0:
raise ValueError('val is empty') # depends on [control=['if'], data=[]]
if self.val != self.val.lower():
self._err('Expected <%s> to contain only lowercase chars, but did not.' % self.val) # depends on [control=['if'], data=[]]
return self |
def _raise_for_status(response):
'''
Custom raise_for_status with more appropriate error message.
'''
http_error_msg = ""
if 400 <= response.status_code < 500:
http_error_msg = "{0} Client Error: {1}".format(response.status_code,
response.reason)
elif 500 <= response.status_code < 600:
http_error_msg = "{0} Server Error: {1}".format(response.status_code,
response.reason)
if http_error_msg:
try:
more_info = response.json().get("message")
except ValueError:
more_info = None
if more_info and more_info.lower() != response.reason.lower():
http_error_msg += ".\n\t{0}".format(more_info)
raise requests.exceptions.HTTPError(http_error_msg, response=response) | def function[_raise_for_status, parameter[response]]:
constant[
Custom raise_for_status with more appropriate error message.
]
variable[http_error_msg] assign[=] constant[]
if compare[constant[400] less_or_equal[<=] name[response].status_code] begin[:]
variable[http_error_msg] assign[=] call[constant[{0} Client Error: {1}].format, parameter[name[response].status_code, name[response].reason]]
if name[http_error_msg] begin[:]
<ast.Try object at 0x7da1b12c6140>
if <ast.BoolOp object at 0x7da18f722740> begin[:]
<ast.AugAssign object at 0x7da18f723310>
<ast.Raise object at 0x7da18f721a50> | keyword[def] identifier[_raise_for_status] ( identifier[response] ):
literal[string]
identifier[http_error_msg] = literal[string]
keyword[if] literal[int] <= identifier[response] . identifier[status_code] < literal[int] :
identifier[http_error_msg] = literal[string] . identifier[format] ( identifier[response] . identifier[status_code] ,
identifier[response] . identifier[reason] )
keyword[elif] literal[int] <= identifier[response] . identifier[status_code] < literal[int] :
identifier[http_error_msg] = literal[string] . identifier[format] ( identifier[response] . identifier[status_code] ,
identifier[response] . identifier[reason] )
keyword[if] identifier[http_error_msg] :
keyword[try] :
identifier[more_info] = identifier[response] . identifier[json] (). identifier[get] ( literal[string] )
keyword[except] identifier[ValueError] :
identifier[more_info] = keyword[None]
keyword[if] identifier[more_info] keyword[and] identifier[more_info] . identifier[lower] ()!= identifier[response] . identifier[reason] . identifier[lower] ():
identifier[http_error_msg] += literal[string] . identifier[format] ( identifier[more_info] )
keyword[raise] identifier[requests] . identifier[exceptions] . identifier[HTTPError] ( identifier[http_error_msg] , identifier[response] = identifier[response] ) | def _raise_for_status(response):
"""
Custom raise_for_status with more appropriate error message.
"""
http_error_msg = ''
if 400 <= response.status_code < 500:
http_error_msg = '{0} Client Error: {1}'.format(response.status_code, response.reason) # depends on [control=['if'], data=[]]
elif 500 <= response.status_code < 600:
http_error_msg = '{0} Server Error: {1}'.format(response.status_code, response.reason) # depends on [control=['if'], data=[]]
if http_error_msg:
try:
more_info = response.json().get('message') # depends on [control=['try'], data=[]]
except ValueError:
more_info = None # depends on [control=['except'], data=[]]
if more_info and more_info.lower() != response.reason.lower():
http_error_msg += '.\n\t{0}'.format(more_info) # depends on [control=['if'], data=[]]
raise requests.exceptions.HTTPError(http_error_msg, response=response) # depends on [control=['if'], data=[]] |
def count_lines_to_next_cell(cell_end_marker, next_cell_start, total, explicit_eoc):
"""How many blank lines between end of cell marker and next cell?"""
if cell_end_marker < total:
lines_to_next_cell = next_cell_start - cell_end_marker
if explicit_eoc:
lines_to_next_cell -= 1
if next_cell_start >= total:
lines_to_next_cell += 1
return lines_to_next_cell
return 1 | def function[count_lines_to_next_cell, parameter[cell_end_marker, next_cell_start, total, explicit_eoc]]:
constant[How many blank lines between end of cell marker and next cell?]
if compare[name[cell_end_marker] less[<] name[total]] begin[:]
variable[lines_to_next_cell] assign[=] binary_operation[name[next_cell_start] - name[cell_end_marker]]
if name[explicit_eoc] begin[:]
<ast.AugAssign object at 0x7da2054a7e50>
if compare[name[next_cell_start] greater_or_equal[>=] name[total]] begin[:]
<ast.AugAssign object at 0x7da2054a69e0>
return[name[lines_to_next_cell]]
return[constant[1]] | keyword[def] identifier[count_lines_to_next_cell] ( identifier[cell_end_marker] , identifier[next_cell_start] , identifier[total] , identifier[explicit_eoc] ):
literal[string]
keyword[if] identifier[cell_end_marker] < identifier[total] :
identifier[lines_to_next_cell] = identifier[next_cell_start] - identifier[cell_end_marker]
keyword[if] identifier[explicit_eoc] :
identifier[lines_to_next_cell] -= literal[int]
keyword[if] identifier[next_cell_start] >= identifier[total] :
identifier[lines_to_next_cell] += literal[int]
keyword[return] identifier[lines_to_next_cell]
keyword[return] literal[int] | def count_lines_to_next_cell(cell_end_marker, next_cell_start, total, explicit_eoc):
"""How many blank lines between end of cell marker and next cell?"""
if cell_end_marker < total:
lines_to_next_cell = next_cell_start - cell_end_marker
if explicit_eoc:
lines_to_next_cell -= 1 # depends on [control=['if'], data=[]]
if next_cell_start >= total:
lines_to_next_cell += 1 # depends on [control=['if'], data=[]]
return lines_to_next_cell # depends on [control=['if'], data=['cell_end_marker', 'total']]
return 1 |
def connect_and_open_channel(host='localhost',
port=5672,
username='guest', password='guest',
virtual_host='/',
on_connection_close=None, *,
loop=None, **kwargs):
"""
Connect to an AMQP server and open a channel on the connection.
This function is a :ref:`coroutine <coroutine>`.
Parameters of this function are the same as :func:`connect`.
:return: a tuple of ``(connection, channel)``.
Equivalent to::
connection = yield from connect(host, port, username, password, virtual_host, on_connection_close, loop=loop, **kwargs)
channel = yield from connection.open_channel()
return connection, channel
"""
connection = yield from connect(host, port, username, password, virtual_host, on_connection_close, loop=loop, **kwargs)
channel = yield from connection.open_channel()
return connection, channel | def function[connect_and_open_channel, parameter[host, port, username, password, virtual_host, on_connection_close]]:
constant[
Connect to an AMQP server and open a channel on the connection.
This function is a :ref:`coroutine <coroutine>`.
Parameters of this function are the same as :func:`connect`.
:return: a tuple of ``(connection, channel)``.
Equivalent to::
connection = yield from connect(host, port, username, password, virtual_host, on_connection_close, loop=loop, **kwargs)
channel = yield from connection.open_channel()
return connection, channel
]
variable[connection] assign[=] <ast.YieldFrom object at 0x7da20cabc7c0>
variable[channel] assign[=] <ast.YieldFrom object at 0x7da20c6abe50>
return[tuple[[<ast.Name object at 0x7da20c6a8490>, <ast.Name object at 0x7da20c6aa4a0>]]] | keyword[def] identifier[connect_and_open_channel] ( identifier[host] = literal[string] ,
identifier[port] = literal[int] ,
identifier[username] = literal[string] , identifier[password] = literal[string] ,
identifier[virtual_host] = literal[string] ,
identifier[on_connection_close] = keyword[None] ,*,
identifier[loop] = keyword[None] ,** identifier[kwargs] ):
literal[string]
identifier[connection] = keyword[yield] keyword[from] identifier[connect] ( identifier[host] , identifier[port] , identifier[username] , identifier[password] , identifier[virtual_host] , identifier[on_connection_close] , identifier[loop] = identifier[loop] ,** identifier[kwargs] )
identifier[channel] = keyword[yield] keyword[from] identifier[connection] . identifier[open_channel] ()
keyword[return] identifier[connection] , identifier[channel] | def connect_and_open_channel(host='localhost', port=5672, username='guest', password='guest', virtual_host='/', on_connection_close=None, *, loop=None, **kwargs):
"""
Connect to an AMQP server and open a channel on the connection.
This function is a :ref:`coroutine <coroutine>`.
Parameters of this function are the same as :func:`connect`.
:return: a tuple of ``(connection, channel)``.
Equivalent to::
connection = yield from connect(host, port, username, password, virtual_host, on_connection_close, loop=loop, **kwargs)
channel = yield from connection.open_channel()
return connection, channel
"""
connection = (yield from connect(host, port, username, password, virtual_host, on_connection_close, loop=loop, **kwargs))
channel = (yield from connection.open_channel())
return (connection, channel) |
def get_users_by_email(cls, emails):
"""
Accept a list of emails, and separate them into users that exist on OpenEdX and users who don't.
Args:
emails: An iterable of email addresses to split between existing and nonexisting
Returns:
users: Queryset of users who exist in the OpenEdX platform and who were in the list of email addresses
missing_emails: List of unique emails which were in the original list, but do not yet exist as users
"""
users = User.objects.filter(email__in=emails)
present_emails = users.values_list('email', flat=True)
missing_emails = list(set(emails) - set(present_emails))
return users, missing_emails | def function[get_users_by_email, parameter[cls, emails]]:
constant[
Accept a list of emails, and separate them into users that exist on OpenEdX and users who don't.
Args:
emails: An iterable of email addresses to split between existing and nonexisting
Returns:
users: Queryset of users who exist in the OpenEdX platform and who were in the list of email addresses
missing_emails: List of unique emails which were in the original list, but do not yet exist as users
]
variable[users] assign[=] call[name[User].objects.filter, parameter[]]
variable[present_emails] assign[=] call[name[users].values_list, parameter[constant[email]]]
variable[missing_emails] assign[=] call[name[list], parameter[binary_operation[call[name[set], parameter[name[emails]]] - call[name[set], parameter[name[present_emails]]]]]]
return[tuple[[<ast.Name object at 0x7da1b0052410>, <ast.Name object at 0x7da1b0052d70>]]] | keyword[def] identifier[get_users_by_email] ( identifier[cls] , identifier[emails] ):
literal[string]
identifier[users] = identifier[User] . identifier[objects] . identifier[filter] ( identifier[email__in] = identifier[emails] )
identifier[present_emails] = identifier[users] . identifier[values_list] ( literal[string] , identifier[flat] = keyword[True] )
identifier[missing_emails] = identifier[list] ( identifier[set] ( identifier[emails] )- identifier[set] ( identifier[present_emails] ))
keyword[return] identifier[users] , identifier[missing_emails] | def get_users_by_email(cls, emails):
"""
Accept a list of emails, and separate them into users that exist on OpenEdX and users who don't.
Args:
emails: An iterable of email addresses to split between existing and nonexisting
Returns:
users: Queryset of users who exist in the OpenEdX platform and who were in the list of email addresses
missing_emails: List of unique emails which were in the original list, but do not yet exist as users
"""
users = User.objects.filter(email__in=emails)
present_emails = users.values_list('email', flat=True)
missing_emails = list(set(emails) - set(present_emails))
return (users, missing_emails) |
def col_to_numeric(df,col_name, dest = False):
""" Coerces a column in a DataFrame to numeric
Parameters:
df - DataFrame
DataFrame to operate on
col_name - string
Name of column to coerce
dest - bool, default False
Whether to apply the result to the DataFrame or return it.
True is apply, False is return.
"""
new_col = _pd.to_numeric(df[col_name], errors = 'coerce')
if dest:
set_col(df,col_name,new_col)
else:
return new_col | def function[col_to_numeric, parameter[df, col_name, dest]]:
constant[ Coerces a column in a DataFrame to numeric
Parameters:
df - DataFrame
DataFrame to operate on
col_name - string
Name of column to coerce
dest - bool, default False
Whether to apply the result to the DataFrame or return it.
True is apply, False is return.
]
variable[new_col] assign[=] call[name[_pd].to_numeric, parameter[call[name[df]][name[col_name]]]]
if name[dest] begin[:]
call[name[set_col], parameter[name[df], name[col_name], name[new_col]]] | keyword[def] identifier[col_to_numeric] ( identifier[df] , identifier[col_name] , identifier[dest] = keyword[False] ):
literal[string]
identifier[new_col] = identifier[_pd] . identifier[to_numeric] ( identifier[df] [ identifier[col_name] ], identifier[errors] = literal[string] )
keyword[if] identifier[dest] :
identifier[set_col] ( identifier[df] , identifier[col_name] , identifier[new_col] )
keyword[else] :
keyword[return] identifier[new_col] | def col_to_numeric(df, col_name, dest=False):
""" Coerces a column in a DataFrame to numeric
Parameters:
df - DataFrame
DataFrame to operate on
col_name - string
Name of column to coerce
dest - bool, default False
Whether to apply the result to the DataFrame or return it.
True is apply, False is return.
"""
new_col = _pd.to_numeric(df[col_name], errors='coerce')
if dest:
set_col(df, col_name, new_col) # depends on [control=['if'], data=[]]
else:
return new_col |
async def reseed_apply(self) -> DIDInfo:
"""
Replace verification key with new verification key from reseed operation.
Raise WalletState if wallet is closed.
:return: DIDInfo with new verification key and metadata for DID
"""
LOGGER.debug('Wallet.reseed_apply >>>')
if not self.handle:
LOGGER.debug('Wallet.reseed_init <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
await did.replace_keys_apply(self.handle, self.did)
self.verkey = await did.key_for_local_did(self.handle, self.did)
now = int(time())
rv = DIDInfo(self.did, self.verkey, {'anchor': True, 'since': now, 'modified': now})
await did.set_did_metadata(self.handle, self.did, json.dumps(rv.metadata))
LOGGER.info('Wallet %s set seed hash metadata for DID %s', self.name, self.did)
LOGGER.debug('Wallet.reseed_apply <<< %s', rv)
return rv | <ast.AsyncFunctionDef object at 0x7da18bc71960> | keyword[async] keyword[def] identifier[reseed_apply] ( identifier[self] )-> identifier[DIDInfo] :
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] )
keyword[if] keyword[not] identifier[self] . identifier[handle] :
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[self] . identifier[name] )
keyword[raise] identifier[WalletState] ( literal[string] . identifier[format] ( identifier[self] . identifier[name] ))
keyword[await] identifier[did] . identifier[replace_keys_apply] ( identifier[self] . identifier[handle] , identifier[self] . identifier[did] )
identifier[self] . identifier[verkey] = keyword[await] identifier[did] . identifier[key_for_local_did] ( identifier[self] . identifier[handle] , identifier[self] . identifier[did] )
identifier[now] = identifier[int] ( identifier[time] ())
identifier[rv] = identifier[DIDInfo] ( identifier[self] . identifier[did] , identifier[self] . identifier[verkey] ,{ literal[string] : keyword[True] , literal[string] : identifier[now] , literal[string] : identifier[now] })
keyword[await] identifier[did] . identifier[set_did_metadata] ( identifier[self] . identifier[handle] , identifier[self] . identifier[did] , identifier[json] . identifier[dumps] ( identifier[rv] . identifier[metadata] ))
identifier[LOGGER] . identifier[info] ( literal[string] , identifier[self] . identifier[name] , identifier[self] . identifier[did] )
identifier[LOGGER] . identifier[debug] ( literal[string] , identifier[rv] )
keyword[return] identifier[rv] | async def reseed_apply(self) -> DIDInfo:
"""
Replace verification key with new verification key from reseed operation.
Raise WalletState if wallet is closed.
:return: DIDInfo with new verification key and metadata for DID
"""
LOGGER.debug('Wallet.reseed_apply >>>')
if not self.handle:
LOGGER.debug('Wallet.reseed_init <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name)) # depends on [control=['if'], data=[]]
await did.replace_keys_apply(self.handle, self.did)
self.verkey = await did.key_for_local_did(self.handle, self.did)
now = int(time())
rv = DIDInfo(self.did, self.verkey, {'anchor': True, 'since': now, 'modified': now})
await did.set_did_metadata(self.handle, self.did, json.dumps(rv.metadata))
LOGGER.info('Wallet %s set seed hash metadata for DID %s', self.name, self.did)
LOGGER.debug('Wallet.reseed_apply <<< %s', rv)
return rv |
def delete_activity(self, activity_id=None):
"""Deletes the Activity identified by the given Id.
arg: activityId (osid.id.Id): the Id of the Activity to
delete
raise: NotFound - an Activity was not found identified by the
given Id
raise: NullArgument - activityId is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented.
"""
if activity_id is None:
raise NullArgument()
if not isinstance(activity_id, Id):
raise InvalidArgument('argument type is not an osid Id')
url_path = construct_url('activities',
bank_id=self._catalog_idstr,
act_id=activity_id)
result = self._delete_request(url_path)
return objects.Activity(result) | def function[delete_activity, parameter[self, activity_id]]:
constant[Deletes the Activity identified by the given Id.
arg: activityId (osid.id.Id): the Id of the Activity to
delete
raise: NotFound - an Activity was not found identified by the
given Id
raise: NullArgument - activityId is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented.
]
if compare[name[activity_id] is constant[None]] begin[:]
<ast.Raise object at 0x7da204620e20>
if <ast.UnaryOp object at 0x7da204620670> begin[:]
<ast.Raise object at 0x7da204620a00>
variable[url_path] assign[=] call[name[construct_url], parameter[constant[activities]]]
variable[result] assign[=] call[name[self]._delete_request, parameter[name[url_path]]]
return[call[name[objects].Activity, parameter[name[result]]]] | keyword[def] identifier[delete_activity] ( identifier[self] , identifier[activity_id] = keyword[None] ):
literal[string]
keyword[if] identifier[activity_id] keyword[is] keyword[None] :
keyword[raise] identifier[NullArgument] ()
keyword[if] keyword[not] identifier[isinstance] ( identifier[activity_id] , identifier[Id] ):
keyword[raise] identifier[InvalidArgument] ( literal[string] )
identifier[url_path] = identifier[construct_url] ( literal[string] ,
identifier[bank_id] = identifier[self] . identifier[_catalog_idstr] ,
identifier[act_id] = identifier[activity_id] )
identifier[result] = identifier[self] . identifier[_delete_request] ( identifier[url_path] )
keyword[return] identifier[objects] . identifier[Activity] ( identifier[result] ) | def delete_activity(self, activity_id=None):
"""Deletes the Activity identified by the given Id.
arg: activityId (osid.id.Id): the Id of the Activity to
delete
raise: NotFound - an Activity was not found identified by the
given Id
raise: NullArgument - activityId is null
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
compliance: mandatory - This method must be implemented.
"""
if activity_id is None:
raise NullArgument() # depends on [control=['if'], data=[]]
if not isinstance(activity_id, Id):
raise InvalidArgument('argument type is not an osid Id') # depends on [control=['if'], data=[]]
url_path = construct_url('activities', bank_id=self._catalog_idstr, act_id=activity_id)
result = self._delete_request(url_path)
return objects.Activity(result) |
def find_link(lid=None, sep_id=None, tep_id=None):
"""
find link according to link ID.
:param lid: link id
:return: the link if found or None if not found
"""
LOGGER.debug("LinkService.find_link")
ret = None
if (lid is None or not lid) and (sep_id is None or not sep_id) and (tep_id is None or not tep_id):
raise exceptions.ArianeCallParametersError('id, source endpoint ID, target endpoint ID')
if (lid is not None and lid) and ((sep_id is not None and sep_id) or (tep_id is not None and tep_id)):
LOGGER.warning('LinkService.find_link - Both lid and sep_id and tep_id are defined. '
'Will give you search on id.')
# traceback.print_stack()
sep_id = None
tep_id = None
if lid is not None and lid:
params = SessionService.complete_transactional_req({'ID': lid})
if MappingService.driver_type != DriverFactory.DRIVER_REST:
params['OPERATION'] = 'getLink'
elif sep_id is not None and sep_id and tep_id is not None and tep_id:
params = SessionService.complete_transactional_req({'SEPID': sep_id, 'TEPID': tep_id})
if MappingService.driver_type != DriverFactory.DRIVER_REST:
params['OPERATION'] = 'getLinkBySourceEPandDestinationEP'
elif sep_id is not None and sep_id and (tep_id is None or not tep_id):
params = SessionService.complete_transactional_req({'SEPID': sep_id})
if MappingService.driver_type != DriverFactory.DRIVER_REST:
params['OPERATION'] = 'getLinksBySourceEP'
else:
params = SessionService.complete_transactional_req({'TEPID': tep_id})
if MappingService.driver_type != DriverFactory.DRIVER_REST:
params['OPERATION'] = 'getLinksByDestinationEP'
if MappingService.driver_type != DriverFactory.DRIVER_REST:
args = {'properties': params}
else:
args = {'http_operation': 'GET', 'operation_path': 'get', 'parameters': params}
if MappingService.driver_type != DriverFactory.DRIVER_REST:
if lid is not None and lid:
response = LinkService.requester.call(args)
else:
response = MappingService.requester.call(args)
else:
response = LinkService.requester.call(args)
if MappingService.driver_type != DriverFactory.DRIVER_REST:
response = response.get()
if response.rc == 0:
if (lid is not None and lid) or (sep_id is not None and sep_id and tep_id is not None and tep_id):
ret = Link.json_2_link(response.response_content)
else:
ret = []
for link in response.response_content['links']:
ret.append(Link.json_2_link(link))
elif response.rc != 404:
err_msg = 'LinkService.find_link - Problem while searching link (id:' + str(lid) + '). ' + \
'Reason: ' + str(response.response_content) + ' - ' + str(response.error_message) + \
" (" + str(response.rc) + ")"
if response.rc == 500 and ArianeMappingOverloadError.ERROR_MSG in response.error_message:
raise ArianeMappingOverloadError("LinkService.find_link",
ArianeMappingOverloadError.ERROR_MSG)
LOGGER.warning(err_msg)
# traceback.print_stack()
return ret | def function[find_link, parameter[lid, sep_id, tep_id]]:
constant[
find link according to link ID.
:param lid: link id
:return: the link if found or None if not found
]
call[name[LOGGER].debug, parameter[constant[LinkService.find_link]]]
variable[ret] assign[=] constant[None]
if <ast.BoolOp object at 0x7da18f720df0> begin[:]
<ast.Raise object at 0x7da18f722f20>
if <ast.BoolOp object at 0x7da18f723ca0> begin[:]
call[name[LOGGER].warning, parameter[constant[LinkService.find_link - Both lid and sep_id and tep_id are defined. Will give you search on id.]]]
variable[sep_id] assign[=] constant[None]
variable[tep_id] assign[=] constant[None]
if <ast.BoolOp object at 0x7da18f720ac0> begin[:]
variable[params] assign[=] call[name[SessionService].complete_transactional_req, parameter[dictionary[[<ast.Constant object at 0x7da18f7205e0>], [<ast.Name object at 0x7da18f722bc0>]]]]
if compare[name[MappingService].driver_type not_equal[!=] name[DriverFactory].DRIVER_REST] begin[:]
call[name[params]][constant[OPERATION]] assign[=] constant[getLink]
if compare[name[MappingService].driver_type not_equal[!=] name[DriverFactory].DRIVER_REST] begin[:]
variable[args] assign[=] dictionary[[<ast.Constant object at 0x7da18f721990>], [<ast.Name object at 0x7da18f722cb0>]]
if compare[name[MappingService].driver_type not_equal[!=] name[DriverFactory].DRIVER_REST] begin[:]
if <ast.BoolOp object at 0x7da18f7206d0> begin[:]
variable[response] assign[=] call[name[LinkService].requester.call, parameter[name[args]]]
if compare[name[MappingService].driver_type not_equal[!=] name[DriverFactory].DRIVER_REST] begin[:]
variable[response] assign[=] call[name[response].get, parameter[]]
if compare[name[response].rc equal[==] constant[0]] begin[:]
if <ast.BoolOp object at 0x7da18f722fb0> begin[:]
variable[ret] assign[=] call[name[Link].json_2_link, parameter[name[response].response_content]]
return[name[ret]] | keyword[def] identifier[find_link] ( identifier[lid] = keyword[None] , identifier[sep_id] = keyword[None] , identifier[tep_id] = keyword[None] ):
literal[string]
identifier[LOGGER] . identifier[debug] ( literal[string] )
identifier[ret] = keyword[None]
keyword[if] ( identifier[lid] keyword[is] keyword[None] keyword[or] keyword[not] identifier[lid] ) keyword[and] ( identifier[sep_id] keyword[is] keyword[None] keyword[or] keyword[not] identifier[sep_id] ) keyword[and] ( identifier[tep_id] keyword[is] keyword[None] keyword[or] keyword[not] identifier[tep_id] ):
keyword[raise] identifier[exceptions] . identifier[ArianeCallParametersError] ( literal[string] )
keyword[if] ( identifier[lid] keyword[is] keyword[not] keyword[None] keyword[and] identifier[lid] ) keyword[and] (( identifier[sep_id] keyword[is] keyword[not] keyword[None] keyword[and] identifier[sep_id] ) keyword[or] ( identifier[tep_id] keyword[is] keyword[not] keyword[None] keyword[and] identifier[tep_id] )):
identifier[LOGGER] . identifier[warning] ( literal[string]
literal[string] )
identifier[sep_id] = keyword[None]
identifier[tep_id] = keyword[None]
keyword[if] identifier[lid] keyword[is] keyword[not] keyword[None] keyword[and] identifier[lid] :
identifier[params] = identifier[SessionService] . identifier[complete_transactional_req] ({ literal[string] : identifier[lid] })
keyword[if] identifier[MappingService] . identifier[driver_type] != identifier[DriverFactory] . identifier[DRIVER_REST] :
identifier[params] [ literal[string] ]= literal[string]
keyword[elif] identifier[sep_id] keyword[is] keyword[not] keyword[None] keyword[and] identifier[sep_id] keyword[and] identifier[tep_id] keyword[is] keyword[not] keyword[None] keyword[and] identifier[tep_id] :
identifier[params] = identifier[SessionService] . identifier[complete_transactional_req] ({ literal[string] : identifier[sep_id] , literal[string] : identifier[tep_id] })
keyword[if] identifier[MappingService] . identifier[driver_type] != identifier[DriverFactory] . identifier[DRIVER_REST] :
identifier[params] [ literal[string] ]= literal[string]
keyword[elif] identifier[sep_id] keyword[is] keyword[not] keyword[None] keyword[and] identifier[sep_id] keyword[and] ( identifier[tep_id] keyword[is] keyword[None] keyword[or] keyword[not] identifier[tep_id] ):
identifier[params] = identifier[SessionService] . identifier[complete_transactional_req] ({ literal[string] : identifier[sep_id] })
keyword[if] identifier[MappingService] . identifier[driver_type] != identifier[DriverFactory] . identifier[DRIVER_REST] :
identifier[params] [ literal[string] ]= literal[string]
keyword[else] :
identifier[params] = identifier[SessionService] . identifier[complete_transactional_req] ({ literal[string] : identifier[tep_id] })
keyword[if] identifier[MappingService] . identifier[driver_type] != identifier[DriverFactory] . identifier[DRIVER_REST] :
identifier[params] [ literal[string] ]= literal[string]
keyword[if] identifier[MappingService] . identifier[driver_type] != identifier[DriverFactory] . identifier[DRIVER_REST] :
identifier[args] ={ literal[string] : identifier[params] }
keyword[else] :
identifier[args] ={ literal[string] : literal[string] , literal[string] : literal[string] , literal[string] : identifier[params] }
keyword[if] identifier[MappingService] . identifier[driver_type] != identifier[DriverFactory] . identifier[DRIVER_REST] :
keyword[if] identifier[lid] keyword[is] keyword[not] keyword[None] keyword[and] identifier[lid] :
identifier[response] = identifier[LinkService] . identifier[requester] . identifier[call] ( identifier[args] )
keyword[else] :
identifier[response] = identifier[MappingService] . identifier[requester] . identifier[call] ( identifier[args] )
keyword[else] :
identifier[response] = identifier[LinkService] . identifier[requester] . identifier[call] ( identifier[args] )
keyword[if] identifier[MappingService] . identifier[driver_type] != identifier[DriverFactory] . identifier[DRIVER_REST] :
identifier[response] = identifier[response] . identifier[get] ()
keyword[if] identifier[response] . identifier[rc] == literal[int] :
keyword[if] ( identifier[lid] keyword[is] keyword[not] keyword[None] keyword[and] identifier[lid] ) keyword[or] ( identifier[sep_id] keyword[is] keyword[not] keyword[None] keyword[and] identifier[sep_id] keyword[and] identifier[tep_id] keyword[is] keyword[not] keyword[None] keyword[and] identifier[tep_id] ):
identifier[ret] = identifier[Link] . identifier[json_2_link] ( identifier[response] . identifier[response_content] )
keyword[else] :
identifier[ret] =[]
keyword[for] identifier[link] keyword[in] identifier[response] . identifier[response_content] [ literal[string] ]:
identifier[ret] . identifier[append] ( identifier[Link] . identifier[json_2_link] ( identifier[link] ))
keyword[elif] identifier[response] . identifier[rc] != literal[int] :
identifier[err_msg] = literal[string] + identifier[str] ( identifier[lid] )+ literal[string] + literal[string] + identifier[str] ( identifier[response] . identifier[response_content] )+ literal[string] + identifier[str] ( identifier[response] . identifier[error_message] )+ literal[string] + identifier[str] ( identifier[response] . identifier[rc] )+ literal[string]
keyword[if] identifier[response] . identifier[rc] == literal[int] keyword[and] identifier[ArianeMappingOverloadError] . identifier[ERROR_MSG] keyword[in] identifier[response] . identifier[error_message] :
keyword[raise] identifier[ArianeMappingOverloadError] ( literal[string] ,
identifier[ArianeMappingOverloadError] . identifier[ERROR_MSG] )
identifier[LOGGER] . identifier[warning] ( identifier[err_msg] )
keyword[return] identifier[ret] | def find_link(lid=None, sep_id=None, tep_id=None):
"""
find link according to link ID.
:param lid: link id
:return: the link if found or None if not found
"""
LOGGER.debug('LinkService.find_link')
ret = None
if (lid is None or not lid) and (sep_id is None or not sep_id) and (tep_id is None or not tep_id):
raise exceptions.ArianeCallParametersError('id, source endpoint ID, target endpoint ID') # depends on [control=['if'], data=[]]
if (lid is not None and lid) and (sep_id is not None and sep_id or (tep_id is not None and tep_id)):
LOGGER.warning('LinkService.find_link - Both lid and sep_id and tep_id are defined. Will give you search on id.')
# traceback.print_stack()
sep_id = None
tep_id = None # depends on [control=['if'], data=[]]
if lid is not None and lid:
params = SessionService.complete_transactional_req({'ID': lid})
if MappingService.driver_type != DriverFactory.DRIVER_REST:
params['OPERATION'] = 'getLink' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif sep_id is not None and sep_id and (tep_id is not None) and tep_id:
params = SessionService.complete_transactional_req({'SEPID': sep_id, 'TEPID': tep_id})
if MappingService.driver_type != DriverFactory.DRIVER_REST:
params['OPERATION'] = 'getLinkBySourceEPandDestinationEP' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
elif sep_id is not None and sep_id and (tep_id is None or not tep_id):
params = SessionService.complete_transactional_req({'SEPID': sep_id})
if MappingService.driver_type != DriverFactory.DRIVER_REST:
params['OPERATION'] = 'getLinksBySourceEP' # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
params = SessionService.complete_transactional_req({'TEPID': tep_id})
if MappingService.driver_type != DriverFactory.DRIVER_REST:
params['OPERATION'] = 'getLinksByDestinationEP' # depends on [control=['if'], data=[]]
if MappingService.driver_type != DriverFactory.DRIVER_REST:
args = {'properties': params} # depends on [control=['if'], data=[]]
else:
args = {'http_operation': 'GET', 'operation_path': 'get', 'parameters': params}
if MappingService.driver_type != DriverFactory.DRIVER_REST:
if lid is not None and lid:
response = LinkService.requester.call(args) # depends on [control=['if'], data=[]]
else:
response = MappingService.requester.call(args) # depends on [control=['if'], data=[]]
else:
response = LinkService.requester.call(args)
if MappingService.driver_type != DriverFactory.DRIVER_REST:
response = response.get() # depends on [control=['if'], data=[]]
if response.rc == 0:
if lid is not None and lid or (sep_id is not None and sep_id and (tep_id is not None) and tep_id):
ret = Link.json_2_link(response.response_content) # depends on [control=['if'], data=[]]
else:
ret = []
for link in response.response_content['links']:
ret.append(Link.json_2_link(link)) # depends on [control=['for'], data=['link']] # depends on [control=['if'], data=[]]
elif response.rc != 404:
err_msg = 'LinkService.find_link - Problem while searching link (id:' + str(lid) + '). ' + 'Reason: ' + str(response.response_content) + ' - ' + str(response.error_message) + ' (' + str(response.rc) + ')'
if response.rc == 500 and ArianeMappingOverloadError.ERROR_MSG in response.error_message:
raise ArianeMappingOverloadError('LinkService.find_link', ArianeMappingOverloadError.ERROR_MSG) # depends on [control=['if'], data=[]]
LOGGER.warning(err_msg) # depends on [control=['if'], data=[]]
# traceback.print_stack()
return ret |
def readUserSession(datafile):
"""
Reads the user session record from the file's cursor position
Args:
datafile: Data file whose cursor points at the beginning of the record
Returns:
list of pages in the order clicked by the user
"""
for line in datafile:
pages = line.split()
total = len(pages)
# Select user sessions with 2 or more pages
if total < 2:
continue
# Exclude outliers by removing extreme long sessions
if total > 500:
continue
return [PAGE_CATEGORIES[int(i) - 1] for i in pages]
return [] | def function[readUserSession, parameter[datafile]]:
constant[
Reads the user session record from the file's cursor position
Args:
datafile: Data file whose cursor points at the beginning of the record
Returns:
list of pages in the order clicked by the user
]
for taget[name[line]] in starred[name[datafile]] begin[:]
variable[pages] assign[=] call[name[line].split, parameter[]]
variable[total] assign[=] call[name[len], parameter[name[pages]]]
if compare[name[total] less[<] constant[2]] begin[:]
continue
if compare[name[total] greater[>] constant[500]] begin[:]
continue
return[<ast.ListComp object at 0x7da18dc99db0>]
return[list[[]]] | keyword[def] identifier[readUserSession] ( identifier[datafile] ):
literal[string]
keyword[for] identifier[line] keyword[in] identifier[datafile] :
identifier[pages] = identifier[line] . identifier[split] ()
identifier[total] = identifier[len] ( identifier[pages] )
keyword[if] identifier[total] < literal[int] :
keyword[continue]
keyword[if] identifier[total] > literal[int] :
keyword[continue]
keyword[return] [ identifier[PAGE_CATEGORIES] [ identifier[int] ( identifier[i] )- literal[int] ] keyword[for] identifier[i] keyword[in] identifier[pages] ]
keyword[return] [] | def readUserSession(datafile):
"""
Reads the user session record from the file's cursor position
Args:
datafile: Data file whose cursor points at the beginning of the record
Returns:
list of pages in the order clicked by the user
"""
for line in datafile:
pages = line.split()
total = len(pages)
# Select user sessions with 2 or more pages
if total < 2:
continue # depends on [control=['if'], data=[]]
# Exclude outliers by removing extreme long sessions
if total > 500:
continue # depends on [control=['if'], data=[]]
return [PAGE_CATEGORIES[int(i) - 1] for i in pages] # depends on [control=['for'], data=['line']]
return [] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.