code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
def get(self, key, default=None):
"""Proxy to ``obj``.
:param key: Metadata key which holds the value.
:returns: Metadata value of the specified key or default.
"""
if hasattr(self.obj, key):
return getattr(self.obj, key)
return self.data.get(key, default) | def function[get, parameter[self, key, default]]:
constant[Proxy to ``obj``.
:param key: Metadata key which holds the value.
:returns: Metadata value of the specified key or default.
]
if call[name[hasattr], parameter[name[self].obj, name[key]]] begin[:]
return[call[name[getattr], parameter[name[self].obj, name[key]]]]
return[call[name[self].data.get, parameter[name[key], name[default]]]] | keyword[def] identifier[get] ( identifier[self] , identifier[key] , identifier[default] = keyword[None] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[self] . identifier[obj] , identifier[key] ):
keyword[return] identifier[getattr] ( identifier[self] . identifier[obj] , identifier[key] )
keyword[return] identifier[self] . identifier[data] . identifier[get] ( identifier[key] , identifier[default] ) | def get(self, key, default=None):
"""Proxy to ``obj``.
:param key: Metadata key which holds the value.
:returns: Metadata value of the specified key or default.
"""
if hasattr(self.obj, key):
return getattr(self.obj, key) # depends on [control=['if'], data=[]]
return self.data.get(key, default) |
def irange_key(self, min_key=None, max_key=None, inclusive=(True, True),
reverse=False):
"""
Create an iterator of values between `min_key` and `max_key`.
`inclusive` is a pair of booleans that indicates whether the min_key
and max_key ought to be included in the range, respectively. The
default is (True, True) such that the range is inclusive of both
`min_key` and `max_key`.
Both `min_key` and `max_key` default to `None` which is automatically
inclusive of the start and end of the list, respectively.
When `reverse` is `True` the values are yielded from the iterator in
reverse order; `reverse` defaults to `False`.
"""
_maxes = self._maxes
if not _maxes:
return iter(())
_keys = self._keys
# Calculate the minimum (pos, idx) pair. By default this location
# will be inclusive in our calculation.
if min_key is None:
min_pos = 0
min_idx = 0
else:
if inclusive[0]:
min_pos = bisect_left(_maxes, min_key)
if min_pos == len(_maxes):
return iter(())
min_idx = bisect_left(_keys[min_pos], min_key)
else:
min_pos = bisect_right(_maxes, min_key)
if min_pos == len(_maxes):
return iter(())
min_idx = bisect_right(_keys[min_pos], min_key)
# Calculate the maximum (pos, idx) pair. By default this location
# will be exclusive in our calculation.
if max_key is None:
max_pos = len(_maxes) - 1
max_idx = len(_keys[max_pos])
else:
if inclusive[1]:
max_pos = bisect_right(_maxes, max_key)
if max_pos == len(_maxes):
max_pos -= 1
max_idx = len(_keys[max_pos])
else:
max_idx = bisect_right(_keys[max_pos], max_key)
else:
max_pos = bisect_left(_maxes, max_key)
if max_pos == len(_maxes):
max_pos -= 1
max_idx = len(_keys[max_pos])
else:
max_idx = bisect_left(_keys[max_pos], max_key)
return self._islice(min_pos, min_idx, max_pos, max_idx, reverse) | def function[irange_key, parameter[self, min_key, max_key, inclusive, reverse]]:
constant[
Create an iterator of values between `min_key` and `max_key`.
`inclusive` is a pair of booleans that indicates whether the min_key
and max_key ought to be included in the range, respectively. The
default is (True, True) such that the range is inclusive of both
`min_key` and `max_key`.
Both `min_key` and `max_key` default to `None` which is automatically
inclusive of the start and end of the list, respectively.
When `reverse` is `True` the values are yielded from the iterator in
reverse order; `reverse` defaults to `False`.
]
variable[_maxes] assign[=] name[self]._maxes
if <ast.UnaryOp object at 0x7da1b170c0d0> begin[:]
return[call[name[iter], parameter[tuple[[]]]]]
variable[_keys] assign[=] name[self]._keys
if compare[name[min_key] is constant[None]] begin[:]
variable[min_pos] assign[=] constant[0]
variable[min_idx] assign[=] constant[0]
if compare[name[max_key] is constant[None]] begin[:]
variable[max_pos] assign[=] binary_operation[call[name[len], parameter[name[_maxes]]] - constant[1]]
variable[max_idx] assign[=] call[name[len], parameter[call[name[_keys]][name[max_pos]]]]
return[call[name[self]._islice, parameter[name[min_pos], name[min_idx], name[max_pos], name[max_idx], name[reverse]]]] | keyword[def] identifier[irange_key] ( identifier[self] , identifier[min_key] = keyword[None] , identifier[max_key] = keyword[None] , identifier[inclusive] =( keyword[True] , keyword[True] ),
identifier[reverse] = keyword[False] ):
literal[string]
identifier[_maxes] = identifier[self] . identifier[_maxes]
keyword[if] keyword[not] identifier[_maxes] :
keyword[return] identifier[iter] (())
identifier[_keys] = identifier[self] . identifier[_keys]
keyword[if] identifier[min_key] keyword[is] keyword[None] :
identifier[min_pos] = literal[int]
identifier[min_idx] = literal[int]
keyword[else] :
keyword[if] identifier[inclusive] [ literal[int] ]:
identifier[min_pos] = identifier[bisect_left] ( identifier[_maxes] , identifier[min_key] )
keyword[if] identifier[min_pos] == identifier[len] ( identifier[_maxes] ):
keyword[return] identifier[iter] (())
identifier[min_idx] = identifier[bisect_left] ( identifier[_keys] [ identifier[min_pos] ], identifier[min_key] )
keyword[else] :
identifier[min_pos] = identifier[bisect_right] ( identifier[_maxes] , identifier[min_key] )
keyword[if] identifier[min_pos] == identifier[len] ( identifier[_maxes] ):
keyword[return] identifier[iter] (())
identifier[min_idx] = identifier[bisect_right] ( identifier[_keys] [ identifier[min_pos] ], identifier[min_key] )
keyword[if] identifier[max_key] keyword[is] keyword[None] :
identifier[max_pos] = identifier[len] ( identifier[_maxes] )- literal[int]
identifier[max_idx] = identifier[len] ( identifier[_keys] [ identifier[max_pos] ])
keyword[else] :
keyword[if] identifier[inclusive] [ literal[int] ]:
identifier[max_pos] = identifier[bisect_right] ( identifier[_maxes] , identifier[max_key] )
keyword[if] identifier[max_pos] == identifier[len] ( identifier[_maxes] ):
identifier[max_pos] -= literal[int]
identifier[max_idx] = identifier[len] ( identifier[_keys] [ identifier[max_pos] ])
keyword[else] :
identifier[max_idx] = identifier[bisect_right] ( identifier[_keys] [ identifier[max_pos] ], identifier[max_key] )
keyword[else] :
identifier[max_pos] = identifier[bisect_left] ( identifier[_maxes] , identifier[max_key] )
keyword[if] identifier[max_pos] == identifier[len] ( identifier[_maxes] ):
identifier[max_pos] -= literal[int]
identifier[max_idx] = identifier[len] ( identifier[_keys] [ identifier[max_pos] ])
keyword[else] :
identifier[max_idx] = identifier[bisect_left] ( identifier[_keys] [ identifier[max_pos] ], identifier[max_key] )
keyword[return] identifier[self] . identifier[_islice] ( identifier[min_pos] , identifier[min_idx] , identifier[max_pos] , identifier[max_idx] , identifier[reverse] ) | def irange_key(self, min_key=None, max_key=None, inclusive=(True, True), reverse=False):
"""
Create an iterator of values between `min_key` and `max_key`.
`inclusive` is a pair of booleans that indicates whether the min_key
and max_key ought to be included in the range, respectively. The
default is (True, True) such that the range is inclusive of both
`min_key` and `max_key`.
Both `min_key` and `max_key` default to `None` which is automatically
inclusive of the start and end of the list, respectively.
When `reverse` is `True` the values are yielded from the iterator in
reverse order; `reverse` defaults to `False`.
"""
_maxes = self._maxes
if not _maxes:
return iter(()) # depends on [control=['if'], data=[]]
_keys = self._keys
# Calculate the minimum (pos, idx) pair. By default this location
# will be inclusive in our calculation.
if min_key is None:
min_pos = 0
min_idx = 0 # depends on [control=['if'], data=[]]
elif inclusive[0]:
min_pos = bisect_left(_maxes, min_key)
if min_pos == len(_maxes):
return iter(()) # depends on [control=['if'], data=[]]
min_idx = bisect_left(_keys[min_pos], min_key) # depends on [control=['if'], data=[]]
else:
min_pos = bisect_right(_maxes, min_key)
if min_pos == len(_maxes):
return iter(()) # depends on [control=['if'], data=[]]
min_idx = bisect_right(_keys[min_pos], min_key)
# Calculate the maximum (pos, idx) pair. By default this location
# will be exclusive in our calculation.
if max_key is None:
max_pos = len(_maxes) - 1
max_idx = len(_keys[max_pos]) # depends on [control=['if'], data=[]]
elif inclusive[1]:
max_pos = bisect_right(_maxes, max_key)
if max_pos == len(_maxes):
max_pos -= 1
max_idx = len(_keys[max_pos]) # depends on [control=['if'], data=['max_pos']]
else:
max_idx = bisect_right(_keys[max_pos], max_key) # depends on [control=['if'], data=[]]
else:
max_pos = bisect_left(_maxes, max_key)
if max_pos == len(_maxes):
max_pos -= 1
max_idx = len(_keys[max_pos]) # depends on [control=['if'], data=['max_pos']]
else:
max_idx = bisect_left(_keys[max_pos], max_key)
return self._islice(min_pos, min_idx, max_pos, max_idx, reverse) |
def to_unicode(x, unaccent=False):
"""Convert a string to unicode"""
s = str(x)
if unaccent:
cs = [c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn']
s = ''.join(cs)
return s | def function[to_unicode, parameter[x, unaccent]]:
constant[Convert a string to unicode]
variable[s] assign[=] call[name[str], parameter[name[x]]]
if name[unaccent] begin[:]
variable[cs] assign[=] <ast.ListComp object at 0x7da1b0ebe830>
variable[s] assign[=] call[constant[].join, parameter[name[cs]]]
return[name[s]] | keyword[def] identifier[to_unicode] ( identifier[x] , identifier[unaccent] = keyword[False] ):
literal[string]
identifier[s] = identifier[str] ( identifier[x] )
keyword[if] identifier[unaccent] :
identifier[cs] =[ identifier[c] keyword[for] identifier[c] keyword[in] identifier[unicodedata] . identifier[normalize] ( literal[string] , identifier[s] )
keyword[if] identifier[unicodedata] . identifier[category] ( identifier[c] )!= literal[string] ]
identifier[s] = literal[string] . identifier[join] ( identifier[cs] )
keyword[return] identifier[s] | def to_unicode(x, unaccent=False):
"""Convert a string to unicode"""
s = str(x)
if unaccent:
cs = [c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn']
s = ''.join(cs) # depends on [control=['if'], data=[]]
return s |
def _disable_encryption(self):
# () -> None
"""Enable encryption methods for ciphers that support them."""
self.encrypt = self._disabled_encrypt
self.decrypt = self._disabled_decrypt | def function[_disable_encryption, parameter[self]]:
constant[Enable encryption methods for ciphers that support them.]
name[self].encrypt assign[=] name[self]._disabled_encrypt
name[self].decrypt assign[=] name[self]._disabled_decrypt | keyword[def] identifier[_disable_encryption] ( identifier[self] ):
literal[string]
identifier[self] . identifier[encrypt] = identifier[self] . identifier[_disabled_encrypt]
identifier[self] . identifier[decrypt] = identifier[self] . identifier[_disabled_decrypt] | def _disable_encryption(self):
# () -> None
'Enable encryption methods for ciphers that support them.'
self.encrypt = self._disabled_encrypt
self.decrypt = self._disabled_decrypt |
def fetch_access_token(self, url, verifier=None, **request_kwargs):
"""Fetch an access token.
This is the final step in the OAuth 1 workflow. An access token is
obtained using all previously obtained credentials, including the
verifier from the authorization step.
Note that a previously set verifier will be reset for your
convenience, or else signature creation will be incorrect on
consecutive requests.
>>> access_token_url = 'https://api.twitter.com/oauth/access_token'
>>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345'
>>> oauth_session = OAuth1Session('client-key', client_secret='secret')
>>> oauth_session.parse_authorization_response(redirect_response)
{
'oauth_token: 'kjerht2309u',
'oauth_token_secret: 'lsdajfh923874',
'oauth_verifier: 'w34o8967345',
}
>>> oauth_session.fetch_access_token(access_token_url)
{
'oauth_token': 'sdf0o9823sjdfsdf',
'oauth_token_secret': '2kjshdfp92i34asdasd',
}
"""
if verifier:
self._client.client.verifier = verifier
if not getattr(self._client.client, "verifier", None):
raise VerifierMissing("No client verifier has been set.")
token = self._fetch_token(url, **request_kwargs)
log.debug("Resetting verifier attribute, should not be used anymore.")
self._client.client.verifier = None
return token | def function[fetch_access_token, parameter[self, url, verifier]]:
constant[Fetch an access token.
This is the final step in the OAuth 1 workflow. An access token is
obtained using all previously obtained credentials, including the
verifier from the authorization step.
Note that a previously set verifier will be reset for your
convenience, or else signature creation will be incorrect on
consecutive requests.
>>> access_token_url = 'https://api.twitter.com/oauth/access_token'
>>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345'
>>> oauth_session = OAuth1Session('client-key', client_secret='secret')
>>> oauth_session.parse_authorization_response(redirect_response)
{
'oauth_token: 'kjerht2309u',
'oauth_token_secret: 'lsdajfh923874',
'oauth_verifier: 'w34o8967345',
}
>>> oauth_session.fetch_access_token(access_token_url)
{
'oauth_token': 'sdf0o9823sjdfsdf',
'oauth_token_secret': '2kjshdfp92i34asdasd',
}
]
if name[verifier] begin[:]
name[self]._client.client.verifier assign[=] name[verifier]
if <ast.UnaryOp object at 0x7da1b015b0a0> begin[:]
<ast.Raise object at 0x7da1b0158e80>
variable[token] assign[=] call[name[self]._fetch_token, parameter[name[url]]]
call[name[log].debug, parameter[constant[Resetting verifier attribute, should not be used anymore.]]]
name[self]._client.client.verifier assign[=] constant[None]
return[name[token]] | keyword[def] identifier[fetch_access_token] ( identifier[self] , identifier[url] , identifier[verifier] = keyword[None] ,** identifier[request_kwargs] ):
literal[string]
keyword[if] identifier[verifier] :
identifier[self] . identifier[_client] . identifier[client] . identifier[verifier] = identifier[verifier]
keyword[if] keyword[not] identifier[getattr] ( identifier[self] . identifier[_client] . identifier[client] , literal[string] , keyword[None] ):
keyword[raise] identifier[VerifierMissing] ( literal[string] )
identifier[token] = identifier[self] . identifier[_fetch_token] ( identifier[url] ,** identifier[request_kwargs] )
identifier[log] . identifier[debug] ( literal[string] )
identifier[self] . identifier[_client] . identifier[client] . identifier[verifier] = keyword[None]
keyword[return] identifier[token] | def fetch_access_token(self, url, verifier=None, **request_kwargs):
"""Fetch an access token.
This is the final step in the OAuth 1 workflow. An access token is
obtained using all previously obtained credentials, including the
verifier from the authorization step.
Note that a previously set verifier will be reset for your
convenience, or else signature creation will be incorrect on
consecutive requests.
>>> access_token_url = 'https://api.twitter.com/oauth/access_token'
>>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345'
>>> oauth_session = OAuth1Session('client-key', client_secret='secret')
>>> oauth_session.parse_authorization_response(redirect_response)
{
'oauth_token: 'kjerht2309u',
'oauth_token_secret: 'lsdajfh923874',
'oauth_verifier: 'w34o8967345',
}
>>> oauth_session.fetch_access_token(access_token_url)
{
'oauth_token': 'sdf0o9823sjdfsdf',
'oauth_token_secret': '2kjshdfp92i34asdasd',
}
"""
if verifier:
self._client.client.verifier = verifier # depends on [control=['if'], data=[]]
if not getattr(self._client.client, 'verifier', None):
raise VerifierMissing('No client verifier has been set.') # depends on [control=['if'], data=[]]
token = self._fetch_token(url, **request_kwargs)
log.debug('Resetting verifier attribute, should not be used anymore.')
self._client.client.verifier = None
return token |
def dump(self, msg):
'''
Dumps the provided message to this dump.
'''
msg_size = len(msg)
# We start a new batch if the resulting batch file is larger than the
# max batch file size. However, if the current batch file size is zero
# then that means the message alone is larger than the max batch file
# size. In this case instead of splitting up the message across files
# which would greatly increase complexity we simply dump that message
# into a file of its own even though it will be larger than the max
# batch file size.
if self._batch_size + msg_size > self._max_batch_file_size \
and self._batch_size > 0:
self._startNewBatch()
# Write the time stamp and information on how to retrieve the message
# from the batch files (batch filename, byte offset, and byte size)
global getTime
index_file_entry = '{:},{:09d},{:},{:}\n'.format(
getTime(), self._batch_index, self._batch_size, msg_size)
if sys.version_info >= (3,):
self._index_file.write(index_file_entry.encode('utf-8'))
else:
self._index_file.write(index_file_entry)
# Dump the message itself to the current batch file
self._batch_file.write(msg)
self._batch_size += msg_size
# Increment message count
self._message_count += 1 | def function[dump, parameter[self, msg]]:
constant[
Dumps the provided message to this dump.
]
variable[msg_size] assign[=] call[name[len], parameter[name[msg]]]
if <ast.BoolOp object at 0x7da18f721540> begin[:]
call[name[self]._startNewBatch, parameter[]]
<ast.Global object at 0x7da18f7200d0>
variable[index_file_entry] assign[=] call[constant[{:},{:09d},{:},{:}
].format, parameter[call[name[getTime], parameter[]], name[self]._batch_index, name[self]._batch_size, name[msg_size]]]
if compare[name[sys].version_info greater_or_equal[>=] tuple[[<ast.Constant object at 0x7da18f721e40>]]] begin[:]
call[name[self]._index_file.write, parameter[call[name[index_file_entry].encode, parameter[constant[utf-8]]]]]
call[name[self]._batch_file.write, parameter[name[msg]]]
<ast.AugAssign object at 0x7da18f7230d0>
<ast.AugAssign object at 0x7da18f720790> | keyword[def] identifier[dump] ( identifier[self] , identifier[msg] ):
literal[string]
identifier[msg_size] = identifier[len] ( identifier[msg] )
keyword[if] identifier[self] . identifier[_batch_size] + identifier[msg_size] > identifier[self] . identifier[_max_batch_file_size] keyword[and] identifier[self] . identifier[_batch_size] > literal[int] :
identifier[self] . identifier[_startNewBatch] ()
keyword[global] identifier[getTime]
identifier[index_file_entry] = literal[string] . identifier[format] (
identifier[getTime] (), identifier[self] . identifier[_batch_index] , identifier[self] . identifier[_batch_size] , identifier[msg_size] )
keyword[if] identifier[sys] . identifier[version_info] >=( literal[int] ,):
identifier[self] . identifier[_index_file] . identifier[write] ( identifier[index_file_entry] . identifier[encode] ( literal[string] ))
keyword[else] :
identifier[self] . identifier[_index_file] . identifier[write] ( identifier[index_file_entry] )
identifier[self] . identifier[_batch_file] . identifier[write] ( identifier[msg] )
identifier[self] . identifier[_batch_size] += identifier[msg_size]
identifier[self] . identifier[_message_count] += literal[int] | def dump(self, msg):
"""
Dumps the provided message to this dump.
"""
msg_size = len(msg)
# We start a new batch if the resulting batch file is larger than the
# max batch file size. However, if the current batch file size is zero
# then that means the message alone is larger than the max batch file
# size. In this case instead of splitting up the message across files
# which would greatly increase complexity we simply dump that message
# into a file of its own even though it will be larger than the max
# batch file size.
if self._batch_size + msg_size > self._max_batch_file_size and self._batch_size > 0:
self._startNewBatch() # depends on [control=['if'], data=[]]
# Write the time stamp and information on how to retrieve the message
# from the batch files (batch filename, byte offset, and byte size)
global getTime
index_file_entry = '{:},{:09d},{:},{:}\n'.format(getTime(), self._batch_index, self._batch_size, msg_size)
if sys.version_info >= (3,):
self._index_file.write(index_file_entry.encode('utf-8')) # depends on [control=['if'], data=[]]
else:
self._index_file.write(index_file_entry)
# Dump the message itself to the current batch file
self._batch_file.write(msg)
self._batch_size += msg_size
# Increment message count
self._message_count += 1 |
def add_case(self, case_obj, vtype='snv', mode='vcf', ped_svg=None):
"""Load a case with individuals.
Args:
case_obj (puzzle.models.Case): initialized case model
"""
new_case = Case(case_id=case_obj.case_id,
name=case_obj.name,
variant_source=case_obj.variant_source,
variant_type=vtype,
variant_mode=mode,
pedigree=ped_svg,
compressed=case_obj.compressed,
tabix_index=case_obj.tabix_index)
# build individuals
inds = [Individual(
ind_id=ind.ind_id,
name=ind.name,
mother=ind.mother,
father=ind.father,
sex=ind.sex,
phenotype=ind.phenotype,
ind_index=ind.ind_index,
variant_source=ind.variant_source,
bam_path=ind.bam_path,
) for ind in case_obj.individuals]
new_case.individuals = inds
if self.case(new_case.case_id):
logger.warning("Case already exists in database!")
else:
self.session.add(new_case)
self.save()
return new_case | def function[add_case, parameter[self, case_obj, vtype, mode, ped_svg]]:
constant[Load a case with individuals.
Args:
case_obj (puzzle.models.Case): initialized case model
]
variable[new_case] assign[=] call[name[Case], parameter[]]
variable[inds] assign[=] <ast.ListComp object at 0x7da204564c10>
name[new_case].individuals assign[=] name[inds]
if call[name[self].case, parameter[name[new_case].case_id]] begin[:]
call[name[logger].warning, parameter[constant[Case already exists in database!]]]
return[name[new_case]] | keyword[def] identifier[add_case] ( identifier[self] , identifier[case_obj] , identifier[vtype] = literal[string] , identifier[mode] = literal[string] , identifier[ped_svg] = keyword[None] ):
literal[string]
identifier[new_case] = identifier[Case] ( identifier[case_id] = identifier[case_obj] . identifier[case_id] ,
identifier[name] = identifier[case_obj] . identifier[name] ,
identifier[variant_source] = identifier[case_obj] . identifier[variant_source] ,
identifier[variant_type] = identifier[vtype] ,
identifier[variant_mode] = identifier[mode] ,
identifier[pedigree] = identifier[ped_svg] ,
identifier[compressed] = identifier[case_obj] . identifier[compressed] ,
identifier[tabix_index] = identifier[case_obj] . identifier[tabix_index] )
identifier[inds] =[ identifier[Individual] (
identifier[ind_id] = identifier[ind] . identifier[ind_id] ,
identifier[name] = identifier[ind] . identifier[name] ,
identifier[mother] = identifier[ind] . identifier[mother] ,
identifier[father] = identifier[ind] . identifier[father] ,
identifier[sex] = identifier[ind] . identifier[sex] ,
identifier[phenotype] = identifier[ind] . identifier[phenotype] ,
identifier[ind_index] = identifier[ind] . identifier[ind_index] ,
identifier[variant_source] = identifier[ind] . identifier[variant_source] ,
identifier[bam_path] = identifier[ind] . identifier[bam_path] ,
) keyword[for] identifier[ind] keyword[in] identifier[case_obj] . identifier[individuals] ]
identifier[new_case] . identifier[individuals] = identifier[inds]
keyword[if] identifier[self] . identifier[case] ( identifier[new_case] . identifier[case_id] ):
identifier[logger] . identifier[warning] ( literal[string] )
keyword[else] :
identifier[self] . identifier[session] . identifier[add] ( identifier[new_case] )
identifier[self] . identifier[save] ()
keyword[return] identifier[new_case] | def add_case(self, case_obj, vtype='snv', mode='vcf', ped_svg=None):
"""Load a case with individuals.
Args:
case_obj (puzzle.models.Case): initialized case model
"""
new_case = Case(case_id=case_obj.case_id, name=case_obj.name, variant_source=case_obj.variant_source, variant_type=vtype, variant_mode=mode, pedigree=ped_svg, compressed=case_obj.compressed, tabix_index=case_obj.tabix_index)
# build individuals
inds = [Individual(ind_id=ind.ind_id, name=ind.name, mother=ind.mother, father=ind.father, sex=ind.sex, phenotype=ind.phenotype, ind_index=ind.ind_index, variant_source=ind.variant_source, bam_path=ind.bam_path) for ind in case_obj.individuals]
new_case.individuals = inds
if self.case(new_case.case_id):
logger.warning('Case already exists in database!') # depends on [control=['if'], data=[]]
else:
self.session.add(new_case)
self.save()
return new_case |
def _scale_to_dtype(self, data, dtype):
"""Scale provided data to dtype range assuming a 0-1 range.
Float input data is assumed to be normalized to a 0 to 1 range.
Integer input data is not scaled, only clipped. A float output
type is not scaled since both outputs and inputs are assumed to
be in the 0-1 range already.
"""
if np.issubdtype(dtype, np.integer):
if np.issubdtype(data, np.integer):
# preserve integer data type
data = data.clip(np.iinfo(dtype).min, np.iinfo(dtype).max)
else:
# scale float data (assumed to be 0 to 1) to full integer space
dinfo = np.iinfo(dtype)
data = data.clip(0, 1) * (dinfo.max - dinfo.min) + dinfo.min
data = data.round()
return data | def function[_scale_to_dtype, parameter[self, data, dtype]]:
constant[Scale provided data to dtype range assuming a 0-1 range.
Float input data is assumed to be normalized to a 0 to 1 range.
Integer input data is not scaled, only clipped. A float output
type is not scaled since both outputs and inputs are assumed to
be in the 0-1 range already.
]
if call[name[np].issubdtype, parameter[name[dtype], name[np].integer]] begin[:]
if call[name[np].issubdtype, parameter[name[data], name[np].integer]] begin[:]
variable[data] assign[=] call[name[data].clip, parameter[call[name[np].iinfo, parameter[name[dtype]]].min, call[name[np].iinfo, parameter[name[dtype]]].max]]
variable[data] assign[=] call[name[data].round, parameter[]]
return[name[data]] | keyword[def] identifier[_scale_to_dtype] ( identifier[self] , identifier[data] , identifier[dtype] ):
literal[string]
keyword[if] identifier[np] . identifier[issubdtype] ( identifier[dtype] , identifier[np] . identifier[integer] ):
keyword[if] identifier[np] . identifier[issubdtype] ( identifier[data] , identifier[np] . identifier[integer] ):
identifier[data] = identifier[data] . identifier[clip] ( identifier[np] . identifier[iinfo] ( identifier[dtype] ). identifier[min] , identifier[np] . identifier[iinfo] ( identifier[dtype] ). identifier[max] )
keyword[else] :
identifier[dinfo] = identifier[np] . identifier[iinfo] ( identifier[dtype] )
identifier[data] = identifier[data] . identifier[clip] ( literal[int] , literal[int] )*( identifier[dinfo] . identifier[max] - identifier[dinfo] . identifier[min] )+ identifier[dinfo] . identifier[min]
identifier[data] = identifier[data] . identifier[round] ()
keyword[return] identifier[data] | def _scale_to_dtype(self, data, dtype):
"""Scale provided data to dtype range assuming a 0-1 range.
Float input data is assumed to be normalized to a 0 to 1 range.
Integer input data is not scaled, only clipped. A float output
type is not scaled since both outputs and inputs are assumed to
be in the 0-1 range already.
"""
if np.issubdtype(dtype, np.integer):
if np.issubdtype(data, np.integer):
# preserve integer data type
data = data.clip(np.iinfo(dtype).min, np.iinfo(dtype).max) # depends on [control=['if'], data=[]]
else:
# scale float data (assumed to be 0 to 1) to full integer space
dinfo = np.iinfo(dtype)
data = data.clip(0, 1) * (dinfo.max - dinfo.min) + dinfo.min
data = data.round() # depends on [control=['if'], data=[]]
return data |
def add_xy(self, x, y):
""" Add an motor order/sensory effect pair to the model
:arg x: an input (order) vector compatible with self.Mfeats.
:arg y: a output (effect) vector compatible with self.Sfeats.
"""
self.imodel.add_xy(self._pre_x(x), self._pre_y(y)) | def function[add_xy, parameter[self, x, y]]:
constant[ Add an motor order/sensory effect pair to the model
:arg x: an input (order) vector compatible with self.Mfeats.
:arg y: a output (effect) vector compatible with self.Sfeats.
]
call[name[self].imodel.add_xy, parameter[call[name[self]._pre_x, parameter[name[x]]], call[name[self]._pre_y, parameter[name[y]]]]] | keyword[def] identifier[add_xy] ( identifier[self] , identifier[x] , identifier[y] ):
literal[string]
identifier[self] . identifier[imodel] . identifier[add_xy] ( identifier[self] . identifier[_pre_x] ( identifier[x] ), identifier[self] . identifier[_pre_y] ( identifier[y] )) | def add_xy(self, x, y):
""" Add an motor order/sensory effect pair to the model
:arg x: an input (order) vector compatible with self.Mfeats.
:arg y: a output (effect) vector compatible with self.Sfeats.
"""
self.imodel.add_xy(self._pre_x(x), self._pre_y(y)) |
def feed(self, data):
"""
Feed data to the parser.
"""
assert isinstance(data, binary_type)
for b in iterbytes(data):
self._parser.send(int2byte(b)) | def function[feed, parameter[self, data]]:
constant[
Feed data to the parser.
]
assert[call[name[isinstance], parameter[name[data], name[binary_type]]]]
for taget[name[b]] in starred[call[name[iterbytes], parameter[name[data]]]] begin[:]
call[name[self]._parser.send, parameter[call[name[int2byte], parameter[name[b]]]]] | keyword[def] identifier[feed] ( identifier[self] , identifier[data] ):
literal[string]
keyword[assert] identifier[isinstance] ( identifier[data] , identifier[binary_type] )
keyword[for] identifier[b] keyword[in] identifier[iterbytes] ( identifier[data] ):
identifier[self] . identifier[_parser] . identifier[send] ( identifier[int2byte] ( identifier[b] )) | def feed(self, data):
"""
Feed data to the parser.
"""
assert isinstance(data, binary_type)
for b in iterbytes(data):
self._parser.send(int2byte(b)) # depends on [control=['for'], data=['b']] |
def bdib(ticker, dt, typ='TRADE', **kwargs) -> pd.DataFrame:
"""
Bloomberg intraday bar data
Args:
ticker: ticker name
dt: date to download
typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]
**kwargs:
batch: whether is batch process to download data
log: level of logs
Returns:
pd.DataFrame
"""
from xbbg.core import missing
logger = logs.get_logger(bdib, level=kwargs.pop('log', logs.LOG_LEVEL))
t_1 = pd.Timestamp('today').date() - pd.Timedelta('1D')
whole_day = pd.Timestamp(dt).date() < t_1
batch = kwargs.pop('batch', False)
if (not whole_day) and batch:
logger.warning(f'querying date {t_1} is too close, ignoring download ...')
return pd.DataFrame()
cur_dt = pd.Timestamp(dt).strftime('%Y-%m-%d')
asset = ticker.split()[-1]
info_log = f'{ticker} / {cur_dt} / {typ}'
if asset in ['Equity', 'Curncy', 'Index', 'Comdty']:
exch = const.exch_info(ticker=ticker)
if exch.empty: return pd.DataFrame()
else:
logger.error(f'unknown asset type: {asset}')
return pd.DataFrame()
time_fmt = '%Y-%m-%dT%H:%M:%S'
time_idx = pd.DatetimeIndex([
f'{cur_dt} {exch.allday[0]}', f'{cur_dt} {exch.allday[-1]}']
).tz_localize(exch.tz).tz_convert(DEFAULT_TZ).tz_convert('UTC')
if time_idx[0] > time_idx[1]: time_idx -= pd.TimedeltaIndex(['1D', '0D'])
q_tckr = ticker
if exch.get('is_fut', False):
if 'freq' not in exch:
logger.error(f'[freq] missing in info for {info_log} ...')
is_sprd = exch.get('has_sprd', False) and (len(ticker[:-1]) != exch['tickers'][0])
if not is_sprd:
q_tckr = fut_ticker(gen_ticker=ticker, dt=dt, freq=exch['freq'])
if q_tckr == '':
logger.error(f'cannot find futures ticker for {ticker} ...')
return pd.DataFrame()
info_log = f'{q_tckr} / {cur_dt} / {typ}'
miss_kw = dict(ticker=ticker, dt=dt, typ=typ, func='bdib')
cur_miss = missing.current_missing(**miss_kw)
if cur_miss >= 2:
if batch: return pd.DataFrame()
logger.info(f'{cur_miss} trials with no data {info_log}')
return pd.DataFrame()
logger.info(f'loading data from Bloomberg: {info_log} ...')
con, _ = create_connection()
try:
data = con.bdib(
ticker=q_tckr, event_type=typ, interval=1,
start_datetime=time_idx[0].strftime(time_fmt),
end_datetime=time_idx[1].strftime(time_fmt),
)
except KeyError:
# Ignores missing data errors from pdblp library
# Warning msg will be displayed later
data = pd.DataFrame()
if not isinstance(data, pd.DataFrame):
raise ValueError(f'unknown output format: {type(data)}')
if data.empty:
logger.warning(f'no data for {info_log} ...')
missing.update_missing(**miss_kw)
return pd.DataFrame()
data = data.tz_localize('UTC').tz_convert(exch.tz)
storage.save_intraday(data=data, ticker=ticker, dt=dt, typ=typ)
return pd.DataFrame() if batch else assist.format_intraday(data=data, ticker=ticker) | def function[bdib, parameter[ticker, dt, typ]]:
constant[
Bloomberg intraday bar data
Args:
ticker: ticker name
dt: date to download
typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]
**kwargs:
batch: whether is batch process to download data
log: level of logs
Returns:
pd.DataFrame
]
from relative_module[xbbg.core] import module[missing]
variable[logger] assign[=] call[name[logs].get_logger, parameter[name[bdib]]]
variable[t_1] assign[=] binary_operation[call[call[name[pd].Timestamp, parameter[constant[today]]].date, parameter[]] - call[name[pd].Timedelta, parameter[constant[1D]]]]
variable[whole_day] assign[=] compare[call[call[name[pd].Timestamp, parameter[name[dt]]].date, parameter[]] less[<] name[t_1]]
variable[batch] assign[=] call[name[kwargs].pop, parameter[constant[batch], constant[False]]]
if <ast.BoolOp object at 0x7da1b01370a0> begin[:]
call[name[logger].warning, parameter[<ast.JoinedStr object at 0x7da1b0136f20>]]
return[call[name[pd].DataFrame, parameter[]]]
variable[cur_dt] assign[=] call[call[name[pd].Timestamp, parameter[name[dt]]].strftime, parameter[constant[%Y-%m-%d]]]
variable[asset] assign[=] call[call[name[ticker].split, parameter[]]][<ast.UnaryOp object at 0x7da1b0136a10>]
variable[info_log] assign[=] <ast.JoinedStr object at 0x7da1b0136950>
if compare[name[asset] in list[[<ast.Constant object at 0x7da1b0136680>, <ast.Constant object at 0x7da1b0136650>, <ast.Constant object at 0x7da1b0136620>, <ast.Constant object at 0x7da1b01365f0>]]] begin[:]
variable[exch] assign[=] call[name[const].exch_info, parameter[]]
if name[exch].empty begin[:]
return[call[name[pd].DataFrame, parameter[]]]
variable[time_fmt] assign[=] constant[%Y-%m-%dT%H:%M:%S]
variable[time_idx] assign[=] call[call[call[call[name[pd].DatetimeIndex, parameter[list[[<ast.JoinedStr object at 0x7da1b0135d50>, <ast.JoinedStr object at 0x7da1b0135b70>]]]].tz_localize, parameter[name[exch].tz]].tz_convert, parameter[name[DEFAULT_TZ]]].tz_convert, parameter[constant[UTC]]]
if compare[call[name[time_idx]][constant[0]] greater[>] call[name[time_idx]][constant[1]]] begin[:]
<ast.AugAssign object at 0x7da1b0135720>
variable[q_tckr] assign[=] name[ticker]
if call[name[exch].get, parameter[constant[is_fut], constant[False]]] begin[:]
if compare[constant[freq] <ast.NotIn object at 0x7da2590d7190> name[exch]] begin[:]
call[name[logger].error, parameter[<ast.JoinedStr object at 0x7da1b0135270>]]
variable[is_sprd] assign[=] <ast.BoolOp object at 0x7da1b01350c0>
if <ast.UnaryOp object at 0x7da1b0134d00> begin[:]
variable[q_tckr] assign[=] call[name[fut_ticker], parameter[]]
if compare[name[q_tckr] equal[==] constant[]] begin[:]
call[name[logger].error, parameter[<ast.JoinedStr object at 0x7da1b01348e0>]]
return[call[name[pd].DataFrame, parameter[]]]
variable[info_log] assign[=] <ast.JoinedStr object at 0x7da1b0134640>
variable[miss_kw] assign[=] call[name[dict], parameter[]]
variable[cur_miss] assign[=] call[name[missing].current_missing, parameter[]]
if compare[name[cur_miss] greater_or_equal[>=] constant[2]] begin[:]
if name[batch] begin[:]
return[call[name[pd].DataFrame, parameter[]]]
call[name[logger].info, parameter[<ast.JoinedStr object at 0x7da1b0057f70>]]
return[call[name[pd].DataFrame, parameter[]]]
call[name[logger].info, parameter[<ast.JoinedStr object at 0x7da1b00562f0>]]
<ast.Tuple object at 0x7da1b0056b30> assign[=] call[name[create_connection], parameter[]]
<ast.Try object at 0x7da1b00556f0>
if <ast.UnaryOp object at 0x7da1b0056320> begin[:]
<ast.Raise object at 0x7da1b0057bb0>
if name[data].empty begin[:]
call[name[logger].warning, parameter[<ast.JoinedStr object at 0x7da1b00553f0>]]
call[name[missing].update_missing, parameter[]]
return[call[name[pd].DataFrame, parameter[]]]
variable[data] assign[=] call[call[name[data].tz_localize, parameter[constant[UTC]]].tz_convert, parameter[name[exch].tz]]
call[name[storage].save_intraday, parameter[]]
return[<ast.IfExp object at 0x7da1b00564d0>] | keyword[def] identifier[bdib] ( identifier[ticker] , identifier[dt] , identifier[typ] = literal[string] ,** identifier[kwargs] )-> identifier[pd] . identifier[DataFrame] :
literal[string]
keyword[from] identifier[xbbg] . identifier[core] keyword[import] identifier[missing]
identifier[logger] = identifier[logs] . identifier[get_logger] ( identifier[bdib] , identifier[level] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[logs] . identifier[LOG_LEVEL] ))
identifier[t_1] = identifier[pd] . identifier[Timestamp] ( literal[string] ). identifier[date] ()- identifier[pd] . identifier[Timedelta] ( literal[string] )
identifier[whole_day] = identifier[pd] . identifier[Timestamp] ( identifier[dt] ). identifier[date] ()< identifier[t_1]
identifier[batch] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[False] )
keyword[if] ( keyword[not] identifier[whole_day] ) keyword[and] identifier[batch] :
identifier[logger] . identifier[warning] ( literal[string] )
keyword[return] identifier[pd] . identifier[DataFrame] ()
identifier[cur_dt] = identifier[pd] . identifier[Timestamp] ( identifier[dt] ). identifier[strftime] ( literal[string] )
identifier[asset] = identifier[ticker] . identifier[split] ()[- literal[int] ]
identifier[info_log] = literal[string]
keyword[if] identifier[asset] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[exch] = identifier[const] . identifier[exch_info] ( identifier[ticker] = identifier[ticker] )
keyword[if] identifier[exch] . identifier[empty] : keyword[return] identifier[pd] . identifier[DataFrame] ()
keyword[else] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[return] identifier[pd] . identifier[DataFrame] ()
identifier[time_fmt] = literal[string]
identifier[time_idx] = identifier[pd] . identifier[DatetimeIndex] ([
literal[string] , literal[string] ]
). identifier[tz_localize] ( identifier[exch] . identifier[tz] ). identifier[tz_convert] ( identifier[DEFAULT_TZ] ). identifier[tz_convert] ( literal[string] )
keyword[if] identifier[time_idx] [ literal[int] ]> identifier[time_idx] [ literal[int] ]: identifier[time_idx] -= identifier[pd] . identifier[TimedeltaIndex] ([ literal[string] , literal[string] ])
identifier[q_tckr] = identifier[ticker]
keyword[if] identifier[exch] . identifier[get] ( literal[string] , keyword[False] ):
keyword[if] literal[string] keyword[not] keyword[in] identifier[exch] :
identifier[logger] . identifier[error] ( literal[string] )
identifier[is_sprd] = identifier[exch] . identifier[get] ( literal[string] , keyword[False] ) keyword[and] ( identifier[len] ( identifier[ticker] [:- literal[int] ])!= identifier[exch] [ literal[string] ][ literal[int] ])
keyword[if] keyword[not] identifier[is_sprd] :
identifier[q_tckr] = identifier[fut_ticker] ( identifier[gen_ticker] = identifier[ticker] , identifier[dt] = identifier[dt] , identifier[freq] = identifier[exch] [ literal[string] ])
keyword[if] identifier[q_tckr] == literal[string] :
identifier[logger] . identifier[error] ( literal[string] )
keyword[return] identifier[pd] . identifier[DataFrame] ()
identifier[info_log] = literal[string]
identifier[miss_kw] = identifier[dict] ( identifier[ticker] = identifier[ticker] , identifier[dt] = identifier[dt] , identifier[typ] = identifier[typ] , identifier[func] = literal[string] )
identifier[cur_miss] = identifier[missing] . identifier[current_missing] (** identifier[miss_kw] )
keyword[if] identifier[cur_miss] >= literal[int] :
keyword[if] identifier[batch] : keyword[return] identifier[pd] . identifier[DataFrame] ()
identifier[logger] . identifier[info] ( literal[string] )
keyword[return] identifier[pd] . identifier[DataFrame] ()
identifier[logger] . identifier[info] ( literal[string] )
identifier[con] , identifier[_] = identifier[create_connection] ()
keyword[try] :
identifier[data] = identifier[con] . identifier[bdib] (
identifier[ticker] = identifier[q_tckr] , identifier[event_type] = identifier[typ] , identifier[interval] = literal[int] ,
identifier[start_datetime] = identifier[time_idx] [ literal[int] ]. identifier[strftime] ( identifier[time_fmt] ),
identifier[end_datetime] = identifier[time_idx] [ literal[int] ]. identifier[strftime] ( identifier[time_fmt] ),
)
keyword[except] identifier[KeyError] :
identifier[data] = identifier[pd] . identifier[DataFrame] ()
keyword[if] keyword[not] identifier[isinstance] ( identifier[data] , identifier[pd] . identifier[DataFrame] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[data] . identifier[empty] :
identifier[logger] . identifier[warning] ( literal[string] )
identifier[missing] . identifier[update_missing] (** identifier[miss_kw] )
keyword[return] identifier[pd] . identifier[DataFrame] ()
identifier[data] = identifier[data] . identifier[tz_localize] ( literal[string] ). identifier[tz_convert] ( identifier[exch] . identifier[tz] )
identifier[storage] . identifier[save_intraday] ( identifier[data] = identifier[data] , identifier[ticker] = identifier[ticker] , identifier[dt] = identifier[dt] , identifier[typ] = identifier[typ] )
keyword[return] identifier[pd] . identifier[DataFrame] () keyword[if] identifier[batch] keyword[else] identifier[assist] . identifier[format_intraday] ( identifier[data] = identifier[data] , identifier[ticker] = identifier[ticker] ) | def bdib(ticker, dt, typ='TRADE', **kwargs) -> pd.DataFrame:
"""
Bloomberg intraday bar data
Args:
ticker: ticker name
dt: date to download
typ: [TRADE, BID, ASK, BID_BEST, ASK_BEST, BEST_BID, BEST_ASK]
**kwargs:
batch: whether is batch process to download data
log: level of logs
Returns:
pd.DataFrame
"""
from xbbg.core import missing
logger = logs.get_logger(bdib, level=kwargs.pop('log', logs.LOG_LEVEL))
t_1 = pd.Timestamp('today').date() - pd.Timedelta('1D')
whole_day = pd.Timestamp(dt).date() < t_1
batch = kwargs.pop('batch', False)
if not whole_day and batch:
logger.warning(f'querying date {t_1} is too close, ignoring download ...')
return pd.DataFrame() # depends on [control=['if'], data=[]]
cur_dt = pd.Timestamp(dt).strftime('%Y-%m-%d')
asset = ticker.split()[-1]
info_log = f'{ticker} / {cur_dt} / {typ}'
if asset in ['Equity', 'Curncy', 'Index', 'Comdty']:
exch = const.exch_info(ticker=ticker)
if exch.empty:
return pd.DataFrame() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
logger.error(f'unknown asset type: {asset}')
return pd.DataFrame()
time_fmt = '%Y-%m-%dT%H:%M:%S'
time_idx = pd.DatetimeIndex([f'{cur_dt} {exch.allday[0]}', f'{cur_dt} {exch.allday[-1]}']).tz_localize(exch.tz).tz_convert(DEFAULT_TZ).tz_convert('UTC')
if time_idx[0] > time_idx[1]:
time_idx -= pd.TimedeltaIndex(['1D', '0D']) # depends on [control=['if'], data=[]]
q_tckr = ticker
if exch.get('is_fut', False):
if 'freq' not in exch:
logger.error(f'[freq] missing in info for {info_log} ...') # depends on [control=['if'], data=[]]
is_sprd = exch.get('has_sprd', False) and len(ticker[:-1]) != exch['tickers'][0]
if not is_sprd:
q_tckr = fut_ticker(gen_ticker=ticker, dt=dt, freq=exch['freq'])
if q_tckr == '':
logger.error(f'cannot find futures ticker for {ticker} ...')
return pd.DataFrame() # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
info_log = f'{q_tckr} / {cur_dt} / {typ}'
miss_kw = dict(ticker=ticker, dt=dt, typ=typ, func='bdib')
cur_miss = missing.current_missing(**miss_kw)
if cur_miss >= 2:
if batch:
return pd.DataFrame() # depends on [control=['if'], data=[]]
logger.info(f'{cur_miss} trials with no data {info_log}')
return pd.DataFrame() # depends on [control=['if'], data=['cur_miss']]
logger.info(f'loading data from Bloomberg: {info_log} ...')
(con, _) = create_connection()
try:
data = con.bdib(ticker=q_tckr, event_type=typ, interval=1, start_datetime=time_idx[0].strftime(time_fmt), end_datetime=time_idx[1].strftime(time_fmt)) # depends on [control=['try'], data=[]]
except KeyError:
# Ignores missing data errors from pdblp library
# Warning msg will be displayed later
data = pd.DataFrame() # depends on [control=['except'], data=[]]
if not isinstance(data, pd.DataFrame):
raise ValueError(f'unknown output format: {type(data)}') # depends on [control=['if'], data=[]]
if data.empty:
logger.warning(f'no data for {info_log} ...')
missing.update_missing(**miss_kw)
return pd.DataFrame() # depends on [control=['if'], data=[]]
data = data.tz_localize('UTC').tz_convert(exch.tz)
storage.save_intraday(data=data, ticker=ticker, dt=dt, typ=typ)
return pd.DataFrame() if batch else assist.format_intraday(data=data, ticker=ticker) |
def _cron_id(cron):
'''SAFETYBELT, Only set if we really have an identifier'''
cid = None
if cron['identifier']:
cid = cron['identifier']
else:
cid = SALT_CRON_NO_IDENTIFIER
if cid:
return _ensure_string(cid) | def function[_cron_id, parameter[cron]]:
constant[SAFETYBELT, Only set if we really have an identifier]
variable[cid] assign[=] constant[None]
if call[name[cron]][constant[identifier]] begin[:]
variable[cid] assign[=] call[name[cron]][constant[identifier]]
if name[cid] begin[:]
return[call[name[_ensure_string], parameter[name[cid]]]] | keyword[def] identifier[_cron_id] ( identifier[cron] ):
literal[string]
identifier[cid] = keyword[None]
keyword[if] identifier[cron] [ literal[string] ]:
identifier[cid] = identifier[cron] [ literal[string] ]
keyword[else] :
identifier[cid] = identifier[SALT_CRON_NO_IDENTIFIER]
keyword[if] identifier[cid] :
keyword[return] identifier[_ensure_string] ( identifier[cid] ) | def _cron_id(cron):
"""SAFETYBELT, Only set if we really have an identifier"""
cid = None
if cron['identifier']:
cid = cron['identifier'] # depends on [control=['if'], data=[]]
else:
cid = SALT_CRON_NO_IDENTIFIER
if cid:
return _ensure_string(cid) # depends on [control=['if'], data=[]] |
def endpoint_for(self, operation):
"""
Create a (unique) endpoint name from an operation and a namespace.
This naming convention matches how Flask blueprints routes are resolved
(assuming that the blueprint and resources share the same name).
Examples: `foo.search`, `bar.search_for.baz`
"""
return operation.value.pattern.format(
subject=self.subject_name,
operation=operation.value.name,
object_=self.object_name if self.object_ else None,
version=self.version or "v1",
) | def function[endpoint_for, parameter[self, operation]]:
constant[
Create a (unique) endpoint name from an operation and a namespace.
This naming convention matches how Flask blueprints routes are resolved
(assuming that the blueprint and resources share the same name).
Examples: `foo.search`, `bar.search_for.baz`
]
return[call[name[operation].value.pattern.format, parameter[]]] | keyword[def] identifier[endpoint_for] ( identifier[self] , identifier[operation] ):
literal[string]
keyword[return] identifier[operation] . identifier[value] . identifier[pattern] . identifier[format] (
identifier[subject] = identifier[self] . identifier[subject_name] ,
identifier[operation] = identifier[operation] . identifier[value] . identifier[name] ,
identifier[object_] = identifier[self] . identifier[object_name] keyword[if] identifier[self] . identifier[object_] keyword[else] keyword[None] ,
identifier[version] = identifier[self] . identifier[version] keyword[or] literal[string] ,
) | def endpoint_for(self, operation):
"""
Create a (unique) endpoint name from an operation and a namespace.
This naming convention matches how Flask blueprints routes are resolved
(assuming that the blueprint and resources share the same name).
Examples: `foo.search`, `bar.search_for.baz`
"""
return operation.value.pattern.format(subject=self.subject_name, operation=operation.value.name, object_=self.object_name if self.object_ else None, version=self.version or 'v1') |
def save_raw_data_from_data_queue(data_queue, filename, mode='a', title='', scan_parameters=None): # mode="r+" to append data, raw_data_file_h5 must exist, "w" to overwrite raw_data_file_h5, "a" to append data, if raw_data_file_h5 does not exist it is created
'''Writing raw data file from data queue
If you need to write raw data once in a while this function may make it easy for you.
'''
if not scan_parameters:
scan_parameters = {}
with open_raw_data_file(filename, mode='a', title='', scan_parameters=list(dict.iterkeys(scan_parameters))) as raw_data_file:
raw_data_file.append(data_queue, scan_parameters=scan_parameters) | def function[save_raw_data_from_data_queue, parameter[data_queue, filename, mode, title, scan_parameters]]:
constant[Writing raw data file from data queue
If you need to write raw data once in a while this function may make it easy for you.
]
if <ast.UnaryOp object at 0x7da1b11a0af0> begin[:]
variable[scan_parameters] assign[=] dictionary[[], []]
with call[name[open_raw_data_file], parameter[name[filename]]] begin[:]
call[name[raw_data_file].append, parameter[name[data_queue]]] | keyword[def] identifier[save_raw_data_from_data_queue] ( identifier[data_queue] , identifier[filename] , identifier[mode] = literal[string] , identifier[title] = literal[string] , identifier[scan_parameters] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[scan_parameters] :
identifier[scan_parameters] ={}
keyword[with] identifier[open_raw_data_file] ( identifier[filename] , identifier[mode] = literal[string] , identifier[title] = literal[string] , identifier[scan_parameters] = identifier[list] ( identifier[dict] . identifier[iterkeys] ( identifier[scan_parameters] ))) keyword[as] identifier[raw_data_file] :
identifier[raw_data_file] . identifier[append] ( identifier[data_queue] , identifier[scan_parameters] = identifier[scan_parameters] ) | def save_raw_data_from_data_queue(data_queue, filename, mode='a', title='', scan_parameters=None): # mode="r+" to append data, raw_data_file_h5 must exist, "w" to overwrite raw_data_file_h5, "a" to append data, if raw_data_file_h5 does not exist it is created
'Writing raw data file from data queue\n\n If you need to write raw data once in a while this function may make it easy for you.\n '
if not scan_parameters:
scan_parameters = {} # depends on [control=['if'], data=[]]
with open_raw_data_file(filename, mode='a', title='', scan_parameters=list(dict.iterkeys(scan_parameters))) as raw_data_file:
raw_data_file.append(data_queue, scan_parameters=scan_parameters) # depends on [control=['with'], data=['raw_data_file']] |
def report_error_event(self, error_report):
"""Report error payload.
:type error_report: dict
:param: error_report:
dict payload of the error report formatted according to
https://cloud.google.com/error-reporting/docs/formatting-error-messages
This object should be built using
:meth:~`google.cloud.error_reporting.client._build_error_report`
"""
logger = self.logging_client.logger("errors")
logger.log_struct(error_report) | def function[report_error_event, parameter[self, error_report]]:
constant[Report error payload.
:type error_report: dict
:param: error_report:
dict payload of the error report formatted according to
https://cloud.google.com/error-reporting/docs/formatting-error-messages
This object should be built using
:meth:~`google.cloud.error_reporting.client._build_error_report`
]
variable[logger] assign[=] call[name[self].logging_client.logger, parameter[constant[errors]]]
call[name[logger].log_struct, parameter[name[error_report]]] | keyword[def] identifier[report_error_event] ( identifier[self] , identifier[error_report] ):
literal[string]
identifier[logger] = identifier[self] . identifier[logging_client] . identifier[logger] ( literal[string] )
identifier[logger] . identifier[log_struct] ( identifier[error_report] ) | def report_error_event(self, error_report):
"""Report error payload.
:type error_report: dict
:param: error_report:
dict payload of the error report formatted according to
https://cloud.google.com/error-reporting/docs/formatting-error-messages
This object should be built using
:meth:~`google.cloud.error_reporting.client._build_error_report`
"""
logger = self.logging_client.logger('errors')
logger.log_struct(error_report) |
def recover(options):
""" recover from an existing export run. We do this by
finding the last time change between events, truncate the file
and restart from there """
event_format = options.kwargs['omode']
buffer_size = 64*1024
fpd = open(options.kwargs['output'], "r+")
fpd.seek(0, 2) # seek to end
fptr = max(fpd.tell() - buffer_size, 0)
fptr_eof = 0
while (fptr > 0):
fpd.seek(fptr)
event_buffer = fpd.read(buffer_size)
(event_start, next_event_start, last_time) = \
get_event_start(event_buffer, event_format)
if (event_start != -1):
fptr_eof = event_start + fptr
break
fptr = fptr - buffer_size
if fptr < 0:
# didn't find a valid event, so start over
fptr_eof = 0
last_time = 0
# truncate file here
fpd.truncate(fptr_eof)
fpd.seek(fptr_eof)
fpd.write("\n")
fpd.close()
return last_time | def function[recover, parameter[options]]:
constant[ recover from an existing export run. We do this by
finding the last time change between events, truncate the file
and restart from there ]
variable[event_format] assign[=] call[name[options].kwargs][constant[omode]]
variable[buffer_size] assign[=] binary_operation[constant[64] * constant[1024]]
variable[fpd] assign[=] call[name[open], parameter[call[name[options].kwargs][constant[output]], constant[r+]]]
call[name[fpd].seek, parameter[constant[0], constant[2]]]
variable[fptr] assign[=] call[name[max], parameter[binary_operation[call[name[fpd].tell, parameter[]] - name[buffer_size]], constant[0]]]
variable[fptr_eof] assign[=] constant[0]
while compare[name[fptr] greater[>] constant[0]] begin[:]
call[name[fpd].seek, parameter[name[fptr]]]
variable[event_buffer] assign[=] call[name[fpd].read, parameter[name[buffer_size]]]
<ast.Tuple object at 0x7da1b1951e10> assign[=] call[name[get_event_start], parameter[name[event_buffer], name[event_format]]]
if compare[name[event_start] not_equal[!=] <ast.UnaryOp object at 0x7da1b1951c30>] begin[:]
variable[fptr_eof] assign[=] binary_operation[name[event_start] + name[fptr]]
break
variable[fptr] assign[=] binary_operation[name[fptr] - name[buffer_size]]
if compare[name[fptr] less[<] constant[0]] begin[:]
variable[fptr_eof] assign[=] constant[0]
variable[last_time] assign[=] constant[0]
call[name[fpd].truncate, parameter[name[fptr_eof]]]
call[name[fpd].seek, parameter[name[fptr_eof]]]
call[name[fpd].write, parameter[constant[
]]]
call[name[fpd].close, parameter[]]
return[name[last_time]] | keyword[def] identifier[recover] ( identifier[options] ):
literal[string]
identifier[event_format] = identifier[options] . identifier[kwargs] [ literal[string] ]
identifier[buffer_size] = literal[int] * literal[int]
identifier[fpd] = identifier[open] ( identifier[options] . identifier[kwargs] [ literal[string] ], literal[string] )
identifier[fpd] . identifier[seek] ( literal[int] , literal[int] )
identifier[fptr] = identifier[max] ( identifier[fpd] . identifier[tell] ()- identifier[buffer_size] , literal[int] )
identifier[fptr_eof] = literal[int]
keyword[while] ( identifier[fptr] > literal[int] ):
identifier[fpd] . identifier[seek] ( identifier[fptr] )
identifier[event_buffer] = identifier[fpd] . identifier[read] ( identifier[buffer_size] )
( identifier[event_start] , identifier[next_event_start] , identifier[last_time] )= identifier[get_event_start] ( identifier[event_buffer] , identifier[event_format] )
keyword[if] ( identifier[event_start] !=- literal[int] ):
identifier[fptr_eof] = identifier[event_start] + identifier[fptr]
keyword[break]
identifier[fptr] = identifier[fptr] - identifier[buffer_size]
keyword[if] identifier[fptr] < literal[int] :
identifier[fptr_eof] = literal[int]
identifier[last_time] = literal[int]
identifier[fpd] . identifier[truncate] ( identifier[fptr_eof] )
identifier[fpd] . identifier[seek] ( identifier[fptr_eof] )
identifier[fpd] . identifier[write] ( literal[string] )
identifier[fpd] . identifier[close] ()
keyword[return] identifier[last_time] | def recover(options):
""" recover from an existing export run. We do this by
finding the last time change between events, truncate the file
and restart from there """
event_format = options.kwargs['omode']
buffer_size = 64 * 1024
fpd = open(options.kwargs['output'], 'r+')
fpd.seek(0, 2) # seek to end
fptr = max(fpd.tell() - buffer_size, 0)
fptr_eof = 0
while fptr > 0:
fpd.seek(fptr)
event_buffer = fpd.read(buffer_size)
(event_start, next_event_start, last_time) = get_event_start(event_buffer, event_format)
if event_start != -1:
fptr_eof = event_start + fptr
break # depends on [control=['if'], data=['event_start']]
fptr = fptr - buffer_size # depends on [control=['while'], data=['fptr']]
if fptr < 0:
# didn't find a valid event, so start over
fptr_eof = 0
last_time = 0 # depends on [control=['if'], data=[]]
# truncate file here
fpd.truncate(fptr_eof)
fpd.seek(fptr_eof)
fpd.write('\n')
fpd.close()
return last_time |
def _scatter_list(self, data, owner):
"""Distribute a list from one rank to other ranks in a cyclic manner
Parameters
----------
data: list of pickle-able data
owner: rank that owns the data
Returns
-------
A list containing the data in a cyclic layout across ranks
"""
rank = self.comm.rank
size = self.comm.size
subject_submatrices = []
nblocks = self.comm.bcast(len(data)
if rank == owner else None, root=owner)
# For each submatrix
for idx in range(0, nblocks, size):
padded = None
extra = max(0, idx+size - nblocks)
# Pad with "None" so scatter can go to all processes
if data is not None:
padded = data[idx:idx+size]
if extra > 0:
padded = padded + [None]*extra
# Scatter submatrices to all processes
mytrans = self.comm.scatter(padded, root=owner)
# Contribute submatrix to subject list
if mytrans is not None:
subject_submatrices += [mytrans]
return subject_submatrices | def function[_scatter_list, parameter[self, data, owner]]:
constant[Distribute a list from one rank to other ranks in a cyclic manner
Parameters
----------
data: list of pickle-able data
owner: rank that owns the data
Returns
-------
A list containing the data in a cyclic layout across ranks
]
variable[rank] assign[=] name[self].comm.rank
variable[size] assign[=] name[self].comm.size
variable[subject_submatrices] assign[=] list[[]]
variable[nblocks] assign[=] call[name[self].comm.bcast, parameter[<ast.IfExp object at 0x7da20c6e4fd0>]]
for taget[name[idx]] in starred[call[name[range], parameter[constant[0], name[nblocks], name[size]]]] begin[:]
variable[padded] assign[=] constant[None]
variable[extra] assign[=] call[name[max], parameter[constant[0], binary_operation[binary_operation[name[idx] + name[size]] - name[nblocks]]]]
if compare[name[data] is_not constant[None]] begin[:]
variable[padded] assign[=] call[name[data]][<ast.Slice object at 0x7da20c6e58d0>]
if compare[name[extra] greater[>] constant[0]] begin[:]
variable[padded] assign[=] binary_operation[name[padded] + binary_operation[list[[<ast.Constant object at 0x7da1b0733b20>]] * name[extra]]]
variable[mytrans] assign[=] call[name[self].comm.scatter, parameter[name[padded]]]
if compare[name[mytrans] is_not constant[None]] begin[:]
<ast.AugAssign object at 0x7da1b077b700>
return[name[subject_submatrices]] | keyword[def] identifier[_scatter_list] ( identifier[self] , identifier[data] , identifier[owner] ):
literal[string]
identifier[rank] = identifier[self] . identifier[comm] . identifier[rank]
identifier[size] = identifier[self] . identifier[comm] . identifier[size]
identifier[subject_submatrices] =[]
identifier[nblocks] = identifier[self] . identifier[comm] . identifier[bcast] ( identifier[len] ( identifier[data] )
keyword[if] identifier[rank] == identifier[owner] keyword[else] keyword[None] , identifier[root] = identifier[owner] )
keyword[for] identifier[idx] keyword[in] identifier[range] ( literal[int] , identifier[nblocks] , identifier[size] ):
identifier[padded] = keyword[None]
identifier[extra] = identifier[max] ( literal[int] , identifier[idx] + identifier[size] - identifier[nblocks] )
keyword[if] identifier[data] keyword[is] keyword[not] keyword[None] :
identifier[padded] = identifier[data] [ identifier[idx] : identifier[idx] + identifier[size] ]
keyword[if] identifier[extra] > literal[int] :
identifier[padded] = identifier[padded] +[ keyword[None] ]* identifier[extra]
identifier[mytrans] = identifier[self] . identifier[comm] . identifier[scatter] ( identifier[padded] , identifier[root] = identifier[owner] )
keyword[if] identifier[mytrans] keyword[is] keyword[not] keyword[None] :
identifier[subject_submatrices] +=[ identifier[mytrans] ]
keyword[return] identifier[subject_submatrices] | def _scatter_list(self, data, owner):
"""Distribute a list from one rank to other ranks in a cyclic manner
Parameters
----------
data: list of pickle-able data
owner: rank that owns the data
Returns
-------
A list containing the data in a cyclic layout across ranks
"""
rank = self.comm.rank
size = self.comm.size
subject_submatrices = []
nblocks = self.comm.bcast(len(data) if rank == owner else None, root=owner)
# For each submatrix
for idx in range(0, nblocks, size):
padded = None
extra = max(0, idx + size - nblocks)
# Pad with "None" so scatter can go to all processes
if data is not None:
padded = data[idx:idx + size]
if extra > 0:
padded = padded + [None] * extra # depends on [control=['if'], data=['extra']] # depends on [control=['if'], data=['data']]
# Scatter submatrices to all processes
mytrans = self.comm.scatter(padded, root=owner)
# Contribute submatrix to subject list
if mytrans is not None:
subject_submatrices += [mytrans] # depends on [control=['if'], data=['mytrans']] # depends on [control=['for'], data=['idx']]
return subject_submatrices |
def _self_destruct(self):
"""Auto quit exec if parent process failed
"""
# This will give parent process 15 seconds to reset.
self._kill = threading.Timer(15, lambda: os._exit(0))
self._kill.start() | def function[_self_destruct, parameter[self]]:
constant[Auto quit exec if parent process failed
]
name[self]._kill assign[=] call[name[threading].Timer, parameter[constant[15], <ast.Lambda object at 0x7da1b08898a0>]]
call[name[self]._kill.start, parameter[]] | keyword[def] identifier[_self_destruct] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_kill] = identifier[threading] . identifier[Timer] ( literal[int] , keyword[lambda] : identifier[os] . identifier[_exit] ( literal[int] ))
identifier[self] . identifier[_kill] . identifier[start] () | def _self_destruct(self):
"""Auto quit exec if parent process failed
"""
# This will give parent process 15 seconds to reset.
self._kill = threading.Timer(15, lambda : os._exit(0))
self._kill.start() |
def add_tarball(self, tarball, package):
"""Add a tarball, possibly creating the directory if needed."""
if tarball is None:
logger.error(
"No tarball found for %s: probably a renamed project?",
package)
return
target_dir = os.path.join(self.root_directory, package)
if not os.path.exists(target_dir):
os.mkdir(target_dir)
logger.info("Created %s", target_dir)
logger.info("Copying tarball to %s", target_dir)
shutil.copy(tarball, target_dir) | def function[add_tarball, parameter[self, tarball, package]]:
constant[Add a tarball, possibly creating the directory if needed.]
if compare[name[tarball] is constant[None]] begin[:]
call[name[logger].error, parameter[constant[No tarball found for %s: probably a renamed project?], name[package]]]
return[None]
variable[target_dir] assign[=] call[name[os].path.join, parameter[name[self].root_directory, name[package]]]
if <ast.UnaryOp object at 0x7da1b15f7790> begin[:]
call[name[os].mkdir, parameter[name[target_dir]]]
call[name[logger].info, parameter[constant[Created %s], name[target_dir]]]
call[name[logger].info, parameter[constant[Copying tarball to %s], name[target_dir]]]
call[name[shutil].copy, parameter[name[tarball], name[target_dir]]] | keyword[def] identifier[add_tarball] ( identifier[self] , identifier[tarball] , identifier[package] ):
literal[string]
keyword[if] identifier[tarball] keyword[is] keyword[None] :
identifier[logger] . identifier[error] (
literal[string] ,
identifier[package] )
keyword[return]
identifier[target_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[self] . identifier[root_directory] , identifier[package] )
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[target_dir] ):
identifier[os] . identifier[mkdir] ( identifier[target_dir] )
identifier[logger] . identifier[info] ( literal[string] , identifier[target_dir] )
identifier[logger] . identifier[info] ( literal[string] , identifier[target_dir] )
identifier[shutil] . identifier[copy] ( identifier[tarball] , identifier[target_dir] ) | def add_tarball(self, tarball, package):
"""Add a tarball, possibly creating the directory if needed."""
if tarball is None:
logger.error('No tarball found for %s: probably a renamed project?', package)
return # depends on [control=['if'], data=[]]
target_dir = os.path.join(self.root_directory, package)
if not os.path.exists(target_dir):
os.mkdir(target_dir)
logger.info('Created %s', target_dir) # depends on [control=['if'], data=[]]
logger.info('Copying tarball to %s', target_dir)
shutil.copy(tarball, target_dir) |
def find_version(include_dev_version=True, version_file='version.txt',
version_module_paths=(),
git_args=('git', 'describe', '--tags', '--long'),
Popen=subprocess.Popen):
"""Find an appropriate version number from version control.
It's much more convenient to be able to use your version control system's
tagging mechanism to derive a version number than to have to duplicate that
information all over the place. Currently, only git is supported.
The default behavior is to write out a ``version.txt`` file which contains
the git output, for systems where git isn't installed or there is no .git
directory present. ``version.txt`` can (and probably should!) be packaged
in release tarballs by way of the ``MANIFEST.in`` file.
:param include_dev_version: By default, if there are any commits after the
most recent tag (as reported by git), that
number will be included in the version number
as a ``.dev`` suffix. For example, if the most
recent tag is ``1.0`` and there have been three
commits after that tag, the version number will
be ``1.0.dev3``. This behavior can be disabled
by setting this parameter to ``False``.
:param version_file: The name of the file where version information will be
saved. Reading and writing version files can be
disabled altogether by setting this parameter to
``None``.
:param version_module_paths: A list of python modules which will be
automatically generated containing
``__version__`` and ``__sha__`` attributes.
For example, with ``package/_version.py`` as a
version module path, ``package/__init__.py``
could do ``from package._version import
__version__, __sha__``.
:param git_args: The git command to run to get a version. By default, this
is ``git describe --tags --long``. Specify this as a list
of string arguments including ``git``, e.g. ``['git',
'describe']``.
:param Popen: Defaults to ``subprocess.Popen``. This is for testing.
"""
# try to pull the version from git, or (perhaps) fall back on a
# previously-saved version.
try:
proc = Popen(git_args, stdout=subprocess.PIPE)
except OSError:
raw_version = None
else:
raw_version = proc.communicate()[0].strip().decode()
version_source = 'git'
# git failed if the string is empty
if not raw_version:
if version_file is None:
print('%r failed' % (git_args,))
raise SystemExit(2)
elif not os.path.exists(version_file):
print("%r failed and %r isn't present." % (git_args, version_file))
print("are you installing from a github tarball?")
raise SystemExit(2)
print("couldn't determine version from git; using %r" % version_file)
with open(version_file, 'r') as infile:
raw_version = infile.read()
version_source = repr(version_file)
# try to parse the version into something usable.
try:
tag_version, commits, sha = raw_version.rsplit('-', 2)
except ValueError:
print("%r (from %s) couldn't be parsed into a version" % (
raw_version, version_source))
raise SystemExit(2)
if version_file is not None:
with open(version_file, 'w') as outfile:
outfile.write(raw_version)
if commits == '0' or not include_dev_version:
version = tag_version
else:
version = '%s.dev%s' % (tag_version, commits)
for path in version_module_paths:
with open(path, 'w') as outfile:
outfile.write("""
# This file is automatically generated by setup.py.
__version__ = %s
__sha__ = %s
""" % (repr(version).lstrip('u'), repr(sha).lstrip('u')))
return Version(version, commits, sha) | def function[find_version, parameter[include_dev_version, version_file, version_module_paths, git_args, Popen]]:
constant[Find an appropriate version number from version control.
It's much more convenient to be able to use your version control system's
tagging mechanism to derive a version number than to have to duplicate that
information all over the place. Currently, only git is supported.
The default behavior is to write out a ``version.txt`` file which contains
the git output, for systems where git isn't installed or there is no .git
directory present. ``version.txt`` can (and probably should!) be packaged
in release tarballs by way of the ``MANIFEST.in`` file.
:param include_dev_version: By default, if there are any commits after the
most recent tag (as reported by git), that
number will be included in the version number
as a ``.dev`` suffix. For example, if the most
recent tag is ``1.0`` and there have been three
commits after that tag, the version number will
be ``1.0.dev3``. This behavior can be disabled
by setting this parameter to ``False``.
:param version_file: The name of the file where version information will be
saved. Reading and writing version files can be
disabled altogether by setting this parameter to
``None``.
:param version_module_paths: A list of python modules which will be
automatically generated containing
``__version__`` and ``__sha__`` attributes.
For example, with ``package/_version.py`` as a
version module path, ``package/__init__.py``
could do ``from package._version import
__version__, __sha__``.
:param git_args: The git command to run to get a version. By default, this
is ``git describe --tags --long``. Specify this as a list
of string arguments including ``git``, e.g. ``['git',
'describe']``.
:param Popen: Defaults to ``subprocess.Popen``. This is for testing.
]
<ast.Try object at 0x7da1b095f3a0>
if <ast.UnaryOp object at 0x7da1b095c0d0> begin[:]
if compare[name[version_file] is constant[None]] begin[:]
call[name[print], parameter[binary_operation[constant[%r failed] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b095ff40>]]]]]
<ast.Raise object at 0x7da1b095c0a0>
call[name[print], parameter[binary_operation[constant[couldn't determine version from git; using %r] <ast.Mod object at 0x7da2590d6920> name[version_file]]]]
with call[name[open], parameter[name[version_file], constant[r]]] begin[:]
variable[raw_version] assign[=] call[name[infile].read, parameter[]]
variable[version_source] assign[=] call[name[repr], parameter[name[version_file]]]
<ast.Try object at 0x7da1b095ffa0>
if compare[name[version_file] is_not constant[None]] begin[:]
with call[name[open], parameter[name[version_file], constant[w]]] begin[:]
call[name[outfile].write, parameter[name[raw_version]]]
if <ast.BoolOp object at 0x7da1b095f5b0> begin[:]
variable[version] assign[=] name[tag_version]
for taget[name[path]] in starred[name[version_module_paths]] begin[:]
with call[name[open], parameter[name[path], constant[w]]] begin[:]
call[name[outfile].write, parameter[binary_operation[constant[
# This file is automatically generated by setup.py.
__version__ = %s
__sha__ = %s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Call object at 0x7da1b09d3670>, <ast.Call object at 0x7da1b09d1ed0>]]]]]
return[call[name[Version], parameter[name[version], name[commits], name[sha]]]] | keyword[def] identifier[find_version] ( identifier[include_dev_version] = keyword[True] , identifier[version_file] = literal[string] ,
identifier[version_module_paths] =(),
identifier[git_args] =( literal[string] , literal[string] , literal[string] , literal[string] ),
identifier[Popen] = identifier[subprocess] . identifier[Popen] ):
literal[string]
keyword[try] :
identifier[proc] = identifier[Popen] ( identifier[git_args] , identifier[stdout] = identifier[subprocess] . identifier[PIPE] )
keyword[except] identifier[OSError] :
identifier[raw_version] = keyword[None]
keyword[else] :
identifier[raw_version] = identifier[proc] . identifier[communicate] ()[ literal[int] ]. identifier[strip] (). identifier[decode] ()
identifier[version_source] = literal[string]
keyword[if] keyword[not] identifier[raw_version] :
keyword[if] identifier[version_file] keyword[is] keyword[None] :
identifier[print] ( literal[string] %( identifier[git_args] ,))
keyword[raise] identifier[SystemExit] ( literal[int] )
keyword[elif] keyword[not] identifier[os] . identifier[path] . identifier[exists] ( identifier[version_file] ):
identifier[print] ( literal[string] %( identifier[git_args] , identifier[version_file] ))
identifier[print] ( literal[string] )
keyword[raise] identifier[SystemExit] ( literal[int] )
identifier[print] ( literal[string] % identifier[version_file] )
keyword[with] identifier[open] ( identifier[version_file] , literal[string] ) keyword[as] identifier[infile] :
identifier[raw_version] = identifier[infile] . identifier[read] ()
identifier[version_source] = identifier[repr] ( identifier[version_file] )
keyword[try] :
identifier[tag_version] , identifier[commits] , identifier[sha] = identifier[raw_version] . identifier[rsplit] ( literal[string] , literal[int] )
keyword[except] identifier[ValueError] :
identifier[print] ( literal[string] %(
identifier[raw_version] , identifier[version_source] ))
keyword[raise] identifier[SystemExit] ( literal[int] )
keyword[if] identifier[version_file] keyword[is] keyword[not] keyword[None] :
keyword[with] identifier[open] ( identifier[version_file] , literal[string] ) keyword[as] identifier[outfile] :
identifier[outfile] . identifier[write] ( identifier[raw_version] )
keyword[if] identifier[commits] == literal[string] keyword[or] keyword[not] identifier[include_dev_version] :
identifier[version] = identifier[tag_version]
keyword[else] :
identifier[version] = literal[string] %( identifier[tag_version] , identifier[commits] )
keyword[for] identifier[path] keyword[in] identifier[version_module_paths] :
keyword[with] identifier[open] ( identifier[path] , literal[string] ) keyword[as] identifier[outfile] :
identifier[outfile] . identifier[write] ( literal[string] %( identifier[repr] ( identifier[version] ). identifier[lstrip] ( literal[string] ), identifier[repr] ( identifier[sha] ). identifier[lstrip] ( literal[string] )))
keyword[return] identifier[Version] ( identifier[version] , identifier[commits] , identifier[sha] ) | def find_version(include_dev_version=True, version_file='version.txt', version_module_paths=(), git_args=('git', 'describe', '--tags', '--long'), Popen=subprocess.Popen):
"""Find an appropriate version number from version control.
It's much more convenient to be able to use your version control system's
tagging mechanism to derive a version number than to have to duplicate that
information all over the place. Currently, only git is supported.
The default behavior is to write out a ``version.txt`` file which contains
the git output, for systems where git isn't installed or there is no .git
directory present. ``version.txt`` can (and probably should!) be packaged
in release tarballs by way of the ``MANIFEST.in`` file.
:param include_dev_version: By default, if there are any commits after the
most recent tag (as reported by git), that
number will be included in the version number
as a ``.dev`` suffix. For example, if the most
recent tag is ``1.0`` and there have been three
commits after that tag, the version number will
be ``1.0.dev3``. This behavior can be disabled
by setting this parameter to ``False``.
:param version_file: The name of the file where version information will be
saved. Reading and writing version files can be
disabled altogether by setting this parameter to
``None``.
:param version_module_paths: A list of python modules which will be
automatically generated containing
``__version__`` and ``__sha__`` attributes.
For example, with ``package/_version.py`` as a
version module path, ``package/__init__.py``
could do ``from package._version import
__version__, __sha__``.
:param git_args: The git command to run to get a version. By default, this
is ``git describe --tags --long``. Specify this as a list
of string arguments including ``git``, e.g. ``['git',
'describe']``.
:param Popen: Defaults to ``subprocess.Popen``. This is for testing.
"""
# try to pull the version from git, or (perhaps) fall back on a
# previously-saved version.
try:
proc = Popen(git_args, stdout=subprocess.PIPE) # depends on [control=['try'], data=[]]
except OSError:
raw_version = None # depends on [control=['except'], data=[]]
else:
raw_version = proc.communicate()[0].strip().decode()
version_source = 'git'
# git failed if the string is empty
if not raw_version:
if version_file is None:
print('%r failed' % (git_args,))
raise SystemExit(2) # depends on [control=['if'], data=[]]
elif not os.path.exists(version_file):
print("%r failed and %r isn't present." % (git_args, version_file))
print('are you installing from a github tarball?')
raise SystemExit(2) # depends on [control=['if'], data=[]]
print("couldn't determine version from git; using %r" % version_file)
with open(version_file, 'r') as infile:
raw_version = infile.read() # depends on [control=['with'], data=['infile']]
version_source = repr(version_file) # depends on [control=['if'], data=[]]
# try to parse the version into something usable.
try:
(tag_version, commits, sha) = raw_version.rsplit('-', 2) # depends on [control=['try'], data=[]]
except ValueError:
print("%r (from %s) couldn't be parsed into a version" % (raw_version, version_source))
raise SystemExit(2) # depends on [control=['except'], data=[]]
if version_file is not None:
with open(version_file, 'w') as outfile:
outfile.write(raw_version) # depends on [control=['with'], data=['outfile']] # depends on [control=['if'], data=['version_file']]
if commits == '0' or not include_dev_version:
version = tag_version # depends on [control=['if'], data=[]]
else:
version = '%s.dev%s' % (tag_version, commits)
for path in version_module_paths:
with open(path, 'w') as outfile:
outfile.write('\n# This file is automatically generated by setup.py.\n__version__ = %s\n__sha__ = %s\n' % (repr(version).lstrip('u'), repr(sha).lstrip('u'))) # depends on [control=['with'], data=['outfile']] # depends on [control=['for'], data=['path']]
return Version(version, commits, sha) |
def set(self, value):
"""
Sets the value of the string
:param value:
A unicode string
"""
if not isinstance(value, str_cls):
raise TypeError(unwrap(
'''
%s value must be a unicode string, not %s
''',
type_name(self),
type_name(value)
))
if value.find('@') != -1:
mailbox, hostname = value.rsplit('@', 1)
encoded_value = mailbox.encode('ascii') + b'@' + hostname.encode('idna')
else:
encoded_value = value.encode('ascii')
self._normalized = True
self._unicode = value
self.contents = encoded_value
self._header = None
if self._trailer != b'':
self._trailer = b'' | def function[set, parameter[self, value]]:
constant[
Sets the value of the string
:param value:
A unicode string
]
if <ast.UnaryOp object at 0x7da18fe92860> begin[:]
<ast.Raise object at 0x7da18fe90280>
if compare[call[name[value].find, parameter[constant[@]]] not_equal[!=] <ast.UnaryOp object at 0x7da1b2346ef0>] begin[:]
<ast.Tuple object at 0x7da1b23449d0> assign[=] call[name[value].rsplit, parameter[constant[@], constant[1]]]
variable[encoded_value] assign[=] binary_operation[binary_operation[call[name[mailbox].encode, parameter[constant[ascii]]] + constant[b'@']] + call[name[hostname].encode, parameter[constant[idna]]]]
name[self]._normalized assign[=] constant[True]
name[self]._unicode assign[=] name[value]
name[self].contents assign[=] name[encoded_value]
name[self]._header assign[=] constant[None]
if compare[name[self]._trailer not_equal[!=] constant[b'']] begin[:]
name[self]._trailer assign[=] constant[b''] | keyword[def] identifier[set] ( identifier[self] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[value] , identifier[str_cls] ):
keyword[raise] identifier[TypeError] ( identifier[unwrap] (
literal[string] ,
identifier[type_name] ( identifier[self] ),
identifier[type_name] ( identifier[value] )
))
keyword[if] identifier[value] . identifier[find] ( literal[string] )!=- literal[int] :
identifier[mailbox] , identifier[hostname] = identifier[value] . identifier[rsplit] ( literal[string] , literal[int] )
identifier[encoded_value] = identifier[mailbox] . identifier[encode] ( literal[string] )+ literal[string] + identifier[hostname] . identifier[encode] ( literal[string] )
keyword[else] :
identifier[encoded_value] = identifier[value] . identifier[encode] ( literal[string] )
identifier[self] . identifier[_normalized] = keyword[True]
identifier[self] . identifier[_unicode] = identifier[value]
identifier[self] . identifier[contents] = identifier[encoded_value]
identifier[self] . identifier[_header] = keyword[None]
keyword[if] identifier[self] . identifier[_trailer] != literal[string] :
identifier[self] . identifier[_trailer] = literal[string] | def set(self, value):
"""
Sets the value of the string
:param value:
A unicode string
"""
if not isinstance(value, str_cls):
raise TypeError(unwrap('\n %s value must be a unicode string, not %s\n ', type_name(self), type_name(value))) # depends on [control=['if'], data=[]]
if value.find('@') != -1:
(mailbox, hostname) = value.rsplit('@', 1)
encoded_value = mailbox.encode('ascii') + b'@' + hostname.encode('idna') # depends on [control=['if'], data=[]]
else:
encoded_value = value.encode('ascii')
self._normalized = True
self._unicode = value
self.contents = encoded_value
self._header = None
if self._trailer != b'':
self._trailer = b'' # depends on [control=['if'], data=[]] |
def start(name, config_file=None):
'''
starts a container in daemon mode
'''
if not exists(name):
raise ContainerNotExists("The container (%s) does not exist!" % name)
if name in running():
raise ContainerAlreadyRunning('The container %s is already started!' % name)
cmd = ['lxc-start', '-n', name, '-d']
if config_file:
cmd += ['-f', config_file]
subprocess.check_call(cmd) | def function[start, parameter[name, config_file]]:
constant[
starts a container in daemon mode
]
if <ast.UnaryOp object at 0x7da20c6a98d0> begin[:]
<ast.Raise object at 0x7da20c6a8670>
if compare[name[name] in call[name[running], parameter[]]] begin[:]
<ast.Raise object at 0x7da20c6a9ab0>
variable[cmd] assign[=] list[[<ast.Constant object at 0x7da20c6aaf80>, <ast.Constant object at 0x7da20c6abf10>, <ast.Name object at 0x7da20c6a8e80>, <ast.Constant object at 0x7da20c6aa7a0>]]
if name[config_file] begin[:]
<ast.AugAssign object at 0x7da20c6ab370>
call[name[subprocess].check_call, parameter[name[cmd]]] | keyword[def] identifier[start] ( identifier[name] , identifier[config_file] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[exists] ( identifier[name] ):
keyword[raise] identifier[ContainerNotExists] ( literal[string] % identifier[name] )
keyword[if] identifier[name] keyword[in] identifier[running] ():
keyword[raise] identifier[ContainerAlreadyRunning] ( literal[string] % identifier[name] )
identifier[cmd] =[ literal[string] , literal[string] , identifier[name] , literal[string] ]
keyword[if] identifier[config_file] :
identifier[cmd] +=[ literal[string] , identifier[config_file] ]
identifier[subprocess] . identifier[check_call] ( identifier[cmd] ) | def start(name, config_file=None):
"""
starts a container in daemon mode
"""
if not exists(name):
raise ContainerNotExists('The container (%s) does not exist!' % name) # depends on [control=['if'], data=[]]
if name in running():
raise ContainerAlreadyRunning('The container %s is already started!' % name) # depends on [control=['if'], data=['name']]
cmd = ['lxc-start', '-n', name, '-d']
if config_file:
cmd += ['-f', config_file] # depends on [control=['if'], data=[]]
subprocess.check_call(cmd) |
def atime(self):
"""
Get most recent access time in timestamp.
"""
try:
return self._stat.st_atime
except: # pragma: no cover
self._stat = self.stat()
return self.atime | def function[atime, parameter[self]]:
constant[
Get most recent access time in timestamp.
]
<ast.Try object at 0x7da2044c36a0> | keyword[def] identifier[atime] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[return] identifier[self] . identifier[_stat] . identifier[st_atime]
keyword[except] :
identifier[self] . identifier[_stat] = identifier[self] . identifier[stat] ()
keyword[return] identifier[self] . identifier[atime] | def atime(self):
"""
Get most recent access time in timestamp.
"""
try:
return self._stat.st_atime # depends on [control=['try'], data=[]]
except: # pragma: no cover
self._stat = self.stat()
return self.atime # depends on [control=['except'], data=[]] |
def libvlc_media_tracks_release(p_tracks, i_count):
'''Release media descriptor's elementary streams description array.
@param p_tracks: tracks info array to release.
@param i_count: number of elements in the array.
@version: LibVLC 2.1.0 and later.
'''
f = _Cfunctions.get('libvlc_media_tracks_release', None) or \
_Cfunction('libvlc_media_tracks_release', ((1,), (1,),), None,
None, ctypes.POINTER(MediaTrack), ctypes.c_uint)
return f(p_tracks, i_count) | def function[libvlc_media_tracks_release, parameter[p_tracks, i_count]]:
constant[Release media descriptor's elementary streams description array.
@param p_tracks: tracks info array to release.
@param i_count: number of elements in the array.
@version: LibVLC 2.1.0 and later.
]
variable[f] assign[=] <ast.BoolOp object at 0x7da1b1721210>
return[call[name[f], parameter[name[p_tracks], name[i_count]]]] | keyword[def] identifier[libvlc_media_tracks_release] ( identifier[p_tracks] , identifier[i_count] ):
literal[string]
identifier[f] = identifier[_Cfunctions] . identifier[get] ( literal[string] , keyword[None] ) keyword[or] identifier[_Cfunction] ( literal[string] ,(( literal[int] ,),( literal[int] ,),), keyword[None] ,
keyword[None] , identifier[ctypes] . identifier[POINTER] ( identifier[MediaTrack] ), identifier[ctypes] . identifier[c_uint] )
keyword[return] identifier[f] ( identifier[p_tracks] , identifier[i_count] ) | def libvlc_media_tracks_release(p_tracks, i_count):
"""Release media descriptor's elementary streams description array.
@param p_tracks: tracks info array to release.
@param i_count: number of elements in the array.
@version: LibVLC 2.1.0 and later.
"""
f = _Cfunctions.get('libvlc_media_tracks_release', None) or _Cfunction('libvlc_media_tracks_release', ((1,), (1,)), None, None, ctypes.POINTER(MediaTrack), ctypes.c_uint)
return f(p_tracks, i_count) |
def await_reply(self, msg_id, timeout=None):
"""
Continuously poll the kernel 'shell' stream for messages until:
- It receives an 'execute_reply' status for the given message id
- The timeout is reached awaiting a message, in which case
a `Queue.Empty` exception will be raised.
"""
while True:
msg = self.get_message(stream='shell', timeout=timeout)
# Is this the message we are waiting for?
if msg['parent_header'].get('msg_id') == msg_id:
if msg['content']['status'] == 'aborted':
# This should not occur!
raise RuntimeError('Kernel aborted execution request')
return | def function[await_reply, parameter[self, msg_id, timeout]]:
constant[
Continuously poll the kernel 'shell' stream for messages until:
- It receives an 'execute_reply' status for the given message id
- The timeout is reached awaiting a message, in which case
a `Queue.Empty` exception will be raised.
]
while constant[True] begin[:]
variable[msg] assign[=] call[name[self].get_message, parameter[]]
if compare[call[call[name[msg]][constant[parent_header]].get, parameter[constant[msg_id]]] equal[==] name[msg_id]] begin[:]
if compare[call[call[name[msg]][constant[content]]][constant[status]] equal[==] constant[aborted]] begin[:]
<ast.Raise object at 0x7da1b1e146a0>
return[None] | keyword[def] identifier[await_reply] ( identifier[self] , identifier[msg_id] , identifier[timeout] = keyword[None] ):
literal[string]
keyword[while] keyword[True] :
identifier[msg] = identifier[self] . identifier[get_message] ( identifier[stream] = literal[string] , identifier[timeout] = identifier[timeout] )
keyword[if] identifier[msg] [ literal[string] ]. identifier[get] ( literal[string] )== identifier[msg_id] :
keyword[if] identifier[msg] [ literal[string] ][ literal[string] ]== literal[string] :
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[return] | def await_reply(self, msg_id, timeout=None):
"""
Continuously poll the kernel 'shell' stream for messages until:
- It receives an 'execute_reply' status for the given message id
- The timeout is reached awaiting a message, in which case
a `Queue.Empty` exception will be raised.
"""
while True:
msg = self.get_message(stream='shell', timeout=timeout)
# Is this the message we are waiting for?
if msg['parent_header'].get('msg_id') == msg_id:
if msg['content']['status'] == 'aborted':
# This should not occur!
raise RuntimeError('Kernel aborted execution request') # depends on [control=['if'], data=[]]
return # depends on [control=['if'], data=[]] # depends on [control=['while'], data=[]] |
def remove(self, func):
"""Remove any provisioned log sink if auto created"""
if not self.data['name'].startswith(self.prefix):
return
parent = self.get_parent(self.get_log())
_, sink_path, _ = self.get_sink()
client = self.session.client(
'logging', 'v2', '%s.sinks' % (parent.split('/', 1)[0]))
try:
client.execute_command(
'delete', {'sinkName': sink_path})
except HttpError as e:
if e.resp.status != 404:
raise | def function[remove, parameter[self, func]]:
constant[Remove any provisioned log sink if auto created]
if <ast.UnaryOp object at 0x7da1b1f0ace0> begin[:]
return[None]
variable[parent] assign[=] call[name[self].get_parent, parameter[call[name[self].get_log, parameter[]]]]
<ast.Tuple object at 0x7da1b1f0aad0> assign[=] call[name[self].get_sink, parameter[]]
variable[client] assign[=] call[name[self].session.client, parameter[constant[logging], constant[v2], binary_operation[constant[%s.sinks] <ast.Mod object at 0x7da2590d6920> call[call[name[parent].split, parameter[constant[/], constant[1]]]][constant[0]]]]]
<ast.Try object at 0x7da1b1f0a830> | keyword[def] identifier[remove] ( identifier[self] , identifier[func] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[data] [ literal[string] ]. identifier[startswith] ( identifier[self] . identifier[prefix] ):
keyword[return]
identifier[parent] = identifier[self] . identifier[get_parent] ( identifier[self] . identifier[get_log] ())
identifier[_] , identifier[sink_path] , identifier[_] = identifier[self] . identifier[get_sink] ()
identifier[client] = identifier[self] . identifier[session] . identifier[client] (
literal[string] , literal[string] , literal[string] %( identifier[parent] . identifier[split] ( literal[string] , literal[int] )[ literal[int] ]))
keyword[try] :
identifier[client] . identifier[execute_command] (
literal[string] ,{ literal[string] : identifier[sink_path] })
keyword[except] identifier[HttpError] keyword[as] identifier[e] :
keyword[if] identifier[e] . identifier[resp] . identifier[status] != literal[int] :
keyword[raise] | def remove(self, func):
"""Remove any provisioned log sink if auto created"""
if not self.data['name'].startswith(self.prefix):
return # depends on [control=['if'], data=[]]
parent = self.get_parent(self.get_log())
(_, sink_path, _) = self.get_sink()
client = self.session.client('logging', 'v2', '%s.sinks' % parent.split('/', 1)[0])
try:
client.execute_command('delete', {'sinkName': sink_path}) # depends on [control=['try'], data=[]]
except HttpError as e:
if e.resp.status != 404:
raise # depends on [control=['if'], data=[]] # depends on [control=['except'], data=['e']] |
def importGurobiSolution(self, grbmodel):
"""
Import the solution from a gurobipy.Model object.
Args:
grbmodel: A :class:`gurobipy.Model` object with the model solved.
"""
self.eval(''.join(
'let {} := {};'.format(var.VarName, var.X)
for var in grbmodel.getVars()
if '$' not in var.VarName
)) | def function[importGurobiSolution, parameter[self, grbmodel]]:
constant[
Import the solution from a gurobipy.Model object.
Args:
grbmodel: A :class:`gurobipy.Model` object with the model solved.
]
call[name[self].eval, parameter[call[constant[].join, parameter[<ast.GeneratorExp object at 0x7da18ede4400>]]]] | keyword[def] identifier[importGurobiSolution] ( identifier[self] , identifier[grbmodel] ):
literal[string]
identifier[self] . identifier[eval] ( literal[string] . identifier[join] (
literal[string] . identifier[format] ( identifier[var] . identifier[VarName] , identifier[var] . identifier[X] )
keyword[for] identifier[var] keyword[in] identifier[grbmodel] . identifier[getVars] ()
keyword[if] literal[string] keyword[not] keyword[in] identifier[var] . identifier[VarName]
)) | def importGurobiSolution(self, grbmodel):
"""
Import the solution from a gurobipy.Model object.
Args:
grbmodel: A :class:`gurobipy.Model` object with the model solved.
"""
self.eval(''.join(('let {} := {};'.format(var.VarName, var.X) for var in grbmodel.getVars() if '$' not in var.VarName))) |
def get_ds_srs(ds):
"""Get srs object for GDAL Datset
"""
ds_srs = osr.SpatialReference()
ds_srs.ImportFromWkt(ds.GetProjectionRef())
return ds_srs | def function[get_ds_srs, parameter[ds]]:
constant[Get srs object for GDAL Datset
]
variable[ds_srs] assign[=] call[name[osr].SpatialReference, parameter[]]
call[name[ds_srs].ImportFromWkt, parameter[call[name[ds].GetProjectionRef, parameter[]]]]
return[name[ds_srs]] | keyword[def] identifier[get_ds_srs] ( identifier[ds] ):
literal[string]
identifier[ds_srs] = identifier[osr] . identifier[SpatialReference] ()
identifier[ds_srs] . identifier[ImportFromWkt] ( identifier[ds] . identifier[GetProjectionRef] ())
keyword[return] identifier[ds_srs] | def get_ds_srs(ds):
"""Get srs object for GDAL Datset
"""
ds_srs = osr.SpatialReference()
ds_srs.ImportFromWkt(ds.GetProjectionRef())
return ds_srs |
def get_preview_name(self):
"""Returns .SAFE name of full resolution L1C preview
:return: name of preview file
:rtype: str
"""
if self.safe_type == EsaSafeType.OLD_TYPE:
name = _edit_name(self.tile_id, AwsConstants.PVI, delete_end=True)
else:
name = '_'.join([self.tile_id.split('_')[1], self.get_datatake_time(), AwsConstants.PVI])
return '{}.jp2'.format(name) | def function[get_preview_name, parameter[self]]:
constant[Returns .SAFE name of full resolution L1C preview
:return: name of preview file
:rtype: str
]
if compare[name[self].safe_type equal[==] name[EsaSafeType].OLD_TYPE] begin[:]
variable[name] assign[=] call[name[_edit_name], parameter[name[self].tile_id, name[AwsConstants].PVI]]
return[call[constant[{}.jp2].format, parameter[name[name]]]] | keyword[def] identifier[get_preview_name] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[safe_type] == identifier[EsaSafeType] . identifier[OLD_TYPE] :
identifier[name] = identifier[_edit_name] ( identifier[self] . identifier[tile_id] , identifier[AwsConstants] . identifier[PVI] , identifier[delete_end] = keyword[True] )
keyword[else] :
identifier[name] = literal[string] . identifier[join] ([ identifier[self] . identifier[tile_id] . identifier[split] ( literal[string] )[ literal[int] ], identifier[self] . identifier[get_datatake_time] (), identifier[AwsConstants] . identifier[PVI] ])
keyword[return] literal[string] . identifier[format] ( identifier[name] ) | def get_preview_name(self):
"""Returns .SAFE name of full resolution L1C preview
:return: name of preview file
:rtype: str
"""
if self.safe_type == EsaSafeType.OLD_TYPE:
name = _edit_name(self.tile_id, AwsConstants.PVI, delete_end=True) # depends on [control=['if'], data=[]]
else:
name = '_'.join([self.tile_id.split('_')[1], self.get_datatake_time(), AwsConstants.PVI])
return '{}.jp2'.format(name) |
def get_units(username):
"""
Return all units of user 'username'
"""
connection, ldap_base = _get_LDAP_connection()
# Search the user dn
connection.search(
search_base=ldap_base,
search_filter='(uid={}@*)'.format(username),
)
# For each user dn give me the unit
dn_list = [connection.response[index]['dn'] for index in range(len(connection.response))]
units = []
# For each unit search unit information and give me the unit id
for dn in dn_list:
unit = dn.split(",ou=")[1]
connection.search(search_base=ldap_base, search_filter='(ou={})'.format(unit), attributes=['uniqueidentifier'])
units.append(get_attribute(connection.response, 'uniqueIdentifier'))
return units | def function[get_units, parameter[username]]:
constant[
Return all units of user 'username'
]
<ast.Tuple object at 0x7da20e9b1480> assign[=] call[name[_get_LDAP_connection], parameter[]]
call[name[connection].search, parameter[]]
variable[dn_list] assign[=] <ast.ListComp object at 0x7da20e9b2440>
variable[units] assign[=] list[[]]
for taget[name[dn]] in starred[name[dn_list]] begin[:]
variable[unit] assign[=] call[call[name[dn].split, parameter[constant[,ou=]]]][constant[1]]
call[name[connection].search, parameter[]]
call[name[units].append, parameter[call[name[get_attribute], parameter[name[connection].response, constant[uniqueIdentifier]]]]]
return[name[units]] | keyword[def] identifier[get_units] ( identifier[username] ):
literal[string]
identifier[connection] , identifier[ldap_base] = identifier[_get_LDAP_connection] ()
identifier[connection] . identifier[search] (
identifier[search_base] = identifier[ldap_base] ,
identifier[search_filter] = literal[string] . identifier[format] ( identifier[username] ),
)
identifier[dn_list] =[ identifier[connection] . identifier[response] [ identifier[index] ][ literal[string] ] keyword[for] identifier[index] keyword[in] identifier[range] ( identifier[len] ( identifier[connection] . identifier[response] ))]
identifier[units] =[]
keyword[for] identifier[dn] keyword[in] identifier[dn_list] :
identifier[unit] = identifier[dn] . identifier[split] ( literal[string] )[ literal[int] ]
identifier[connection] . identifier[search] ( identifier[search_base] = identifier[ldap_base] , identifier[search_filter] = literal[string] . identifier[format] ( identifier[unit] ), identifier[attributes] =[ literal[string] ])
identifier[units] . identifier[append] ( identifier[get_attribute] ( identifier[connection] . identifier[response] , literal[string] ))
keyword[return] identifier[units] | def get_units(username):
"""
Return all units of user 'username'
"""
(connection, ldap_base) = _get_LDAP_connection()
# Search the user dn
connection.search(search_base=ldap_base, search_filter='(uid={}@*)'.format(username))
# For each user dn give me the unit
dn_list = [connection.response[index]['dn'] for index in range(len(connection.response))]
units = []
# For each unit search unit information and give me the unit id
for dn in dn_list:
unit = dn.split(',ou=')[1]
connection.search(search_base=ldap_base, search_filter='(ou={})'.format(unit), attributes=['uniqueidentifier'])
units.append(get_attribute(connection.response, 'uniqueIdentifier')) # depends on [control=['for'], data=['dn']]
return units |
def _adaptation(self, f_l, l_a, xyz, xyz_w, xyz_b, xyz_p=None, p=None, helson_judd=False, discount_illuminant=True):
"""
:param f_l: Luminance adaptation factor
:param l_a: Adapting luminance
:param xyz: Stimulus color in XYZ
:param xyz_w: Reference white color in XYZ
:param xyz_b: Background color in XYZ
:param xyz_p: Proxima field color in XYZ
:param p: Simultaneous contrast/assimilation parameter.
"""
rgb = self.xyz_to_rgb(xyz)
logger.debug('RGB: {}'.format(rgb))
rgb_w = self.xyz_to_rgb(xyz_w)
logger.debug('RGB_W: {}'.format(rgb_w))
y_w = xyz_w[1]
y_b = xyz_b[1]
h_rgb = 3 * rgb_w / (rgb_w.sum())
logger.debug('H_RGB: {}'.format(h_rgb))
# Chromatic adaptation factors
if not discount_illuminant:
f_rgb = (1 + (l_a ** (1 / 3)) + h_rgb) / (1 + (l_a ** (1 / 3)) + (1 / h_rgb))
else:
f_rgb = numpy.ones(numpy.shape(h_rgb))
logger.debug('F_RGB: {}'.format(f_rgb))
# Adaptation factor
if helson_judd:
d_rgb = self._f_n((y_b / y_w) * f_l * f_rgb[1]) - self._f_n((y_b / y_w) * f_l * f_rgb)
assert d_rgb[1] == 0
else:
d_rgb = numpy.zeros(numpy.shape(f_rgb))
logger.debug('D_RGB: {}'.format(d_rgb))
# Cone bleaching factors
rgb_b = (10 ** 7) / ((10 ** 7) + 5 * l_a * (rgb_w / 100))
logger.debug('B_RGB: {}'.format(rgb_b))
if xyz_p is not None and p is not None:
logger.debug('Account for simultaneous chromatic contrast')
rgb_p = self.xyz_to_rgb(xyz_p)
rgb_w = self.adjust_white_for_scc(rgb_p, rgb_b, rgb_w, p)
# Adapt rgb using modified
rgb_a = 1 + rgb_b * (self._f_n(f_l * f_rgb * rgb / rgb_w) + d_rgb)
logger.debug('RGB_A: {}'.format(rgb_a))
return rgb_a | def function[_adaptation, parameter[self, f_l, l_a, xyz, xyz_w, xyz_b, xyz_p, p, helson_judd, discount_illuminant]]:
constant[
:param f_l: Luminance adaptation factor
:param l_a: Adapting luminance
:param xyz: Stimulus color in XYZ
:param xyz_w: Reference white color in XYZ
:param xyz_b: Background color in XYZ
:param xyz_p: Proxima field color in XYZ
:param p: Simultaneous contrast/assimilation parameter.
]
variable[rgb] assign[=] call[name[self].xyz_to_rgb, parameter[name[xyz]]]
call[name[logger].debug, parameter[call[constant[RGB: {}].format, parameter[name[rgb]]]]]
variable[rgb_w] assign[=] call[name[self].xyz_to_rgb, parameter[name[xyz_w]]]
call[name[logger].debug, parameter[call[constant[RGB_W: {}].format, parameter[name[rgb_w]]]]]
variable[y_w] assign[=] call[name[xyz_w]][constant[1]]
variable[y_b] assign[=] call[name[xyz_b]][constant[1]]
variable[h_rgb] assign[=] binary_operation[binary_operation[constant[3] * name[rgb_w]] / call[name[rgb_w].sum, parameter[]]]
call[name[logger].debug, parameter[call[constant[H_RGB: {}].format, parameter[name[h_rgb]]]]]
if <ast.UnaryOp object at 0x7da2041d8b80> begin[:]
variable[f_rgb] assign[=] binary_operation[binary_operation[binary_operation[constant[1] + binary_operation[name[l_a] ** binary_operation[constant[1] / constant[3]]]] + name[h_rgb]] / binary_operation[binary_operation[constant[1] + binary_operation[name[l_a] ** binary_operation[constant[1] / constant[3]]]] + binary_operation[constant[1] / name[h_rgb]]]]
call[name[logger].debug, parameter[call[constant[F_RGB: {}].format, parameter[name[f_rgb]]]]]
if name[helson_judd] begin[:]
variable[d_rgb] assign[=] binary_operation[call[name[self]._f_n, parameter[binary_operation[binary_operation[binary_operation[name[y_b] / name[y_w]] * name[f_l]] * call[name[f_rgb]][constant[1]]]]] - call[name[self]._f_n, parameter[binary_operation[binary_operation[binary_operation[name[y_b] / name[y_w]] * name[f_l]] * name[f_rgb]]]]]
assert[compare[call[name[d_rgb]][constant[1]] equal[==] constant[0]]]
call[name[logger].debug, parameter[call[constant[D_RGB: {}].format, parameter[name[d_rgb]]]]]
variable[rgb_b] assign[=] binary_operation[binary_operation[constant[10] ** constant[7]] / binary_operation[binary_operation[constant[10] ** constant[7]] + binary_operation[binary_operation[constant[5] * name[l_a]] * binary_operation[name[rgb_w] / constant[100]]]]]
call[name[logger].debug, parameter[call[constant[B_RGB: {}].format, parameter[name[rgb_b]]]]]
if <ast.BoolOp object at 0x7da207f9abc0> begin[:]
call[name[logger].debug, parameter[constant[Account for simultaneous chromatic contrast]]]
variable[rgb_p] assign[=] call[name[self].xyz_to_rgb, parameter[name[xyz_p]]]
variable[rgb_w] assign[=] call[name[self].adjust_white_for_scc, parameter[name[rgb_p], name[rgb_b], name[rgb_w], name[p]]]
variable[rgb_a] assign[=] binary_operation[constant[1] + binary_operation[name[rgb_b] * binary_operation[call[name[self]._f_n, parameter[binary_operation[binary_operation[binary_operation[name[f_l] * name[f_rgb]] * name[rgb]] / name[rgb_w]]]] + name[d_rgb]]]]
call[name[logger].debug, parameter[call[constant[RGB_A: {}].format, parameter[name[rgb_a]]]]]
return[name[rgb_a]] | keyword[def] identifier[_adaptation] ( identifier[self] , identifier[f_l] , identifier[l_a] , identifier[xyz] , identifier[xyz_w] , identifier[xyz_b] , identifier[xyz_p] = keyword[None] , identifier[p] = keyword[None] , identifier[helson_judd] = keyword[False] , identifier[discount_illuminant] = keyword[True] ):
literal[string]
identifier[rgb] = identifier[self] . identifier[xyz_to_rgb] ( identifier[xyz] )
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[rgb] ))
identifier[rgb_w] = identifier[self] . identifier[xyz_to_rgb] ( identifier[xyz_w] )
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[rgb_w] ))
identifier[y_w] = identifier[xyz_w] [ literal[int] ]
identifier[y_b] = identifier[xyz_b] [ literal[int] ]
identifier[h_rgb] = literal[int] * identifier[rgb_w] /( identifier[rgb_w] . identifier[sum] ())
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[h_rgb] ))
keyword[if] keyword[not] identifier[discount_illuminant] :
identifier[f_rgb] =( literal[int] +( identifier[l_a] **( literal[int] / literal[int] ))+ identifier[h_rgb] )/( literal[int] +( identifier[l_a] **( literal[int] / literal[int] ))+( literal[int] / identifier[h_rgb] ))
keyword[else] :
identifier[f_rgb] = identifier[numpy] . identifier[ones] ( identifier[numpy] . identifier[shape] ( identifier[h_rgb] ))
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[f_rgb] ))
keyword[if] identifier[helson_judd] :
identifier[d_rgb] = identifier[self] . identifier[_f_n] (( identifier[y_b] / identifier[y_w] )* identifier[f_l] * identifier[f_rgb] [ literal[int] ])- identifier[self] . identifier[_f_n] (( identifier[y_b] / identifier[y_w] )* identifier[f_l] * identifier[f_rgb] )
keyword[assert] identifier[d_rgb] [ literal[int] ]== literal[int]
keyword[else] :
identifier[d_rgb] = identifier[numpy] . identifier[zeros] ( identifier[numpy] . identifier[shape] ( identifier[f_rgb] ))
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[d_rgb] ))
identifier[rgb_b] =( literal[int] ** literal[int] )/(( literal[int] ** literal[int] )+ literal[int] * identifier[l_a] *( identifier[rgb_w] / literal[int] ))
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[rgb_b] ))
keyword[if] identifier[xyz_p] keyword[is] keyword[not] keyword[None] keyword[and] identifier[p] keyword[is] keyword[not] keyword[None] :
identifier[logger] . identifier[debug] ( literal[string] )
identifier[rgb_p] = identifier[self] . identifier[xyz_to_rgb] ( identifier[xyz_p] )
identifier[rgb_w] = identifier[self] . identifier[adjust_white_for_scc] ( identifier[rgb_p] , identifier[rgb_b] , identifier[rgb_w] , identifier[p] )
identifier[rgb_a] = literal[int] + identifier[rgb_b] *( identifier[self] . identifier[_f_n] ( identifier[f_l] * identifier[f_rgb] * identifier[rgb] / identifier[rgb_w] )+ identifier[d_rgb] )
identifier[logger] . identifier[debug] ( literal[string] . identifier[format] ( identifier[rgb_a] ))
keyword[return] identifier[rgb_a] | def _adaptation(self, f_l, l_a, xyz, xyz_w, xyz_b, xyz_p=None, p=None, helson_judd=False, discount_illuminant=True):
"""
:param f_l: Luminance adaptation factor
:param l_a: Adapting luminance
:param xyz: Stimulus color in XYZ
:param xyz_w: Reference white color in XYZ
:param xyz_b: Background color in XYZ
:param xyz_p: Proxima field color in XYZ
:param p: Simultaneous contrast/assimilation parameter.
"""
rgb = self.xyz_to_rgb(xyz)
logger.debug('RGB: {}'.format(rgb))
rgb_w = self.xyz_to_rgb(xyz_w)
logger.debug('RGB_W: {}'.format(rgb_w))
y_w = xyz_w[1]
y_b = xyz_b[1]
h_rgb = 3 * rgb_w / rgb_w.sum()
logger.debug('H_RGB: {}'.format(h_rgb))
# Chromatic adaptation factors
if not discount_illuminant:
f_rgb = (1 + l_a ** (1 / 3) + h_rgb) / (1 + l_a ** (1 / 3) + 1 / h_rgb) # depends on [control=['if'], data=[]]
else:
f_rgb = numpy.ones(numpy.shape(h_rgb))
logger.debug('F_RGB: {}'.format(f_rgb))
# Adaptation factor
if helson_judd:
d_rgb = self._f_n(y_b / y_w * f_l * f_rgb[1]) - self._f_n(y_b / y_w * f_l * f_rgb)
assert d_rgb[1] == 0 # depends on [control=['if'], data=[]]
else:
d_rgb = numpy.zeros(numpy.shape(f_rgb))
logger.debug('D_RGB: {}'.format(d_rgb))
# Cone bleaching factors
rgb_b = 10 ** 7 / (10 ** 7 + 5 * l_a * (rgb_w / 100))
logger.debug('B_RGB: {}'.format(rgb_b))
if xyz_p is not None and p is not None:
logger.debug('Account for simultaneous chromatic contrast')
rgb_p = self.xyz_to_rgb(xyz_p)
rgb_w = self.adjust_white_for_scc(rgb_p, rgb_b, rgb_w, p) # depends on [control=['if'], data=[]]
# Adapt rgb using modified
rgb_a = 1 + rgb_b * (self._f_n(f_l * f_rgb * rgb / rgb_w) + d_rgb)
logger.debug('RGB_A: {}'.format(rgb_a))
return rgb_a |
def wait_for_service(host, port, timeout=DEFAULT_TIMEOUT):
"""
Return True if connection to the host and port is successful within the timeout.
@param host: str: hostname of the server
@param port: int: TCP port to which to connect
@param timeout: int: length of time in seconds to try to connect before giving up
@return: bool
"""
service = ServiceURL('tcp://{}:{}'.format(host, port), timeout)
return service.wait() | def function[wait_for_service, parameter[host, port, timeout]]:
constant[
Return True if connection to the host and port is successful within the timeout.
@param host: str: hostname of the server
@param port: int: TCP port to which to connect
@param timeout: int: length of time in seconds to try to connect before giving up
@return: bool
]
variable[service] assign[=] call[name[ServiceURL], parameter[call[constant[tcp://{}:{}].format, parameter[name[host], name[port]]], name[timeout]]]
return[call[name[service].wait, parameter[]]] | keyword[def] identifier[wait_for_service] ( identifier[host] , identifier[port] , identifier[timeout] = identifier[DEFAULT_TIMEOUT] ):
literal[string]
identifier[service] = identifier[ServiceURL] ( literal[string] . identifier[format] ( identifier[host] , identifier[port] ), identifier[timeout] )
keyword[return] identifier[service] . identifier[wait] () | def wait_for_service(host, port, timeout=DEFAULT_TIMEOUT):
"""
Return True if connection to the host and port is successful within the timeout.
@param host: str: hostname of the server
@param port: int: TCP port to which to connect
@param timeout: int: length of time in seconds to try to connect before giving up
@return: bool
"""
service = ServiceURL('tcp://{}:{}'.format(host, port), timeout)
return service.wait() |
def get_parents(self, uri, type='all'):
"""Return parents of a given entry.
Parameters
----------
uri : str
The URI of the entry whose parents are to be returned. See the
get_uri method to construct this URI from a name space and id.
type : str
'all': return all parents irrespective of level;
'immediate': return only the immediate parents;
'top': return only the highest level parents
"""
# First do a quick dict lookup to see if there are any parents
all_parents = set(self.isa_or_partof_closure.get(uri, []))
# If there are no parents or we are looking for all, we can return here
if not all_parents or type == 'all':
return all_parents
# If we need immediate parents, we search again, this time knowing that
# the uri is definitely in the graph since it has some parents
if type == 'immediate':
node = rdflib.term.URIRef(uri)
immediate_parents = list(set(self.isa_or_partof_objects(node)))
return [p.toPython() for p in immediate_parents]
elif type == 'top':
top_parents = [p for p in all_parents if
not self.isa_or_partof_closure.get(p)]
return top_parents | def function[get_parents, parameter[self, uri, type]]:
constant[Return parents of a given entry.
Parameters
----------
uri : str
The URI of the entry whose parents are to be returned. See the
get_uri method to construct this URI from a name space and id.
type : str
'all': return all parents irrespective of level;
'immediate': return only the immediate parents;
'top': return only the highest level parents
]
variable[all_parents] assign[=] call[name[set], parameter[call[name[self].isa_or_partof_closure.get, parameter[name[uri], list[[]]]]]]
if <ast.BoolOp object at 0x7da2041d8070> begin[:]
return[name[all_parents]]
if compare[name[type] equal[==] constant[immediate]] begin[:]
variable[node] assign[=] call[name[rdflib].term.URIRef, parameter[name[uri]]]
variable[immediate_parents] assign[=] call[name[list], parameter[call[name[set], parameter[call[name[self].isa_or_partof_objects, parameter[name[node]]]]]]]
return[<ast.ListComp object at 0x7da2041daec0>] | keyword[def] identifier[get_parents] ( identifier[self] , identifier[uri] , identifier[type] = literal[string] ):
literal[string]
identifier[all_parents] = identifier[set] ( identifier[self] . identifier[isa_or_partof_closure] . identifier[get] ( identifier[uri] ,[]))
keyword[if] keyword[not] identifier[all_parents] keyword[or] identifier[type] == literal[string] :
keyword[return] identifier[all_parents]
keyword[if] identifier[type] == literal[string] :
identifier[node] = identifier[rdflib] . identifier[term] . identifier[URIRef] ( identifier[uri] )
identifier[immediate_parents] = identifier[list] ( identifier[set] ( identifier[self] . identifier[isa_or_partof_objects] ( identifier[node] )))
keyword[return] [ identifier[p] . identifier[toPython] () keyword[for] identifier[p] keyword[in] identifier[immediate_parents] ]
keyword[elif] identifier[type] == literal[string] :
identifier[top_parents] =[ identifier[p] keyword[for] identifier[p] keyword[in] identifier[all_parents] keyword[if]
keyword[not] identifier[self] . identifier[isa_or_partof_closure] . identifier[get] ( identifier[p] )]
keyword[return] identifier[top_parents] | def get_parents(self, uri, type='all'):
"""Return parents of a given entry.
Parameters
----------
uri : str
The URI of the entry whose parents are to be returned. See the
get_uri method to construct this URI from a name space and id.
type : str
'all': return all parents irrespective of level;
'immediate': return only the immediate parents;
'top': return only the highest level parents
"""
# First do a quick dict lookup to see if there are any parents
all_parents = set(self.isa_or_partof_closure.get(uri, []))
# If there are no parents or we are looking for all, we can return here
if not all_parents or type == 'all':
return all_parents # depends on [control=['if'], data=[]]
# If we need immediate parents, we search again, this time knowing that
# the uri is definitely in the graph since it has some parents
if type == 'immediate':
node = rdflib.term.URIRef(uri)
immediate_parents = list(set(self.isa_or_partof_objects(node)))
return [p.toPython() for p in immediate_parents] # depends on [control=['if'], data=[]]
elif type == 'top':
top_parents = [p for p in all_parents if not self.isa_or_partof_closure.get(p)]
return top_parents # depends on [control=['if'], data=[]] |
def handle_user(self, data):
'''
Insert user informations in data
Override it to add extra user attributes.
'''
# Default to unauthenticated anonymous user
data['user'] = {
'username': '',
'is_authenticated': False,
'is_staff': False,
'is_superuser': False,
'permissions': tuple(),
}
if 'django.contrib.sessions.middleware.SessionMiddleware' in settings.MIDDLEWARE_CLASSES:
user = self.request.user
data['user']['is_authenticated'] = user.is_authenticated()
if hasattr(user, 'username'):
data['user']['username'] = user.username
elif hasattr(user, 'get_username'):
data['user']['username'] = user.get_username()
if hasattr(user, 'is_staff'):
data['user']['is_staff'] = user.is_staff
if hasattr(user, 'is_superuser'):
data['user']['is_superuser'] = user.is_superuser
if hasattr(user, 'get_all_permissions'):
data['user']['permissions'] = tuple(user.get_all_permissions()) | def function[handle_user, parameter[self, data]]:
constant[
Insert user informations in data
Override it to add extra user attributes.
]
call[name[data]][constant[user]] assign[=] dictionary[[<ast.Constant object at 0x7da1b1123b80>, <ast.Constant object at 0x7da1b1123d30>, <ast.Constant object at 0x7da1b11209a0>, <ast.Constant object at 0x7da1b1122d10>, <ast.Constant object at 0x7da1b11219c0>], [<ast.Constant object at 0x7da1b1123c40>, <ast.Constant object at 0x7da1b1120dc0>, <ast.Constant object at 0x7da1b1121cc0>, <ast.Constant object at 0x7da1b1121480>, <ast.Call object at 0x7da1b1122e60>]]
if compare[constant[django.contrib.sessions.middleware.SessionMiddleware] in name[settings].MIDDLEWARE_CLASSES] begin[:]
variable[user] assign[=] name[self].request.user
call[call[name[data]][constant[user]]][constant[is_authenticated]] assign[=] call[name[user].is_authenticated, parameter[]]
if call[name[hasattr], parameter[name[user], constant[username]]] begin[:]
call[call[name[data]][constant[user]]][constant[username]] assign[=] name[user].username
if call[name[hasattr], parameter[name[user], constant[is_staff]]] begin[:]
call[call[name[data]][constant[user]]][constant[is_staff]] assign[=] name[user].is_staff
if call[name[hasattr], parameter[name[user], constant[is_superuser]]] begin[:]
call[call[name[data]][constant[user]]][constant[is_superuser]] assign[=] name[user].is_superuser
if call[name[hasattr], parameter[name[user], constant[get_all_permissions]]] begin[:]
call[call[name[data]][constant[user]]][constant[permissions]] assign[=] call[name[tuple], parameter[call[name[user].get_all_permissions, parameter[]]]] | keyword[def] identifier[handle_user] ( identifier[self] , identifier[data] ):
literal[string]
identifier[data] [ literal[string] ]={
literal[string] : literal[string] ,
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : keyword[False] ,
literal[string] : identifier[tuple] (),
}
keyword[if] literal[string] keyword[in] identifier[settings] . identifier[MIDDLEWARE_CLASSES] :
identifier[user] = identifier[self] . identifier[request] . identifier[user]
identifier[data] [ literal[string] ][ literal[string] ]= identifier[user] . identifier[is_authenticated] ()
keyword[if] identifier[hasattr] ( identifier[user] , literal[string] ):
identifier[data] [ literal[string] ][ literal[string] ]= identifier[user] . identifier[username]
keyword[elif] identifier[hasattr] ( identifier[user] , literal[string] ):
identifier[data] [ literal[string] ][ literal[string] ]= identifier[user] . identifier[get_username] ()
keyword[if] identifier[hasattr] ( identifier[user] , literal[string] ):
identifier[data] [ literal[string] ][ literal[string] ]= identifier[user] . identifier[is_staff]
keyword[if] identifier[hasattr] ( identifier[user] , literal[string] ):
identifier[data] [ literal[string] ][ literal[string] ]= identifier[user] . identifier[is_superuser]
keyword[if] identifier[hasattr] ( identifier[user] , literal[string] ):
identifier[data] [ literal[string] ][ literal[string] ]= identifier[tuple] ( identifier[user] . identifier[get_all_permissions] ()) | def handle_user(self, data):
"""
Insert user informations in data
Override it to add extra user attributes.
"""
# Default to unauthenticated anonymous user
data['user'] = {'username': '', 'is_authenticated': False, 'is_staff': False, 'is_superuser': False, 'permissions': tuple()}
if 'django.contrib.sessions.middleware.SessionMiddleware' in settings.MIDDLEWARE_CLASSES:
user = self.request.user
data['user']['is_authenticated'] = user.is_authenticated()
if hasattr(user, 'username'):
data['user']['username'] = user.username # depends on [control=['if'], data=[]]
elif hasattr(user, 'get_username'):
data['user']['username'] = user.get_username() # depends on [control=['if'], data=[]]
if hasattr(user, 'is_staff'):
data['user']['is_staff'] = user.is_staff # depends on [control=['if'], data=[]]
if hasattr(user, 'is_superuser'):
data['user']['is_superuser'] = user.is_superuser # depends on [control=['if'], data=[]]
if hasattr(user, 'get_all_permissions'):
data['user']['permissions'] = tuple(user.get_all_permissions()) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] |
def search_code(self, query, sort=None, order=None, per_page=None,
text_match=False, number=-1, etag=None):
"""Find code via the code search API.
The query can contain any combination of the following supported
qualifiers:
- ``in`` Qualifies which fields are searched. With this qualifier you
can restrict the search to just the file contents, the file path, or
both.
- ``language`` Searches code based on the language it’s written in.
- ``fork`` Specifies that code from forked repositories should be
searched. Repository forks will not be searchable unless the fork
has more stars than the parent repository.
- ``size`` Finds files that match a certain size (in bytes).
- ``path`` Specifies the path that the resulting file must be at.
- ``extension`` Matches files with a certain extension.
- ``user`` or ``repo`` Limits searches to a specific user or
repository.
For more information about these qualifiers, see: http://git.io/-DvAuA
:param str query: (required), a valid query as described above, e.g.,
``addClass in:file language:js repo:jquery/jquery``
:param str sort: (optional), how the results should be sorted;
option(s): ``indexed``; default: best match
:param str order: (optional), the direction of the sorted results,
options: ``asc``, ``desc``; default: ``desc``
:param int per_page: (optional)
:param bool text_match: (optional), if True, return matching search
terms. See http://git.io/iRmJxg for more information
:param int number: (optional), number of repositories to return.
Default: -1, returns all available repositories
:param str etag: (optional), previous ETag header value
:return: generator of :class:`CodeSearchResult
<github3.search.CodeSearchResult>`
"""
params = {'q': query}
headers = {}
if sort == 'indexed':
params['sort'] = sort
if sort and order in ('asc', 'desc'):
params['order'] = order
if text_match:
headers = {
'Accept': 'application/vnd.github.v3.full.text-match+json'
}
url = self._build_url('search', 'code')
return SearchIterator(number, url, CodeSearchResult, self, params,
etag, headers) | def function[search_code, parameter[self, query, sort, order, per_page, text_match, number, etag]]:
constant[Find code via the code search API.
The query can contain any combination of the following supported
qualifiers:
- ``in`` Qualifies which fields are searched. With this qualifier you
can restrict the search to just the file contents, the file path, or
both.
- ``language`` Searches code based on the language it’s written in.
- ``fork`` Specifies that code from forked repositories should be
searched. Repository forks will not be searchable unless the fork
has more stars than the parent repository.
- ``size`` Finds files that match a certain size (in bytes).
- ``path`` Specifies the path that the resulting file must be at.
- ``extension`` Matches files with a certain extension.
- ``user`` or ``repo`` Limits searches to a specific user or
repository.
For more information about these qualifiers, see: http://git.io/-DvAuA
:param str query: (required), a valid query as described above, e.g.,
``addClass in:file language:js repo:jquery/jquery``
:param str sort: (optional), how the results should be sorted;
option(s): ``indexed``; default: best match
:param str order: (optional), the direction of the sorted results,
options: ``asc``, ``desc``; default: ``desc``
:param int per_page: (optional)
:param bool text_match: (optional), if True, return matching search
terms. See http://git.io/iRmJxg for more information
:param int number: (optional), number of repositories to return.
Default: -1, returns all available repositories
:param str etag: (optional), previous ETag header value
:return: generator of :class:`CodeSearchResult
<github3.search.CodeSearchResult>`
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b0f990f0>], [<ast.Name object at 0x7da1b0f9a6b0>]]
variable[headers] assign[=] dictionary[[], []]
if compare[name[sort] equal[==] constant[indexed]] begin[:]
call[name[params]][constant[sort]] assign[=] name[sort]
if <ast.BoolOp object at 0x7da1b0e0f4f0> begin[:]
call[name[params]][constant[order]] assign[=] name[order]
if name[text_match] begin[:]
variable[headers] assign[=] dictionary[[<ast.Constant object at 0x7da1b0fefa60>], [<ast.Constant object at 0x7da1b0fed5a0>]]
variable[url] assign[=] call[name[self]._build_url, parameter[constant[search], constant[code]]]
return[call[name[SearchIterator], parameter[name[number], name[url], name[CodeSearchResult], name[self], name[params], name[etag], name[headers]]]] | keyword[def] identifier[search_code] ( identifier[self] , identifier[query] , identifier[sort] = keyword[None] , identifier[order] = keyword[None] , identifier[per_page] = keyword[None] ,
identifier[text_match] = keyword[False] , identifier[number] =- literal[int] , identifier[etag] = keyword[None] ):
literal[string]
identifier[params] ={ literal[string] : identifier[query] }
identifier[headers] ={}
keyword[if] identifier[sort] == literal[string] :
identifier[params] [ literal[string] ]= identifier[sort]
keyword[if] identifier[sort] keyword[and] identifier[order] keyword[in] ( literal[string] , literal[string] ):
identifier[params] [ literal[string] ]= identifier[order]
keyword[if] identifier[text_match] :
identifier[headers] ={
literal[string] : literal[string]
}
identifier[url] = identifier[self] . identifier[_build_url] ( literal[string] , literal[string] )
keyword[return] identifier[SearchIterator] ( identifier[number] , identifier[url] , identifier[CodeSearchResult] , identifier[self] , identifier[params] ,
identifier[etag] , identifier[headers] ) | def search_code(self, query, sort=None, order=None, per_page=None, text_match=False, number=-1, etag=None):
"""Find code via the code search API.
The query can contain any combination of the following supported
qualifiers:
- ``in`` Qualifies which fields are searched. With this qualifier you
can restrict the search to just the file contents, the file path, or
both.
- ``language`` Searches code based on the language it’s written in.
- ``fork`` Specifies that code from forked repositories should be
searched. Repository forks will not be searchable unless the fork
has more stars than the parent repository.
- ``size`` Finds files that match a certain size (in bytes).
- ``path`` Specifies the path that the resulting file must be at.
- ``extension`` Matches files with a certain extension.
- ``user`` or ``repo`` Limits searches to a specific user or
repository.
For more information about these qualifiers, see: http://git.io/-DvAuA
:param str query: (required), a valid query as described above, e.g.,
``addClass in:file language:js repo:jquery/jquery``
:param str sort: (optional), how the results should be sorted;
option(s): ``indexed``; default: best match
:param str order: (optional), the direction of the sorted results,
options: ``asc``, ``desc``; default: ``desc``
:param int per_page: (optional)
:param bool text_match: (optional), if True, return matching search
terms. See http://git.io/iRmJxg for more information
:param int number: (optional), number of repositories to return.
Default: -1, returns all available repositories
:param str etag: (optional), previous ETag header value
:return: generator of :class:`CodeSearchResult
<github3.search.CodeSearchResult>`
"""
params = {'q': query}
headers = {}
if sort == 'indexed':
params['sort'] = sort # depends on [control=['if'], data=['sort']]
if sort and order in ('asc', 'desc'):
params['order'] = order # depends on [control=['if'], data=[]]
if text_match:
headers = {'Accept': 'application/vnd.github.v3.full.text-match+json'} # depends on [control=['if'], data=[]]
url = self._build_url('search', 'code')
return SearchIterator(number, url, CodeSearchResult, self, params, etag, headers) |
def rotateZ(self, angle, axis_point=(0, 0, 0), rad=False):
"""Rotate around z-axis. If angle is in radians set ``rad=True``."""
if rad:
angle *= 57.29578
self.RotateZ(angle)
if self.trail:
self.updateTrail()
return self | def function[rotateZ, parameter[self, angle, axis_point, rad]]:
constant[Rotate around z-axis. If angle is in radians set ``rad=True``.]
if name[rad] begin[:]
<ast.AugAssign object at 0x7da20e957a90>
call[name[self].RotateZ, parameter[name[angle]]]
if name[self].trail begin[:]
call[name[self].updateTrail, parameter[]]
return[name[self]] | keyword[def] identifier[rotateZ] ( identifier[self] , identifier[angle] , identifier[axis_point] =( literal[int] , literal[int] , literal[int] ), identifier[rad] = keyword[False] ):
literal[string]
keyword[if] identifier[rad] :
identifier[angle] *= literal[int]
identifier[self] . identifier[RotateZ] ( identifier[angle] )
keyword[if] identifier[self] . identifier[trail] :
identifier[self] . identifier[updateTrail] ()
keyword[return] identifier[self] | def rotateZ(self, angle, axis_point=(0, 0, 0), rad=False):
"""Rotate around z-axis. If angle is in radians set ``rad=True``."""
if rad:
angle *= 57.29578 # depends on [control=['if'], data=[]]
self.RotateZ(angle)
if self.trail:
self.updateTrail() # depends on [control=['if'], data=[]]
return self |
def perform_authentication(self):
"""
Perform authentication on the incoming request.
"""
if not self.authenticators:
return
request.user = None
request.auth = None
for authenticator in self.authenticators:
auth_tuple = authenticator.authenticate()
if auth_tuple:
request.user = auth_tuple[0]
request.auth = auth_tuple[1]
break | def function[perform_authentication, parameter[self]]:
constant[
Perform authentication on the incoming request.
]
if <ast.UnaryOp object at 0x7da20c6e6d70> begin[:]
return[None]
name[request].user assign[=] constant[None]
name[request].auth assign[=] constant[None]
for taget[name[authenticator]] in starred[name[self].authenticators] begin[:]
variable[auth_tuple] assign[=] call[name[authenticator].authenticate, parameter[]]
if name[auth_tuple] begin[:]
name[request].user assign[=] call[name[auth_tuple]][constant[0]]
name[request].auth assign[=] call[name[auth_tuple]][constant[1]]
break | keyword[def] identifier[perform_authentication] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[authenticators] :
keyword[return]
identifier[request] . identifier[user] = keyword[None]
identifier[request] . identifier[auth] = keyword[None]
keyword[for] identifier[authenticator] keyword[in] identifier[self] . identifier[authenticators] :
identifier[auth_tuple] = identifier[authenticator] . identifier[authenticate] ()
keyword[if] identifier[auth_tuple] :
identifier[request] . identifier[user] = identifier[auth_tuple] [ literal[int] ]
identifier[request] . identifier[auth] = identifier[auth_tuple] [ literal[int] ]
keyword[break] | def perform_authentication(self):
"""
Perform authentication on the incoming request.
"""
if not self.authenticators:
return # depends on [control=['if'], data=[]]
request.user = None
request.auth = None
for authenticator in self.authenticators:
auth_tuple = authenticator.authenticate()
if auth_tuple:
request.user = auth_tuple[0]
request.auth = auth_tuple[1]
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['authenticator']] |
def describe(self, bucket, descriptor=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Set descriptor
if descriptor is not None:
self.__descriptors[bucket] = descriptor
# Get descriptor
else:
descriptor = self.__descriptors.get(bucket)
if descriptor is None:
table = self.__get_table(bucket)
descriptor = self.__mapper.restore_descriptor(
table.name, table.columns, table.constraints, self.__autoincrement)
return descriptor | def function[describe, parameter[self, bucket, descriptor]]:
constant[https://github.com/frictionlessdata/tableschema-sql-py#storage
]
if compare[name[descriptor] is_not constant[None]] begin[:]
call[name[self].__descriptors][name[bucket]] assign[=] name[descriptor]
return[name[descriptor]] | keyword[def] identifier[describe] ( identifier[self] , identifier[bucket] , identifier[descriptor] = keyword[None] ):
literal[string]
keyword[if] identifier[descriptor] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[__descriptors] [ identifier[bucket] ]= identifier[descriptor]
keyword[else] :
identifier[descriptor] = identifier[self] . identifier[__descriptors] . identifier[get] ( identifier[bucket] )
keyword[if] identifier[descriptor] keyword[is] keyword[None] :
identifier[table] = identifier[self] . identifier[__get_table] ( identifier[bucket] )
identifier[descriptor] = identifier[self] . identifier[__mapper] . identifier[restore_descriptor] (
identifier[table] . identifier[name] , identifier[table] . identifier[columns] , identifier[table] . identifier[constraints] , identifier[self] . identifier[__autoincrement] )
keyword[return] identifier[descriptor] | def describe(self, bucket, descriptor=None):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Set descriptor
if descriptor is not None:
self.__descriptors[bucket] = descriptor # depends on [control=['if'], data=['descriptor']]
else:
# Get descriptor
descriptor = self.__descriptors.get(bucket)
if descriptor is None:
table = self.__get_table(bucket)
descriptor = self.__mapper.restore_descriptor(table.name, table.columns, table.constraints, self.__autoincrement) # depends on [control=['if'], data=['descriptor']]
return descriptor |
def _syndromes(self, r, k=None):
'''Given the received codeword r in the form of a Polynomial object,
computes the syndromes and returns the syndrome polynomial.
Mathematically, it's essentially equivalent to a Fourrier Transform (Chien search being the inverse).
'''
n = self.n
if not k: k = self.k
# Note the + [GF2int(0)] : we add a 0 coefficient for the lowest degree (the constant). This effectively shifts the syndrome, and will shift every computations depending on the syndromes (such as the errors locator polynomial, errors evaluator polynomial, etc. but not the errors positions).
# This is not necessary as anyway syndromes are defined such as there are only non-zero coefficients (the only 0 is the shift of the constant here) and subsequent computations will/must account for the shift by skipping the first iteration (eg, the often seen range(1, n-k+1)), but you can also avoid prepending the 0 coeff and adapt every subsequent computations to start from 0 instead of 1.
return Polynomial( [r.evaluate( GF2int(self.generator)**(l+self.fcr) ) for l in _range(n-k-1, -1, -1)] + [GF2int(0)], keep_zero=True ) | def function[_syndromes, parameter[self, r, k]]:
constant[Given the received codeword r in the form of a Polynomial object,
computes the syndromes and returns the syndrome polynomial.
Mathematically, it's essentially equivalent to a Fourrier Transform (Chien search being the inverse).
]
variable[n] assign[=] name[self].n
if <ast.UnaryOp object at 0x7da18f00cb50> begin[:]
variable[k] assign[=] name[self].k
return[call[name[Polynomial], parameter[binary_operation[<ast.ListComp object at 0x7da18f00fcd0> + list[[<ast.Call object at 0x7da18f00caf0>]]]]]] | keyword[def] identifier[_syndromes] ( identifier[self] , identifier[r] , identifier[k] = keyword[None] ):
literal[string]
identifier[n] = identifier[self] . identifier[n]
keyword[if] keyword[not] identifier[k] : identifier[k] = identifier[self] . identifier[k]
keyword[return] identifier[Polynomial] ([ identifier[r] . identifier[evaluate] ( identifier[GF2int] ( identifier[self] . identifier[generator] )**( identifier[l] + identifier[self] . identifier[fcr] )) keyword[for] identifier[l] keyword[in] identifier[_range] ( identifier[n] - identifier[k] - literal[int] ,- literal[int] ,- literal[int] )]+[ identifier[GF2int] ( literal[int] )], identifier[keep_zero] = keyword[True] ) | def _syndromes(self, r, k=None):
"""Given the received codeword r in the form of a Polynomial object,
computes the syndromes and returns the syndrome polynomial.
Mathematically, it's essentially equivalent to a Fourrier Transform (Chien search being the inverse).
"""
n = self.n
if not k:
k = self.k # depends on [control=['if'], data=[]]
# Note the + [GF2int(0)] : we add a 0 coefficient for the lowest degree (the constant). This effectively shifts the syndrome, and will shift every computations depending on the syndromes (such as the errors locator polynomial, errors evaluator polynomial, etc. but not the errors positions).
# This is not necessary as anyway syndromes are defined such as there are only non-zero coefficients (the only 0 is the shift of the constant here) and subsequent computations will/must account for the shift by skipping the first iteration (eg, the often seen range(1, n-k+1)), but you can also avoid prepending the 0 coeff and adapt every subsequent computations to start from 0 instead of 1.
return Polynomial([r.evaluate(GF2int(self.generator) ** (l + self.fcr)) for l in _range(n - k - 1, -1, -1)] + [GF2int(0)], keep_zero=True) |
def _ncc_c_3dim(x, y):
"""
Variant of NCCc that operates with 2 dimensional X arrays and 2 dimensional
y vector
Returns a 3 dimensional array of normalized fourier transforms
"""
den = norm(x, axis=1)[:, None] * norm(y, axis=1)
den[den == 0] = np.Inf
x_len = x.shape[-1]
fft_size = 1 << (2*x_len-1).bit_length()
cc = ifft(fft(x, fft_size) * np.conj(fft(y, fft_size))[:, None])
cc = np.concatenate((cc[:,:,-(x_len-1):], cc[:,:,:x_len]), axis=2)
return np.real(cc) / den.T[:, :, None] | def function[_ncc_c_3dim, parameter[x, y]]:
constant[
Variant of NCCc that operates with 2 dimensional X arrays and 2 dimensional
y vector
Returns a 3 dimensional array of normalized fourier transforms
]
variable[den] assign[=] binary_operation[call[call[name[norm], parameter[name[x]]]][tuple[[<ast.Slice object at 0x7da20c6e6ef0>, <ast.Constant object at 0x7da20c6e5cc0>]]] * call[name[norm], parameter[name[y]]]]
call[name[den]][compare[name[den] equal[==] constant[0]]] assign[=] name[np].Inf
variable[x_len] assign[=] call[name[x].shape][<ast.UnaryOp object at 0x7da20c6e4ee0>]
variable[fft_size] assign[=] binary_operation[constant[1] <ast.LShift object at 0x7da2590d69e0> call[binary_operation[binary_operation[constant[2] * name[x_len]] - constant[1]].bit_length, parameter[]]]
variable[cc] assign[=] call[name[ifft], parameter[binary_operation[call[name[fft], parameter[name[x], name[fft_size]]] * call[call[name[np].conj, parameter[call[name[fft], parameter[name[y], name[fft_size]]]]]][tuple[[<ast.Slice object at 0x7da1b1913610>, <ast.Constant object at 0x7da1b1913f40>]]]]]]
variable[cc] assign[=] call[name[np].concatenate, parameter[tuple[[<ast.Subscript object at 0x7da1b1912350>, <ast.Subscript object at 0x7da1b1912bc0>]]]]
return[binary_operation[call[name[np].real, parameter[name[cc]]] / call[name[den].T][tuple[[<ast.Slice object at 0x7da20c6e6c20>, <ast.Slice object at 0x7da20c6e7be0>, <ast.Constant object at 0x7da20c6e69e0>]]]]] | keyword[def] identifier[_ncc_c_3dim] ( identifier[x] , identifier[y] ):
literal[string]
identifier[den] = identifier[norm] ( identifier[x] , identifier[axis] = literal[int] )[:, keyword[None] ]* identifier[norm] ( identifier[y] , identifier[axis] = literal[int] )
identifier[den] [ identifier[den] == literal[int] ]= identifier[np] . identifier[Inf]
identifier[x_len] = identifier[x] . identifier[shape] [- literal[int] ]
identifier[fft_size] = literal[int] <<( literal[int] * identifier[x_len] - literal[int] ). identifier[bit_length] ()
identifier[cc] = identifier[ifft] ( identifier[fft] ( identifier[x] , identifier[fft_size] )* identifier[np] . identifier[conj] ( identifier[fft] ( identifier[y] , identifier[fft_size] ))[:, keyword[None] ])
identifier[cc] = identifier[np] . identifier[concatenate] (( identifier[cc] [:,:,-( identifier[x_len] - literal[int] ):], identifier[cc] [:,:,: identifier[x_len] ]), identifier[axis] = literal[int] )
keyword[return] identifier[np] . identifier[real] ( identifier[cc] )/ identifier[den] . identifier[T] [:,:, keyword[None] ] | def _ncc_c_3dim(x, y):
"""
Variant of NCCc that operates with 2 dimensional X arrays and 2 dimensional
y vector
Returns a 3 dimensional array of normalized fourier transforms
"""
den = norm(x, axis=1)[:, None] * norm(y, axis=1)
den[den == 0] = np.Inf
x_len = x.shape[-1]
fft_size = 1 << (2 * x_len - 1).bit_length()
cc = ifft(fft(x, fft_size) * np.conj(fft(y, fft_size))[:, None])
cc = np.concatenate((cc[:, :, -(x_len - 1):], cc[:, :, :x_len]), axis=2)
return np.real(cc) / den.T[:, :, None] |
def search(self, query, index='default', **kwargs):
"""
kwargs supported are the parameters listed at:
http://www.elasticsearch.org/guide/reference/api/search/request-body/
Namely: timeout, from, size and search_type.
IMPORTANT: prepend ALL keys with "es_" as pyelasticsearch requires this
"""
# Looking up the index
if index not in self.conf.indexes:
self.raise_improperly_configured(index=index)
# Calling the backend search method
esurl = self.conf.connections[index]['URL']
esinst = pyelasticsearch.ElasticSearch(esurl)
query = isinstance(query, Query) and str(query) or query
self.raw_results = esinst.search(query, index=index, **kwargs)
return self | def function[search, parameter[self, query, index]]:
constant[
kwargs supported are the parameters listed at:
http://www.elasticsearch.org/guide/reference/api/search/request-body/
Namely: timeout, from, size and search_type.
IMPORTANT: prepend ALL keys with "es_" as pyelasticsearch requires this
]
if compare[name[index] <ast.NotIn object at 0x7da2590d7190> name[self].conf.indexes] begin[:]
call[name[self].raise_improperly_configured, parameter[]]
variable[esurl] assign[=] call[call[name[self].conf.connections][name[index]]][constant[URL]]
variable[esinst] assign[=] call[name[pyelasticsearch].ElasticSearch, parameter[name[esurl]]]
variable[query] assign[=] <ast.BoolOp object at 0x7da207f02d70>
name[self].raw_results assign[=] call[name[esinst].search, parameter[name[query]]]
return[name[self]] | keyword[def] identifier[search] ( identifier[self] , identifier[query] , identifier[index] = literal[string] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[index] keyword[not] keyword[in] identifier[self] . identifier[conf] . identifier[indexes] :
identifier[self] . identifier[raise_improperly_configured] ( identifier[index] = identifier[index] )
identifier[esurl] = identifier[self] . identifier[conf] . identifier[connections] [ identifier[index] ][ literal[string] ]
identifier[esinst] = identifier[pyelasticsearch] . identifier[ElasticSearch] ( identifier[esurl] )
identifier[query] = identifier[isinstance] ( identifier[query] , identifier[Query] ) keyword[and] identifier[str] ( identifier[query] ) keyword[or] identifier[query]
identifier[self] . identifier[raw_results] = identifier[esinst] . identifier[search] ( identifier[query] , identifier[index] = identifier[index] ,** identifier[kwargs] )
keyword[return] identifier[self] | def search(self, query, index='default', **kwargs):
"""
kwargs supported are the parameters listed at:
http://www.elasticsearch.org/guide/reference/api/search/request-body/
Namely: timeout, from, size and search_type.
IMPORTANT: prepend ALL keys with "es_" as pyelasticsearch requires this
"""
# Looking up the index
if index not in self.conf.indexes:
self.raise_improperly_configured(index=index) # depends on [control=['if'], data=['index']]
# Calling the backend search method
esurl = self.conf.connections[index]['URL']
esinst = pyelasticsearch.ElasticSearch(esurl)
query = isinstance(query, Query) and str(query) or query
self.raw_results = esinst.search(query, index=index, **kwargs)
return self |
def do_map(*args, **kwargs):
"""Applies a filter on a sequence of objects or looks up an attribute.
This is useful when dealing with lists of objects but you are really
only interested in a certain value of it.
The basic usage is mapping on an attribute. Imagine you have a list
of users but you are only interested in a list of usernames:
.. sourcecode:: jinja
Users on this page: {{ users|map(attribute='username')|join(', ') }}
Alternatively you can let it invoke a filter by passing the name of the
filter and the arguments afterwards. A good example would be applying a
text conversion filter on a sequence:
.. sourcecode:: jinja
Users on this page: {{ titles|map('lower')|join(', ') }}
.. versionadded:: 2.7
"""
context = args[0]
seq = args[1]
if len(args) == 2 and 'attribute' in kwargs:
attribute = kwargs.pop('attribute')
if kwargs:
raise FilterArgumentError('Unexpected keyword argument %r' %
next(iter(kwargs)))
func = make_attrgetter(context.environment, attribute)
else:
try:
name = args[2]
args = args[3:]
except LookupError:
raise FilterArgumentError('map requires a filter argument')
func = lambda item: context.environment.call_filter(
name, item, args, kwargs, context=context)
if seq:
for item in seq:
yield func(item) | def function[do_map, parameter[]]:
constant[Applies a filter on a sequence of objects or looks up an attribute.
This is useful when dealing with lists of objects but you are really
only interested in a certain value of it.
The basic usage is mapping on an attribute. Imagine you have a list
of users but you are only interested in a list of usernames:
.. sourcecode:: jinja
Users on this page: {{ users|map(attribute='username')|join(', ') }}
Alternatively you can let it invoke a filter by passing the name of the
filter and the arguments afterwards. A good example would be applying a
text conversion filter on a sequence:
.. sourcecode:: jinja
Users on this page: {{ titles|map('lower')|join(', ') }}
.. versionadded:: 2.7
]
variable[context] assign[=] call[name[args]][constant[0]]
variable[seq] assign[=] call[name[args]][constant[1]]
if <ast.BoolOp object at 0x7da2044c0eb0> begin[:]
variable[attribute] assign[=] call[name[kwargs].pop, parameter[constant[attribute]]]
if name[kwargs] begin[:]
<ast.Raise object at 0x7da20c6a99f0>
variable[func] assign[=] call[name[make_attrgetter], parameter[name[context].environment, name[attribute]]]
if name[seq] begin[:]
for taget[name[item]] in starred[name[seq]] begin[:]
<ast.Yield object at 0x7da20c6aa9b0> | keyword[def] identifier[do_map] (* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[context] = identifier[args] [ literal[int] ]
identifier[seq] = identifier[args] [ literal[int] ]
keyword[if] identifier[len] ( identifier[args] )== literal[int] keyword[and] literal[string] keyword[in] identifier[kwargs] :
identifier[attribute] = identifier[kwargs] . identifier[pop] ( literal[string] )
keyword[if] identifier[kwargs] :
keyword[raise] identifier[FilterArgumentError] ( literal[string] %
identifier[next] ( identifier[iter] ( identifier[kwargs] )))
identifier[func] = identifier[make_attrgetter] ( identifier[context] . identifier[environment] , identifier[attribute] )
keyword[else] :
keyword[try] :
identifier[name] = identifier[args] [ literal[int] ]
identifier[args] = identifier[args] [ literal[int] :]
keyword[except] identifier[LookupError] :
keyword[raise] identifier[FilterArgumentError] ( literal[string] )
identifier[func] = keyword[lambda] identifier[item] : identifier[context] . identifier[environment] . identifier[call_filter] (
identifier[name] , identifier[item] , identifier[args] , identifier[kwargs] , identifier[context] = identifier[context] )
keyword[if] identifier[seq] :
keyword[for] identifier[item] keyword[in] identifier[seq] :
keyword[yield] identifier[func] ( identifier[item] ) | def do_map(*args, **kwargs):
"""Applies a filter on a sequence of objects or looks up an attribute.
This is useful when dealing with lists of objects but you are really
only interested in a certain value of it.
The basic usage is mapping on an attribute. Imagine you have a list
of users but you are only interested in a list of usernames:
.. sourcecode:: jinja
Users on this page: {{ users|map(attribute='username')|join(', ') }}
Alternatively you can let it invoke a filter by passing the name of the
filter and the arguments afterwards. A good example would be applying a
text conversion filter on a sequence:
.. sourcecode:: jinja
Users on this page: {{ titles|map('lower')|join(', ') }}
.. versionadded:: 2.7
"""
context = args[0]
seq = args[1]
if len(args) == 2 and 'attribute' in kwargs:
attribute = kwargs.pop('attribute')
if kwargs:
raise FilterArgumentError('Unexpected keyword argument %r' % next(iter(kwargs))) # depends on [control=['if'], data=[]]
func = make_attrgetter(context.environment, attribute) # depends on [control=['if'], data=[]]
else:
try:
name = args[2]
args = args[3:] # depends on [control=['try'], data=[]]
except LookupError:
raise FilterArgumentError('map requires a filter argument') # depends on [control=['except'], data=[]]
func = lambda item: context.environment.call_filter(name, item, args, kwargs, context=context)
if seq:
for item in seq:
yield func(item) # depends on [control=['for'], data=['item']] # depends on [control=['if'], data=[]] |
def reminders_add(self, *, text: str, time: str, **kwargs) -> SlackResponse:
"""Creates a reminder.
Args:
text (str): The content of the reminder. e.g. 'eat a banana'
time (str): When this reminder should happen:
the Unix timestamp (up to five years from now e.g. '1602288000'),
the number of seconds until the reminder (if within 24 hours),
or a natural language description (Ex. 'in 15 minutes' or 'every Thursday')
"""
self._validate_xoxp_token()
kwargs.update({"text": text, "time": time})
return self.api_call("reminders.add", json=kwargs) | def function[reminders_add, parameter[self]]:
constant[Creates a reminder.
Args:
text (str): The content of the reminder. e.g. 'eat a banana'
time (str): When this reminder should happen:
the Unix timestamp (up to five years from now e.g. '1602288000'),
the number of seconds until the reminder (if within 24 hours),
or a natural language description (Ex. 'in 15 minutes' or 'every Thursday')
]
call[name[self]._validate_xoxp_token, parameter[]]
call[name[kwargs].update, parameter[dictionary[[<ast.Constant object at 0x7da1b1beeec0>, <ast.Constant object at 0x7da1b1bed3f0>], [<ast.Name object at 0x7da1b1bec520>, <ast.Name object at 0x7da1b1beec50>]]]]
return[call[name[self].api_call, parameter[constant[reminders.add]]]] | keyword[def] identifier[reminders_add] ( identifier[self] ,*, identifier[text] : identifier[str] , identifier[time] : identifier[str] ,** identifier[kwargs] )-> identifier[SlackResponse] :
literal[string]
identifier[self] . identifier[_validate_xoxp_token] ()
identifier[kwargs] . identifier[update] ({ literal[string] : identifier[text] , literal[string] : identifier[time] })
keyword[return] identifier[self] . identifier[api_call] ( literal[string] , identifier[json] = identifier[kwargs] ) | def reminders_add(self, *, text: str, time: str, **kwargs) -> SlackResponse:
"""Creates a reminder.
Args:
text (str): The content of the reminder. e.g. 'eat a banana'
time (str): When this reminder should happen:
the Unix timestamp (up to five years from now e.g. '1602288000'),
the number of seconds until the reminder (if within 24 hours),
or a natural language description (Ex. 'in 15 minutes' or 'every Thursday')
"""
self._validate_xoxp_token()
kwargs.update({'text': text, 'time': time})
return self.api_call('reminders.add', json=kwargs) |
def get_dataset_files(self, dataset_id, glob=".", is_dir=False, version_number=None):
"""
Retrieves URLs for the files matched by a glob or a path to a directory
in a given dataset.
:param dataset_id: The id of the dataset to retrieve files from
:type dataset_id: int
:param glob: A regex used to select one or more files in the dataset
:type glob: str
:param is_dir: Whether or not the supplied pattern should be treated as a directory to search in
:type is_dir: bool
:param version_number: The version number of the dataset to retrieve files from
:type version_number: int
:return: A list of dataset files whose paths match the provided pattern.
:rtype: list of :class:`DatasetFile`
"""
if version_number is None:
latest = True
else:
latest = False
data = {
"download_request": {
"glob": glob,
"isDir": is_dir,
"latest": latest
}
}
failure_message = "Failed to get matched files in dataset {}".format(dataset_id)
versions = self._get_success_json(self._post_json(routes.matched_files(dataset_id), data, failure_message=failure_message))['versions']
# if you don't provide a version number, only the latest
# will be included in the response body
if version_number is None:
version = versions[0]
else:
try:
version = list(filter(lambda v: v['number'] == version_number, versions))[0]
except IndexError:
raise ResourceNotFoundException()
return list(
map(
lambda f: DatasetFile(path=f['filename'], url=f['url']), version['files']
)
) | def function[get_dataset_files, parameter[self, dataset_id, glob, is_dir, version_number]]:
constant[
Retrieves URLs for the files matched by a glob or a path to a directory
in a given dataset.
:param dataset_id: The id of the dataset to retrieve files from
:type dataset_id: int
:param glob: A regex used to select one or more files in the dataset
:type glob: str
:param is_dir: Whether or not the supplied pattern should be treated as a directory to search in
:type is_dir: bool
:param version_number: The version number of the dataset to retrieve files from
:type version_number: int
:return: A list of dataset files whose paths match the provided pattern.
:rtype: list of :class:`DatasetFile`
]
if compare[name[version_number] is constant[None]] begin[:]
variable[latest] assign[=] constant[True]
variable[data] assign[=] dictionary[[<ast.Constant object at 0x7da2044c1d50>], [<ast.Dict object at 0x7da2044c3460>]]
variable[failure_message] assign[=] call[constant[Failed to get matched files in dataset {}].format, parameter[name[dataset_id]]]
variable[versions] assign[=] call[call[name[self]._get_success_json, parameter[call[name[self]._post_json, parameter[call[name[routes].matched_files, parameter[name[dataset_id]]], name[data]]]]]][constant[versions]]
if compare[name[version_number] is constant[None]] begin[:]
variable[version] assign[=] call[name[versions]][constant[0]]
return[call[name[list], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da2044c3130>, call[name[version]][constant[files]]]]]]] | keyword[def] identifier[get_dataset_files] ( identifier[self] , identifier[dataset_id] , identifier[glob] = literal[string] , identifier[is_dir] = keyword[False] , identifier[version_number] = keyword[None] ):
literal[string]
keyword[if] identifier[version_number] keyword[is] keyword[None] :
identifier[latest] = keyword[True]
keyword[else] :
identifier[latest] = keyword[False]
identifier[data] ={
literal[string] :{
literal[string] : identifier[glob] ,
literal[string] : identifier[is_dir] ,
literal[string] : identifier[latest]
}
}
identifier[failure_message] = literal[string] . identifier[format] ( identifier[dataset_id] )
identifier[versions] = identifier[self] . identifier[_get_success_json] ( identifier[self] . identifier[_post_json] ( identifier[routes] . identifier[matched_files] ( identifier[dataset_id] ), identifier[data] , identifier[failure_message] = identifier[failure_message] ))[ literal[string] ]
keyword[if] identifier[version_number] keyword[is] keyword[None] :
identifier[version] = identifier[versions] [ literal[int] ]
keyword[else] :
keyword[try] :
identifier[version] = identifier[list] ( identifier[filter] ( keyword[lambda] identifier[v] : identifier[v] [ literal[string] ]== identifier[version_number] , identifier[versions] ))[ literal[int] ]
keyword[except] identifier[IndexError] :
keyword[raise] identifier[ResourceNotFoundException] ()
keyword[return] identifier[list] (
identifier[map] (
keyword[lambda] identifier[f] : identifier[DatasetFile] ( identifier[path] = identifier[f] [ literal[string] ], identifier[url] = identifier[f] [ literal[string] ]), identifier[version] [ literal[string] ]
)
) | def get_dataset_files(self, dataset_id, glob='.', is_dir=False, version_number=None):
"""
Retrieves URLs for the files matched by a glob or a path to a directory
in a given dataset.
:param dataset_id: The id of the dataset to retrieve files from
:type dataset_id: int
:param glob: A regex used to select one or more files in the dataset
:type glob: str
:param is_dir: Whether or not the supplied pattern should be treated as a directory to search in
:type is_dir: bool
:param version_number: The version number of the dataset to retrieve files from
:type version_number: int
:return: A list of dataset files whose paths match the provided pattern.
:rtype: list of :class:`DatasetFile`
"""
if version_number is None:
latest = True # depends on [control=['if'], data=[]]
else:
latest = False
data = {'download_request': {'glob': glob, 'isDir': is_dir, 'latest': latest}}
failure_message = 'Failed to get matched files in dataset {}'.format(dataset_id)
versions = self._get_success_json(self._post_json(routes.matched_files(dataset_id), data, failure_message=failure_message))['versions']
# if you don't provide a version number, only the latest
# will be included in the response body
if version_number is None:
version = versions[0] # depends on [control=['if'], data=[]]
else:
try:
version = list(filter(lambda v: v['number'] == version_number, versions))[0] # depends on [control=['try'], data=[]]
except IndexError:
raise ResourceNotFoundException() # depends on [control=['except'], data=[]]
return list(map(lambda f: DatasetFile(path=f['filename'], url=f['url']), version['files'])) |
def _deleteRecordsFromKNN(self, recordsToDelete):
"""
This method will remove the given records from the classifier.
parameters
------------
recordsToDelete - list of records to delete from the classififier
"""
classifier = self.htm_prediction_model._getAnomalyClassifier()
knn = classifier.getSelf()._knn
prototype_idx = classifier.getSelf().getParameter('categoryRecencyList')
idsToDelete = [r.ROWID for r in recordsToDelete if \
not r.setByUser and r.ROWID in prototype_idx]
nProtos = knn._numPatterns
knn.removeIds(idsToDelete)
assert knn._numPatterns == nProtos - len(idsToDelete) | def function[_deleteRecordsFromKNN, parameter[self, recordsToDelete]]:
constant[
This method will remove the given records from the classifier.
parameters
------------
recordsToDelete - list of records to delete from the classififier
]
variable[classifier] assign[=] call[name[self].htm_prediction_model._getAnomalyClassifier, parameter[]]
variable[knn] assign[=] call[name[classifier].getSelf, parameter[]]._knn
variable[prototype_idx] assign[=] call[call[name[classifier].getSelf, parameter[]].getParameter, parameter[constant[categoryRecencyList]]]
variable[idsToDelete] assign[=] <ast.ListComp object at 0x7da18dc04fd0>
variable[nProtos] assign[=] name[knn]._numPatterns
call[name[knn].removeIds, parameter[name[idsToDelete]]]
assert[compare[name[knn]._numPatterns equal[==] binary_operation[name[nProtos] - call[name[len], parameter[name[idsToDelete]]]]]] | keyword[def] identifier[_deleteRecordsFromKNN] ( identifier[self] , identifier[recordsToDelete] ):
literal[string]
identifier[classifier] = identifier[self] . identifier[htm_prediction_model] . identifier[_getAnomalyClassifier] ()
identifier[knn] = identifier[classifier] . identifier[getSelf] (). identifier[_knn]
identifier[prototype_idx] = identifier[classifier] . identifier[getSelf] (). identifier[getParameter] ( literal[string] )
identifier[idsToDelete] =[ identifier[r] . identifier[ROWID] keyword[for] identifier[r] keyword[in] identifier[recordsToDelete] keyword[if] keyword[not] identifier[r] . identifier[setByUser] keyword[and] identifier[r] . identifier[ROWID] keyword[in] identifier[prototype_idx] ]
identifier[nProtos] = identifier[knn] . identifier[_numPatterns]
identifier[knn] . identifier[removeIds] ( identifier[idsToDelete] )
keyword[assert] identifier[knn] . identifier[_numPatterns] == identifier[nProtos] - identifier[len] ( identifier[idsToDelete] ) | def _deleteRecordsFromKNN(self, recordsToDelete):
"""
This method will remove the given records from the classifier.
parameters
------------
recordsToDelete - list of records to delete from the classififier
"""
classifier = self.htm_prediction_model._getAnomalyClassifier()
knn = classifier.getSelf()._knn
prototype_idx = classifier.getSelf().getParameter('categoryRecencyList')
idsToDelete = [r.ROWID for r in recordsToDelete if not r.setByUser and r.ROWID in prototype_idx]
nProtos = knn._numPatterns
knn.removeIds(idsToDelete)
assert knn._numPatterns == nProtos - len(idsToDelete) |
def dens_alum_nanocluster(coag):
"""Return the density of the aluminum in the nanocluster.
This is useful for determining the volume of nanoclusters
given a concentration of aluminum.
"""
density = (coag.PrecipDensity * MOLEC_WEIGHT_ALUMINUM
* coag.PrecipAluminumMPM / coag.PrecipMolecWeight)
return density | def function[dens_alum_nanocluster, parameter[coag]]:
constant[Return the density of the aluminum in the nanocluster.
This is useful for determining the volume of nanoclusters
given a concentration of aluminum.
]
variable[density] assign[=] binary_operation[binary_operation[binary_operation[name[coag].PrecipDensity * name[MOLEC_WEIGHT_ALUMINUM]] * name[coag].PrecipAluminumMPM] / name[coag].PrecipMolecWeight]
return[name[density]] | keyword[def] identifier[dens_alum_nanocluster] ( identifier[coag] ):
literal[string]
identifier[density] =( identifier[coag] . identifier[PrecipDensity] * identifier[MOLEC_WEIGHT_ALUMINUM]
* identifier[coag] . identifier[PrecipAluminumMPM] / identifier[coag] . identifier[PrecipMolecWeight] )
keyword[return] identifier[density] | def dens_alum_nanocluster(coag):
"""Return the density of the aluminum in the nanocluster.
This is useful for determining the volume of nanoclusters
given a concentration of aluminum.
"""
density = coag.PrecipDensity * MOLEC_WEIGHT_ALUMINUM * coag.PrecipAluminumMPM / coag.PrecipMolecWeight
return density |
def as_int(n):
"""
Convert the argument to a builtin integer.
The return value is guaranteed to be equal to the input. ValueError is
raised if the input has a non-integral value.
Examples
========
>>> from sympy.core.compatibility import as_int
>>> from sympy import sqrt
>>> 3.0
3.0
>>> as_int(3.0) # convert to int and test for equality
3
>>> int(sqrt(10))
3
>>> as_int(sqrt(10))
Traceback (most recent call last):
...
ValueError: ... is not an integer
"""
try:
result = int(n)
if result != n:
raise TypeError
except TypeError:
raise ValueError('%s is not an integer' % n)
return result | def function[as_int, parameter[n]]:
constant[
Convert the argument to a builtin integer.
The return value is guaranteed to be equal to the input. ValueError is
raised if the input has a non-integral value.
Examples
========
>>> from sympy.core.compatibility import as_int
>>> from sympy import sqrt
>>> 3.0
3.0
>>> as_int(3.0) # convert to int and test for equality
3
>>> int(sqrt(10))
3
>>> as_int(sqrt(10))
Traceback (most recent call last):
...
ValueError: ... is not an integer
]
<ast.Try object at 0x7da1b11a7820>
return[name[result]] | keyword[def] identifier[as_int] ( identifier[n] ):
literal[string]
keyword[try] :
identifier[result] = identifier[int] ( identifier[n] )
keyword[if] identifier[result] != identifier[n] :
keyword[raise] identifier[TypeError]
keyword[except] identifier[TypeError] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[n] )
keyword[return] identifier[result] | def as_int(n):
"""
Convert the argument to a builtin integer.
The return value is guaranteed to be equal to the input. ValueError is
raised if the input has a non-integral value.
Examples
========
>>> from sympy.core.compatibility import as_int
>>> from sympy import sqrt
>>> 3.0
3.0
>>> as_int(3.0) # convert to int and test for equality
3
>>> int(sqrt(10))
3
>>> as_int(sqrt(10))
Traceback (most recent call last):
...
ValueError: ... is not an integer
"""
try:
result = int(n)
if result != n:
raise TypeError # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except TypeError:
raise ValueError('%s is not an integer' % n) # depends on [control=['except'], data=[]]
return result |
def get_record(self):
"""Override the base get_record."""
self.update_system_numbers()
self.add_systemnumber("CDS")
self.fields_list = [
"024", "041", "035", "037", "088", "100",
"110", "111", "242", "245", "246", "260",
"269", "300", "502", "650", "653", "693",
"700", "710", "773", "856", "520", "500",
"980"
]
self.keep_only_fields()
self.determine_collections()
self.add_cms_link()
self.update_languages()
self.update_reportnumbers()
self.update_date()
self.update_pagenumber()
self.update_authors()
self.update_subject_categories("SzGeCERN", "INSPIRE", "categories_inspire")
self.update_keywords()
self.update_experiments()
self.update_collaboration()
self.update_journals()
self.update_links_and_ffts()
if 'THESIS' in self.collections:
self.update_thesis_supervisors()
self.update_thesis_information()
if 'NOTE' in self.collections:
self.add_notes()
for collection in self.collections:
record_add_field(self.record,
tag='980',
subfields=[('a', collection)])
self.remove_controlfields()
return self.record | def function[get_record, parameter[self]]:
constant[Override the base get_record.]
call[name[self].update_system_numbers, parameter[]]
call[name[self].add_systemnumber, parameter[constant[CDS]]]
name[self].fields_list assign[=] list[[<ast.Constant object at 0x7da207f014b0>, <ast.Constant object at 0x7da207f030d0>, <ast.Constant object at 0x7da207f01f60>, <ast.Constant object at 0x7da207f013c0>, <ast.Constant object at 0x7da207f01690>, <ast.Constant object at 0x7da207f03220>, <ast.Constant object at 0x7da207f02740>, <ast.Constant object at 0x7da207f01f00>, <ast.Constant object at 0x7da207f026e0>, <ast.Constant object at 0x7da207f01ed0>, <ast.Constant object at 0x7da207f024d0>, <ast.Constant object at 0x7da207f004c0>, <ast.Constant object at 0x7da207f00c40>, <ast.Constant object at 0x7da207f02170>, <ast.Constant object at 0x7da207f00f10>, <ast.Constant object at 0x7da207f03a90>, <ast.Constant object at 0x7da207f02830>, <ast.Constant object at 0x7da207f01e70>, <ast.Constant object at 0x7da207f03eb0>, <ast.Constant object at 0x7da207f02ec0>, <ast.Constant object at 0x7da207f00460>, <ast.Constant object at 0x7da207f02d70>, <ast.Constant object at 0x7da207f00a60>, <ast.Constant object at 0x7da207f03f40>, <ast.Constant object at 0x7da207f017e0>]]
call[name[self].keep_only_fields, parameter[]]
call[name[self].determine_collections, parameter[]]
call[name[self].add_cms_link, parameter[]]
call[name[self].update_languages, parameter[]]
call[name[self].update_reportnumbers, parameter[]]
call[name[self].update_date, parameter[]]
call[name[self].update_pagenumber, parameter[]]
call[name[self].update_authors, parameter[]]
call[name[self].update_subject_categories, parameter[constant[SzGeCERN], constant[INSPIRE], constant[categories_inspire]]]
call[name[self].update_keywords, parameter[]]
call[name[self].update_experiments, parameter[]]
call[name[self].update_collaboration, parameter[]]
call[name[self].update_journals, parameter[]]
call[name[self].update_links_and_ffts, parameter[]]
if compare[constant[THESIS] in name[self].collections] begin[:]
call[name[self].update_thesis_supervisors, parameter[]]
call[name[self].update_thesis_information, parameter[]]
if compare[constant[NOTE] in name[self].collections] begin[:]
call[name[self].add_notes, parameter[]]
for taget[name[collection]] in starred[name[self].collections] begin[:]
call[name[record_add_field], parameter[name[self].record]]
call[name[self].remove_controlfields, parameter[]]
return[name[self].record] | keyword[def] identifier[get_record] ( identifier[self] ):
literal[string]
identifier[self] . identifier[update_system_numbers] ()
identifier[self] . identifier[add_systemnumber] ( literal[string] )
identifier[self] . identifier[fields_list] =[
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string]
]
identifier[self] . identifier[keep_only_fields] ()
identifier[self] . identifier[determine_collections] ()
identifier[self] . identifier[add_cms_link] ()
identifier[self] . identifier[update_languages] ()
identifier[self] . identifier[update_reportnumbers] ()
identifier[self] . identifier[update_date] ()
identifier[self] . identifier[update_pagenumber] ()
identifier[self] . identifier[update_authors] ()
identifier[self] . identifier[update_subject_categories] ( literal[string] , literal[string] , literal[string] )
identifier[self] . identifier[update_keywords] ()
identifier[self] . identifier[update_experiments] ()
identifier[self] . identifier[update_collaboration] ()
identifier[self] . identifier[update_journals] ()
identifier[self] . identifier[update_links_and_ffts] ()
keyword[if] literal[string] keyword[in] identifier[self] . identifier[collections] :
identifier[self] . identifier[update_thesis_supervisors] ()
identifier[self] . identifier[update_thesis_information] ()
keyword[if] literal[string] keyword[in] identifier[self] . identifier[collections] :
identifier[self] . identifier[add_notes] ()
keyword[for] identifier[collection] keyword[in] identifier[self] . identifier[collections] :
identifier[record_add_field] ( identifier[self] . identifier[record] ,
identifier[tag] = literal[string] ,
identifier[subfields] =[( literal[string] , identifier[collection] )])
identifier[self] . identifier[remove_controlfields] ()
keyword[return] identifier[self] . identifier[record] | def get_record(self):
"""Override the base get_record."""
self.update_system_numbers()
self.add_systemnumber('CDS')
self.fields_list = ['024', '041', '035', '037', '088', '100', '110', '111', '242', '245', '246', '260', '269', '300', '502', '650', '653', '693', '700', '710', '773', '856', '520', '500', '980']
self.keep_only_fields()
self.determine_collections()
self.add_cms_link()
self.update_languages()
self.update_reportnumbers()
self.update_date()
self.update_pagenumber()
self.update_authors()
self.update_subject_categories('SzGeCERN', 'INSPIRE', 'categories_inspire')
self.update_keywords()
self.update_experiments()
self.update_collaboration()
self.update_journals()
self.update_links_and_ffts()
if 'THESIS' in self.collections:
self.update_thesis_supervisors()
self.update_thesis_information() # depends on [control=['if'], data=[]]
if 'NOTE' in self.collections:
self.add_notes() # depends on [control=['if'], data=[]]
for collection in self.collections:
record_add_field(self.record, tag='980', subfields=[('a', collection)]) # depends on [control=['for'], data=['collection']]
self.remove_controlfields()
return self.record |
def quarter_to_daterange(quarter):
"""Convert a quarter in arbitrary filename-ready format (e.g. 2015Q1)
into start and end datetimes"""
assert len(quarter) == 6
year = int(quarter[0:4])
quarter = quarter[5]
MONTH_DAY = {
'1': ((1, 1), (3, 31)),
'2': ((4, 1), (6, 30)),
'3': ((7, 1), (9, 30)),
'4': ((10, 1), (12, 31))
}
md = MONTH_DAY[quarter]
start_md, end_md = md
return (
date(year, *start_md),
date(year, *end_md)
) | def function[quarter_to_daterange, parameter[quarter]]:
constant[Convert a quarter in arbitrary filename-ready format (e.g. 2015Q1)
into start and end datetimes]
assert[compare[call[name[len], parameter[name[quarter]]] equal[==] constant[6]]]
variable[year] assign[=] call[name[int], parameter[call[name[quarter]][<ast.Slice object at 0x7da1b26af370>]]]
variable[quarter] assign[=] call[name[quarter]][constant[5]]
variable[MONTH_DAY] assign[=] dictionary[[<ast.Constant object at 0x7da1b26ad600>, <ast.Constant object at 0x7da1b26ad4e0>, <ast.Constant object at 0x7da1b26ad720>, <ast.Constant object at 0x7da1b26accd0>], [<ast.Tuple object at 0x7da1b26ac6a0>, <ast.Tuple object at 0x7da1b26acf70>, <ast.Tuple object at 0x7da1b26ac850>, <ast.Tuple object at 0x7da1b26acdc0>]]
variable[md] assign[=] call[name[MONTH_DAY]][name[quarter]]
<ast.Tuple object at 0x7da1b26ace20> assign[=] name[md]
return[tuple[[<ast.Call object at 0x7da1b26addb0>, <ast.Call object at 0x7da1b26ad6f0>]]] | keyword[def] identifier[quarter_to_daterange] ( identifier[quarter] ):
literal[string]
keyword[assert] identifier[len] ( identifier[quarter] )== literal[int]
identifier[year] = identifier[int] ( identifier[quarter] [ literal[int] : literal[int] ])
identifier[quarter] = identifier[quarter] [ literal[int] ]
identifier[MONTH_DAY] ={
literal[string] :(( literal[int] , literal[int] ),( literal[int] , literal[int] )),
literal[string] :(( literal[int] , literal[int] ),( literal[int] , literal[int] )),
literal[string] :(( literal[int] , literal[int] ),( literal[int] , literal[int] )),
literal[string] :(( literal[int] , literal[int] ),( literal[int] , literal[int] ))
}
identifier[md] = identifier[MONTH_DAY] [ identifier[quarter] ]
identifier[start_md] , identifier[end_md] = identifier[md]
keyword[return] (
identifier[date] ( identifier[year] ,* identifier[start_md] ),
identifier[date] ( identifier[year] ,* identifier[end_md] )
) | def quarter_to_daterange(quarter):
"""Convert a quarter in arbitrary filename-ready format (e.g. 2015Q1)
into start and end datetimes"""
assert len(quarter) == 6
year = int(quarter[0:4])
quarter = quarter[5]
MONTH_DAY = {'1': ((1, 1), (3, 31)), '2': ((4, 1), (6, 30)), '3': ((7, 1), (9, 30)), '4': ((10, 1), (12, 31))}
md = MONTH_DAY[quarter]
(start_md, end_md) = md
return (date(year, *start_md), date(year, *end_md)) |
def negotiate_header(url):
"""
Return the "Authorization" HTTP header value to use for this URL.
"""
hostname = urlparse(url).hostname
_, krb_context = kerberos.authGSSClientInit('HTTP@%s' % hostname)
# authGSSClientStep goes over the network to the KDC (ie blocking).
yield threads.deferToThread(kerberos.authGSSClientStep,
krb_context, '')
negotiate_details = kerberos.authGSSClientResponse(krb_context)
defer.returnValue('Negotiate ' + negotiate_details) | def function[negotiate_header, parameter[url]]:
constant[
Return the "Authorization" HTTP header value to use for this URL.
]
variable[hostname] assign[=] call[name[urlparse], parameter[name[url]]].hostname
<ast.Tuple object at 0x7da1b27773a0> assign[=] call[name[kerberos].authGSSClientInit, parameter[binary_operation[constant[HTTP@%s] <ast.Mod object at 0x7da2590d6920> name[hostname]]]]
<ast.Yield object at 0x7da20c76dfc0>
variable[negotiate_details] assign[=] call[name[kerberos].authGSSClientResponse, parameter[name[krb_context]]]
call[name[defer].returnValue, parameter[binary_operation[constant[Negotiate ] + name[negotiate_details]]]] | keyword[def] identifier[negotiate_header] ( identifier[url] ):
literal[string]
identifier[hostname] = identifier[urlparse] ( identifier[url] ). identifier[hostname]
identifier[_] , identifier[krb_context] = identifier[kerberos] . identifier[authGSSClientInit] ( literal[string] % identifier[hostname] )
keyword[yield] identifier[threads] . identifier[deferToThread] ( identifier[kerberos] . identifier[authGSSClientStep] ,
identifier[krb_context] , literal[string] )
identifier[negotiate_details] = identifier[kerberos] . identifier[authGSSClientResponse] ( identifier[krb_context] )
identifier[defer] . identifier[returnValue] ( literal[string] + identifier[negotiate_details] ) | def negotiate_header(url):
"""
Return the "Authorization" HTTP header value to use for this URL.
"""
hostname = urlparse(url).hostname
(_, krb_context) = kerberos.authGSSClientInit('HTTP@%s' % hostname)
# authGSSClientStep goes over the network to the KDC (ie blocking).
yield threads.deferToThread(kerberos.authGSSClientStep, krb_context, '')
negotiate_details = kerberos.authGSSClientResponse(krb_context)
defer.returnValue('Negotiate ' + negotiate_details) |
def parse(self, fo):
"""
Convert AMD output to motifs
Parameters
----------
fo : file-like
File object containing AMD output.
Returns
-------
motifs : list
List of Motif instances.
"""
motifs = []
#160: 112 CACGTGC 7.25 chr14:32308489-32308689
p = re.compile(r'\d+\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)')
wm = []
name = ""
for line in fo.readlines():
if line.startswith("Motif") and line.strip().endswith(":"):
if name:
motifs.append(Motif(wm))
motifs[-1].id = name
name = ""
wm = []
name = "%s_%s" % (self.name, line.split(":")[0])
else:
m = p.search(line)
if m:
wm.append([float(m.group(x)) for x in range(1,5)])
motifs.append(Motif(wm))
motifs[-1].id = name
return motifs | def function[parse, parameter[self, fo]]:
constant[
Convert AMD output to motifs
Parameters
----------
fo : file-like
File object containing AMD output.
Returns
-------
motifs : list
List of Motif instances.
]
variable[motifs] assign[=] list[[]]
variable[p] assign[=] call[name[re].compile, parameter[constant[\d+\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)]]]
variable[wm] assign[=] list[[]]
variable[name] assign[=] constant[]
for taget[name[line]] in starred[call[name[fo].readlines, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da2054a6e90> begin[:]
if name[name] begin[:]
call[name[motifs].append, parameter[call[name[Motif], parameter[name[wm]]]]]
call[name[motifs]][<ast.UnaryOp object at 0x7da2054a4c10>].id assign[=] name[name]
variable[name] assign[=] constant[]
variable[wm] assign[=] list[[]]
variable[name] assign[=] binary_operation[constant[%s_%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Attribute object at 0x7da2054a5fc0>, <ast.Subscript object at 0x7da2054a70d0>]]]
call[name[motifs].append, parameter[call[name[Motif], parameter[name[wm]]]]]
call[name[motifs]][<ast.UnaryOp object at 0x7da18bcca0e0>].id assign[=] name[name]
return[name[motifs]] | keyword[def] identifier[parse] ( identifier[self] , identifier[fo] ):
literal[string]
identifier[motifs] =[]
identifier[p] = identifier[re] . identifier[compile] ( literal[string] )
identifier[wm] =[]
identifier[name] = literal[string]
keyword[for] identifier[line] keyword[in] identifier[fo] . identifier[readlines] ():
keyword[if] identifier[line] . identifier[startswith] ( literal[string] ) keyword[and] identifier[line] . identifier[strip] (). identifier[endswith] ( literal[string] ):
keyword[if] identifier[name] :
identifier[motifs] . identifier[append] ( identifier[Motif] ( identifier[wm] ))
identifier[motifs] [- literal[int] ]. identifier[id] = identifier[name]
identifier[name] = literal[string]
identifier[wm] =[]
identifier[name] = literal[string] %( identifier[self] . identifier[name] , identifier[line] . identifier[split] ( literal[string] )[ literal[int] ])
keyword[else] :
identifier[m] = identifier[p] . identifier[search] ( identifier[line] )
keyword[if] identifier[m] :
identifier[wm] . identifier[append] ([ identifier[float] ( identifier[m] . identifier[group] ( identifier[x] )) keyword[for] identifier[x] keyword[in] identifier[range] ( literal[int] , literal[int] )])
identifier[motifs] . identifier[append] ( identifier[Motif] ( identifier[wm] ))
identifier[motifs] [- literal[int] ]. identifier[id] = identifier[name]
keyword[return] identifier[motifs] | def parse(self, fo):
"""
Convert AMD output to motifs
Parameters
----------
fo : file-like
File object containing AMD output.
Returns
-------
motifs : list
List of Motif instances.
"""
motifs = []
#160: 112 CACGTGC 7.25 chr14:32308489-32308689
p = re.compile('\\d+\\s+([\\d.]+)\\s+([\\d.]+)\\s+([\\d.]+)\\s+([\\d.]+)')
wm = []
name = ''
for line in fo.readlines():
if line.startswith('Motif') and line.strip().endswith(':'):
if name:
motifs.append(Motif(wm))
motifs[-1].id = name
name = ''
wm = [] # depends on [control=['if'], data=[]]
name = '%s_%s' % (self.name, line.split(':')[0]) # depends on [control=['if'], data=[]]
else:
m = p.search(line)
if m:
wm.append([float(m.group(x)) for x in range(1, 5)]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
motifs.append(Motif(wm))
motifs[-1].id = name
return motifs |
def clear(self):
"""Reset this PeerGroup.
This closes all connections to all known peers and forgets about
these peers.
:returns:
A Future that resolves with a value of None when the operation
has finished
"""
try:
for peer in self._peers.values():
peer.close()
finally:
self._peers = {}
self._resetting = False | def function[clear, parameter[self]]:
constant[Reset this PeerGroup.
This closes all connections to all known peers and forgets about
these peers.
:returns:
A Future that resolves with a value of None when the operation
has finished
]
<ast.Try object at 0x7da20c6e6230> | keyword[def] identifier[clear] ( identifier[self] ):
literal[string]
keyword[try] :
keyword[for] identifier[peer] keyword[in] identifier[self] . identifier[_peers] . identifier[values] ():
identifier[peer] . identifier[close] ()
keyword[finally] :
identifier[self] . identifier[_peers] ={}
identifier[self] . identifier[_resetting] = keyword[False] | def clear(self):
"""Reset this PeerGroup.
This closes all connections to all known peers and forgets about
these peers.
:returns:
A Future that resolves with a value of None when the operation
has finished
"""
try:
for peer in self._peers.values():
peer.close() # depends on [control=['for'], data=['peer']] # depends on [control=['try'], data=[]]
finally:
self._peers = {}
self._resetting = False |
def do_set_device(self, args):
"""Set the PLM OS device.
Device defaults to /dev/ttyUSB0
Usage:
set_device device
Arguments:
device: Required - INSTEON PLM device
"""
params = args.split()
device = None
try:
device = params[0]
except IndexError:
_LOGGING.error('Device name required.')
self.do_help('set_device')
if device:
self.tools.device = device | def function[do_set_device, parameter[self, args]]:
constant[Set the PLM OS device.
Device defaults to /dev/ttyUSB0
Usage:
set_device device
Arguments:
device: Required - INSTEON PLM device
]
variable[params] assign[=] call[name[args].split, parameter[]]
variable[device] assign[=] constant[None]
<ast.Try object at 0x7da1b1a47b50>
if name[device] begin[:]
name[self].tools.device assign[=] name[device] | keyword[def] identifier[do_set_device] ( identifier[self] , identifier[args] ):
literal[string]
identifier[params] = identifier[args] . identifier[split] ()
identifier[device] = keyword[None]
keyword[try] :
identifier[device] = identifier[params] [ literal[int] ]
keyword[except] identifier[IndexError] :
identifier[_LOGGING] . identifier[error] ( literal[string] )
identifier[self] . identifier[do_help] ( literal[string] )
keyword[if] identifier[device] :
identifier[self] . identifier[tools] . identifier[device] = identifier[device] | def do_set_device(self, args):
"""Set the PLM OS device.
Device defaults to /dev/ttyUSB0
Usage:
set_device device
Arguments:
device: Required - INSTEON PLM device
"""
params = args.split()
device = None
try:
device = params[0] # depends on [control=['try'], data=[]]
except IndexError:
_LOGGING.error('Device name required.')
self.do_help('set_device') # depends on [control=['except'], data=[]]
if device:
self.tools.device = device # depends on [control=['if'], data=[]] |
def get_percolation_threshold(self):
r"""
Find the invasion threshold at which a cluster spans from the inlet to
the outlet sites
"""
if np.sum(self['pore.inlets']) == 0:
raise Exception('Inlet pores must be specified first')
if np.sum(self['pore.outlets']) == 0:
raise Exception('Outlet pores must be specified first')
else:
Pout = self['pore.outlets']
# Do a simple check of pressures on the outlet pores first...
if self.settings['access_limited']:
thresh = np.amin(self['pore.invasion_pressure'][Pout])
else:
raise Exception('This is currently only implemented for access ' +
'limited simulations')
return thresh | def function[get_percolation_threshold, parameter[self]]:
constant[
Find the invasion threshold at which a cluster spans from the inlet to
the outlet sites
]
if compare[call[name[np].sum, parameter[call[name[self]][constant[pore.inlets]]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da18f00ca90>
if compare[call[name[np].sum, parameter[call[name[self]][constant[pore.outlets]]]] equal[==] constant[0]] begin[:]
<ast.Raise object at 0x7da18f00d840>
if call[name[self].settings][constant[access_limited]] begin[:]
variable[thresh] assign[=] call[name[np].amin, parameter[call[call[name[self]][constant[pore.invasion_pressure]]][name[Pout]]]]
return[name[thresh]] | keyword[def] identifier[get_percolation_threshold] ( identifier[self] ):
literal[string]
keyword[if] identifier[np] . identifier[sum] ( identifier[self] [ literal[string] ])== literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] identifier[np] . identifier[sum] ( identifier[self] [ literal[string] ])== literal[int] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[else] :
identifier[Pout] = identifier[self] [ literal[string] ]
keyword[if] identifier[self] . identifier[settings] [ literal[string] ]:
identifier[thresh] = identifier[np] . identifier[amin] ( identifier[self] [ literal[string] ][ identifier[Pout] ])
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] +
literal[string] )
keyword[return] identifier[thresh] | def get_percolation_threshold(self):
"""
Find the invasion threshold at which a cluster spans from the inlet to
the outlet sites
"""
if np.sum(self['pore.inlets']) == 0:
raise Exception('Inlet pores must be specified first') # depends on [control=['if'], data=[]]
if np.sum(self['pore.outlets']) == 0:
raise Exception('Outlet pores must be specified first') # depends on [control=['if'], data=[]]
else:
Pout = self['pore.outlets']
# Do a simple check of pressures on the outlet pores first...
if self.settings['access_limited']:
thresh = np.amin(self['pore.invasion_pressure'][Pout]) # depends on [control=['if'], data=[]]
else:
raise Exception('This is currently only implemented for access ' + 'limited simulations')
return thresh |
def isiterable(item):
"""
Check if item is iterable ans is not a string (or unicode)
"""
if isinstance(item, collections.Iterable):
if not isinstance(item,(str,unicode)) :return True
else : return False
else : False | def function[isiterable, parameter[item]]:
constant[
Check if item is iterable ans is not a string (or unicode)
]
if call[name[isinstance], parameter[name[item], name[collections].Iterable]] begin[:]
if <ast.UnaryOp object at 0x7da1b0932080> begin[:]
return[constant[True]] | keyword[def] identifier[isiterable] ( identifier[item] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[item] , identifier[collections] . identifier[Iterable] ):
keyword[if] keyword[not] identifier[isinstance] ( identifier[item] ,( identifier[str] , identifier[unicode] )): keyword[return] keyword[True]
keyword[else] : keyword[return] keyword[False]
keyword[else] : keyword[False] | def isiterable(item):
"""
Check if item is iterable ans is not a string (or unicode)
"""
if isinstance(item, collections.Iterable):
if not isinstance(item, (str, unicode)):
return True # depends on [control=['if'], data=[]]
else:
return False # depends on [control=['if'], data=[]]
else:
False |
def to_child_field(cls):
"""
Returns an callable instance that will convert a value to a Child object.
:param cls: Valid class type of the Child.
:return: instance of ChildConverter.
"""
class ChildConverter(object):
def __init__(self, cls):
self._cls = cls
@property
def cls(self):
return resolve_class(self._cls)
def __call__(self, value):
try:
# Issue #33: if value is the class and callable, then invoke
if value == self._cls and callable(value):
value = value()
return to_model(self.cls, value)
except ValueError as e:
error_msg = CHILD_ERROR_MSG.format(value, self.cls, str(e))
raise ValueError(error_msg)
return ChildConverter(cls) | def function[to_child_field, parameter[cls]]:
constant[
Returns an callable instance that will convert a value to a Child object.
:param cls: Valid class type of the Child.
:return: instance of ChildConverter.
]
class class[ChildConverter, parameter[]] begin[:]
def function[__init__, parameter[self, cls]]:
name[self]._cls assign[=] name[cls]
def function[cls, parameter[self]]:
return[call[name[resolve_class], parameter[name[self]._cls]]]
def function[__call__, parameter[self, value]]:
<ast.Try object at 0x7da1b0b48b50>
return[call[name[ChildConverter], parameter[name[cls]]]] | keyword[def] identifier[to_child_field] ( identifier[cls] ):
literal[string]
keyword[class] identifier[ChildConverter] ( identifier[object] ):
keyword[def] identifier[__init__] ( identifier[self] , identifier[cls] ):
identifier[self] . identifier[_cls] = identifier[cls]
@ identifier[property]
keyword[def] identifier[cls] ( identifier[self] ):
keyword[return] identifier[resolve_class] ( identifier[self] . identifier[_cls] )
keyword[def] identifier[__call__] ( identifier[self] , identifier[value] ):
keyword[try] :
keyword[if] identifier[value] == identifier[self] . identifier[_cls] keyword[and] identifier[callable] ( identifier[value] ):
identifier[value] = identifier[value] ()
keyword[return] identifier[to_model] ( identifier[self] . identifier[cls] , identifier[value] )
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
identifier[error_msg] = identifier[CHILD_ERROR_MSG] . identifier[format] ( identifier[value] , identifier[self] . identifier[cls] , identifier[str] ( identifier[e] ))
keyword[raise] identifier[ValueError] ( identifier[error_msg] )
keyword[return] identifier[ChildConverter] ( identifier[cls] ) | def to_child_field(cls):
"""
Returns an callable instance that will convert a value to a Child object.
:param cls: Valid class type of the Child.
:return: instance of ChildConverter.
"""
class ChildConverter(object):
def __init__(self, cls):
self._cls = cls
@property
def cls(self):
return resolve_class(self._cls)
def __call__(self, value):
try:
# Issue #33: if value is the class and callable, then invoke
if value == self._cls and callable(value):
value = value() # depends on [control=['if'], data=[]]
return to_model(self.cls, value) # depends on [control=['try'], data=[]]
except ValueError as e:
error_msg = CHILD_ERROR_MSG.format(value, self.cls, str(e))
raise ValueError(error_msg) # depends on [control=['except'], data=['e']]
return ChildConverter(cls) |
def convert_to_pixels(self, value):
"""
Convert value in the scale's unit into a position in pixels.
:param value: value to convert
:type value: float
:return: the corresponding position in pixels
:rtype: float
"""
percent = ((value - self._start) / self._extent)
return percent * (self.get_scale_length() - self._sliderlength) + self._sliderlength / 2 | def function[convert_to_pixels, parameter[self, value]]:
constant[
Convert value in the scale's unit into a position in pixels.
:param value: value to convert
:type value: float
:return: the corresponding position in pixels
:rtype: float
]
variable[percent] assign[=] binary_operation[binary_operation[name[value] - name[self]._start] / name[self]._extent]
return[binary_operation[binary_operation[name[percent] * binary_operation[call[name[self].get_scale_length, parameter[]] - name[self]._sliderlength]] + binary_operation[name[self]._sliderlength / constant[2]]]] | keyword[def] identifier[convert_to_pixels] ( identifier[self] , identifier[value] ):
literal[string]
identifier[percent] =(( identifier[value] - identifier[self] . identifier[_start] )/ identifier[self] . identifier[_extent] )
keyword[return] identifier[percent] *( identifier[self] . identifier[get_scale_length] ()- identifier[self] . identifier[_sliderlength] )+ identifier[self] . identifier[_sliderlength] / literal[int] | def convert_to_pixels(self, value):
"""
Convert value in the scale's unit into a position in pixels.
:param value: value to convert
:type value: float
:return: the corresponding position in pixels
:rtype: float
"""
percent = (value - self._start) / self._extent
return percent * (self.get_scale_length() - self._sliderlength) + self._sliderlength / 2 |
def model_builders(self, algo=None, timeoutSecs=10, **kwargs):
'''
Return a model builder or all of the model builders known to the
h2o cluster. The model builders are contained in a dictionary
called "model_builders" at the top level of the result. The
dictionary maps algorithm names to parameters lists. Each of the
parameters contains all the metdata required by a client to
present a model building interface to the user.
if parameters = True, return the parameters?
'''
params_dict = {}
h2o_methods.check_params_update_kwargs(params_dict, kwargs, 'model_builders', False)
request = '3/ModelBuilders.json'
if algo:
request += "/" + algo
result = self.do_json_request(request, timeout=timeoutSecs, params=params_dict)
# verboseprint(request, "result:", dump_json(result))
h2o_sandbox.check_sandbox_for_errors()
return result | def function[model_builders, parameter[self, algo, timeoutSecs]]:
constant[
Return a model builder or all of the model builders known to the
h2o cluster. The model builders are contained in a dictionary
called "model_builders" at the top level of the result. The
dictionary maps algorithm names to parameters lists. Each of the
parameters contains all the metdata required by a client to
present a model building interface to the user.
if parameters = True, return the parameters?
]
variable[params_dict] assign[=] dictionary[[], []]
call[name[h2o_methods].check_params_update_kwargs, parameter[name[params_dict], name[kwargs], constant[model_builders], constant[False]]]
variable[request] assign[=] constant[3/ModelBuilders.json]
if name[algo] begin[:]
<ast.AugAssign object at 0x7da2054a7910>
variable[result] assign[=] call[name[self].do_json_request, parameter[name[request]]]
call[name[h2o_sandbox].check_sandbox_for_errors, parameter[]]
return[name[result]] | keyword[def] identifier[model_builders] ( identifier[self] , identifier[algo] = keyword[None] , identifier[timeoutSecs] = literal[int] ,** identifier[kwargs] ):
literal[string]
identifier[params_dict] ={}
identifier[h2o_methods] . identifier[check_params_update_kwargs] ( identifier[params_dict] , identifier[kwargs] , literal[string] , keyword[False] )
identifier[request] = literal[string]
keyword[if] identifier[algo] :
identifier[request] += literal[string] + identifier[algo]
identifier[result] = identifier[self] . identifier[do_json_request] ( identifier[request] , identifier[timeout] = identifier[timeoutSecs] , identifier[params] = identifier[params_dict] )
identifier[h2o_sandbox] . identifier[check_sandbox_for_errors] ()
keyword[return] identifier[result] | def model_builders(self, algo=None, timeoutSecs=10, **kwargs):
"""
Return a model builder or all of the model builders known to the
h2o cluster. The model builders are contained in a dictionary
called "model_builders" at the top level of the result. The
dictionary maps algorithm names to parameters lists. Each of the
parameters contains all the metdata required by a client to
present a model building interface to the user.
if parameters = True, return the parameters?
"""
params_dict = {}
h2o_methods.check_params_update_kwargs(params_dict, kwargs, 'model_builders', False)
request = '3/ModelBuilders.json'
if algo:
request += '/' + algo # depends on [control=['if'], data=[]]
result = self.do_json_request(request, timeout=timeoutSecs, params=params_dict)
# verboseprint(request, "result:", dump_json(result))
h2o_sandbox.check_sandbox_for_errors()
return result |
def legendre_symbol(a, p):
""" Compute the Legendre symbol a|p using
Euler's criterion. p is a prime, a is
relatively prime to p (if p divides
a, then a|p = 0)
Returns 1 if a has a square root modulo
p, -1 otherwise.
"""
ls = pow(a, (p - 1) / 2, p)
return -1 if ls == p - 1 else ls | def function[legendre_symbol, parameter[a, p]]:
constant[ Compute the Legendre symbol a|p using
Euler's criterion. p is a prime, a is
relatively prime to p (if p divides
a, then a|p = 0)
Returns 1 if a has a square root modulo
p, -1 otherwise.
]
variable[ls] assign[=] call[name[pow], parameter[name[a], binary_operation[binary_operation[name[p] - constant[1]] / constant[2]], name[p]]]
return[<ast.IfExp object at 0x7da1b0c58a00>] | keyword[def] identifier[legendre_symbol] ( identifier[a] , identifier[p] ):
literal[string]
identifier[ls] = identifier[pow] ( identifier[a] ,( identifier[p] - literal[int] )/ literal[int] , identifier[p] )
keyword[return] - literal[int] keyword[if] identifier[ls] == identifier[p] - literal[int] keyword[else] identifier[ls] | def legendre_symbol(a, p):
""" Compute the Legendre symbol a|p using
Euler's criterion. p is a prime, a is
relatively prime to p (if p divides
a, then a|p = 0)
Returns 1 if a has a square root modulo
p, -1 otherwise.
"""
ls = pow(a, (p - 1) / 2, p)
return -1 if ls == p - 1 else ls |
def read_raw_data(self, f):
"""Read signal data from file"""
if not self.toc["kTocRawData"]:
return
f.seek(self.data_position)
total_data_size = self.next_segment_offset - self.raw_data_offset
log.debug(
"Reading %d bytes of data at %d in %d chunks" %
(total_data_size, f.tell(), self.num_chunks))
for chunk in range(self.num_chunks):
if self.toc["kTocInterleavedData"]:
log.debug("Data is interleaved")
data_objects = [o for o in self.ordered_objects if o.has_data]
# If all data types have numpy types and all the lengths are
# the same, then we can read all data at once with numpy,
# which is much faster
all_numpy = all(
(o.data_type.nptype is not None for o in data_objects))
same_length = (len(
set((o.number_values for o in data_objects))) == 1)
if (all_numpy and same_length):
self._read_interleaved_numpy(f, data_objects)
else:
self._read_interleaved(f, data_objects)
else:
object_data = {}
log.debug("Data is contiguous")
for obj in self.ordered_objects:
if obj.has_data:
if (chunk == (self.num_chunks - 1) and
self.final_chunk_proportion != 1.0):
number_values = int(
obj.number_values *
self.final_chunk_proportion)
else:
number_values = obj.number_values
object_data[obj.path] = (
obj._read_values(f, number_values))
for obj in self.ordered_objects:
if obj.has_data:
obj.tdms_object._update_data(object_data[obj.path]) | def function[read_raw_data, parameter[self, f]]:
constant[Read signal data from file]
if <ast.UnaryOp object at 0x7da1b15164d0> begin[:]
return[None]
call[name[f].seek, parameter[name[self].data_position]]
variable[total_data_size] assign[=] binary_operation[name[self].next_segment_offset - name[self].raw_data_offset]
call[name[log].debug, parameter[binary_operation[constant[Reading %d bytes of data at %d in %d chunks] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b1517bb0>, <ast.Call object at 0x7da1b1517070>, <ast.Attribute object at 0x7da1b1516440>]]]]]
for taget[name[chunk]] in starred[call[name[range], parameter[name[self].num_chunks]]] begin[:]
if call[name[self].toc][constant[kTocInterleavedData]] begin[:]
call[name[log].debug, parameter[constant[Data is interleaved]]]
variable[data_objects] assign[=] <ast.ListComp object at 0x7da1b15166b0>
variable[all_numpy] assign[=] call[name[all], parameter[<ast.GeneratorExp object at 0x7da1b1517700>]]
variable[same_length] assign[=] compare[call[name[len], parameter[call[name[set], parameter[<ast.GeneratorExp object at 0x7da1b1516770>]]]] equal[==] constant[1]]
if <ast.BoolOp object at 0x7da1b15177c0> begin[:]
call[name[self]._read_interleaved_numpy, parameter[name[f], name[data_objects]]] | keyword[def] identifier[read_raw_data] ( identifier[self] , identifier[f] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[toc] [ literal[string] ]:
keyword[return]
identifier[f] . identifier[seek] ( identifier[self] . identifier[data_position] )
identifier[total_data_size] = identifier[self] . identifier[next_segment_offset] - identifier[self] . identifier[raw_data_offset]
identifier[log] . identifier[debug] (
literal[string] %
( identifier[total_data_size] , identifier[f] . identifier[tell] (), identifier[self] . identifier[num_chunks] ))
keyword[for] identifier[chunk] keyword[in] identifier[range] ( identifier[self] . identifier[num_chunks] ):
keyword[if] identifier[self] . identifier[toc] [ literal[string] ]:
identifier[log] . identifier[debug] ( literal[string] )
identifier[data_objects] =[ identifier[o] keyword[for] identifier[o] keyword[in] identifier[self] . identifier[ordered_objects] keyword[if] identifier[o] . identifier[has_data] ]
identifier[all_numpy] = identifier[all] (
( identifier[o] . identifier[data_type] . identifier[nptype] keyword[is] keyword[not] keyword[None] keyword[for] identifier[o] keyword[in] identifier[data_objects] ))
identifier[same_length] =( identifier[len] (
identifier[set] (( identifier[o] . identifier[number_values] keyword[for] identifier[o] keyword[in] identifier[data_objects] )))== literal[int] )
keyword[if] ( identifier[all_numpy] keyword[and] identifier[same_length] ):
identifier[self] . identifier[_read_interleaved_numpy] ( identifier[f] , identifier[data_objects] )
keyword[else] :
identifier[self] . identifier[_read_interleaved] ( identifier[f] , identifier[data_objects] )
keyword[else] :
identifier[object_data] ={}
identifier[log] . identifier[debug] ( literal[string] )
keyword[for] identifier[obj] keyword[in] identifier[self] . identifier[ordered_objects] :
keyword[if] identifier[obj] . identifier[has_data] :
keyword[if] ( identifier[chunk] ==( identifier[self] . identifier[num_chunks] - literal[int] ) keyword[and]
identifier[self] . identifier[final_chunk_proportion] != literal[int] ):
identifier[number_values] = identifier[int] (
identifier[obj] . identifier[number_values] *
identifier[self] . identifier[final_chunk_proportion] )
keyword[else] :
identifier[number_values] = identifier[obj] . identifier[number_values]
identifier[object_data] [ identifier[obj] . identifier[path] ]=(
identifier[obj] . identifier[_read_values] ( identifier[f] , identifier[number_values] ))
keyword[for] identifier[obj] keyword[in] identifier[self] . identifier[ordered_objects] :
keyword[if] identifier[obj] . identifier[has_data] :
identifier[obj] . identifier[tdms_object] . identifier[_update_data] ( identifier[object_data] [ identifier[obj] . identifier[path] ]) | def read_raw_data(self, f):
"""Read signal data from file"""
if not self.toc['kTocRawData']:
return # depends on [control=['if'], data=[]]
f.seek(self.data_position)
total_data_size = self.next_segment_offset - self.raw_data_offset
log.debug('Reading %d bytes of data at %d in %d chunks' % (total_data_size, f.tell(), self.num_chunks))
for chunk in range(self.num_chunks):
if self.toc['kTocInterleavedData']:
log.debug('Data is interleaved')
data_objects = [o for o in self.ordered_objects if o.has_data]
# If all data types have numpy types and all the lengths are
# the same, then we can read all data at once with numpy,
# which is much faster
all_numpy = all((o.data_type.nptype is not None for o in data_objects))
same_length = len(set((o.number_values for o in data_objects))) == 1
if all_numpy and same_length:
self._read_interleaved_numpy(f, data_objects) # depends on [control=['if'], data=[]]
else:
self._read_interleaved(f, data_objects) # depends on [control=['if'], data=[]]
else:
object_data = {}
log.debug('Data is contiguous')
for obj in self.ordered_objects:
if obj.has_data:
if chunk == self.num_chunks - 1 and self.final_chunk_proportion != 1.0:
number_values = int(obj.number_values * self.final_chunk_proportion) # depends on [control=['if'], data=[]]
else:
number_values = obj.number_values
object_data[obj.path] = obj._read_values(f, number_values) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['obj']]
for obj in self.ordered_objects:
if obj.has_data:
obj.tdms_object._update_data(object_data[obj.path]) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['obj']] # depends on [control=['for'], data=['chunk']] |
def read_data(file_path):
"""
Reads a file and returns a json encoded representation of the file.
"""
if not is_valid(file_path):
write_data(file_path, {})
db = open_file_for_reading(file_path)
content = db.read()
obj = decode(content)
db.close()
return obj | def function[read_data, parameter[file_path]]:
constant[
Reads a file and returns a json encoded representation of the file.
]
if <ast.UnaryOp object at 0x7da1b11e2f20> begin[:]
call[name[write_data], parameter[name[file_path], dictionary[[], []]]]
variable[db] assign[=] call[name[open_file_for_reading], parameter[name[file_path]]]
variable[content] assign[=] call[name[db].read, parameter[]]
variable[obj] assign[=] call[name[decode], parameter[name[content]]]
call[name[db].close, parameter[]]
return[name[obj]] | keyword[def] identifier[read_data] ( identifier[file_path] ):
literal[string]
keyword[if] keyword[not] identifier[is_valid] ( identifier[file_path] ):
identifier[write_data] ( identifier[file_path] ,{})
identifier[db] = identifier[open_file_for_reading] ( identifier[file_path] )
identifier[content] = identifier[db] . identifier[read] ()
identifier[obj] = identifier[decode] ( identifier[content] )
identifier[db] . identifier[close] ()
keyword[return] identifier[obj] | def read_data(file_path):
"""
Reads a file and returns a json encoded representation of the file.
"""
if not is_valid(file_path):
write_data(file_path, {}) # depends on [control=['if'], data=[]]
db = open_file_for_reading(file_path)
content = db.read()
obj = decode(content)
db.close()
return obj |
def get_owner_asset_ids(self, address):
"""
Get the list of assets owned by an address owner.
:param address: ethereum account address, hex str
:return:
"""
block_filter = self._get_event_filter(owner=address)
log_items = block_filter.get_all_entries(max_tries=5)
did_list = []
for log_i in log_items:
did_list.append(id_to_did(log_i.args['_did']))
return did_list | def function[get_owner_asset_ids, parameter[self, address]]:
constant[
Get the list of assets owned by an address owner.
:param address: ethereum account address, hex str
:return:
]
variable[block_filter] assign[=] call[name[self]._get_event_filter, parameter[]]
variable[log_items] assign[=] call[name[block_filter].get_all_entries, parameter[]]
variable[did_list] assign[=] list[[]]
for taget[name[log_i]] in starred[name[log_items]] begin[:]
call[name[did_list].append, parameter[call[name[id_to_did], parameter[call[name[log_i].args][constant[_did]]]]]]
return[name[did_list]] | keyword[def] identifier[get_owner_asset_ids] ( identifier[self] , identifier[address] ):
literal[string]
identifier[block_filter] = identifier[self] . identifier[_get_event_filter] ( identifier[owner] = identifier[address] )
identifier[log_items] = identifier[block_filter] . identifier[get_all_entries] ( identifier[max_tries] = literal[int] )
identifier[did_list] =[]
keyword[for] identifier[log_i] keyword[in] identifier[log_items] :
identifier[did_list] . identifier[append] ( identifier[id_to_did] ( identifier[log_i] . identifier[args] [ literal[string] ]))
keyword[return] identifier[did_list] | def get_owner_asset_ids(self, address):
"""
Get the list of assets owned by an address owner.
:param address: ethereum account address, hex str
:return:
"""
block_filter = self._get_event_filter(owner=address)
log_items = block_filter.get_all_entries(max_tries=5)
did_list = []
for log_i in log_items:
did_list.append(id_to_did(log_i.args['_did'])) # depends on [control=['for'], data=['log_i']]
return did_list |
def has_register(self, register):
"""
Test if this circuit has the register r.
Args:
register (Register): a quantum or classical register.
Returns:
bool: True if the register is contained in this circuit.
"""
has_reg = False
if (isinstance(register, QuantumRegister) and
register in self.qregs):
has_reg = True
elif (isinstance(register, ClassicalRegister) and
register in self.cregs):
has_reg = True
return has_reg | def function[has_register, parameter[self, register]]:
constant[
Test if this circuit has the register r.
Args:
register (Register): a quantum or classical register.
Returns:
bool: True if the register is contained in this circuit.
]
variable[has_reg] assign[=] constant[False]
if <ast.BoolOp object at 0x7da1b0536050> begin[:]
variable[has_reg] assign[=] constant[True]
return[name[has_reg]] | keyword[def] identifier[has_register] ( identifier[self] , identifier[register] ):
literal[string]
identifier[has_reg] = keyword[False]
keyword[if] ( identifier[isinstance] ( identifier[register] , identifier[QuantumRegister] ) keyword[and]
identifier[register] keyword[in] identifier[self] . identifier[qregs] ):
identifier[has_reg] = keyword[True]
keyword[elif] ( identifier[isinstance] ( identifier[register] , identifier[ClassicalRegister] ) keyword[and]
identifier[register] keyword[in] identifier[self] . identifier[cregs] ):
identifier[has_reg] = keyword[True]
keyword[return] identifier[has_reg] | def has_register(self, register):
"""
Test if this circuit has the register r.
Args:
register (Register): a quantum or classical register.
Returns:
bool: True if the register is contained in this circuit.
"""
has_reg = False
if isinstance(register, QuantumRegister) and register in self.qregs:
has_reg = True # depends on [control=['if'], data=[]]
elif isinstance(register, ClassicalRegister) and register in self.cregs:
has_reg = True # depends on [control=['if'], data=[]]
return has_reg |
def get(self, remotepath, localpath, callback=None):
"""
Copy a remote file (C{remotepath}) from the SFTP server to the local
host as C{localpath}. Any exception raised by operations will be
passed through. This method is primarily provided as a convenience.
@param remotepath: the remote file to copy
@type remotepath: str
@param localpath: the destination path on the local host
@type localpath: str
@param callback: optional callback function that accepts the bytes
transferred so far and the total bytes to be transferred
(since 1.7.4)
@type callback: function(int, int)
@since: 1.4
"""
fr = self.file(remotepath, 'rb')
file_size = self.stat(remotepath).st_size
fr.prefetch()
try:
fl = file(localpath, 'wb')
try:
size = 0
while True:
data = fr.read(32768)
if len(data) == 0:
break
fl.write(data)
size += len(data)
if callback is not None:
callback(size, file_size)
finally:
fl.close()
finally:
fr.close()
s = os.stat(localpath)
if s.st_size != size:
raise IOError('size mismatch in get! %d != %d' % (s.st_size, size)) | def function[get, parameter[self, remotepath, localpath, callback]]:
constant[
Copy a remote file (C{remotepath}) from the SFTP server to the local
host as C{localpath}. Any exception raised by operations will be
passed through. This method is primarily provided as a convenience.
@param remotepath: the remote file to copy
@type remotepath: str
@param localpath: the destination path on the local host
@type localpath: str
@param callback: optional callback function that accepts the bytes
transferred so far and the total bytes to be transferred
(since 1.7.4)
@type callback: function(int, int)
@since: 1.4
]
variable[fr] assign[=] call[name[self].file, parameter[name[remotepath], constant[rb]]]
variable[file_size] assign[=] call[name[self].stat, parameter[name[remotepath]]].st_size
call[name[fr].prefetch, parameter[]]
<ast.Try object at 0x7da20c6c45b0>
variable[s] assign[=] call[name[os].stat, parameter[name[localpath]]]
if compare[name[s].st_size not_equal[!=] name[size]] begin[:]
<ast.Raise object at 0x7da1b10c61d0> | keyword[def] identifier[get] ( identifier[self] , identifier[remotepath] , identifier[localpath] , identifier[callback] = keyword[None] ):
literal[string]
identifier[fr] = identifier[self] . identifier[file] ( identifier[remotepath] , literal[string] )
identifier[file_size] = identifier[self] . identifier[stat] ( identifier[remotepath] ). identifier[st_size]
identifier[fr] . identifier[prefetch] ()
keyword[try] :
identifier[fl] = identifier[file] ( identifier[localpath] , literal[string] )
keyword[try] :
identifier[size] = literal[int]
keyword[while] keyword[True] :
identifier[data] = identifier[fr] . identifier[read] ( literal[int] )
keyword[if] identifier[len] ( identifier[data] )== literal[int] :
keyword[break]
identifier[fl] . identifier[write] ( identifier[data] )
identifier[size] += identifier[len] ( identifier[data] )
keyword[if] identifier[callback] keyword[is] keyword[not] keyword[None] :
identifier[callback] ( identifier[size] , identifier[file_size] )
keyword[finally] :
identifier[fl] . identifier[close] ()
keyword[finally] :
identifier[fr] . identifier[close] ()
identifier[s] = identifier[os] . identifier[stat] ( identifier[localpath] )
keyword[if] identifier[s] . identifier[st_size] != identifier[size] :
keyword[raise] identifier[IOError] ( literal[string] %( identifier[s] . identifier[st_size] , identifier[size] )) | def get(self, remotepath, localpath, callback=None):
"""
Copy a remote file (C{remotepath}) from the SFTP server to the local
host as C{localpath}. Any exception raised by operations will be
passed through. This method is primarily provided as a convenience.
@param remotepath: the remote file to copy
@type remotepath: str
@param localpath: the destination path on the local host
@type localpath: str
@param callback: optional callback function that accepts the bytes
transferred so far and the total bytes to be transferred
(since 1.7.4)
@type callback: function(int, int)
@since: 1.4
"""
fr = self.file(remotepath, 'rb')
file_size = self.stat(remotepath).st_size
fr.prefetch()
try:
fl = file(localpath, 'wb')
try:
size = 0
while True:
data = fr.read(32768)
if len(data) == 0:
break # depends on [control=['if'], data=[]]
fl.write(data)
size += len(data)
if callback is not None:
callback(size, file_size) # depends on [control=['if'], data=['callback']] # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
finally:
fl.close() # depends on [control=['try'], data=[]]
finally:
fr.close()
s = os.stat(localpath)
if s.st_size != size:
raise IOError('size mismatch in get! %d != %d' % (s.st_size, size)) # depends on [control=['if'], data=['size']] |
def commit(self):
"""
Commit this transaction.
"""
if not self._parent._is_active:
raise exc.InvalidRequestError("This transaction is inactive")
yield from self._do_commit()
self._is_active = False | def function[commit, parameter[self]]:
constant[
Commit this transaction.
]
if <ast.UnaryOp object at 0x7da1b0f39e10> begin[:]
<ast.Raise object at 0x7da1b0f3bc40>
<ast.YieldFrom object at 0x7da1b0f38670>
name[self]._is_active assign[=] constant[False] | keyword[def] identifier[commit] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_parent] . identifier[_is_active] :
keyword[raise] identifier[exc] . identifier[InvalidRequestError] ( literal[string] )
keyword[yield] keyword[from] identifier[self] . identifier[_do_commit] ()
identifier[self] . identifier[_is_active] = keyword[False] | def commit(self):
"""
Commit this transaction.
"""
if not self._parent._is_active:
raise exc.InvalidRequestError('This transaction is inactive') # depends on [control=['if'], data=[]]
yield from self._do_commit()
self._is_active = False |
def validate_extra_link(self, extra_link):
"""validate extra link"""
if EXTRA_LINK_NAME_KEY not in extra_link or EXTRA_LINK_FORMATTER_KEY not in extra_link:
raise Exception("Invalid extra.links format. " +
"Extra link must include a 'name' and 'formatter' field")
self.validated_formatter(extra_link[EXTRA_LINK_FORMATTER_KEY])
return extra_link | def function[validate_extra_link, parameter[self, extra_link]]:
constant[validate extra link]
if <ast.BoolOp object at 0x7da18dc043a0> begin[:]
<ast.Raise object at 0x7da18dc06b90>
call[name[self].validated_formatter, parameter[call[name[extra_link]][name[EXTRA_LINK_FORMATTER_KEY]]]]
return[name[extra_link]] | keyword[def] identifier[validate_extra_link] ( identifier[self] , identifier[extra_link] ):
literal[string]
keyword[if] identifier[EXTRA_LINK_NAME_KEY] keyword[not] keyword[in] identifier[extra_link] keyword[or] identifier[EXTRA_LINK_FORMATTER_KEY] keyword[not] keyword[in] identifier[extra_link] :
keyword[raise] identifier[Exception] ( literal[string] +
literal[string] )
identifier[self] . identifier[validated_formatter] ( identifier[extra_link] [ identifier[EXTRA_LINK_FORMATTER_KEY] ])
keyword[return] identifier[extra_link] | def validate_extra_link(self, extra_link):
"""validate extra link"""
if EXTRA_LINK_NAME_KEY not in extra_link or EXTRA_LINK_FORMATTER_KEY not in extra_link:
raise Exception('Invalid extra.links format. ' + "Extra link must include a 'name' and 'formatter' field") # depends on [control=['if'], data=[]]
self.validated_formatter(extra_link[EXTRA_LINK_FORMATTER_KEY])
return extra_link |
def put_metric_alarm(self, alarm):
"""
Creates or updates an alarm and associates it with the specified Amazon
CloudWatch metric. Optionally, this operation can associate one or more
Amazon Simple Notification Service resources with the alarm.
When this operation creates an alarm, the alarm state is immediately
set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is
set appropriately. Any actions associated with the StateValue is then
executed.
When updating an existing alarm, its StateValue is left unchanged.
:type alarm: boto.ec2.cloudwatch.alarm.MetricAlarm
:param alarm: MetricAlarm object.
"""
params = {
'AlarmName' : alarm.name,
'MetricName' : alarm.metric,
'Namespace' : alarm.namespace,
'Statistic' : alarm.statistic,
'ComparisonOperator' : alarm.comparison,
'Threshold' : alarm.threshold,
'EvaluationPeriods' : alarm.evaluation_periods,
'Period' : alarm.period,
}
if alarm.actions_enabled is not None:
params['ActionsEnabled'] = alarm.actions_enabled
if alarm.alarm_actions:
self.build_list_params(params, alarm.alarm_actions,
'AlarmActions.member.%s')
if alarm.description:
params['AlarmDescription'] = alarm.description
if alarm.dimensions:
self.build_dimension_param(alarm.dimensions, params)
if alarm.insufficient_data_actions:
self.build_list_params(params, alarm.insufficient_data_actions,
'InsufficientDataActions.member.%s')
if alarm.ok_actions:
self.build_list_params(params, alarm.ok_actions,
'OKActions.member.%s')
if alarm.unit:
params['Unit'] = alarm.unit
alarm.connection = self
return self.get_status('PutMetricAlarm', params) | def function[put_metric_alarm, parameter[self, alarm]]:
constant[
Creates or updates an alarm and associates it with the specified Amazon
CloudWatch metric. Optionally, this operation can associate one or more
Amazon Simple Notification Service resources with the alarm.
When this operation creates an alarm, the alarm state is immediately
set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is
set appropriately. Any actions associated with the StateValue is then
executed.
When updating an existing alarm, its StateValue is left unchanged.
:type alarm: boto.ec2.cloudwatch.alarm.MetricAlarm
:param alarm: MetricAlarm object.
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da20e9576d0>, <ast.Constant object at 0x7da20e956290>, <ast.Constant object at 0x7da20e9551b0>, <ast.Constant object at 0x7da20e957eb0>, <ast.Constant object at 0x7da20e956530>, <ast.Constant object at 0x7da1b2676f50>, <ast.Constant object at 0x7da1b2677220>, <ast.Constant object at 0x7da1b2677160>], [<ast.Attribute object at 0x7da1b26776a0>, <ast.Attribute object at 0x7da1b2677400>, <ast.Attribute object at 0x7da1b2677100>, <ast.Attribute object at 0x7da1b26777c0>, <ast.Attribute object at 0x7da1b2677d30>, <ast.Attribute object at 0x7da1b2676f80>, <ast.Attribute object at 0x7da1b26773a0>, <ast.Attribute object at 0x7da1b26773d0>]]
if compare[name[alarm].actions_enabled is_not constant[None]] begin[:]
call[name[params]][constant[ActionsEnabled]] assign[=] name[alarm].actions_enabled
if name[alarm].alarm_actions begin[:]
call[name[self].build_list_params, parameter[name[params], name[alarm].alarm_actions, constant[AlarmActions.member.%s]]]
if name[alarm].description begin[:]
call[name[params]][constant[AlarmDescription]] assign[=] name[alarm].description
if name[alarm].dimensions begin[:]
call[name[self].build_dimension_param, parameter[name[alarm].dimensions, name[params]]]
if name[alarm].insufficient_data_actions begin[:]
call[name[self].build_list_params, parameter[name[params], name[alarm].insufficient_data_actions, constant[InsufficientDataActions.member.%s]]]
if name[alarm].ok_actions begin[:]
call[name[self].build_list_params, parameter[name[params], name[alarm].ok_actions, constant[OKActions.member.%s]]]
if name[alarm].unit begin[:]
call[name[params]][constant[Unit]] assign[=] name[alarm].unit
name[alarm].connection assign[=] name[self]
return[call[name[self].get_status, parameter[constant[PutMetricAlarm], name[params]]]] | keyword[def] identifier[put_metric_alarm] ( identifier[self] , identifier[alarm] ):
literal[string]
identifier[params] ={
literal[string] : identifier[alarm] . identifier[name] ,
literal[string] : identifier[alarm] . identifier[metric] ,
literal[string] : identifier[alarm] . identifier[namespace] ,
literal[string] : identifier[alarm] . identifier[statistic] ,
literal[string] : identifier[alarm] . identifier[comparison] ,
literal[string] : identifier[alarm] . identifier[threshold] ,
literal[string] : identifier[alarm] . identifier[evaluation_periods] ,
literal[string] : identifier[alarm] . identifier[period] ,
}
keyword[if] identifier[alarm] . identifier[actions_enabled] keyword[is] keyword[not] keyword[None] :
identifier[params] [ literal[string] ]= identifier[alarm] . identifier[actions_enabled]
keyword[if] identifier[alarm] . identifier[alarm_actions] :
identifier[self] . identifier[build_list_params] ( identifier[params] , identifier[alarm] . identifier[alarm_actions] ,
literal[string] )
keyword[if] identifier[alarm] . identifier[description] :
identifier[params] [ literal[string] ]= identifier[alarm] . identifier[description]
keyword[if] identifier[alarm] . identifier[dimensions] :
identifier[self] . identifier[build_dimension_param] ( identifier[alarm] . identifier[dimensions] , identifier[params] )
keyword[if] identifier[alarm] . identifier[insufficient_data_actions] :
identifier[self] . identifier[build_list_params] ( identifier[params] , identifier[alarm] . identifier[insufficient_data_actions] ,
literal[string] )
keyword[if] identifier[alarm] . identifier[ok_actions] :
identifier[self] . identifier[build_list_params] ( identifier[params] , identifier[alarm] . identifier[ok_actions] ,
literal[string] )
keyword[if] identifier[alarm] . identifier[unit] :
identifier[params] [ literal[string] ]= identifier[alarm] . identifier[unit]
identifier[alarm] . identifier[connection] = identifier[self]
keyword[return] identifier[self] . identifier[get_status] ( literal[string] , identifier[params] ) | def put_metric_alarm(self, alarm):
"""
Creates or updates an alarm and associates it with the specified Amazon
CloudWatch metric. Optionally, this operation can associate one or more
Amazon Simple Notification Service resources with the alarm.
When this operation creates an alarm, the alarm state is immediately
set to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is
set appropriately. Any actions associated with the StateValue is then
executed.
When updating an existing alarm, its StateValue is left unchanged.
:type alarm: boto.ec2.cloudwatch.alarm.MetricAlarm
:param alarm: MetricAlarm object.
"""
params = {'AlarmName': alarm.name, 'MetricName': alarm.metric, 'Namespace': alarm.namespace, 'Statistic': alarm.statistic, 'ComparisonOperator': alarm.comparison, 'Threshold': alarm.threshold, 'EvaluationPeriods': alarm.evaluation_periods, 'Period': alarm.period}
if alarm.actions_enabled is not None:
params['ActionsEnabled'] = alarm.actions_enabled # depends on [control=['if'], data=[]]
if alarm.alarm_actions:
self.build_list_params(params, alarm.alarm_actions, 'AlarmActions.member.%s') # depends on [control=['if'], data=[]]
if alarm.description:
params['AlarmDescription'] = alarm.description # depends on [control=['if'], data=[]]
if alarm.dimensions:
self.build_dimension_param(alarm.dimensions, params) # depends on [control=['if'], data=[]]
if alarm.insufficient_data_actions:
self.build_list_params(params, alarm.insufficient_data_actions, 'InsufficientDataActions.member.%s') # depends on [control=['if'], data=[]]
if alarm.ok_actions:
self.build_list_params(params, alarm.ok_actions, 'OKActions.member.%s') # depends on [control=['if'], data=[]]
if alarm.unit:
params['Unit'] = alarm.unit # depends on [control=['if'], data=[]]
alarm.connection = self
return self.get_status('PutMetricAlarm', params) |
def parse(argv):
"""Parse cli args."""
args = docopt(__doc__, argv=argv)
try:
call(sys.argv[2], args)
except KytosException as exception:
print("Error parsing args: {}".format(exception))
exit() | def function[parse, parameter[argv]]:
constant[Parse cli args.]
variable[args] assign[=] call[name[docopt], parameter[name[__doc__]]]
<ast.Try object at 0x7da18dc07ac0> | keyword[def] identifier[parse] ( identifier[argv] ):
literal[string]
identifier[args] = identifier[docopt] ( identifier[__doc__] , identifier[argv] = identifier[argv] )
keyword[try] :
identifier[call] ( identifier[sys] . identifier[argv] [ literal[int] ], identifier[args] )
keyword[except] identifier[KytosException] keyword[as] identifier[exception] :
identifier[print] ( literal[string] . identifier[format] ( identifier[exception] ))
identifier[exit] () | def parse(argv):
"""Parse cli args."""
args = docopt(__doc__, argv=argv)
try:
call(sys.argv[2], args) # depends on [control=['try'], data=[]]
except KytosException as exception:
print('Error parsing args: {}'.format(exception))
exit() # depends on [control=['except'], data=['exception']] |
def get_fun(fun):
'''
Return a dict of the last function called for all minions
'''
serv = _get_serv(ret=None)
minions = _get_list(serv, 'minions')
returns = serv.get_multi(minions, key_prefix='{0}:'.format(fun))
# returns = {minion: return, minion: return, ...}
ret = {}
for minion, data in six.iteritems(returns):
ret[minion] = salt.utils.json.loads(data)
return ret | def function[get_fun, parameter[fun]]:
constant[
Return a dict of the last function called for all minions
]
variable[serv] assign[=] call[name[_get_serv], parameter[]]
variable[minions] assign[=] call[name[_get_list], parameter[name[serv], constant[minions]]]
variable[returns] assign[=] call[name[serv].get_multi, parameter[name[minions]]]
variable[ret] assign[=] dictionary[[], []]
for taget[tuple[[<ast.Name object at 0x7da204621e40>, <ast.Name object at 0x7da204622f80>]]] in starred[call[name[six].iteritems, parameter[name[returns]]]] begin[:]
call[name[ret]][name[minion]] assign[=] call[name[salt].utils.json.loads, parameter[name[data]]]
return[name[ret]] | keyword[def] identifier[get_fun] ( identifier[fun] ):
literal[string]
identifier[serv] = identifier[_get_serv] ( identifier[ret] = keyword[None] )
identifier[minions] = identifier[_get_list] ( identifier[serv] , literal[string] )
identifier[returns] = identifier[serv] . identifier[get_multi] ( identifier[minions] , identifier[key_prefix] = literal[string] . identifier[format] ( identifier[fun] ))
identifier[ret] ={}
keyword[for] identifier[minion] , identifier[data] keyword[in] identifier[six] . identifier[iteritems] ( identifier[returns] ):
identifier[ret] [ identifier[minion] ]= identifier[salt] . identifier[utils] . identifier[json] . identifier[loads] ( identifier[data] )
keyword[return] identifier[ret] | def get_fun(fun):
"""
Return a dict of the last function called for all minions
"""
serv = _get_serv(ret=None)
minions = _get_list(serv, 'minions')
returns = serv.get_multi(minions, key_prefix='{0}:'.format(fun))
# returns = {minion: return, minion: return, ...}
ret = {}
for (minion, data) in six.iteritems(returns):
ret[minion] = salt.utils.json.loads(data) # depends on [control=['for'], data=[]]
return ret |
def image_gen(normalizer, denorm, sz, tfms=None, max_zoom=None, pad=0, crop_type=None,
tfm_y=None, sz_y=None, pad_mode=cv2.BORDER_REFLECT, scale=None):
"""
Generate a standard set of transformations
Arguments
---------
normalizer :
image normalizing function
denorm :
image denormalizing function
sz :
size, sz_y = sz if not specified.
tfms :
iterable collection of transformation functions
max_zoom : float,
maximum zoom
pad : int,
padding on top, left, right and bottom
crop_type :
crop type
tfm_y :
y axis specific transformations
sz_y :
y size, height
pad_mode :
cv2 padding style: repeat, reflect, etc.
Returns
-------
type : ``Transforms``
transformer for specified image operations.
See Also
--------
Transforms: the transformer object returned by this function
"""
if tfm_y is None: tfm_y=TfmType.NO
if tfms is None: tfms=[]
elif not isinstance(tfms, collections.Iterable): tfms=[tfms]
if sz_y is None: sz_y = sz
if scale is None:
scale = [RandomScale(sz, max_zoom, tfm_y=tfm_y, sz_y=sz_y) if max_zoom is not None
else Scale(sz, tfm_y, sz_y=sz_y)]
elif not is_listy(scale): scale = [scale]
if pad: scale.append(AddPadding(pad, mode=pad_mode))
if crop_type!=CropType.GOOGLENET: tfms=scale+tfms
return Transforms(sz, tfms, normalizer, denorm, crop_type,
tfm_y=tfm_y, sz_y=sz_y) | def function[image_gen, parameter[normalizer, denorm, sz, tfms, max_zoom, pad, crop_type, tfm_y, sz_y, pad_mode, scale]]:
constant[
Generate a standard set of transformations
Arguments
---------
normalizer :
image normalizing function
denorm :
image denormalizing function
sz :
size, sz_y = sz if not specified.
tfms :
iterable collection of transformation functions
max_zoom : float,
maximum zoom
pad : int,
padding on top, left, right and bottom
crop_type :
crop type
tfm_y :
y axis specific transformations
sz_y :
y size, height
pad_mode :
cv2 padding style: repeat, reflect, etc.
Returns
-------
type : ``Transforms``
transformer for specified image operations.
See Also
--------
Transforms: the transformer object returned by this function
]
if compare[name[tfm_y] is constant[None]] begin[:]
variable[tfm_y] assign[=] name[TfmType].NO
if compare[name[tfms] is constant[None]] begin[:]
variable[tfms] assign[=] list[[]]
if compare[name[sz_y] is constant[None]] begin[:]
variable[sz_y] assign[=] name[sz]
if compare[name[scale] is constant[None]] begin[:]
variable[scale] assign[=] list[[<ast.IfExp object at 0x7da1b1e14610>]]
if name[pad] begin[:]
call[name[scale].append, parameter[call[name[AddPadding], parameter[name[pad]]]]]
if compare[name[crop_type] not_equal[!=] name[CropType].GOOGLENET] begin[:]
variable[tfms] assign[=] binary_operation[name[scale] + name[tfms]]
return[call[name[Transforms], parameter[name[sz], name[tfms], name[normalizer], name[denorm], name[crop_type]]]] | keyword[def] identifier[image_gen] ( identifier[normalizer] , identifier[denorm] , identifier[sz] , identifier[tfms] = keyword[None] , identifier[max_zoom] = keyword[None] , identifier[pad] = literal[int] , identifier[crop_type] = keyword[None] ,
identifier[tfm_y] = keyword[None] , identifier[sz_y] = keyword[None] , identifier[pad_mode] = identifier[cv2] . identifier[BORDER_REFLECT] , identifier[scale] = keyword[None] ):
literal[string]
keyword[if] identifier[tfm_y] keyword[is] keyword[None] : identifier[tfm_y] = identifier[TfmType] . identifier[NO]
keyword[if] identifier[tfms] keyword[is] keyword[None] : identifier[tfms] =[]
keyword[elif] keyword[not] identifier[isinstance] ( identifier[tfms] , identifier[collections] . identifier[Iterable] ): identifier[tfms] =[ identifier[tfms] ]
keyword[if] identifier[sz_y] keyword[is] keyword[None] : identifier[sz_y] = identifier[sz]
keyword[if] identifier[scale] keyword[is] keyword[None] :
identifier[scale] =[ identifier[RandomScale] ( identifier[sz] , identifier[max_zoom] , identifier[tfm_y] = identifier[tfm_y] , identifier[sz_y] = identifier[sz_y] ) keyword[if] identifier[max_zoom] keyword[is] keyword[not] keyword[None]
keyword[else] identifier[Scale] ( identifier[sz] , identifier[tfm_y] , identifier[sz_y] = identifier[sz_y] )]
keyword[elif] keyword[not] identifier[is_listy] ( identifier[scale] ): identifier[scale] =[ identifier[scale] ]
keyword[if] identifier[pad] : identifier[scale] . identifier[append] ( identifier[AddPadding] ( identifier[pad] , identifier[mode] = identifier[pad_mode] ))
keyword[if] identifier[crop_type] != identifier[CropType] . identifier[GOOGLENET] : identifier[tfms] = identifier[scale] + identifier[tfms]
keyword[return] identifier[Transforms] ( identifier[sz] , identifier[tfms] , identifier[normalizer] , identifier[denorm] , identifier[crop_type] ,
identifier[tfm_y] = identifier[tfm_y] , identifier[sz_y] = identifier[sz_y] ) | def image_gen(normalizer, denorm, sz, tfms=None, max_zoom=None, pad=0, crop_type=None, tfm_y=None, sz_y=None, pad_mode=cv2.BORDER_REFLECT, scale=None):
"""
Generate a standard set of transformations
Arguments
---------
normalizer :
image normalizing function
denorm :
image denormalizing function
sz :
size, sz_y = sz if not specified.
tfms :
iterable collection of transformation functions
max_zoom : float,
maximum zoom
pad : int,
padding on top, left, right and bottom
crop_type :
crop type
tfm_y :
y axis specific transformations
sz_y :
y size, height
pad_mode :
cv2 padding style: repeat, reflect, etc.
Returns
-------
type : ``Transforms``
transformer for specified image operations.
See Also
--------
Transforms: the transformer object returned by this function
"""
if tfm_y is None:
tfm_y = TfmType.NO # depends on [control=['if'], data=['tfm_y']]
if tfms is None:
tfms = [] # depends on [control=['if'], data=['tfms']]
elif not isinstance(tfms, collections.Iterable):
tfms = [tfms] # depends on [control=['if'], data=[]]
if sz_y is None:
sz_y = sz # depends on [control=['if'], data=['sz_y']]
if scale is None:
scale = [RandomScale(sz, max_zoom, tfm_y=tfm_y, sz_y=sz_y) if max_zoom is not None else Scale(sz, tfm_y, sz_y=sz_y)] # depends on [control=['if'], data=['scale']]
elif not is_listy(scale):
scale = [scale] # depends on [control=['if'], data=[]]
if pad:
scale.append(AddPadding(pad, mode=pad_mode)) # depends on [control=['if'], data=[]]
if crop_type != CropType.GOOGLENET:
tfms = scale + tfms # depends on [control=['if'], data=[]]
return Transforms(sz, tfms, normalizer, denorm, crop_type, tfm_y=tfm_y, sz_y=sz_y) |
async def prepare_container(self, size, container, elem_type=None):
"""
Prepares container for serialization
:param size:
:param container:
:return:
"""
if not self.writing:
if container is None:
return gen_elem_array(size, elem_type)
fvalue = get_elem(container)
if fvalue is None:
fvalue = []
fvalue += gen_elem_array(max(0, size - len(fvalue)), elem_type)
set_elem(container, fvalue)
return fvalue | <ast.AsyncFunctionDef object at 0x7da1b245b8e0> | keyword[async] keyword[def] identifier[prepare_container] ( identifier[self] , identifier[size] , identifier[container] , identifier[elem_type] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[writing] :
keyword[if] identifier[container] keyword[is] keyword[None] :
keyword[return] identifier[gen_elem_array] ( identifier[size] , identifier[elem_type] )
identifier[fvalue] = identifier[get_elem] ( identifier[container] )
keyword[if] identifier[fvalue] keyword[is] keyword[None] :
identifier[fvalue] =[]
identifier[fvalue] += identifier[gen_elem_array] ( identifier[max] ( literal[int] , identifier[size] - identifier[len] ( identifier[fvalue] )), identifier[elem_type] )
identifier[set_elem] ( identifier[container] , identifier[fvalue] )
keyword[return] identifier[fvalue] | async def prepare_container(self, size, container, elem_type=None):
"""
Prepares container for serialization
:param size:
:param container:
:return:
"""
if not self.writing:
if container is None:
return gen_elem_array(size, elem_type) # depends on [control=['if'], data=[]]
fvalue = get_elem(container)
if fvalue is None:
fvalue = [] # depends on [control=['if'], data=['fvalue']]
fvalue += gen_elem_array(max(0, size - len(fvalue)), elem_type)
set_elem(container, fvalue)
return fvalue # depends on [control=['if'], data=[]] |
def default(self) -> typing.Optional[Profile]:
"""The name of the default profile to use, or `None`."""
found = self.database.execute(
"SELECT name, data FROM profiles WHERE selected"
" ORDER BY name LIMIT 1").fetchone()
if found is None:
return None
else:
state = json.loads(found[1])
state["name"] = found[0] # Belt-n-braces.
return Profile(**state) | def function[default, parameter[self]]:
constant[The name of the default profile to use, or `None`.]
variable[found] assign[=] call[call[name[self].database.execute, parameter[constant[SELECT name, data FROM profiles WHERE selected ORDER BY name LIMIT 1]]].fetchone, parameter[]]
if compare[name[found] is constant[None]] begin[:]
return[constant[None]] | keyword[def] identifier[default] ( identifier[self] )-> identifier[typing] . identifier[Optional] [ identifier[Profile] ]:
literal[string]
identifier[found] = identifier[self] . identifier[database] . identifier[execute] (
literal[string]
literal[string] ). identifier[fetchone] ()
keyword[if] identifier[found] keyword[is] keyword[None] :
keyword[return] keyword[None]
keyword[else] :
identifier[state] = identifier[json] . identifier[loads] ( identifier[found] [ literal[int] ])
identifier[state] [ literal[string] ]= identifier[found] [ literal[int] ]
keyword[return] identifier[Profile] (** identifier[state] ) | def default(self) -> typing.Optional[Profile]:
"""The name of the default profile to use, or `None`."""
found = self.database.execute('SELECT name, data FROM profiles WHERE selected ORDER BY name LIMIT 1').fetchone()
if found is None:
return None # depends on [control=['if'], data=[]]
else:
state = json.loads(found[1])
state['name'] = found[0] # Belt-n-braces.
return Profile(**state) |
def close(self):
"""Close the file, and for mode "w" and "a" write the ending
records."""
if self.fp is None:
return
if self.mode in ("w", "a") and self._didModify: # write ending records
count = 0
pos1 = self.fp.tell()
for zinfo in self.filelist: # write central directory
count = count + 1
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffff
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
if extra:
# Append a ZIP64 field to the extra's
extra_data = struct.pack(
'<HH' + 'Q'*len(extra),
1, 8*len(extra), *extra) + extra_data
extract_version = max(45, zinfo.extract_version)
create_version = max(45, zinfo.create_version)
else:
extract_version = zinfo.extract_version
create_version = zinfo.create_version
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print((structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset), file=sys.stderr)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = count
centDirSize = pos2 - pos1
centDirOffset = pos1
if (centDirCount >= ZIP_FILECOUNT_LIMIT or
centDirOffset > ZIP64_LIMIT or
centDirSize > ZIP64_LIMIT):
# Need to write the ZIP64 end-of-archive records
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
# check for valid comment length
if len(self.comment) >= ZIP_MAX_COMMENT:
if self.debug > 0:
msg = 'Archive comment is too long; truncating to %d bytes' \
% ZIP_MAX_COMMENT
self.comment = self.comment[:ZIP_MAX_COMMENT]
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self.comment))
self.fp.write(endrec)
self.fp.write(self.comment)
self.fp.flush()
if not self._filePassed:
self.fp.close()
self.fp = None | def function[close, parameter[self]]:
constant[Close the file, and for mode "w" and "a" write the ending
records.]
if compare[name[self].fp is constant[None]] begin[:]
return[None]
if <ast.BoolOp object at 0x7da1b09b9ea0> begin[:]
variable[count] assign[=] constant[0]
variable[pos1] assign[=] call[name[self].fp.tell, parameter[]]
for taget[name[zinfo]] in starred[name[self].filelist] begin[:]
variable[count] assign[=] binary_operation[name[count] + constant[1]]
variable[dt] assign[=] name[zinfo].date_time
variable[dosdate] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[call[name[dt]][constant[0]] - constant[1980]] <ast.LShift object at 0x7da2590d69e0> constant[9]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[call[name[dt]][constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[5]]] <ast.BitOr object at 0x7da2590d6aa0> call[name[dt]][constant[2]]]
variable[dostime] assign[=] binary_operation[binary_operation[binary_operation[call[name[dt]][constant[3]] <ast.LShift object at 0x7da2590d69e0> constant[11]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[call[name[dt]][constant[4]] <ast.LShift object at 0x7da2590d69e0> constant[5]]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[call[name[dt]][constant[5]] <ast.FloorDiv object at 0x7da2590d6bc0> constant[2]]]
variable[extra] assign[=] list[[]]
if <ast.BoolOp object at 0x7da1b09bbdf0> begin[:]
call[name[extra].append, parameter[name[zinfo].file_size]]
call[name[extra].append, parameter[name[zinfo].compress_size]]
variable[file_size] assign[=] constant[4294967295]
variable[compress_size] assign[=] constant[4294967295]
if compare[name[zinfo].header_offset greater[>] name[ZIP64_LIMIT]] begin[:]
call[name[extra].append, parameter[name[zinfo].header_offset]]
variable[header_offset] assign[=] constant[4294967295]
variable[extra_data] assign[=] name[zinfo].extra
if name[extra] begin[:]
variable[extra_data] assign[=] binary_operation[call[name[struct].pack, parameter[binary_operation[constant[<HH] + binary_operation[constant[Q] * call[name[len], parameter[name[extra]]]]], constant[1], binary_operation[constant[8] * call[name[len], parameter[name[extra]]]], <ast.Starred object at 0x7da1b0a48850>]] + name[extra_data]]
variable[extract_version] assign[=] call[name[max], parameter[constant[45], name[zinfo].extract_version]]
variable[create_version] assign[=] call[name[max], parameter[constant[45], name[zinfo].create_version]]
<ast.Try object at 0x7da1b0a482b0>
call[name[self].fp.write, parameter[name[centdir]]]
call[name[self].fp.write, parameter[name[filename]]]
call[name[self].fp.write, parameter[name[extra_data]]]
call[name[self].fp.write, parameter[name[zinfo].comment]]
variable[pos2] assign[=] call[name[self].fp.tell, parameter[]]
variable[centDirCount] assign[=] name[count]
variable[centDirSize] assign[=] binary_operation[name[pos2] - name[pos1]]
variable[centDirOffset] assign[=] name[pos1]
if <ast.BoolOp object at 0x7da1b0ae00d0> begin[:]
variable[zip64endrec] assign[=] call[name[struct].pack, parameter[name[structEndArchive64], name[stringEndArchive64], constant[44], constant[45], constant[45], constant[0], constant[0], name[centDirCount], name[centDirCount], name[centDirSize], name[centDirOffset]]]
call[name[self].fp.write, parameter[name[zip64endrec]]]
variable[zip64locrec] assign[=] call[name[struct].pack, parameter[name[structEndArchive64Locator], name[stringEndArchive64Locator], constant[0], name[pos2], constant[1]]]
call[name[self].fp.write, parameter[name[zip64locrec]]]
variable[centDirCount] assign[=] call[name[min], parameter[name[centDirCount], constant[65535]]]
variable[centDirSize] assign[=] call[name[min], parameter[name[centDirSize], constant[4294967295]]]
variable[centDirOffset] assign[=] call[name[min], parameter[name[centDirOffset], constant[4294967295]]]
if compare[call[name[len], parameter[name[self].comment]] greater_or_equal[>=] name[ZIP_MAX_COMMENT]] begin[:]
if compare[name[self].debug greater[>] constant[0]] begin[:]
variable[msg] assign[=] binary_operation[constant[Archive comment is too long; truncating to %d bytes] <ast.Mod object at 0x7da2590d6920> name[ZIP_MAX_COMMENT]]
name[self].comment assign[=] call[name[self].comment][<ast.Slice object at 0x7da1b09e8b20>]
variable[endrec] assign[=] call[name[struct].pack, parameter[name[structEndArchive], name[stringEndArchive], constant[0], constant[0], name[centDirCount], name[centDirCount], name[centDirSize], name[centDirOffset], call[name[len], parameter[name[self].comment]]]]
call[name[self].fp.write, parameter[name[endrec]]]
call[name[self].fp.write, parameter[name[self].comment]]
call[name[self].fp.flush, parameter[]]
if <ast.UnaryOp object at 0x7da20c794bb0> begin[:]
call[name[self].fp.close, parameter[]]
name[self].fp assign[=] constant[None] | keyword[def] identifier[close] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[fp] keyword[is] keyword[None] :
keyword[return]
keyword[if] identifier[self] . identifier[mode] keyword[in] ( literal[string] , literal[string] ) keyword[and] identifier[self] . identifier[_didModify] :
identifier[count] = literal[int]
identifier[pos1] = identifier[self] . identifier[fp] . identifier[tell] ()
keyword[for] identifier[zinfo] keyword[in] identifier[self] . identifier[filelist] :
identifier[count] = identifier[count] + literal[int]
identifier[dt] = identifier[zinfo] . identifier[date_time]
identifier[dosdate] =( identifier[dt] [ literal[int] ]- literal[int] )<< literal[int] | identifier[dt] [ literal[int] ]<< literal[int] | identifier[dt] [ literal[int] ]
identifier[dostime] = identifier[dt] [ literal[int] ]<< literal[int] | identifier[dt] [ literal[int] ]<< literal[int] |( identifier[dt] [ literal[int] ]// literal[int] )
identifier[extra] =[]
keyword[if] identifier[zinfo] . identifier[file_size] > identifier[ZIP64_LIMIT] keyword[or] identifier[zinfo] . identifier[compress_size] > identifier[ZIP64_LIMIT] :
identifier[extra] . identifier[append] ( identifier[zinfo] . identifier[file_size] )
identifier[extra] . identifier[append] ( identifier[zinfo] . identifier[compress_size] )
identifier[file_size] = literal[int]
identifier[compress_size] = literal[int]
keyword[else] :
identifier[file_size] = identifier[zinfo] . identifier[file_size]
identifier[compress_size] = identifier[zinfo] . identifier[compress_size]
keyword[if] identifier[zinfo] . identifier[header_offset] > identifier[ZIP64_LIMIT] :
identifier[extra] . identifier[append] ( identifier[zinfo] . identifier[header_offset] )
identifier[header_offset] = literal[int]
keyword[else] :
identifier[header_offset] = identifier[zinfo] . identifier[header_offset]
identifier[extra_data] = identifier[zinfo] . identifier[extra]
keyword[if] identifier[extra] :
identifier[extra_data] = identifier[struct] . identifier[pack] (
literal[string] + literal[string] * identifier[len] ( identifier[extra] ),
literal[int] , literal[int] * identifier[len] ( identifier[extra] ),* identifier[extra] )+ identifier[extra_data]
identifier[extract_version] = identifier[max] ( literal[int] , identifier[zinfo] . identifier[extract_version] )
identifier[create_version] = identifier[max] ( literal[int] , identifier[zinfo] . identifier[create_version] )
keyword[else] :
identifier[extract_version] = identifier[zinfo] . identifier[extract_version]
identifier[create_version] = identifier[zinfo] . identifier[create_version]
keyword[try] :
identifier[filename] , identifier[flag_bits] = identifier[zinfo] . identifier[_encodeFilenameFlags] ()
identifier[centdir] = identifier[struct] . identifier[pack] ( identifier[structCentralDir] ,
identifier[stringCentralDir] , identifier[create_version] ,
identifier[zinfo] . identifier[create_system] , identifier[extract_version] , identifier[zinfo] . identifier[reserved] ,
identifier[flag_bits] , identifier[zinfo] . identifier[compress_type] , identifier[dostime] , identifier[dosdate] ,
identifier[zinfo] . identifier[CRC] , identifier[compress_size] , identifier[file_size] ,
identifier[len] ( identifier[filename] ), identifier[len] ( identifier[extra_data] ), identifier[len] ( identifier[zinfo] . identifier[comment] ),
literal[int] , identifier[zinfo] . identifier[internal_attr] , identifier[zinfo] . identifier[external_attr] ,
identifier[header_offset] )
keyword[except] identifier[DeprecationWarning] :
identifier[print] (( identifier[structCentralDir] ,
identifier[stringCentralDir] , identifier[create_version] ,
identifier[zinfo] . identifier[create_system] , identifier[extract_version] , identifier[zinfo] . identifier[reserved] ,
identifier[zinfo] . identifier[flag_bits] , identifier[zinfo] . identifier[compress_type] , identifier[dostime] , identifier[dosdate] ,
identifier[zinfo] . identifier[CRC] , identifier[compress_size] , identifier[file_size] ,
identifier[len] ( identifier[zinfo] . identifier[filename] ), identifier[len] ( identifier[extra_data] ), identifier[len] ( identifier[zinfo] . identifier[comment] ),
literal[int] , identifier[zinfo] . identifier[internal_attr] , identifier[zinfo] . identifier[external_attr] ,
identifier[header_offset] ), identifier[file] = identifier[sys] . identifier[stderr] )
keyword[raise]
identifier[self] . identifier[fp] . identifier[write] ( identifier[centdir] )
identifier[self] . identifier[fp] . identifier[write] ( identifier[filename] )
identifier[self] . identifier[fp] . identifier[write] ( identifier[extra_data] )
identifier[self] . identifier[fp] . identifier[write] ( identifier[zinfo] . identifier[comment] )
identifier[pos2] = identifier[self] . identifier[fp] . identifier[tell] ()
identifier[centDirCount] = identifier[count]
identifier[centDirSize] = identifier[pos2] - identifier[pos1]
identifier[centDirOffset] = identifier[pos1]
keyword[if] ( identifier[centDirCount] >= identifier[ZIP_FILECOUNT_LIMIT] keyword[or]
identifier[centDirOffset] > identifier[ZIP64_LIMIT] keyword[or]
identifier[centDirSize] > identifier[ZIP64_LIMIT] ):
identifier[zip64endrec] = identifier[struct] . identifier[pack] (
identifier[structEndArchive64] , identifier[stringEndArchive64] ,
literal[int] , literal[int] , literal[int] , literal[int] , literal[int] , identifier[centDirCount] , identifier[centDirCount] ,
identifier[centDirSize] , identifier[centDirOffset] )
identifier[self] . identifier[fp] . identifier[write] ( identifier[zip64endrec] )
identifier[zip64locrec] = identifier[struct] . identifier[pack] (
identifier[structEndArchive64Locator] ,
identifier[stringEndArchive64Locator] , literal[int] , identifier[pos2] , literal[int] )
identifier[self] . identifier[fp] . identifier[write] ( identifier[zip64locrec] )
identifier[centDirCount] = identifier[min] ( identifier[centDirCount] , literal[int] )
identifier[centDirSize] = identifier[min] ( identifier[centDirSize] , literal[int] )
identifier[centDirOffset] = identifier[min] ( identifier[centDirOffset] , literal[int] )
keyword[if] identifier[len] ( identifier[self] . identifier[comment] )>= identifier[ZIP_MAX_COMMENT] :
keyword[if] identifier[self] . identifier[debug] > literal[int] :
identifier[msg] = literal[string] % identifier[ZIP_MAX_COMMENT]
identifier[self] . identifier[comment] = identifier[self] . identifier[comment] [: identifier[ZIP_MAX_COMMENT] ]
identifier[endrec] = identifier[struct] . identifier[pack] ( identifier[structEndArchive] , identifier[stringEndArchive] ,
literal[int] , literal[int] , identifier[centDirCount] , identifier[centDirCount] ,
identifier[centDirSize] , identifier[centDirOffset] , identifier[len] ( identifier[self] . identifier[comment] ))
identifier[self] . identifier[fp] . identifier[write] ( identifier[endrec] )
identifier[self] . identifier[fp] . identifier[write] ( identifier[self] . identifier[comment] )
identifier[self] . identifier[fp] . identifier[flush] ()
keyword[if] keyword[not] identifier[self] . identifier[_filePassed] :
identifier[self] . identifier[fp] . identifier[close] ()
identifier[self] . identifier[fp] = keyword[None] | def close(self):
"""Close the file, and for mode "w" and "a" write the ending
records."""
if self.fp is None:
return # depends on [control=['if'], data=[]]
if self.mode in ('w', 'a') and self._didModify: # write ending records
count = 0
pos1 = self.fp.tell()
for zinfo in self.filelist: # write central directory
count = count + 1
dt = zinfo.date_time
dosdate = dt[0] - 1980 << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | dt[5] // 2
extra = []
if zinfo.file_size > ZIP64_LIMIT or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 4294967295
compress_size = 4294967295 # depends on [control=['if'], data=[]]
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 4294967295 # depends on [control=['if'], data=[]]
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
if extra:
# Append a ZIP64 field to the extra's
extra_data = struct.pack('<HH' + 'Q' * len(extra), 1, 8 * len(extra), *extra) + extra_data
extract_version = max(45, zinfo.extract_version)
create_version = max(45, zinfo.create_version) # depends on [control=['if'], data=[]]
else:
extract_version = zinfo.extract_version
create_version = zinfo.create_version
try:
(filename, flag_bits) = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir, stringCentralDir, create_version, zinfo.create_system, extract_version, zinfo.reserved, flag_bits, zinfo.compress_type, dostime, dosdate, zinfo.CRC, compress_size, file_size, len(filename), len(extra_data), len(zinfo.comment), 0, zinfo.internal_attr, zinfo.external_attr, header_offset) # depends on [control=['try'], data=[]]
except DeprecationWarning:
print((structCentralDir, stringCentralDir, create_version, zinfo.create_system, extract_version, zinfo.reserved, zinfo.flag_bits, zinfo.compress_type, dostime, dosdate, zinfo.CRC, compress_size, file_size, len(zinfo.filename), len(extra_data), len(zinfo.comment), 0, zinfo.internal_attr, zinfo.external_attr, header_offset), file=sys.stderr)
raise # depends on [control=['except'], data=[]]
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment) # depends on [control=['for'], data=['zinfo']]
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = count
centDirSize = pos2 - pos1
centDirOffset = pos1
if centDirCount >= ZIP_FILECOUNT_LIMIT or centDirOffset > ZIP64_LIMIT or centDirSize > ZIP64_LIMIT:
# Need to write the ZIP64 end-of-archive records
zip64endrec = struct.pack(structEndArchive64, stringEndArchive64, 44, 45, 45, 0, 0, centDirCount, centDirCount, centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(structEndArchive64Locator, stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 65535)
centDirSize = min(centDirSize, 4294967295)
centDirOffset = min(centDirOffset, 4294967295) # depends on [control=['if'], data=[]]
# check for valid comment length
if len(self.comment) >= ZIP_MAX_COMMENT:
if self.debug > 0:
msg = 'Archive comment is too long; truncating to %d bytes' % ZIP_MAX_COMMENT # depends on [control=['if'], data=[]]
self.comment = self.comment[:ZIP_MAX_COMMENT] # depends on [control=['if'], data=['ZIP_MAX_COMMENT']]
endrec = struct.pack(structEndArchive, stringEndArchive, 0, 0, centDirCount, centDirCount, centDirSize, centDirOffset, len(self.comment))
self.fp.write(endrec)
self.fp.write(self.comment)
self.fp.flush() # depends on [control=['if'], data=[]]
if not self._filePassed:
self.fp.close() # depends on [control=['if'], data=[]]
self.fp = None |
def extend_config(config, config_items):
"""
We are handling config value setting like this for a cleaner api.
Users just need to pass in a named param to this source and we can
dynamically generate a config object for it.
"""
for key, val in list(config_items.items()):
if hasattr(config, key):
setattr(config, key, val)
return config | def function[extend_config, parameter[config, config_items]]:
constant[
We are handling config value setting like this for a cleaner api.
Users just need to pass in a named param to this source and we can
dynamically generate a config object for it.
]
for taget[tuple[[<ast.Name object at 0x7da1b0862b90>, <ast.Name object at 0x7da1b08626e0>]]] in starred[call[name[list], parameter[call[name[config_items].items, parameter[]]]]] begin[:]
if call[name[hasattr], parameter[name[config], name[key]]] begin[:]
call[name[setattr], parameter[name[config], name[key], name[val]]]
return[name[config]] | keyword[def] identifier[extend_config] ( identifier[config] , identifier[config_items] ):
literal[string]
keyword[for] identifier[key] , identifier[val] keyword[in] identifier[list] ( identifier[config_items] . identifier[items] ()):
keyword[if] identifier[hasattr] ( identifier[config] , identifier[key] ):
identifier[setattr] ( identifier[config] , identifier[key] , identifier[val] )
keyword[return] identifier[config] | def extend_config(config, config_items):
"""
We are handling config value setting like this for a cleaner api.
Users just need to pass in a named param to this source and we can
dynamically generate a config object for it.
"""
for (key, val) in list(config_items.items()):
if hasattr(config, key):
setattr(config, key, val) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return config |
def clean(self):
"""
Make sure there is at least a translation has been filled in. If a
default language has been specified, make sure that it exists amongst
translations.
"""
# First make sure the super's clean method is called upon.
super(TranslationFormSet, self).clean()
if settings.HIDE_LANGUAGE:
return
if len(self.forms) > 0:
# If a default language has been provided, make sure a translation
# is available
if settings.DEFAULT_LANGUAGE and not any(self.errors):
# Don't bother validating the formset unless each form is
# valid on its own. Reference:
# http://docs.djangoproject.com/en/dev/topics/forms/formsets/#custom-formset-validation
for form in self.forms:
language_code = form.cleaned_data.get(
'language_code', None
)
if language_code == settings.DEFAULT_LANGUAGE:
# All is good, don't bother checking any further
return
raise forms.ValidationError(_(
'No translation provided for default language \'%s\'.'
) % settings.DEFAULT_LANGUAGE)
else:
raise forms.ValidationError(
_('At least one translation should be provided.')
) | def function[clean, parameter[self]]:
constant[
Make sure there is at least a translation has been filled in. If a
default language has been specified, make sure that it exists amongst
translations.
]
call[call[name[super], parameter[name[TranslationFormSet], name[self]]].clean, parameter[]]
if name[settings].HIDE_LANGUAGE begin[:]
return[None]
if compare[call[name[len], parameter[name[self].forms]] greater[>] constant[0]] begin[:]
if <ast.BoolOp object at 0x7da2047ead70> begin[:]
for taget[name[form]] in starred[name[self].forms] begin[:]
variable[language_code] assign[=] call[name[form].cleaned_data.get, parameter[constant[language_code], constant[None]]]
if compare[name[language_code] equal[==] name[settings].DEFAULT_LANGUAGE] begin[:]
return[None]
<ast.Raise object at 0x7da2047e8880> | keyword[def] identifier[clean] ( identifier[self] ):
literal[string]
identifier[super] ( identifier[TranslationFormSet] , identifier[self] ). identifier[clean] ()
keyword[if] identifier[settings] . identifier[HIDE_LANGUAGE] :
keyword[return]
keyword[if] identifier[len] ( identifier[self] . identifier[forms] )> literal[int] :
keyword[if] identifier[settings] . identifier[DEFAULT_LANGUAGE] keyword[and] keyword[not] identifier[any] ( identifier[self] . identifier[errors] ):
keyword[for] identifier[form] keyword[in] identifier[self] . identifier[forms] :
identifier[language_code] = identifier[form] . identifier[cleaned_data] . identifier[get] (
literal[string] , keyword[None]
)
keyword[if] identifier[language_code] == identifier[settings] . identifier[DEFAULT_LANGUAGE] :
keyword[return]
keyword[raise] identifier[forms] . identifier[ValidationError] ( identifier[_] (
literal[string]
)% identifier[settings] . identifier[DEFAULT_LANGUAGE] )
keyword[else] :
keyword[raise] identifier[forms] . identifier[ValidationError] (
identifier[_] ( literal[string] )
) | def clean(self):
"""
Make sure there is at least a translation has been filled in. If a
default language has been specified, make sure that it exists amongst
translations.
"""
# First make sure the super's clean method is called upon.
super(TranslationFormSet, self).clean()
if settings.HIDE_LANGUAGE:
return # depends on [control=['if'], data=[]]
if len(self.forms) > 0:
# If a default language has been provided, make sure a translation
# is available
if settings.DEFAULT_LANGUAGE and (not any(self.errors)):
# Don't bother validating the formset unless each form is
# valid on its own. Reference:
# http://docs.djangoproject.com/en/dev/topics/forms/formsets/#custom-formset-validation
for form in self.forms:
language_code = form.cleaned_data.get('language_code', None)
if language_code == settings.DEFAULT_LANGUAGE:
# All is good, don't bother checking any further
return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['form']]
raise forms.ValidationError(_("No translation provided for default language '%s'.") % settings.DEFAULT_LANGUAGE) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
raise forms.ValidationError(_('At least one translation should be provided.')) |
def get_eval_func(obj, feature, slice=np.s_[...]):
"""
Return the function of interest (kernel or mean) for the expectation
depending on the type of :obj: and whether any features are given
"""
if feature is not None:
# kernel + feature combination
if not isinstance(feature, InducingFeature) or not isinstance(obj, kernels.Kernel):
raise TypeError("If `feature` is supplied, `obj` must be a kernel.")
return lambda x: tf.transpose(Kuf(feature, obj, x))[slice]
elif isinstance(obj, mean_functions.MeanFunction):
return lambda x: obj(x)[slice]
elif isinstance(obj, kernels.Kernel):
return lambda x: obj.Kdiag(x)
else:
raise NotImplementedError() | def function[get_eval_func, parameter[obj, feature, slice]]:
constant[
Return the function of interest (kernel or mean) for the expectation
depending on the type of :obj: and whether any features are given
]
if compare[name[feature] is_not constant[None]] begin[:]
if <ast.BoolOp object at 0x7da20c7cb220> begin[:]
<ast.Raise object at 0x7da20c7c9240>
return[<ast.Lambda object at 0x7da20c7c8310>] | keyword[def] identifier[get_eval_func] ( identifier[obj] , identifier[feature] , identifier[slice] = identifier[np] . identifier[s_] [...]):
literal[string]
keyword[if] identifier[feature] keyword[is] keyword[not] keyword[None] :
keyword[if] keyword[not] identifier[isinstance] ( identifier[feature] , identifier[InducingFeature] ) keyword[or] keyword[not] identifier[isinstance] ( identifier[obj] , identifier[kernels] . identifier[Kernel] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[return] keyword[lambda] identifier[x] : identifier[tf] . identifier[transpose] ( identifier[Kuf] ( identifier[feature] , identifier[obj] , identifier[x] ))[ identifier[slice] ]
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[mean_functions] . identifier[MeanFunction] ):
keyword[return] keyword[lambda] identifier[x] : identifier[obj] ( identifier[x] )[ identifier[slice] ]
keyword[elif] identifier[isinstance] ( identifier[obj] , identifier[kernels] . identifier[Kernel] ):
keyword[return] keyword[lambda] identifier[x] : identifier[obj] . identifier[Kdiag] ( identifier[x] )
keyword[else] :
keyword[raise] identifier[NotImplementedError] () | def get_eval_func(obj, feature, slice=np.s_[...]):
"""
Return the function of interest (kernel or mean) for the expectation
depending on the type of :obj: and whether any features are given
"""
if feature is not None:
# kernel + feature combination
if not isinstance(feature, InducingFeature) or not isinstance(obj, kernels.Kernel):
raise TypeError('If `feature` is supplied, `obj` must be a kernel.') # depends on [control=['if'], data=[]]
return lambda x: tf.transpose(Kuf(feature, obj, x))[slice] # depends on [control=['if'], data=['feature']]
elif isinstance(obj, mean_functions.MeanFunction):
return lambda x: obj(x)[slice] # depends on [control=['if'], data=[]]
elif isinstance(obj, kernels.Kernel):
return lambda x: obj.Kdiag(x) # depends on [control=['if'], data=[]]
else:
raise NotImplementedError() |
def _factln(num):
# type: (int) -> float
"""
Computes logfactorial regularly for tractable numbers, uses Ramanujans approximation otherwise.
"""
if num < 20:
log_factorial = log(factorial(num))
else:
log_factorial = num * log(num) - num + log(num * (1 + 4 * num * (
1 + 2 * num))) / 6.0 + log(pi) / 2
return log_factorial | def function[_factln, parameter[num]]:
constant[
Computes logfactorial regularly for tractable numbers, uses Ramanujans approximation otherwise.
]
if compare[name[num] less[<] constant[20]] begin[:]
variable[log_factorial] assign[=] call[name[log], parameter[call[name[factorial], parameter[name[num]]]]]
return[name[log_factorial]] | keyword[def] identifier[_factln] ( identifier[num] ):
literal[string]
keyword[if] identifier[num] < literal[int] :
identifier[log_factorial] = identifier[log] ( identifier[factorial] ( identifier[num] ))
keyword[else] :
identifier[log_factorial] = identifier[num] * identifier[log] ( identifier[num] )- identifier[num] + identifier[log] ( identifier[num] *( literal[int] + literal[int] * identifier[num] *(
literal[int] + literal[int] * identifier[num] )))/ literal[int] + identifier[log] ( identifier[pi] )/ literal[int]
keyword[return] identifier[log_factorial] | def _factln(num):
# type: (int) -> float
'\n Computes logfactorial regularly for tractable numbers, uses Ramanujans approximation otherwise.\n '
if num < 20:
log_factorial = log(factorial(num)) # depends on [control=['if'], data=['num']]
else:
log_factorial = num * log(num) - num + log(num * (1 + 4 * num * (1 + 2 * num))) / 6.0 + log(pi) / 2
return log_factorial |
def build_archive(cls, **kwargs):
"""Return the singleton `JobArchive` instance, building it if needed """
if cls._archive is None:
cls._archive = cls(**kwargs)
return cls._archive | def function[build_archive, parameter[cls]]:
constant[Return the singleton `JobArchive` instance, building it if needed ]
if compare[name[cls]._archive is constant[None]] begin[:]
name[cls]._archive assign[=] call[name[cls], parameter[]]
return[name[cls]._archive] | keyword[def] identifier[build_archive] ( identifier[cls] ,** identifier[kwargs] ):
literal[string]
keyword[if] identifier[cls] . identifier[_archive] keyword[is] keyword[None] :
identifier[cls] . identifier[_archive] = identifier[cls] (** identifier[kwargs] )
keyword[return] identifier[cls] . identifier[_archive] | def build_archive(cls, **kwargs):
"""Return the singleton `JobArchive` instance, building it if needed """
if cls._archive is None:
cls._archive = cls(**kwargs) # depends on [control=['if'], data=[]]
return cls._archive |
def __gzip(filename):
""" Compress a file returning the new filename (.gz)
"""
zipname = filename + '.gz'
file_pointer = open(filename,'rb')
zip_pointer = gzip.open(zipname,'wb')
zip_pointer.writelines(file_pointer)
file_pointer.close()
zip_pointer.close()
return zipname | def function[__gzip, parameter[filename]]:
constant[ Compress a file returning the new filename (.gz)
]
variable[zipname] assign[=] binary_operation[name[filename] + constant[.gz]]
variable[file_pointer] assign[=] call[name[open], parameter[name[filename], constant[rb]]]
variable[zip_pointer] assign[=] call[name[gzip].open, parameter[name[zipname], constant[wb]]]
call[name[zip_pointer].writelines, parameter[name[file_pointer]]]
call[name[file_pointer].close, parameter[]]
call[name[zip_pointer].close, parameter[]]
return[name[zipname]] | keyword[def] identifier[__gzip] ( identifier[filename] ):
literal[string]
identifier[zipname] = identifier[filename] + literal[string]
identifier[file_pointer] = identifier[open] ( identifier[filename] , literal[string] )
identifier[zip_pointer] = identifier[gzip] . identifier[open] ( identifier[zipname] , literal[string] )
identifier[zip_pointer] . identifier[writelines] ( identifier[file_pointer] )
identifier[file_pointer] . identifier[close] ()
identifier[zip_pointer] . identifier[close] ()
keyword[return] identifier[zipname] | def __gzip(filename):
""" Compress a file returning the new filename (.gz)
"""
zipname = filename + '.gz'
file_pointer = open(filename, 'rb')
zip_pointer = gzip.open(zipname, 'wb')
zip_pointer.writelines(file_pointer)
file_pointer.close()
zip_pointer.close()
return zipname |
def add_ability(self, phase, ability):
"""Add the given ability to this Card under the given phase. Returns
the length of the abilities for the given phase after the addition.
"""
if phase not in self.abilities:
self.abilities[phase] = []
self.abilities[phase].append(ability)
return len(self.abilities[phase]) | def function[add_ability, parameter[self, phase, ability]]:
constant[Add the given ability to this Card under the given phase. Returns
the length of the abilities for the given phase after the addition.
]
if compare[name[phase] <ast.NotIn object at 0x7da2590d7190> name[self].abilities] begin[:]
call[name[self].abilities][name[phase]] assign[=] list[[]]
call[call[name[self].abilities][name[phase]].append, parameter[name[ability]]]
return[call[name[len], parameter[call[name[self].abilities][name[phase]]]]] | keyword[def] identifier[add_ability] ( identifier[self] , identifier[phase] , identifier[ability] ):
literal[string]
keyword[if] identifier[phase] keyword[not] keyword[in] identifier[self] . identifier[abilities] :
identifier[self] . identifier[abilities] [ identifier[phase] ]=[]
identifier[self] . identifier[abilities] [ identifier[phase] ]. identifier[append] ( identifier[ability] )
keyword[return] identifier[len] ( identifier[self] . identifier[abilities] [ identifier[phase] ]) | def add_ability(self, phase, ability):
"""Add the given ability to this Card under the given phase. Returns
the length of the abilities for the given phase after the addition.
"""
if phase not in self.abilities:
self.abilities[phase] = [] # depends on [control=['if'], data=['phase']]
self.abilities[phase].append(ability)
return len(self.abilities[phase]) |
def deep_merge(source, dest):
"""
Deep merges source dict into dest dict.
This code was taken directly from the mongothon project:
https://github.com/gamechanger/mongothon/tree/master/mongothon
"""
for key, value in source.items():
if key in dest:
if isinstance(value, dict) and isinstance(dest[key], dict):
deep_merge(value, dest[key])
continue
elif isinstance(value, list) and isinstance(dest[key], list):
for item in value:
if item not in dest[key]:
dest[key].append(item)
continue
dest[key] = value | def function[deep_merge, parameter[source, dest]]:
constant[
Deep merges source dict into dest dict.
This code was taken directly from the mongothon project:
https://github.com/gamechanger/mongothon/tree/master/mongothon
]
for taget[tuple[[<ast.Name object at 0x7da1b0c883d0>, <ast.Name object at 0x7da1b0c89600>]]] in starred[call[name[source].items, parameter[]]] begin[:]
if compare[name[key] in name[dest]] begin[:]
if <ast.BoolOp object at 0x7da1b0c8add0> begin[:]
call[name[deep_merge], parameter[name[value], call[name[dest]][name[key]]]]
continue
call[name[dest]][name[key]] assign[=] name[value] | keyword[def] identifier[deep_merge] ( identifier[source] , identifier[dest] ):
literal[string]
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[source] . identifier[items] ():
keyword[if] identifier[key] keyword[in] identifier[dest] :
keyword[if] identifier[isinstance] ( identifier[value] , identifier[dict] ) keyword[and] identifier[isinstance] ( identifier[dest] [ identifier[key] ], identifier[dict] ):
identifier[deep_merge] ( identifier[value] , identifier[dest] [ identifier[key] ])
keyword[continue]
keyword[elif] identifier[isinstance] ( identifier[value] , identifier[list] ) keyword[and] identifier[isinstance] ( identifier[dest] [ identifier[key] ], identifier[list] ):
keyword[for] identifier[item] keyword[in] identifier[value] :
keyword[if] identifier[item] keyword[not] keyword[in] identifier[dest] [ identifier[key] ]:
identifier[dest] [ identifier[key] ]. identifier[append] ( identifier[item] )
keyword[continue]
identifier[dest] [ identifier[key] ]= identifier[value] | def deep_merge(source, dest):
"""
Deep merges source dict into dest dict.
This code was taken directly from the mongothon project:
https://github.com/gamechanger/mongothon/tree/master/mongothon
"""
for (key, value) in source.items():
if key in dest:
if isinstance(value, dict) and isinstance(dest[key], dict):
deep_merge(value, dest[key])
continue # depends on [control=['if'], data=[]]
elif isinstance(value, list) and isinstance(dest[key], list):
for item in value:
if item not in dest[key]:
dest[key].append(item) # depends on [control=['if'], data=['item']] # depends on [control=['for'], data=['item']]
continue # depends on [control=['if'], data=[]] # depends on [control=['if'], data=['key', 'dest']]
dest[key] = value # depends on [control=['for'], data=[]] |
def as_euler_angles(q):
"""Open Pandora's Box
If somebody is trying to make you use Euler angles, tell them no, and
walk away, and go and tell your mum.
You don't want to use Euler angles. They are awful. Stay away. It's
one thing to convert from Euler angles to quaternions; at least you're
moving in the right direction. But to go the other way?! It's just not
right.
Assumes the Euler angles correspond to the quaternion R via
R = exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2)
The angles are naturally in radians.
NOTE: Before opening an issue reporting something "wrong" with this
function, be sure to read all of the following page, *especially* the
very last section about opening issues or pull requests.
<https://github.com/moble/quaternion/wiki/Euler-angles-are-horrible>
Parameters
----------
q: quaternion or array of quaternions
The quaternion(s) need not be normalized, but must all be nonzero
Returns
-------
alpha_beta_gamma: float array
Output shape is q.shape+(3,). These represent the angles (alpha,
beta, gamma) in radians, where the normalized input quaternion
represents `exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2)`.
Raises
------
AllHell
...if you try to actually use Euler angles, when you could have
been using quaternions like a sensible person.
"""
alpha_beta_gamma = np.empty(q.shape + (3,), dtype=np.float)
n = np.norm(q)
q = as_float_array(q)
alpha_beta_gamma[..., 0] = np.arctan2(q[..., 3], q[..., 0]) + np.arctan2(-q[..., 1], q[..., 2])
alpha_beta_gamma[..., 1] = 2*np.arccos(np.sqrt((q[..., 0]**2 + q[..., 3]**2)/n))
alpha_beta_gamma[..., 2] = np.arctan2(q[..., 3], q[..., 0]) - np.arctan2(-q[..., 1], q[..., 2])
return alpha_beta_gamma | def function[as_euler_angles, parameter[q]]:
constant[Open Pandora's Box
If somebody is trying to make you use Euler angles, tell them no, and
walk away, and go and tell your mum.
You don't want to use Euler angles. They are awful. Stay away. It's
one thing to convert from Euler angles to quaternions; at least you're
moving in the right direction. But to go the other way?! It's just not
right.
Assumes the Euler angles correspond to the quaternion R via
R = exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2)
The angles are naturally in radians.
NOTE: Before opening an issue reporting something "wrong" with this
function, be sure to read all of the following page, *especially* the
very last section about opening issues or pull requests.
<https://github.com/moble/quaternion/wiki/Euler-angles-are-horrible>
Parameters
----------
q: quaternion or array of quaternions
The quaternion(s) need not be normalized, but must all be nonzero
Returns
-------
alpha_beta_gamma: float array
Output shape is q.shape+(3,). These represent the angles (alpha,
beta, gamma) in radians, where the normalized input quaternion
represents `exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2)`.
Raises
------
AllHell
...if you try to actually use Euler angles, when you could have
been using quaternions like a sensible person.
]
variable[alpha_beta_gamma] assign[=] call[name[np].empty, parameter[binary_operation[name[q].shape + tuple[[<ast.Constant object at 0x7da1b1db7700>]]]]]
variable[n] assign[=] call[name[np].norm, parameter[name[q]]]
variable[q] assign[=] call[name[as_float_array], parameter[name[q]]]
call[name[alpha_beta_gamma]][tuple[[<ast.Constant object at 0x7da1b1db7130>, <ast.Constant object at 0x7da1b1db7a30>]]] assign[=] binary_operation[call[name[np].arctan2, parameter[call[name[q]][tuple[[<ast.Constant object at 0x7da1b1db4760>, <ast.Constant object at 0x7da1b1db7970>]]], call[name[q]][tuple[[<ast.Constant object at 0x7da1b1db46a0>, <ast.Constant object at 0x7da1b1db47c0>]]]]] + call[name[np].arctan2, parameter[<ast.UnaryOp object at 0x7da1b1db74f0>, call[name[q]][tuple[[<ast.Constant object at 0x7da1b1db7340>, <ast.Constant object at 0x7da1b1db4820>]]]]]]
call[name[alpha_beta_gamma]][tuple[[<ast.Constant object at 0x7da1b1db7160>, <ast.Constant object at 0x7da1b1db4610>]]] assign[=] binary_operation[constant[2] * call[name[np].arccos, parameter[call[name[np].sqrt, parameter[binary_operation[binary_operation[binary_operation[call[name[q]][tuple[[<ast.Constant object at 0x7da1b1db4130>, <ast.Constant object at 0x7da1b1db79d0>]]] ** constant[2]] + binary_operation[call[name[q]][tuple[[<ast.Constant object at 0x7da1b1e05ab0>, <ast.Constant object at 0x7da1b1e05a80>]]] ** constant[2]]] / name[n]]]]]]]
call[name[alpha_beta_gamma]][tuple[[<ast.Constant object at 0x7da1b1e05930>, <ast.Constant object at 0x7da1b1e05900>]]] assign[=] binary_operation[call[name[np].arctan2, parameter[call[name[q]][tuple[[<ast.Constant object at 0x7da1b1e05780>, <ast.Constant object at 0x7da1b1e05750>]]], call[name[q]][tuple[[<ast.Constant object at 0x7da1b1e05690>, <ast.Constant object at 0x7da1b1e05660>]]]]] - call[name[np].arctan2, parameter[<ast.UnaryOp object at 0x7da1b1e055a0>, call[name[q]][tuple[[<ast.Constant object at 0x7da1b1e042b0>, <ast.Constant object at 0x7da1b1e042e0>]]]]]]
return[name[alpha_beta_gamma]] | keyword[def] identifier[as_euler_angles] ( identifier[q] ):
literal[string]
identifier[alpha_beta_gamma] = identifier[np] . identifier[empty] ( identifier[q] . identifier[shape] +( literal[int] ,), identifier[dtype] = identifier[np] . identifier[float] )
identifier[n] = identifier[np] . identifier[norm] ( identifier[q] )
identifier[q] = identifier[as_float_array] ( identifier[q] )
identifier[alpha_beta_gamma] [..., literal[int] ]= identifier[np] . identifier[arctan2] ( identifier[q] [..., literal[int] ], identifier[q] [..., literal[int] ])+ identifier[np] . identifier[arctan2] (- identifier[q] [..., literal[int] ], identifier[q] [..., literal[int] ])
identifier[alpha_beta_gamma] [..., literal[int] ]= literal[int] * identifier[np] . identifier[arccos] ( identifier[np] . identifier[sqrt] (( identifier[q] [..., literal[int] ]** literal[int] + identifier[q] [..., literal[int] ]** literal[int] )/ identifier[n] ))
identifier[alpha_beta_gamma] [..., literal[int] ]= identifier[np] . identifier[arctan2] ( identifier[q] [..., literal[int] ], identifier[q] [..., literal[int] ])- identifier[np] . identifier[arctan2] (- identifier[q] [..., literal[int] ], identifier[q] [..., literal[int] ])
keyword[return] identifier[alpha_beta_gamma] | def as_euler_angles(q):
"""Open Pandora's Box
If somebody is trying to make you use Euler angles, tell them no, and
walk away, and go and tell your mum.
You don't want to use Euler angles. They are awful. Stay away. It's
one thing to convert from Euler angles to quaternions; at least you're
moving in the right direction. But to go the other way?! It's just not
right.
Assumes the Euler angles correspond to the quaternion R via
R = exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2)
The angles are naturally in radians.
NOTE: Before opening an issue reporting something "wrong" with this
function, be sure to read all of the following page, *especially* the
very last section about opening issues or pull requests.
<https://github.com/moble/quaternion/wiki/Euler-angles-are-horrible>
Parameters
----------
q: quaternion or array of quaternions
The quaternion(s) need not be normalized, but must all be nonzero
Returns
-------
alpha_beta_gamma: float array
Output shape is q.shape+(3,). These represent the angles (alpha,
beta, gamma) in radians, where the normalized input quaternion
represents `exp(alpha*z/2) * exp(beta*y/2) * exp(gamma*z/2)`.
Raises
------
AllHell
...if you try to actually use Euler angles, when you could have
been using quaternions like a sensible person.
"""
alpha_beta_gamma = np.empty(q.shape + (3,), dtype=np.float)
n = np.norm(q)
q = as_float_array(q)
alpha_beta_gamma[..., 0] = np.arctan2(q[..., 3], q[..., 0]) + np.arctan2(-q[..., 1], q[..., 2])
alpha_beta_gamma[..., 1] = 2 * np.arccos(np.sqrt((q[..., 0] ** 2 + q[..., 3] ** 2) / n))
alpha_beta_gamma[..., 2] = np.arctan2(q[..., 3], q[..., 0]) - np.arctan2(-q[..., 1], q[..., 2])
return alpha_beta_gamma |
def match_pattern(self, pat, word):
"""Implements fixed-width pattern matching.
Matches just in case pattern is the same length (in segments) as the
word and each of the segments in the pattern is a featural subset of the
corresponding segment in the word. Matches return the corresponding list
of feature sets; failed matches return None.
Args:
pat (list): pattern consisting of a sequence of sets of (value,
feature) tuples
word (unicode): a Unicode IPA string consisting of zero or more
segments
Returns:
list: corresponding list of feature sets or, if there is no match,
None
"""
segs = self.word_fts(word)
if len(pat) != len(segs):
return None
else:
if all([set(p) <= s for (p, s) in zip(pat, segs)]):
return segs | def function[match_pattern, parameter[self, pat, word]]:
constant[Implements fixed-width pattern matching.
Matches just in case pattern is the same length (in segments) as the
word and each of the segments in the pattern is a featural subset of the
corresponding segment in the word. Matches return the corresponding list
of feature sets; failed matches return None.
Args:
pat (list): pattern consisting of a sequence of sets of (value,
feature) tuples
word (unicode): a Unicode IPA string consisting of zero or more
segments
Returns:
list: corresponding list of feature sets or, if there is no match,
None
]
variable[segs] assign[=] call[name[self].word_fts, parameter[name[word]]]
if compare[call[name[len], parameter[name[pat]]] not_equal[!=] call[name[len], parameter[name[segs]]]] begin[:]
return[constant[None]] | keyword[def] identifier[match_pattern] ( identifier[self] , identifier[pat] , identifier[word] ):
literal[string]
identifier[segs] = identifier[self] . identifier[word_fts] ( identifier[word] )
keyword[if] identifier[len] ( identifier[pat] )!= identifier[len] ( identifier[segs] ):
keyword[return] keyword[None]
keyword[else] :
keyword[if] identifier[all] ([ identifier[set] ( identifier[p] )<= identifier[s] keyword[for] ( identifier[p] , identifier[s] ) keyword[in] identifier[zip] ( identifier[pat] , identifier[segs] )]):
keyword[return] identifier[segs] | def match_pattern(self, pat, word):
"""Implements fixed-width pattern matching.
Matches just in case pattern is the same length (in segments) as the
word and each of the segments in the pattern is a featural subset of the
corresponding segment in the word. Matches return the corresponding list
of feature sets; failed matches return None.
Args:
pat (list): pattern consisting of a sequence of sets of (value,
feature) tuples
word (unicode): a Unicode IPA string consisting of zero or more
segments
Returns:
list: corresponding list of feature sets or, if there is no match,
None
"""
segs = self.word_fts(word)
if len(pat) != len(segs):
return None # depends on [control=['if'], data=[]]
elif all([set(p) <= s for (p, s) in zip(pat, segs)]):
return segs # depends on [control=['if'], data=[]] |
def dequote(s):
"""Remove excess quotes from a string."""
if len(s) < 2:
return s
elif (s[0] == s[-1]) and s.startswith(('"', "'")):
return s[1: -1]
else:
return s | def function[dequote, parameter[s]]:
constant[Remove excess quotes from a string.]
if compare[call[name[len], parameter[name[s]]] less[<] constant[2]] begin[:]
return[name[s]] | keyword[def] identifier[dequote] ( identifier[s] ):
literal[string]
keyword[if] identifier[len] ( identifier[s] )< literal[int] :
keyword[return] identifier[s]
keyword[elif] ( identifier[s] [ literal[int] ]== identifier[s] [- literal[int] ]) keyword[and] identifier[s] . identifier[startswith] (( literal[string] , literal[string] )):
keyword[return] identifier[s] [ literal[int] :- literal[int] ]
keyword[else] :
keyword[return] identifier[s] | def dequote(s):
"""Remove excess quotes from a string."""
if len(s) < 2:
return s # depends on [control=['if'], data=[]]
elif s[0] == s[-1] and s.startswith(('"', "'")):
return s[1:-1] # depends on [control=['if'], data=[]]
else:
return s |
def _clean_string(v, sinfo):
"""Test for and clean unicode present in template CSVs.
"""
if isinstance(v, (list, tuple)):
return [_clean_string(x, sinfo) for x in v]
else:
assert isinstance(v, six.string_types), v
try:
if hasattr(v, "decode"):
return str(v.decode("ascii"))
else:
return str(v.encode("ascii").decode("ascii"))
except UnicodeDecodeError as msg:
raise ValueError("Found unicode character in template CSV line %s:\n%s" % (sinfo, str(msg))) | def function[_clean_string, parameter[v, sinfo]]:
constant[Test for and clean unicode present in template CSVs.
]
if call[name[isinstance], parameter[name[v], tuple[[<ast.Name object at 0x7da1b1985210>, <ast.Name object at 0x7da1b1987190>]]]] begin[:]
return[<ast.ListComp object at 0x7da1b1987070>] | keyword[def] identifier[_clean_string] ( identifier[v] , identifier[sinfo] ):
literal[string]
keyword[if] identifier[isinstance] ( identifier[v] ,( identifier[list] , identifier[tuple] )):
keyword[return] [ identifier[_clean_string] ( identifier[x] , identifier[sinfo] ) keyword[for] identifier[x] keyword[in] identifier[v] ]
keyword[else] :
keyword[assert] identifier[isinstance] ( identifier[v] , identifier[six] . identifier[string_types] ), identifier[v]
keyword[try] :
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
keyword[return] identifier[str] ( identifier[v] . identifier[decode] ( literal[string] ))
keyword[else] :
keyword[return] identifier[str] ( identifier[v] . identifier[encode] ( literal[string] ). identifier[decode] ( literal[string] ))
keyword[except] identifier[UnicodeDecodeError] keyword[as] identifier[msg] :
keyword[raise] identifier[ValueError] ( literal[string] %( identifier[sinfo] , identifier[str] ( identifier[msg] ))) | def _clean_string(v, sinfo):
"""Test for and clean unicode present in template CSVs.
"""
if isinstance(v, (list, tuple)):
return [_clean_string(x, sinfo) for x in v] # depends on [control=['if'], data=[]]
else:
assert isinstance(v, six.string_types), v
try:
if hasattr(v, 'decode'):
return str(v.decode('ascii')) # depends on [control=['if'], data=[]]
else:
return str(v.encode('ascii').decode('ascii')) # depends on [control=['try'], data=[]]
except UnicodeDecodeError as msg:
raise ValueError('Found unicode character in template CSV line %s:\n%s' % (sinfo, str(msg))) # depends on [control=['except'], data=['msg']] |
def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l))
return loss_averages_op | def function[_add_loss_summaries, parameter[total_loss]]:
constant[Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
]
variable[loss_averages] assign[=] call[name[tf].train.ExponentialMovingAverage, parameter[constant[0.9]]]
variable[losses] assign[=] call[name[tf].get_collection, parameter[constant[losses]]]
variable[loss_averages_op] assign[=] call[name[loss_averages].apply, parameter[binary_operation[name[losses] + list[[<ast.Name object at 0x7da2054a6c80>]]]]]
for taget[name[l]] in starred[binary_operation[name[losses] + list[[<ast.Name object at 0x7da2054a5570>]]]] begin[:]
call[name[tf].summary.scalar, parameter[binary_operation[name[l].op.name + constant[ (raw)]], name[l]]]
call[name[tf].summary.scalar, parameter[name[l].op.name, call[name[loss_averages].average, parameter[name[l]]]]]
return[name[loss_averages_op]] | keyword[def] identifier[_add_loss_summaries] ( identifier[total_loss] ):
literal[string]
identifier[loss_averages] = identifier[tf] . identifier[train] . identifier[ExponentialMovingAverage] ( literal[int] , identifier[name] = literal[string] )
identifier[losses] = identifier[tf] . identifier[get_collection] ( literal[string] )
identifier[loss_averages_op] = identifier[loss_averages] . identifier[apply] ( identifier[losses] +[ identifier[total_loss] ])
keyword[for] identifier[l] keyword[in] identifier[losses] +[ identifier[total_loss] ]:
identifier[tf] . identifier[summary] . identifier[scalar] ( identifier[l] . identifier[op] . identifier[name] + literal[string] , identifier[l] )
identifier[tf] . identifier[summary] . identifier[scalar] ( identifier[l] . identifier[op] . identifier[name] , identifier[loss_averages] . identifier[average] ( identifier[l] ))
keyword[return] identifier[loss_averages_op] | def _add_loss_summaries(total_loss):
"""Add summaries for losses in CIFAR-10 model.
Generates moving average for all losses and associated summaries for
visualizing the performance of the network.
Args:
total_loss: Total loss from loss().
Returns:
loss_averages_op: op for generating moving averages of losses.
"""
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
losses = tf.get_collection('losses')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.summary.scalar(l.op.name + ' (raw)', l)
tf.summary.scalar(l.op.name, loss_averages.average(l)) # depends on [control=['for'], data=['l']]
return loss_averages_op |
def _calc_font_size(self, win_wd):
"""Heuristic to calculate an appropriate font size based on the
width of the viewer window.
Parameters
----------
win_wd : int
The width of the viewer window.
Returns
-------
font_size : int
Approximately appropriate font size in points
"""
font_size = 4
if win_wd >= 1600:
font_size = 24
elif win_wd >= 1000:
font_size = 18
elif win_wd >= 800:
font_size = 16
elif win_wd >= 600:
font_size = 14
elif win_wd >= 500:
font_size = 12
elif win_wd >= 400:
font_size = 11
elif win_wd >= 300:
font_size = 10
elif win_wd >= 250:
font_size = 8
elif win_wd >= 200:
font_size = 6
return font_size | def function[_calc_font_size, parameter[self, win_wd]]:
constant[Heuristic to calculate an appropriate font size based on the
width of the viewer window.
Parameters
----------
win_wd : int
The width of the viewer window.
Returns
-------
font_size : int
Approximately appropriate font size in points
]
variable[font_size] assign[=] constant[4]
if compare[name[win_wd] greater_or_equal[>=] constant[1600]] begin[:]
variable[font_size] assign[=] constant[24]
return[name[font_size]] | keyword[def] identifier[_calc_font_size] ( identifier[self] , identifier[win_wd] ):
literal[string]
identifier[font_size] = literal[int]
keyword[if] identifier[win_wd] >= literal[int] :
identifier[font_size] = literal[int]
keyword[elif] identifier[win_wd] >= literal[int] :
identifier[font_size] = literal[int]
keyword[elif] identifier[win_wd] >= literal[int] :
identifier[font_size] = literal[int]
keyword[elif] identifier[win_wd] >= literal[int] :
identifier[font_size] = literal[int]
keyword[elif] identifier[win_wd] >= literal[int] :
identifier[font_size] = literal[int]
keyword[elif] identifier[win_wd] >= literal[int] :
identifier[font_size] = literal[int]
keyword[elif] identifier[win_wd] >= literal[int] :
identifier[font_size] = literal[int]
keyword[elif] identifier[win_wd] >= literal[int] :
identifier[font_size] = literal[int]
keyword[elif] identifier[win_wd] >= literal[int] :
identifier[font_size] = literal[int]
keyword[return] identifier[font_size] | def _calc_font_size(self, win_wd):
"""Heuristic to calculate an appropriate font size based on the
width of the viewer window.
Parameters
----------
win_wd : int
The width of the viewer window.
Returns
-------
font_size : int
Approximately appropriate font size in points
"""
font_size = 4
if win_wd >= 1600:
font_size = 24 # depends on [control=['if'], data=[]]
elif win_wd >= 1000:
font_size = 18 # depends on [control=['if'], data=[]]
elif win_wd >= 800:
font_size = 16 # depends on [control=['if'], data=[]]
elif win_wd >= 600:
font_size = 14 # depends on [control=['if'], data=[]]
elif win_wd >= 500:
font_size = 12 # depends on [control=['if'], data=[]]
elif win_wd >= 400:
font_size = 11 # depends on [control=['if'], data=[]]
elif win_wd >= 300:
font_size = 10 # depends on [control=['if'], data=[]]
elif win_wd >= 250:
font_size = 8 # depends on [control=['if'], data=[]]
elif win_wd >= 200:
font_size = 6 # depends on [control=['if'], data=[]]
return font_size |
def SecurityCheck(self, func, request, *args, **kwargs):
"""A decorator applied to protected web handlers."""
request.user = self.username
request.token = access_control.ACLToken(
username="Testing", reason="Just a test")
return func(request, *args, **kwargs) | def function[SecurityCheck, parameter[self, func, request]]:
constant[A decorator applied to protected web handlers.]
name[request].user assign[=] name[self].username
name[request].token assign[=] call[name[access_control].ACLToken, parameter[]]
return[call[name[func], parameter[name[request], <ast.Starred object at 0x7da1b1b06a10>]]] | keyword[def] identifier[SecurityCheck] ( identifier[self] , identifier[func] , identifier[request] ,* identifier[args] ,** identifier[kwargs] ):
literal[string]
identifier[request] . identifier[user] = identifier[self] . identifier[username]
identifier[request] . identifier[token] = identifier[access_control] . identifier[ACLToken] (
identifier[username] = literal[string] , identifier[reason] = literal[string] )
keyword[return] identifier[func] ( identifier[request] ,* identifier[args] ,** identifier[kwargs] ) | def SecurityCheck(self, func, request, *args, **kwargs):
"""A decorator applied to protected web handlers."""
request.user = self.username
request.token = access_control.ACLToken(username='Testing', reason='Just a test')
return func(request, *args, **kwargs) |
def _parse_extra_features(node, NHX_string):
"""
Reads node's extra data form its NHX string. NHX uses this
format: [&&NHX:prop1=value1:prop2=value2]
"""
NHX_string = NHX_string.replace("[&&NHX:", "")
NHX_string = NHX_string.replace("]", "")
for field in NHX_string.split(":"):
try:
pname, pvalue = field.split("=")
except ValueError as e:
raise NewickError('Invalid NHX format %s' %field)
node.add_feature(pname, pvalue) | def function[_parse_extra_features, parameter[node, NHX_string]]:
constant[
Reads node's extra data form its NHX string. NHX uses this
format: [&&NHX:prop1=value1:prop2=value2]
]
variable[NHX_string] assign[=] call[name[NHX_string].replace, parameter[constant[[&&NHX:], constant[]]]
variable[NHX_string] assign[=] call[name[NHX_string].replace, parameter[constant[]], constant[]]]
for taget[name[field]] in starred[call[name[NHX_string].split, parameter[constant[:]]]] begin[:]
<ast.Try object at 0x7da18f58ed10>
call[name[node].add_feature, parameter[name[pname], name[pvalue]]] | keyword[def] identifier[_parse_extra_features] ( identifier[node] , identifier[NHX_string] ):
literal[string]
identifier[NHX_string] = identifier[NHX_string] . identifier[replace] ( literal[string] , literal[string] )
identifier[NHX_string] = identifier[NHX_string] . identifier[replace] ( literal[string] , literal[string] )
keyword[for] identifier[field] keyword[in] identifier[NHX_string] . identifier[split] ( literal[string] ):
keyword[try] :
identifier[pname] , identifier[pvalue] = identifier[field] . identifier[split] ( literal[string] )
keyword[except] identifier[ValueError] keyword[as] identifier[e] :
keyword[raise] identifier[NewickError] ( literal[string] % identifier[field] )
identifier[node] . identifier[add_feature] ( identifier[pname] , identifier[pvalue] ) | def _parse_extra_features(node, NHX_string):
"""
Reads node's extra data form its NHX string. NHX uses this
format: [&&NHX:prop1=value1:prop2=value2]
"""
NHX_string = NHX_string.replace('[&&NHX:', '')
NHX_string = NHX_string.replace(']', '')
for field in NHX_string.split(':'):
try:
(pname, pvalue) = field.split('=') # depends on [control=['try'], data=[]]
except ValueError as e:
raise NewickError('Invalid NHX format %s' % field) # depends on [control=['except'], data=[]]
node.add_feature(pname, pvalue) # depends on [control=['for'], data=['field']] |
def unhandled(exception, opt):
""" Handle uncaught/unexpected errors and be polite about it"""
exmod = type(exception).__module__
name = "%s.%s" % (exmod, type(exception).__name__)
# this is a Vault error
if exmod == 'aomi.exceptions' or exmod == 'cryptorito':
# This may be set for Validation or similar errors
if hasattr(exception, 'source'):
output(exception.message, opt, extra=exception.source)
else:
output(exception.message, opt)
else:
output("Unexpected error: %s" % name, opt)
sys.exit(1) | def function[unhandled, parameter[exception, opt]]:
constant[ Handle uncaught/unexpected errors and be polite about it]
variable[exmod] assign[=] call[name[type], parameter[name[exception]]].__module__
variable[name] assign[=] binary_operation[constant[%s.%s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b18396c0>, <ast.Attribute object at 0x7da1b183ac80>]]]
if <ast.BoolOp object at 0x7da1b18387f0> begin[:]
if call[name[hasattr], parameter[name[exception], constant[source]]] begin[:]
call[name[output], parameter[name[exception].message, name[opt]]]
call[name[sys].exit, parameter[constant[1]]] | keyword[def] identifier[unhandled] ( identifier[exception] , identifier[opt] ):
literal[string]
identifier[exmod] = identifier[type] ( identifier[exception] ). identifier[__module__]
identifier[name] = literal[string] %( identifier[exmod] , identifier[type] ( identifier[exception] ). identifier[__name__] )
keyword[if] identifier[exmod] == literal[string] keyword[or] identifier[exmod] == literal[string] :
keyword[if] identifier[hasattr] ( identifier[exception] , literal[string] ):
identifier[output] ( identifier[exception] . identifier[message] , identifier[opt] , identifier[extra] = identifier[exception] . identifier[source] )
keyword[else] :
identifier[output] ( identifier[exception] . identifier[message] , identifier[opt] )
keyword[else] :
identifier[output] ( literal[string] % identifier[name] , identifier[opt] )
identifier[sys] . identifier[exit] ( literal[int] ) | def unhandled(exception, opt):
""" Handle uncaught/unexpected errors and be polite about it"""
exmod = type(exception).__module__
name = '%s.%s' % (exmod, type(exception).__name__)
# this is a Vault error
if exmod == 'aomi.exceptions' or exmod == 'cryptorito':
# This may be set for Validation or similar errors
if hasattr(exception, 'source'):
output(exception.message, opt, extra=exception.source) # depends on [control=['if'], data=[]]
else:
output(exception.message, opt) # depends on [control=['if'], data=[]]
else:
output('Unexpected error: %s' % name, opt)
sys.exit(1) |
def _on_motion(self, event):
"""Drag around label if visible."""
if not self._visual_drag.winfo_ismapped():
return
if self._drag_cols and self._dragged_col is not None:
self._drag_col(event)
elif self._drag_rows and self._dragged_row is not None:
self._drag_row(event) | def function[_on_motion, parameter[self, event]]:
constant[Drag around label if visible.]
if <ast.UnaryOp object at 0x7da18f00c3a0> begin[:]
return[None]
if <ast.BoolOp object at 0x7da18f00d960> begin[:]
call[name[self]._drag_col, parameter[name[event]]] | keyword[def] identifier[_on_motion] ( identifier[self] , identifier[event] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_visual_drag] . identifier[winfo_ismapped] ():
keyword[return]
keyword[if] identifier[self] . identifier[_drag_cols] keyword[and] identifier[self] . identifier[_dragged_col] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_drag_col] ( identifier[event] )
keyword[elif] identifier[self] . identifier[_drag_rows] keyword[and] identifier[self] . identifier[_dragged_row] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_drag_row] ( identifier[event] ) | def _on_motion(self, event):
"""Drag around label if visible."""
if not self._visual_drag.winfo_ismapped():
return # depends on [control=['if'], data=[]]
if self._drag_cols and self._dragged_col is not None:
self._drag_col(event) # depends on [control=['if'], data=[]]
elif self._drag_rows and self._dragged_row is not None:
self._drag_row(event) # depends on [control=['if'], data=[]] |
def gcd2(a, b):
"""Greatest common divisor using Euclid's algorithm."""
while a:
a, b = b%a, a
return b | def function[gcd2, parameter[a, b]]:
constant[Greatest common divisor using Euclid's algorithm.]
while name[a] begin[:]
<ast.Tuple object at 0x7da1b2346aa0> assign[=] tuple[[<ast.BinOp object at 0x7da1b2347a90>, <ast.Name object at 0x7da1b23478e0>]]
return[name[b]] | keyword[def] identifier[gcd2] ( identifier[a] , identifier[b] ):
literal[string]
keyword[while] identifier[a] :
identifier[a] , identifier[b] = identifier[b] % identifier[a] , identifier[a]
keyword[return] identifier[b] | def gcd2(a, b):
"""Greatest common divisor using Euclid's algorithm."""
while a:
(a, b) = (b % a, a) # depends on [control=['while'], data=[]]
return b |
def addsitepackages(known_paths, sys_prefix=sys.prefix, exec_prefix=sys.exec_prefix):
"""Add site-packages (and possibly site-python) to sys.path"""
prefixes = [os.path.join(sys_prefix, "local"), sys_prefix]
if exec_prefix != sys_prefix:
prefixes.append(os.path.join(exec_prefix, "local"))
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos') or _is_jython:
sitedirs = [os.path.join(prefix, "Lib", "site-packages")]
elif _is_pypy:
sitedirs = [os.path.join(prefix, 'site-packages')]
elif sys.platform == 'darwin' and prefix == sys_prefix:
if prefix.startswith("/System/Library/Frameworks/"): # Apple's Python
sitedirs = [os.path.join("/Library/Python", sys.version[:3], "site-packages"),
os.path.join(prefix, "Extras", "lib", "python")]
else: # any other Python distros on OSX work this way
sitedirs = [os.path.join(prefix, "lib",
"python" + sys.version[:3], "site-packages")]
elif os.sep == '/':
sitedirs = [os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python"),
os.path.join(prefix, "python" + sys.version[:3], "lib-dynload")]
lib64_dir = os.path.join(prefix, "lib64", "python" + sys.version[:3], "site-packages")
if (os.path.exists(lib64_dir) and
os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]):
if _is_64bit:
sitedirs.insert(0, lib64_dir)
else:
sitedirs.append(lib64_dir)
try:
# sys.getobjects only available in --with-pydebug build
sys.getobjects
sitedirs.insert(0, os.path.join(sitedirs[0], 'debug'))
except AttributeError:
pass
# Debian-specific dist-packages directories:
sitedirs.append(os.path.join(prefix, "local/lib",
"python" + sys.version[:3],
"dist-packages"))
if sys.version[0] == '2':
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"dist-packages"))
else:
sitedirs.append(os.path.join(prefix, "lib",
"python" + sys.version[0],
"dist-packages"))
sitedirs.append(os.path.join(prefix, "lib", "dist-python"))
else:
sitedirs = [prefix, os.path.join(prefix, "lib", "site-packages")]
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
for sitedir in sitedirs:
if os.path.isdir(sitedir):
addsitedir(sitedir, known_paths)
return None | def function[addsitepackages, parameter[known_paths, sys_prefix, exec_prefix]]:
constant[Add site-packages (and possibly site-python) to sys.path]
variable[prefixes] assign[=] list[[<ast.Call object at 0x7da2054a6ce0>, <ast.Name object at 0x7da2054a6d10>]]
if compare[name[exec_prefix] not_equal[!=] name[sys_prefix]] begin[:]
call[name[prefixes].append, parameter[call[name[os].path.join, parameter[name[exec_prefix], constant[local]]]]]
for taget[name[prefix]] in starred[name[prefixes]] begin[:]
if name[prefix] begin[:]
if <ast.BoolOp object at 0x7da2054a6650> begin[:]
variable[sitedirs] assign[=] list[[<ast.Call object at 0x7da2054a4b50>]]
if compare[name[sys].platform equal[==] constant[darwin]] begin[:]
if compare[constant[Python.framework] in name[prefix]] begin[:]
variable[home] assign[=] call[name[os].environ.get, parameter[constant[HOME]]]
if name[home] begin[:]
call[name[sitedirs].append, parameter[call[name[os].path.join, parameter[name[home], constant[Library], constant[Python], call[name[sys].version][<ast.Slice object at 0x7da204623c40>], constant[site-packages]]]]]
for taget[name[sitedir]] in starred[name[sitedirs]] begin[:]
if call[name[os].path.isdir, parameter[name[sitedir]]] begin[:]
call[name[addsitedir], parameter[name[sitedir], name[known_paths]]]
return[constant[None]] | keyword[def] identifier[addsitepackages] ( identifier[known_paths] , identifier[sys_prefix] = identifier[sys] . identifier[prefix] , identifier[exec_prefix] = identifier[sys] . identifier[exec_prefix] ):
literal[string]
identifier[prefixes] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[sys_prefix] , literal[string] ), identifier[sys_prefix] ]
keyword[if] identifier[exec_prefix] != identifier[sys_prefix] :
identifier[prefixes] . identifier[append] ( identifier[os] . identifier[path] . identifier[join] ( identifier[exec_prefix] , literal[string] ))
keyword[for] identifier[prefix] keyword[in] identifier[prefixes] :
keyword[if] identifier[prefix] :
keyword[if] identifier[sys] . identifier[platform] keyword[in] ( literal[string] , literal[string] ) keyword[or] identifier[_is_jython] :
identifier[sitedirs] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[prefix] , literal[string] , literal[string] )]
keyword[elif] identifier[_is_pypy] :
identifier[sitedirs] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[prefix] , literal[string] )]
keyword[elif] identifier[sys] . identifier[platform] == literal[string] keyword[and] identifier[prefix] == identifier[sys_prefix] :
keyword[if] identifier[prefix] . identifier[startswith] ( literal[string] ):
identifier[sitedirs] =[ identifier[os] . identifier[path] . identifier[join] ( literal[string] , identifier[sys] . identifier[version] [: literal[int] ], literal[string] ),
identifier[os] . identifier[path] . identifier[join] ( identifier[prefix] , literal[string] , literal[string] , literal[string] )]
keyword[else] :
identifier[sitedirs] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[prefix] , literal[string] ,
literal[string] + identifier[sys] . identifier[version] [: literal[int] ], literal[string] )]
keyword[elif] identifier[os] . identifier[sep] == literal[string] :
identifier[sitedirs] =[ identifier[os] . identifier[path] . identifier[join] ( identifier[prefix] ,
literal[string] ,
literal[string] + identifier[sys] . identifier[version] [: literal[int] ],
literal[string] ),
identifier[os] . identifier[path] . identifier[join] ( identifier[prefix] , literal[string] , literal[string] ),
identifier[os] . identifier[path] . identifier[join] ( identifier[prefix] , literal[string] + identifier[sys] . identifier[version] [: literal[int] ], literal[string] )]
identifier[lib64_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[prefix] , literal[string] , literal[string] + identifier[sys] . identifier[version] [: literal[int] ], literal[string] )
keyword[if] ( identifier[os] . identifier[path] . identifier[exists] ( identifier[lib64_dir] ) keyword[and]
identifier[os] . identifier[path] . identifier[realpath] ( identifier[lib64_dir] ) keyword[not] keyword[in] [ identifier[os] . identifier[path] . identifier[realpath] ( identifier[p] ) keyword[for] identifier[p] keyword[in] identifier[sitedirs] ]):
keyword[if] identifier[_is_64bit] :
identifier[sitedirs] . identifier[insert] ( literal[int] , identifier[lib64_dir] )
keyword[else] :
identifier[sitedirs] . identifier[append] ( identifier[lib64_dir] )
keyword[try] :
identifier[sys] . identifier[getobjects]
identifier[sitedirs] . identifier[insert] ( literal[int] , identifier[os] . identifier[path] . identifier[join] ( identifier[sitedirs] [ literal[int] ], literal[string] ))
keyword[except] identifier[AttributeError] :
keyword[pass]
identifier[sitedirs] . identifier[append] ( identifier[os] . identifier[path] . identifier[join] ( identifier[prefix] , literal[string] ,
literal[string] + identifier[sys] . identifier[version] [: literal[int] ],
literal[string] ))
keyword[if] identifier[sys] . identifier[version] [ literal[int] ]== literal[string] :
identifier[sitedirs] . identifier[append] ( identifier[os] . identifier[path] . identifier[join] ( identifier[prefix] , literal[string] ,
literal[string] + identifier[sys] . identifier[version] [: literal[int] ],
literal[string] ))
keyword[else] :
identifier[sitedirs] . identifier[append] ( identifier[os] . identifier[path] . identifier[join] ( identifier[prefix] , literal[string] ,
literal[string] + identifier[sys] . identifier[version] [ literal[int] ],
literal[string] ))
identifier[sitedirs] . identifier[append] ( identifier[os] . identifier[path] . identifier[join] ( identifier[prefix] , literal[string] , literal[string] ))
keyword[else] :
identifier[sitedirs] =[ identifier[prefix] , identifier[os] . identifier[path] . identifier[join] ( identifier[prefix] , literal[string] , literal[string] )]
keyword[if] identifier[sys] . identifier[platform] == literal[string] :
keyword[if] literal[string] keyword[in] identifier[prefix] :
identifier[home] = identifier[os] . identifier[environ] . identifier[get] ( literal[string] )
keyword[if] identifier[home] :
identifier[sitedirs] . identifier[append] (
identifier[os] . identifier[path] . identifier[join] ( identifier[home] ,
literal[string] ,
literal[string] ,
identifier[sys] . identifier[version] [: literal[int] ],
literal[string] ))
keyword[for] identifier[sitedir] keyword[in] identifier[sitedirs] :
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[sitedir] ):
identifier[addsitedir] ( identifier[sitedir] , identifier[known_paths] )
keyword[return] keyword[None] | def addsitepackages(known_paths, sys_prefix=sys.prefix, exec_prefix=sys.exec_prefix):
"""Add site-packages (and possibly site-python) to sys.path"""
prefixes = [os.path.join(sys_prefix, 'local'), sys_prefix]
if exec_prefix != sys_prefix:
prefixes.append(os.path.join(exec_prefix, 'local')) # depends on [control=['if'], data=['exec_prefix']]
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos') or _is_jython:
sitedirs = [os.path.join(prefix, 'Lib', 'site-packages')] # depends on [control=['if'], data=[]]
elif _is_pypy:
sitedirs = [os.path.join(prefix, 'site-packages')] # depends on [control=['if'], data=[]]
elif sys.platform == 'darwin' and prefix == sys_prefix:
if prefix.startswith('/System/Library/Frameworks/'): # Apple's Python
sitedirs = [os.path.join('/Library/Python', sys.version[:3], 'site-packages'), os.path.join(prefix, 'Extras', 'lib', 'python')] # depends on [control=['if'], data=[]]
else: # any other Python distros on OSX work this way
sitedirs = [os.path.join(prefix, 'lib', 'python' + sys.version[:3], 'site-packages')] # depends on [control=['if'], data=[]]
elif os.sep == '/':
sitedirs = [os.path.join(prefix, 'lib', 'python' + sys.version[:3], 'site-packages'), os.path.join(prefix, 'lib', 'site-python'), os.path.join(prefix, 'python' + sys.version[:3], 'lib-dynload')]
lib64_dir = os.path.join(prefix, 'lib64', 'python' + sys.version[:3], 'site-packages')
if os.path.exists(lib64_dir) and os.path.realpath(lib64_dir) not in [os.path.realpath(p) for p in sitedirs]:
if _is_64bit:
sitedirs.insert(0, lib64_dir) # depends on [control=['if'], data=[]]
else:
sitedirs.append(lib64_dir) # depends on [control=['if'], data=[]]
try:
# sys.getobjects only available in --with-pydebug build
sys.getobjects
sitedirs.insert(0, os.path.join(sitedirs[0], 'debug')) # depends on [control=['try'], data=[]]
except AttributeError:
pass # depends on [control=['except'], data=[]]
# Debian-specific dist-packages directories:
sitedirs.append(os.path.join(prefix, 'local/lib', 'python' + sys.version[:3], 'dist-packages'))
if sys.version[0] == '2':
sitedirs.append(os.path.join(prefix, 'lib', 'python' + sys.version[:3], 'dist-packages')) # depends on [control=['if'], data=[]]
else:
sitedirs.append(os.path.join(prefix, 'lib', 'python' + sys.version[0], 'dist-packages'))
sitedirs.append(os.path.join(prefix, 'lib', 'dist-python')) # depends on [control=['if'], data=[]]
else:
sitedirs = [prefix, os.path.join(prefix, 'lib', 'site-packages')]
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(os.path.join(home, 'Library', 'Python', sys.version[:3], 'site-packages')) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
for sitedir in sitedirs:
if os.path.isdir(sitedir):
addsitedir(sitedir, known_paths) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['sitedir']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['prefix']]
return None |
def describe(self, bucket, descriptor=None):
"""https://github.com/frictionlessdata/tableschema-bigquery-py#storage
"""
# Set descriptor
if descriptor is not None:
self.__descriptors[bucket] = descriptor
# Get descriptor
else:
descriptor = self.__descriptors.get(bucket)
if descriptor is None:
table_name = self.__mapper.convert_bucket(bucket)
response = self.__service.tables().get(
projectId=self.__project,
datasetId=self.__dataset,
tableId=table_name).execute()
converted_descriptor = response['schema']
descriptor = self.__mapper.restore_descriptor(converted_descriptor)
return descriptor | def function[describe, parameter[self, bucket, descriptor]]:
constant[https://github.com/frictionlessdata/tableschema-bigquery-py#storage
]
if compare[name[descriptor] is_not constant[None]] begin[:]
call[name[self].__descriptors][name[bucket]] assign[=] name[descriptor]
return[name[descriptor]] | keyword[def] identifier[describe] ( identifier[self] , identifier[bucket] , identifier[descriptor] = keyword[None] ):
literal[string]
keyword[if] identifier[descriptor] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[__descriptors] [ identifier[bucket] ]= identifier[descriptor]
keyword[else] :
identifier[descriptor] = identifier[self] . identifier[__descriptors] . identifier[get] ( identifier[bucket] )
keyword[if] identifier[descriptor] keyword[is] keyword[None] :
identifier[table_name] = identifier[self] . identifier[__mapper] . identifier[convert_bucket] ( identifier[bucket] )
identifier[response] = identifier[self] . identifier[__service] . identifier[tables] (). identifier[get] (
identifier[projectId] = identifier[self] . identifier[__project] ,
identifier[datasetId] = identifier[self] . identifier[__dataset] ,
identifier[tableId] = identifier[table_name] ). identifier[execute] ()
identifier[converted_descriptor] = identifier[response] [ literal[string] ]
identifier[descriptor] = identifier[self] . identifier[__mapper] . identifier[restore_descriptor] ( identifier[converted_descriptor] )
keyword[return] identifier[descriptor] | def describe(self, bucket, descriptor=None):
"""https://github.com/frictionlessdata/tableschema-bigquery-py#storage
"""
# Set descriptor
if descriptor is not None:
self.__descriptors[bucket] = descriptor # depends on [control=['if'], data=['descriptor']]
else:
# Get descriptor
descriptor = self.__descriptors.get(bucket)
if descriptor is None:
table_name = self.__mapper.convert_bucket(bucket)
response = self.__service.tables().get(projectId=self.__project, datasetId=self.__dataset, tableId=table_name).execute()
converted_descriptor = response['schema']
descriptor = self.__mapper.restore_descriptor(converted_descriptor) # depends on [control=['if'], data=['descriptor']]
return descriptor |
def update_config(self, config):
"""Sends TraceConfig to the agent and gets agent's config in reply.
:type config: `~opencensus.proto.trace.v1.TraceConfig`
:param config: Trace config with sampling and other settings
:rtype: `~opencensus.proto.trace.v1.TraceConfig`
:returns: Trace config from agent.
"""
# do not allow updating config simultaneously
lock = Lock()
with lock:
# TODO: keep the stream alive.
# The stream is terminated after iteration completes.
# To keep it alive, we can enqueue proto configs here
# and asyncronously read them and send to the agent.
config_responses = self.client.Config(
self.generate_config_request(config))
agent_config = next(config_responses)
return agent_config | def function[update_config, parameter[self, config]]:
constant[Sends TraceConfig to the agent and gets agent's config in reply.
:type config: `~opencensus.proto.trace.v1.TraceConfig`
:param config: Trace config with sampling and other settings
:rtype: `~opencensus.proto.trace.v1.TraceConfig`
:returns: Trace config from agent.
]
variable[lock] assign[=] call[name[Lock], parameter[]]
with name[lock] begin[:]
variable[config_responses] assign[=] call[name[self].client.Config, parameter[call[name[self].generate_config_request, parameter[name[config]]]]]
variable[agent_config] assign[=] call[name[next], parameter[name[config_responses]]]
return[name[agent_config]] | keyword[def] identifier[update_config] ( identifier[self] , identifier[config] ):
literal[string]
identifier[lock] = identifier[Lock] ()
keyword[with] identifier[lock] :
identifier[config_responses] = identifier[self] . identifier[client] . identifier[Config] (
identifier[self] . identifier[generate_config_request] ( identifier[config] ))
identifier[agent_config] = identifier[next] ( identifier[config_responses] )
keyword[return] identifier[agent_config] | def update_config(self, config):
"""Sends TraceConfig to the agent and gets agent's config in reply.
:type config: `~opencensus.proto.trace.v1.TraceConfig`
:param config: Trace config with sampling and other settings
:rtype: `~opencensus.proto.trace.v1.TraceConfig`
:returns: Trace config from agent.
"""
# do not allow updating config simultaneously
lock = Lock()
with lock:
# TODO: keep the stream alive.
# The stream is terminated after iteration completes.
# To keep it alive, we can enqueue proto configs here
# and asyncronously read them and send to the agent.
config_responses = self.client.Config(self.generate_config_request(config))
agent_config = next(config_responses)
return agent_config # depends on [control=['with'], data=[]] |
def stem(self, word):
"""
Stem a German word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
word = word.replace("\xDF", "ss")
# Every occurrence of 'u' and 'y'
# between vowels is put into upper case.
for i in range(1, len(word)-1):
if word[i-1] in self.__vowels and word[i+1] in self.__vowels:
if word[i] == "u":
word = "".join((word[:i], "U", word[i+1:]))
elif word[i] == "y":
word = "".join((word[:i], "Y", word[i+1:]))
r1, r2 = self._r1r2_standard(word, self.__vowels)
# R1 is adjusted so that the region before it
# contains at least 3 letters.
for i in range(1, len(word)):
if word[i] not in self.__vowels and word[i-1] in self.__vowels:
if len(word[:i+1]) < 3 and len(word[:i+1]) > 0:
r1 = word[3:]
elif len(word[:i+1]) == 0:
return word
break
# STEP 1
for suffix in self.__step1_suffixes:
if r1.endswith(suffix):
if (suffix in ("en", "es", "e") and
word[-len(suffix)-4:-len(suffix)] == "niss"):
word = word[:-len(suffix)-1]
r1 = r1[:-len(suffix)-1]
r2 = r2[:-len(suffix)-1]
elif suffix == "s":
if word[-2] in self.__s_ending:
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 2
for suffix in self.__step2_suffixes:
if r1.endswith(suffix):
if suffix == "st":
if word[-3] in self.__st_ending and len(word[:-3]) >= 3:
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break
# STEP 3: Derivational suffixes
for suffix in self.__step3_suffixes:
if r2.endswith(suffix):
if suffix in ("end", "ung"):
if ("ig" in r2[-len(suffix)-2:-len(suffix)] and
"e" not in r2[-len(suffix)-3:-len(suffix)-2]):
word = word[:-len(suffix)-2]
else:
word = word[:-len(suffix)]
elif (suffix in ("ig", "ik", "isch") and
"e" not in r2[-len(suffix)-1:-len(suffix)]):
word = word[:-len(suffix)]
elif suffix in ("lich", "heit"):
if ("er" in r1[-len(suffix)-2:-len(suffix)] or
"en" in r1[-len(suffix)-2:-len(suffix)]):
word = word[:-len(suffix)-2]
else:
word = word[:-len(suffix)]
elif suffix == "keit":
if "lich" in r2[-len(suffix)-4:-len(suffix)]:
word = word[:-len(suffix)-4]
elif "ig" in r2[-len(suffix)-2:-len(suffix)]:
word = word[:-len(suffix)-2]
else:
word = word[:-len(suffix)]
break
# Umlaut accents are removed and
# 'u' and 'y' are put back into lower case.
word = (word.replace("\xE4", "a").replace("\xF6", "o")
.replace("\xFC", "u").replace("U", "u")
.replace("Y", "y"))
return word | def function[stem, parameter[self, word]]:
constant[
Stem a German word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
]
variable[word] assign[=] call[name[word].lower, parameter[]]
variable[word] assign[=] call[name[word].replace, parameter[constant[ß], constant[ss]]]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], binary_operation[call[name[len], parameter[name[word]]] - constant[1]]]]] begin[:]
if <ast.BoolOp object at 0x7da1b22b8af0> begin[:]
if compare[call[name[word]][name[i]] equal[==] constant[u]] begin[:]
variable[word] assign[=] call[constant[].join, parameter[tuple[[<ast.Subscript object at 0x7da1b22bb310>, <ast.Constant object at 0x7da1b22b9f90>, <ast.Subscript object at 0x7da1b22bbc10>]]]]
<ast.Tuple object at 0x7da1b22ba5c0> assign[=] call[name[self]._r1r2_standard, parameter[name[word], name[self].__vowels]]
for taget[name[i]] in starred[call[name[range], parameter[constant[1], call[name[len], parameter[name[word]]]]]] begin[:]
if <ast.BoolOp object at 0x7da1b22b8be0> begin[:]
if <ast.BoolOp object at 0x7da1b22b8520> begin[:]
variable[r1] assign[=] call[name[word]][<ast.Slice object at 0x7da1b22b82b0>]
break
for taget[name[suffix]] in starred[name[self].__step1_suffixes] begin[:]
if call[name[r1].endswith, parameter[name[suffix]]] begin[:]
if <ast.BoolOp object at 0x7da1b22bb850> begin[:]
variable[word] assign[=] call[name[word]][<ast.Slice object at 0x7da1b22b8580>]
variable[r1] assign[=] call[name[r1]][<ast.Slice object at 0x7da1b22b8190>]
variable[r2] assign[=] call[name[r2]][<ast.Slice object at 0x7da1b22ba5f0>]
break
for taget[name[suffix]] in starred[name[self].__step2_suffixes] begin[:]
if call[name[r1].endswith, parameter[name[suffix]]] begin[:]
if compare[name[suffix] equal[==] constant[st]] begin[:]
if <ast.BoolOp object at 0x7da1b22ce020> begin[:]
variable[word] assign[=] call[name[word]][<ast.Slice object at 0x7da1b23c2dd0>]
variable[r1] assign[=] call[name[r1]][<ast.Slice object at 0x7da1b23c2650>]
variable[r2] assign[=] call[name[r2]][<ast.Slice object at 0x7da1b23c2ad0>]
break
for taget[name[suffix]] in starred[name[self].__step3_suffixes] begin[:]
if call[name[r2].endswith, parameter[name[suffix]]] begin[:]
if compare[name[suffix] in tuple[[<ast.Constant object at 0x7da1b23895d0>, <ast.Constant object at 0x7da1b23884f0>]]] begin[:]
if <ast.BoolOp object at 0x7da1b2388040> begin[:]
variable[word] assign[=] call[name[word]][<ast.Slice object at 0x7da1b238aad0>]
break
variable[word] assign[=] call[call[call[call[call[name[word].replace, parameter[constant[ä], constant[a]]].replace, parameter[constant[ö], constant[o]]].replace, parameter[constant[ü], constant[u]]].replace, parameter[constant[U], constant[u]]].replace, parameter[constant[Y], constant[y]]]
return[name[word]] | keyword[def] identifier[stem] ( identifier[self] , identifier[word] ):
literal[string]
identifier[word] = identifier[word] . identifier[lower] ()
identifier[word] = identifier[word] . identifier[replace] ( literal[string] , literal[string] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[word] )- literal[int] ):
keyword[if] identifier[word] [ identifier[i] - literal[int] ] keyword[in] identifier[self] . identifier[__vowels] keyword[and] identifier[word] [ identifier[i] + literal[int] ] keyword[in] identifier[self] . identifier[__vowels] :
keyword[if] identifier[word] [ identifier[i] ]== literal[string] :
identifier[word] = literal[string] . identifier[join] (( identifier[word] [: identifier[i] ], literal[string] , identifier[word] [ identifier[i] + literal[int] :]))
keyword[elif] identifier[word] [ identifier[i] ]== literal[string] :
identifier[word] = literal[string] . identifier[join] (( identifier[word] [: identifier[i] ], literal[string] , identifier[word] [ identifier[i] + literal[int] :]))
identifier[r1] , identifier[r2] = identifier[self] . identifier[_r1r2_standard] ( identifier[word] , identifier[self] . identifier[__vowels] )
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] , identifier[len] ( identifier[word] )):
keyword[if] identifier[word] [ identifier[i] ] keyword[not] keyword[in] identifier[self] . identifier[__vowels] keyword[and] identifier[word] [ identifier[i] - literal[int] ] keyword[in] identifier[self] . identifier[__vowels] :
keyword[if] identifier[len] ( identifier[word] [: identifier[i] + literal[int] ])< literal[int] keyword[and] identifier[len] ( identifier[word] [: identifier[i] + literal[int] ])> literal[int] :
identifier[r1] = identifier[word] [ literal[int] :]
keyword[elif] identifier[len] ( identifier[word] [: identifier[i] + literal[int] ])== literal[int] :
keyword[return] identifier[word]
keyword[break]
keyword[for] identifier[suffix] keyword[in] identifier[self] . identifier[__step1_suffixes] :
keyword[if] identifier[r1] . identifier[endswith] ( identifier[suffix] ):
keyword[if] ( identifier[suffix] keyword[in] ( literal[string] , literal[string] , literal[string] ) keyword[and]
identifier[word] [- identifier[len] ( identifier[suffix] )- literal[int] :- identifier[len] ( identifier[suffix] )]== literal[string] ):
identifier[word] = identifier[word] [:- identifier[len] ( identifier[suffix] )- literal[int] ]
identifier[r1] = identifier[r1] [:- identifier[len] ( identifier[suffix] )- literal[int] ]
identifier[r2] = identifier[r2] [:- identifier[len] ( identifier[suffix] )- literal[int] ]
keyword[elif] identifier[suffix] == literal[string] :
keyword[if] identifier[word] [- literal[int] ] keyword[in] identifier[self] . identifier[__s_ending] :
identifier[word] = identifier[word] [:- literal[int] ]
identifier[r1] = identifier[r1] [:- literal[int] ]
identifier[r2] = identifier[r2] [:- literal[int] ]
keyword[else] :
identifier[word] = identifier[word] [:- identifier[len] ( identifier[suffix] )]
identifier[r1] = identifier[r1] [:- identifier[len] ( identifier[suffix] )]
identifier[r2] = identifier[r2] [:- identifier[len] ( identifier[suffix] )]
keyword[break]
keyword[for] identifier[suffix] keyword[in] identifier[self] . identifier[__step2_suffixes] :
keyword[if] identifier[r1] . identifier[endswith] ( identifier[suffix] ):
keyword[if] identifier[suffix] == literal[string] :
keyword[if] identifier[word] [- literal[int] ] keyword[in] identifier[self] . identifier[__st_ending] keyword[and] identifier[len] ( identifier[word] [:- literal[int] ])>= literal[int] :
identifier[word] = identifier[word] [:- literal[int] ]
identifier[r1] = identifier[r1] [:- literal[int] ]
identifier[r2] = identifier[r2] [:- literal[int] ]
keyword[else] :
identifier[word] = identifier[word] [:- identifier[len] ( identifier[suffix] )]
identifier[r1] = identifier[r1] [:- identifier[len] ( identifier[suffix] )]
identifier[r2] = identifier[r2] [:- identifier[len] ( identifier[suffix] )]
keyword[break]
keyword[for] identifier[suffix] keyword[in] identifier[self] . identifier[__step3_suffixes] :
keyword[if] identifier[r2] . identifier[endswith] ( identifier[suffix] ):
keyword[if] identifier[suffix] keyword[in] ( literal[string] , literal[string] ):
keyword[if] ( literal[string] keyword[in] identifier[r2] [- identifier[len] ( identifier[suffix] )- literal[int] :- identifier[len] ( identifier[suffix] )] keyword[and]
literal[string] keyword[not] keyword[in] identifier[r2] [- identifier[len] ( identifier[suffix] )- literal[int] :- identifier[len] ( identifier[suffix] )- literal[int] ]):
identifier[word] = identifier[word] [:- identifier[len] ( identifier[suffix] )- literal[int] ]
keyword[else] :
identifier[word] = identifier[word] [:- identifier[len] ( identifier[suffix] )]
keyword[elif] ( identifier[suffix] keyword[in] ( literal[string] , literal[string] , literal[string] ) keyword[and]
literal[string] keyword[not] keyword[in] identifier[r2] [- identifier[len] ( identifier[suffix] )- literal[int] :- identifier[len] ( identifier[suffix] )]):
identifier[word] = identifier[word] [:- identifier[len] ( identifier[suffix] )]
keyword[elif] identifier[suffix] keyword[in] ( literal[string] , literal[string] ):
keyword[if] ( literal[string] keyword[in] identifier[r1] [- identifier[len] ( identifier[suffix] )- literal[int] :- identifier[len] ( identifier[suffix] )] keyword[or]
literal[string] keyword[in] identifier[r1] [- identifier[len] ( identifier[suffix] )- literal[int] :- identifier[len] ( identifier[suffix] )]):
identifier[word] = identifier[word] [:- identifier[len] ( identifier[suffix] )- literal[int] ]
keyword[else] :
identifier[word] = identifier[word] [:- identifier[len] ( identifier[suffix] )]
keyword[elif] identifier[suffix] == literal[string] :
keyword[if] literal[string] keyword[in] identifier[r2] [- identifier[len] ( identifier[suffix] )- literal[int] :- identifier[len] ( identifier[suffix] )]:
identifier[word] = identifier[word] [:- identifier[len] ( identifier[suffix] )- literal[int] ]
keyword[elif] literal[string] keyword[in] identifier[r2] [- identifier[len] ( identifier[suffix] )- literal[int] :- identifier[len] ( identifier[suffix] )]:
identifier[word] = identifier[word] [:- identifier[len] ( identifier[suffix] )- literal[int] ]
keyword[else] :
identifier[word] = identifier[word] [:- identifier[len] ( identifier[suffix] )]
keyword[break]
identifier[word] =( identifier[word] . identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )
. identifier[replace] ( literal[string] , literal[string] ). identifier[replace] ( literal[string] , literal[string] )
. identifier[replace] ( literal[string] , literal[string] ))
keyword[return] identifier[word] | def stem(self, word):
"""
Stem a German word and return the stemmed form.
:param word: The word that is stemmed.
:type word: str or unicode
:return: The stemmed form.
:rtype: unicode
"""
word = word.lower()
word = word.replace('ß', 'ss')
# Every occurrence of 'u' and 'y'
# between vowels is put into upper case.
for i in range(1, len(word) - 1):
if word[i - 1] in self.__vowels and word[i + 1] in self.__vowels:
if word[i] == 'u':
word = ''.join((word[:i], 'U', word[i + 1:])) # depends on [control=['if'], data=[]]
elif word[i] == 'y':
word = ''.join((word[:i], 'Y', word[i + 1:])) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
(r1, r2) = self._r1r2_standard(word, self.__vowels)
# R1 is adjusted so that the region before it
# contains at least 3 letters.
for i in range(1, len(word)):
if word[i] not in self.__vowels and word[i - 1] in self.__vowels:
if len(word[:i + 1]) < 3 and len(word[:i + 1]) > 0:
r1 = word[3:] # depends on [control=['if'], data=[]]
elif len(word[:i + 1]) == 0:
return word # depends on [control=['if'], data=[]]
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
# STEP 1
for suffix in self.__step1_suffixes:
if r1.endswith(suffix):
if suffix in ('en', 'es', 'e') and word[-len(suffix) - 4:-len(suffix)] == 'niss':
word = word[:-len(suffix) - 1]
r1 = r1[:-len(suffix) - 1]
r2 = r2[:-len(suffix) - 1] # depends on [control=['if'], data=[]]
elif suffix == 's':
if word[-2] in self.__s_ending:
word = word[:-1]
r1 = r1[:-1]
r2 = r2[:-1] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['suffix']]
# STEP 2
for suffix in self.__step2_suffixes:
if r1.endswith(suffix):
if suffix == 'st':
if word[-3] in self.__st_ending and len(word[:-3]) >= 3:
word = word[:-2]
r1 = r1[:-2]
r2 = r2[:-2] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
word = word[:-len(suffix)]
r1 = r1[:-len(suffix)]
r2 = r2[:-len(suffix)]
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['suffix']]
# STEP 3: Derivational suffixes
for suffix in self.__step3_suffixes:
if r2.endswith(suffix):
if suffix in ('end', 'ung'):
if 'ig' in r2[-len(suffix) - 2:-len(suffix)] and 'e' not in r2[-len(suffix) - 3:-len(suffix) - 2]:
word = word[:-len(suffix) - 2] # depends on [control=['if'], data=[]]
else:
word = word[:-len(suffix)] # depends on [control=['if'], data=['suffix']]
elif suffix in ('ig', 'ik', 'isch') and 'e' not in r2[-len(suffix) - 1:-len(suffix)]:
word = word[:-len(suffix)] # depends on [control=['if'], data=[]]
elif suffix in ('lich', 'heit'):
if 'er' in r1[-len(suffix) - 2:-len(suffix)] or 'en' in r1[-len(suffix) - 2:-len(suffix)]:
word = word[:-len(suffix) - 2] # depends on [control=['if'], data=[]]
else:
word = word[:-len(suffix)] # depends on [control=['if'], data=['suffix']]
elif suffix == 'keit':
if 'lich' in r2[-len(suffix) - 4:-len(suffix)]:
word = word[:-len(suffix) - 4] # depends on [control=['if'], data=[]]
elif 'ig' in r2[-len(suffix) - 2:-len(suffix)]:
word = word[:-len(suffix) - 2] # depends on [control=['if'], data=[]]
else:
word = word[:-len(suffix)] # depends on [control=['if'], data=['suffix']]
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['suffix']]
# Umlaut accents are removed and
# 'u' and 'y' are put back into lower case.
word = word.replace('ä', 'a').replace('ö', 'o').replace('ü', 'u').replace('U', 'u').replace('Y', 'y')
return word |
def get_supported_connections(self):
"""Returns the number of supported simultaneous BLE connections.
The BLED112 is capable of supporting up to 8 simultaneous BLE connections.
However, the default firmware image has a limit of just 3 devices, which
is a lot easier to run up against. This method retrieves the current value
of this setting.
Returns:
int. The number of supported simultaneous connections, or -1 on error
"""
if self.supported_connections != -1:
return self.supported_connections
if self.api is None:
return -1
self._set_state(self._STATE_DONGLE_COMMAND)
self.api.ble_cmd_system_get_connections()
self._wait_for_state(self._STATE_DONGLE_COMMAND)
return self.supported_connections | def function[get_supported_connections, parameter[self]]:
constant[Returns the number of supported simultaneous BLE connections.
The BLED112 is capable of supporting up to 8 simultaneous BLE connections.
However, the default firmware image has a limit of just 3 devices, which
is a lot easier to run up against. This method retrieves the current value
of this setting.
Returns:
int. The number of supported simultaneous connections, or -1 on error
]
if compare[name[self].supported_connections not_equal[!=] <ast.UnaryOp object at 0x7da1b14358a0>] begin[:]
return[name[self].supported_connections]
if compare[name[self].api is constant[None]] begin[:]
return[<ast.UnaryOp object at 0x7da1b14e65c0>]
call[name[self]._set_state, parameter[name[self]._STATE_DONGLE_COMMAND]]
call[name[self].api.ble_cmd_system_get_connections, parameter[]]
call[name[self]._wait_for_state, parameter[name[self]._STATE_DONGLE_COMMAND]]
return[name[self].supported_connections] | keyword[def] identifier[get_supported_connections] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[supported_connections] !=- literal[int] :
keyword[return] identifier[self] . identifier[supported_connections]
keyword[if] identifier[self] . identifier[api] keyword[is] keyword[None] :
keyword[return] - literal[int]
identifier[self] . identifier[_set_state] ( identifier[self] . identifier[_STATE_DONGLE_COMMAND] )
identifier[self] . identifier[api] . identifier[ble_cmd_system_get_connections] ()
identifier[self] . identifier[_wait_for_state] ( identifier[self] . identifier[_STATE_DONGLE_COMMAND] )
keyword[return] identifier[self] . identifier[supported_connections] | def get_supported_connections(self):
"""Returns the number of supported simultaneous BLE connections.
The BLED112 is capable of supporting up to 8 simultaneous BLE connections.
However, the default firmware image has a limit of just 3 devices, which
is a lot easier to run up against. This method retrieves the current value
of this setting.
Returns:
int. The number of supported simultaneous connections, or -1 on error
"""
if self.supported_connections != -1:
return self.supported_connections # depends on [control=['if'], data=[]]
if self.api is None:
return -1 # depends on [control=['if'], data=[]]
self._set_state(self._STATE_DONGLE_COMMAND)
self.api.ble_cmd_system_get_connections()
self._wait_for_state(self._STATE_DONGLE_COMMAND)
return self.supported_connections |
def secret(self, s):
"""
Parse text either a private key or a private hierarchical key.
Return a subclass of :class:`Key <pycoin.key.Key>`, or None.
"""
s = parseable_str(s)
for f in [self.private_key, self.hierarchical_key]:
v = f(s)
if v:
return v | def function[secret, parameter[self, s]]:
constant[
Parse text either a private key or a private hierarchical key.
Return a subclass of :class:`Key <pycoin.key.Key>`, or None.
]
variable[s] assign[=] call[name[parseable_str], parameter[name[s]]]
for taget[name[f]] in starred[list[[<ast.Attribute object at 0x7da1b1ddf760>, <ast.Attribute object at 0x7da1b1ddfa90>]]] begin[:]
variable[v] assign[=] call[name[f], parameter[name[s]]]
if name[v] begin[:]
return[name[v]] | keyword[def] identifier[secret] ( identifier[self] , identifier[s] ):
literal[string]
identifier[s] = identifier[parseable_str] ( identifier[s] )
keyword[for] identifier[f] keyword[in] [ identifier[self] . identifier[private_key] , identifier[self] . identifier[hierarchical_key] ]:
identifier[v] = identifier[f] ( identifier[s] )
keyword[if] identifier[v] :
keyword[return] identifier[v] | def secret(self, s):
"""
Parse text either a private key or a private hierarchical key.
Return a subclass of :class:`Key <pycoin.key.Key>`, or None.
"""
s = parseable_str(s)
for f in [self.private_key, self.hierarchical_key]:
v = f(s)
if v:
return v # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']] |
def trigger_script(self):
"""Actually process a script."""
if self.remote_bridge.status not in (BRIDGE_STATUS.RECEIVED,):
return [1] #FIXME: State change
# This is asynchronous in real life so just cache the error
try:
self.remote_bridge.parsed_script = UpdateScript.FromBinary(self._device.script)
#FIXME: Actually run the script
self.remote_bridge.status = BRIDGE_STATUS.IDLE
except Exception as exc:
self._logger.exception("Error parsing script streamed to device")
self.remote_bridge.script_error = exc
self.remote_bridge.error = 1 # FIXME: Error code
return [0] | def function[trigger_script, parameter[self]]:
constant[Actually process a script.]
if compare[name[self].remote_bridge.status <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Attribute object at 0x7da2041dbca0>]]] begin[:]
return[list[[<ast.Constant object at 0x7da2041da350>]]]
<ast.Try object at 0x7da2041d8a90>
return[list[[<ast.Constant object at 0x7da2046231f0>]]] | keyword[def] identifier[trigger_script] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[remote_bridge] . identifier[status] keyword[not] keyword[in] ( identifier[BRIDGE_STATUS] . identifier[RECEIVED] ,):
keyword[return] [ literal[int] ]
keyword[try] :
identifier[self] . identifier[remote_bridge] . identifier[parsed_script] = identifier[UpdateScript] . identifier[FromBinary] ( identifier[self] . identifier[_device] . identifier[script] )
identifier[self] . identifier[remote_bridge] . identifier[status] = identifier[BRIDGE_STATUS] . identifier[IDLE]
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[self] . identifier[_logger] . identifier[exception] ( literal[string] )
identifier[self] . identifier[remote_bridge] . identifier[script_error] = identifier[exc]
identifier[self] . identifier[remote_bridge] . identifier[error] = literal[int]
keyword[return] [ literal[int] ] | def trigger_script(self):
"""Actually process a script."""
if self.remote_bridge.status not in (BRIDGE_STATUS.RECEIVED,):
return [1] #FIXME: State change # depends on [control=['if'], data=[]]
# This is asynchronous in real life so just cache the error
try:
self.remote_bridge.parsed_script = UpdateScript.FromBinary(self._device.script)
#FIXME: Actually run the script
self.remote_bridge.status = BRIDGE_STATUS.IDLE # depends on [control=['try'], data=[]]
except Exception as exc:
self._logger.exception('Error parsing script streamed to device')
self.remote_bridge.script_error = exc
self.remote_bridge.error = 1 # FIXME: Error code # depends on [control=['except'], data=['exc']]
return [0] |
def list_dir(self):
"""
Non-recursive file listing.
:returns: A generator over files in this "directory" for efficiency.
"""
bucket = self.s3_object.Bucket()
prefix = self.s3_object.key
if not prefix.endswith('/'): prefix += '/'
for obj in bucket.objects.filter(Delimiter='/', Prefix=prefix):
yield 's3://{}/{}'.format(obj.bucket_name, obj.key) | def function[list_dir, parameter[self]]:
constant[
Non-recursive file listing.
:returns: A generator over files in this "directory" for efficiency.
]
variable[bucket] assign[=] call[name[self].s3_object.Bucket, parameter[]]
variable[prefix] assign[=] name[self].s3_object.key
if <ast.UnaryOp object at 0x7da1b26ae5c0> begin[:]
<ast.AugAssign object at 0x7da1b26ae500>
for taget[name[obj]] in starred[call[name[bucket].objects.filter, parameter[]]] begin[:]
<ast.Yield object at 0x7da1b26acf10> | keyword[def] identifier[list_dir] ( identifier[self] ):
literal[string]
identifier[bucket] = identifier[self] . identifier[s3_object] . identifier[Bucket] ()
identifier[prefix] = identifier[self] . identifier[s3_object] . identifier[key]
keyword[if] keyword[not] identifier[prefix] . identifier[endswith] ( literal[string] ): identifier[prefix] += literal[string]
keyword[for] identifier[obj] keyword[in] identifier[bucket] . identifier[objects] . identifier[filter] ( identifier[Delimiter] = literal[string] , identifier[Prefix] = identifier[prefix] ):
keyword[yield] literal[string] . identifier[format] ( identifier[obj] . identifier[bucket_name] , identifier[obj] . identifier[key] ) | def list_dir(self):
"""
Non-recursive file listing.
:returns: A generator over files in this "directory" for efficiency.
"""
bucket = self.s3_object.Bucket()
prefix = self.s3_object.key
if not prefix.endswith('/'):
prefix += '/' # depends on [control=['if'], data=[]]
for obj in bucket.objects.filter(Delimiter='/', Prefix=prefix):
yield 's3://{}/{}'.format(obj.bucket_name, obj.key) # depends on [control=['for'], data=['obj']] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.