| code (string, 75–104k) | code_sememe (string, 47–309k) | token_type (string, 215–214k) | code_dependency (string, 75–155k) |
|---|---|---|---|
def get_content(pattern, string, tag='content'):
"""
Finds the 'content' tag from a 'pattern' in the provided 'string'
"""
output = []
for match in re.finditer(pattern, string):
output.append(match.group(tag))
return output
|
def function[get_content, parameter[pattern, string, tag]]:
constant[
Finds the 'content' tag from a 'pattern' in the provided 'string'
]
variable[output] assign[=] list[[]]
for taget[name[match]] in starred[call[name[re].finditer, parameter[name[pattern], name[string]]]] begin[:]
call[name[output].append, parameter[call[name[match].group, parameter[name[tag]]]]]
return[name[output]]
|
keyword[def] identifier[get_content] ( identifier[pattern] , identifier[string] , identifier[tag] = literal[string] ):
literal[string]
identifier[output] =[]
keyword[for] identifier[match] keyword[in] identifier[re] . identifier[finditer] ( identifier[pattern] , identifier[string] ):
identifier[output] . identifier[append] ( identifier[match] . identifier[group] ( identifier[tag] ))
keyword[return] identifier[output]
|
def get_content(pattern, string, tag='content'):
"""
Finds the 'content' tag from a 'pattern' in the provided 'string'
"""
output = []
for match in re.finditer(pattern, string):
output.append(match.group(tag)) # depends on [control=['for'], data=['match']]
return output
|
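The `get_content` function above collects the named group `tag` from every match of `pattern`. A minimal usage sketch (the pattern and input string here are illustrative, not part of the dataset):

```python
import re

def get_content(pattern, string, tag='content'):
    """Collect the named group 'tag' from every match of 'pattern'."""
    output = []
    for match in re.finditer(pattern, string):
        output.append(match.group(tag))
    return output

# Pull the text between <b>...</b> pairs via a named capture group.
print(get_content(r'<b>(?P<content>.*?)</b>', '<b>one</b> <b>two</b>'))
# ['one', 'two']
```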
def setPriority(self, queue, priority):
'''
Set priority of a sub-queue
'''
q = self.queueindex[queue]
self.queues[q[0]].removeSubQueue(q[1])
newPriority = self.queues.setdefault(priority, CBQueue.MultiQueue(self, priority))
q[0] = priority
newPriority.addSubQueue(q[1])
|
def function[setPriority, parameter[self, queue, priority]]:
constant[
Set priority of a sub-queue
]
variable[q] assign[=] call[name[self].queueindex][name[queue]]
call[call[name[self].queues][call[name[q]][constant[0]]].removeSubQueue, parameter[call[name[q]][constant[1]]]]
variable[newPriority] assign[=] call[name[self].queues.setdefault, parameter[name[priority], call[name[CBQueue].MultiQueue, parameter[name[self], name[priority]]]]]
call[name[q]][constant[0]] assign[=] name[priority]
call[name[newPriority].addSubQueue, parameter[call[name[q]][constant[1]]]]
|
keyword[def] identifier[setPriority] ( identifier[self] , identifier[queue] , identifier[priority] ):
literal[string]
identifier[q] = identifier[self] . identifier[queueindex] [ identifier[queue] ]
identifier[self] . identifier[queues] [ identifier[q] [ literal[int] ]]. identifier[removeSubQueue] ( identifier[q] [ literal[int] ])
identifier[newPriority] = identifier[self] . identifier[queues] . identifier[setdefault] ( identifier[priority] , identifier[CBQueue] . identifier[MultiQueue] ( identifier[self] , identifier[priority] ))
identifier[q] [ literal[int] ]= identifier[priority]
identifier[newPriority] . identifier[addSubQueue] ( identifier[q] [ literal[int] ])
|
def setPriority(self, queue, priority):
"""
Set priority of a sub-queue
"""
q = self.queueindex[queue]
self.queues[q[0]].removeSubQueue(q[1])
newPriority = self.queues.setdefault(priority, CBQueue.MultiQueue(self, priority))
q[0] = priority
newPriority.addSubQueue(q[1])
|
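`setPriority` uses `dict.setdefault` to create the destination priority bucket only when it does not exist yet, then moves the sub-queue across. `CBQueue.MultiQueue`, `removeSubQueue`, and `addSubQueue` are not defined in this row, so the sketch below imitates the same move with plain lists as hypothetical stand-ins:

```python
queues = {1: ['a', 'b'], 2: ['c']}  # priority -> bucket of payloads
queueindex = {'b': [1, 'b']}        # name -> [current priority, payload]

def set_priority(queue, priority):
    q = queueindex[queue]
    queues[q[0]].remove(q[1])                 # detach from the old bucket
    bucket = queues.setdefault(priority, [])  # create bucket on demand
    q[0] = priority                           # remember the new priority
    bucket.append(q[1])                       # attach to the new bucket

set_priority('b', 3)
print(queues)  # {1: ['a'], 2: ['c'], 3: ['b']}
```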
def register_entry_points(self, exclude=()):
"""Allow Gears plugins to inject themselves to the environment. For
example, if your plugin's package contains such ``entry_points``
definition in ``setup.py``, ``gears_plugin.register`` function will be
called with current environment during ``register_entry_points`` call::
entry_points = {
'gears': [
'register = gears_plugin:register',
],
}
Here is an example of such a function::
def register(environment):
assets_dir = os.path.join(os.path.dirname(__file__), 'assets')
assets_dir = os.path.abspath(assets_dir)
environment.register(FileSystemFinder([assets_dir]))
If you want to disable this behavior for some plugins, list their
packages using ``exclude`` argument::
environment.register_entry_points(exclude=['plugin'])
"""
for entry_point in iter_entry_points('gears', 'register'):
if entry_point.module_name not in exclude:
register = entry_point.load()
register(self)
|
def function[register_entry_points, parameter[self, exclude]]:
constant[Allow Gears plugins to inject themselves into the environment. For
example, if your plugin's package contains such an ``entry_points``
definition in ``setup.py``, the ``gears_plugin.register`` function will be
called with the current environment during the ``register_entry_points`` call::
entry_points = {
'gears': [
'register = gears_plugin:register',
],
}
Here is an example of such a function::
def register(environment):
assets_dir = os.path.join(os.path.dirname(__file__), 'assets')
assets_dir = os.path.abspath(assets_dir)
environment.register(FileSystemFinder([assets_dir]))
If you want to disable this behavior for some plugins, list their
packages using ``exclude`` argument::
environment.register_entry_points(exclude=['plugin'])
]
for taget[name[entry_point]] in starred[call[name[iter_entry_points], parameter[constant[gears], constant[register]]]] begin[:]
if compare[name[entry_point].module_name <ast.NotIn object at 0x7da2590d7190> name[exclude]] begin[:]
variable[register] assign[=] call[name[entry_point].load, parameter[]]
call[name[register], parameter[name[self]]]
|
keyword[def] identifier[register_entry_points] ( identifier[self] , identifier[exclude] =()):
literal[string]
keyword[for] identifier[entry_point] keyword[in] identifier[iter_entry_points] ( literal[string] , literal[string] ):
keyword[if] identifier[entry_point] . identifier[module_name] keyword[not] keyword[in] identifier[exclude] :
identifier[register] = identifier[entry_point] . identifier[load] ()
identifier[register] ( identifier[self] )
|
def register_entry_points(self, exclude=()):
"""Allow Gears plugins to inject themselves to the environment. For
example, if your plugin's package contains such ``entry_points``
definition in ``setup.py``, ``gears_plugin.register`` function will be
called with current environment during ``register_entry_points`` call::
entry_points = {
'gears': [
'register = gears_plugin:register',
],
}
Here is an example of such a function::
def register(environment):
assets_dir = os.path.join(os.path.dirname(__file__), 'assets')
assets_dir = os.path.abspath(assets_dir)
environment.register(FileSystemFinder([assets_dir]))
If you want to disable this behavior for some plugins, list their
packages using ``exclude`` argument::
environment.register_entry_points(exclude=['plugin'])
"""
for entry_point in iter_entry_points('gears', 'register'):
if entry_point.module_name not in exclude:
register = entry_point.load()
register(self) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['entry_point']]
|
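The loop above relies on `pkg_resources.iter_entry_points` to discover plugins advertised under the `gears` group. A hedged sketch of both sides, using the docstring's own `gears_plugin` example (no real distribution is assumed to be installed):

```python
# In the plugin's setup.py, per the docstring:
#
#     entry_points={
#         'gears': ['register = gears_plugin:register'],
#     }
#
# Discovery side, mirroring the method body:
from pkg_resources import iter_entry_points

def register_entry_points(environment, exclude=()):
    for entry_point in iter_entry_points('gears', 'register'):
        if entry_point.module_name not in exclude:  # honour the opt-out list
            register = entry_point.load()           # imports gears_plugin
            register(environment)                   # hands over the environment
```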
def run_sambamba_markdup(job, bam):
"""
Marks reads as PCR duplicates using Sambamba
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:return: FileStoreID for sorted BAM file
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'input.bam'))
command = ['/usr/local/bin/sambamba',
'markdup',
'-t', str(int(job.cores)),
'/data/input.bam',
'/data/output.bam']
start_time = time.time()
dockerCall(job=job, workDir=work_dir,
parameters=command,
tool='quay.io/biocontainers/sambamba:0.6.6--0')
end_time = time.time()
_log_runtime(job, start_time, end_time, "sambamba mkdup")
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.bam'))
|
def function[run_sambamba_markdup, parameter[job, bam]]:
constant[
Marks reads as PCR duplicates using Sambamba
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:return: FileStoreID for sorted BAM file
:rtype: str
]
variable[work_dir] assign[=] call[name[job].fileStore.getLocalTempDir, parameter[]]
call[name[job].fileStore.readGlobalFile, parameter[name[bam], call[name[os].path.join, parameter[name[work_dir], constant[input.bam]]]]]
variable[command] assign[=] list[[<ast.Constant object at 0x7da2043455d0>, <ast.Constant object at 0x7da204346230>, <ast.Constant object at 0x7da204347040>, <ast.Call object at 0x7da2043472e0>, <ast.Constant object at 0x7da204344070>, <ast.Constant object at 0x7da2043448e0>]]
variable[start_time] assign[=] call[name[time].time, parameter[]]
call[name[dockerCall], parameter[]]
variable[end_time] assign[=] call[name[time].time, parameter[]]
call[name[_log_runtime], parameter[name[job], name[start_time], name[end_time], constant[sambamba mkdup]]]
return[call[name[job].fileStore.writeGlobalFile, parameter[call[name[os].path.join, parameter[name[work_dir], constant[output.bam]]]]]]
|
keyword[def] identifier[run_sambamba_markdup] ( identifier[job] , identifier[bam] ):
literal[string]
identifier[work_dir] = identifier[job] . identifier[fileStore] . identifier[getLocalTempDir] ()
identifier[job] . identifier[fileStore] . identifier[readGlobalFile] ( identifier[bam] , identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , literal[string] ))
identifier[command] =[ literal[string] ,
literal[string] ,
literal[string] , identifier[str] ( identifier[int] ( identifier[job] . identifier[cores] )),
literal[string] ,
literal[string] ]
identifier[start_time] = identifier[time] . identifier[time] ()
identifier[dockerCall] ( identifier[job] = identifier[job] , identifier[workDir] = identifier[work_dir] ,
identifier[parameters] = identifier[command] ,
identifier[tool] = literal[string] )
identifier[end_time] = identifier[time] . identifier[time] ()
identifier[_log_runtime] ( identifier[job] , identifier[start_time] , identifier[end_time] , literal[string] )
keyword[return] identifier[job] . identifier[fileStore] . identifier[writeGlobalFile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[work_dir] , literal[string] ))
|
def run_sambamba_markdup(job, bam):
"""
Marks reads as PCR duplicates using Sambamba
:param JobFunctionWrappingJob job: passed automatically by Toil
:param str bam: FileStoreID for BAM file
:return: FileStoreID for sorted BAM file
:rtype: str
"""
work_dir = job.fileStore.getLocalTempDir()
job.fileStore.readGlobalFile(bam, os.path.join(work_dir, 'input.bam'))
command = ['/usr/local/bin/sambamba', 'markdup', '-t', str(int(job.cores)), '/data/input.bam', '/data/output.bam']
start_time = time.time()
dockerCall(job=job, workDir=work_dir, parameters=command, tool='quay.io/biocontainers/sambamba:0.6.6--0')
end_time = time.time()
_log_runtime(job, start_time, end_time, 'sambamba mkdup')
return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'output.bam'))
|
def olindices(order, dim):
"""
Create a lexicographically sorted basis for a given order.
Examples:
>>> chaospy.bertran.olindices(2, 2)
array([[0, 0],
[0, 1],
[1, 0],
[0, 2],
[1, 1],
[2, 0]])
"""
indices = [olindex(o, dim) for o in range(order+1)]
indices = numpy.vstack(indices)
return indices
|
def function[olindices, parameter[order, dim]]:
constant[
Create a lexicographically sorted basis for a given order.
Examples:
>>> chaospy.bertran.olindices(2, 2)
array([[0, 0],
[0, 1],
[1, 0],
[0, 2],
[1, 1],
[2, 0]])
]
variable[indices] assign[=] <ast.ListComp object at 0x7da20c7c8df0>
variable[indices] assign[=] call[name[numpy].vstack, parameter[name[indices]]]
return[name[indices]]
|
keyword[def] identifier[olindices] ( identifier[order] , identifier[dim] ):
literal[string]
identifier[indices] =[ identifier[olindex] ( identifier[o] , identifier[dim] ) keyword[for] identifier[o] keyword[in] identifier[range] ( identifier[order] + literal[int] )]
identifier[indices] = identifier[numpy] . identifier[vstack] ( identifier[indices] )
keyword[return] identifier[indices]
|
def olindices(order, dim):
"""
Create a lexicographically sorted basis for a given order.
Examples:
>>> chaospy.bertran.olindices(2, 2)
array([[0, 0],
[0, 1],
[1, 0],
[0, 2],
[1, 1],
[2, 0]])
"""
indices = [olindex(o, dim) for o in range(order + 1)]
indices = numpy.vstack(indices)
return indices
|
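`olindex` is not shown in this row. Assuming it returns all `dim`-dimensional multi-indices of total order `o` in lexicographic order, the stand-in below reproduces the docstring's doctest exactly:

```python
from itertools import product
import numpy

def olindex(order, dim):
    # Hypothetical stand-in: every multi-index whose components sum to
    # 'order', lexicographically sorted.
    out = [idx for idx in product(range(order + 1), repeat=dim)
           if sum(idx) == order]
    return numpy.array(sorted(out))

def olindices(order, dim):
    return numpy.vstack([olindex(o, dim) for o in range(order + 1)])

print(olindices(2, 2))
# [[0 0]
#  [0 1]
#  [1 0]
#  [0 2]
#  [1 1]
#  [2 0]]
```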
def _selftoken_expired():
'''
Validate the current token exists and is still valid
'''
try:
verify = __opts__['vault'].get('verify', None)
url = '{0}/v1/auth/token/lookup-self'.format(__opts__['vault']['url'])
if 'token' not in __opts__['vault']['auth']:
return True
headers = {'X-Vault-Token': __opts__['vault']['auth']['token']}
response = requests.get(url, headers=headers, verify=verify)
if response.status_code != 200:
return True
return False
except Exception as e:
raise salt.exceptions.CommandExecutionError(
'Error while looking up self token : {0}'.format(six.text_type(e))
)
|
def function[_selftoken_expired, parameter[]]:
constant[
Validate the current token exists and is still valid
]
<ast.Try object at 0x7da18ede58d0>
|
keyword[def] identifier[_selftoken_expired] ():
literal[string]
keyword[try] :
identifier[verify] = identifier[__opts__] [ literal[string] ]. identifier[get] ( literal[string] , keyword[None] )
identifier[url] = literal[string] . identifier[format] ( identifier[__opts__] [ literal[string] ][ literal[string] ])
keyword[if] literal[string] keyword[not] keyword[in] identifier[__opts__] [ literal[string] ][ literal[string] ]:
keyword[return] keyword[True]
identifier[headers] ={ literal[string] : identifier[__opts__] [ literal[string] ][ literal[string] ][ literal[string] ]}
identifier[response] = identifier[requests] . identifier[get] ( identifier[url] , identifier[headers] = identifier[headers] , identifier[verify] = identifier[verify] )
keyword[if] identifier[response] . identifier[status_code] != literal[int] :
keyword[return] keyword[True]
keyword[return] keyword[False]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[raise] identifier[salt] . identifier[exceptions] . identifier[CommandExecutionError] (
literal[string] . identifier[format] ( identifier[six] . identifier[text_type] ( identifier[e] ))
)
|
def _selftoken_expired():
"""
Validate the current token exists and is still valid
"""
try:
verify = __opts__['vault'].get('verify', None)
url = '{0}/v1/auth/token/lookup-self'.format(__opts__['vault']['url'])
if 'token' not in __opts__['vault']['auth']:
return True # depends on [control=['if'], data=[]]
headers = {'X-Vault-Token': __opts__['vault']['auth']['token']}
response = requests.get(url, headers=headers, verify=verify)
if response.status_code != 200:
return True # depends on [control=['if'], data=[]]
return False # depends on [control=['try'], data=[]]
except Exception as e:
raise salt.exceptions.CommandExecutionError('Error while looking up self token : {0}'.format(six.text_type(e))) # depends on [control=['except'], data=['e']]
|
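Stripped of the Salt `__opts__` plumbing, `_selftoken_expired` reduces to one HTTP call: Vault's `lookup-self` endpoint answers 200 for a valid token and something else otherwise. A sketch with placeholder URL and token (no live Vault instance is assumed):

```python
import requests

def selftoken_expired(vault_url, token, verify=None):
    if not token:  # no token configured at all counts as expired
        return True
    url = '{0}/v1/auth/token/lookup-self'.format(vault_url)
    headers = {'X-Vault-Token': token}
    response = requests.get(url, headers=headers, verify=verify)
    return response.status_code != 200  # non-200 means re-authenticate
```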
def close(self):
"""gym api close"""
try:
# Purge last token from head node with <Close> message.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.server, self.port))
self._hello(sock)
comms.send_message(sock, ("<Close>" + self._get_token() + "</Close>").encode())
reply = comms.recv_message(sock)
ok, = struct.unpack('!I', reply)
assert ok
sock.close()
except Exception as e:
self._log_error(e)
if self.client_socket:
self.client_socket.close()
self.client_socket = None
|
def function[close, parameter[self]]:
constant[gym api close]
<ast.Try object at 0x7da1b1a560e0>
if name[self].client_socket begin[:]
call[name[self].client_socket.close, parameter[]]
name[self].client_socket assign[=] constant[None]
|
keyword[def] identifier[close] ( identifier[self] ):
literal[string]
keyword[try] :
identifier[sock] = identifier[socket] . identifier[socket] ( identifier[socket] . identifier[AF_INET] , identifier[socket] . identifier[SOCK_STREAM] )
identifier[sock] . identifier[connect] (( identifier[self] . identifier[server] , identifier[self] . identifier[port] ))
identifier[self] . identifier[_hello] ( identifier[sock] )
identifier[comms] . identifier[send_message] ( identifier[sock] ,( literal[string] + identifier[self] . identifier[_get_token] ()+ literal[string] ). identifier[encode] ())
identifier[reply] = identifier[comms] . identifier[recv_message] ( identifier[sock] )
identifier[ok] ,= identifier[struct] . identifier[unpack] ( literal[string] , identifier[reply] )
keyword[assert] identifier[ok]
identifier[sock] . identifier[close] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[self] . identifier[_log_error] ( identifier[e] )
keyword[if] identifier[self] . identifier[client_socket] :
identifier[self] . identifier[client_socket] . identifier[close] ()
identifier[self] . identifier[client_socket] = keyword[None]
|
def close(self):
"""gym api close"""
try:
# Purge last token from head node with <Close> message.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.server, self.port))
self._hello(sock)
comms.send_message(sock, ('<Close>' + self._get_token() + '</Close>').encode())
reply = comms.recv_message(sock)
(ok,) = struct.unpack('!I', reply)
assert ok
sock.close() # depends on [control=['try'], data=[]]
except Exception as e:
self._log_error(e) # depends on [control=['except'], data=['e']]
if self.client_socket:
self.client_socket.close()
self.client_socket = None # depends on [control=['if'], data=[]]
|
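The `struct.unpack('!I', reply)` line expects the head node to acknowledge `<Close>` with a single network-byte-order unsigned 32-bit integer. The framing can be checked in isolation:

```python
import struct

reply = struct.pack('!I', 1)        # 4 bytes, big-endian, as the server would send
(ok,) = struct.unpack('!I', reply)  # unpack always returns a tuple
assert ok                           # non-zero: the close was accepted
print(len(reply), ok)               # 4 1
```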
def transform_array(rot_mtx,vec_array):
'''transform_array( matrix, vector_array ) -> vector_array
'''
return map( lambda x,m=rot_mtx:transform(m,x), vec_array )
|
def function[transform_array, parameter[rot_mtx, vec_array]]:
constant[transform_array( matrix, vector_array ) -> vector_array
]
return[call[name[map], parameter[<ast.Lambda object at 0x7da1b0e6cdf0>, name[vec_array]]]]
|
keyword[def] identifier[transform_array] ( identifier[rot_mtx] , identifier[vec_array] ):
literal[string]
keyword[return] identifier[map] ( keyword[lambda] identifier[x] , identifier[m] = identifier[rot_mtx] : identifier[transform] ( identifier[m] , identifier[x] ), identifier[vec_array] )
|
def transform_array(rot_mtx, vec_array):
"""transform_array( matrix, vector_array ) -> vector_array
"""
return map(lambda x, m=rot_mtx: transform(m, x), vec_array)
|
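The `m=rot_mtx` default argument is the classic Python 2 idiom for binding a value into a lambda; note that on Python 3 `map` returns a lazy iterator rather than a list. An equivalent, eager formulation (with `transform` stubbed out, since this row does not define it):

```python
def transform(m, x):
    # Hypothetical stand-in for the real transform(matrix, vector).
    return [m * xi for xi in x]

def transform_array(rot_mtx, vec_array):
    # Same result as the map(...) version, eager on both Python 2 and 3.
    return [transform(rot_mtx, x) for x in vec_array]

print(transform_array(2, [[1, 2], [3, 4]]))  # [[2, 4], [6, 8]]
```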
def match_anywhere(subject: Expression, pattern: Pattern) -> Iterator[Tuple[Substitution, Tuple[int, ...]]]:
"""Tries to match the given *pattern* to the any subexpression of the given *subject*.
Yields each match in form of a substitution and a position tuple.
The position is a tuple of indices, e.g. the empty tuple refers to the *subject* itself,
:code:`(0, )` refers to the first child (operand) of the subject, :code:`(0, 0)` to the first child of
the first child etc.
Parameters:
subject:
A subject to match.
pattern:
The pattern to match.
Yields:
All possible substitution and position pairs.
Raises:
ValueError:
If the subject is not constant.
"""
if not is_constant(subject):
raise ValueError("The subject for matching must be constant.")
for child, pos in preorder_iter_with_position(subject):
if match_head(child, pattern):
for subst in match(child, pattern):
yield subst, pos
|
def function[match_anywhere, parameter[subject, pattern]]:
constant[Tries to match the given *pattern* to any subexpression of the given *subject*.
Yields each match in the form of a substitution and a position tuple.
The position is a tuple of indices, e.g. the empty tuple refers to the *subject* itself,
:code:`(0, )` refers to the first child (operand) of the subject, :code:`(0, 0)` to the first child of
the first child etc.
Parameters:
subject:
A subject to match.
pattern:
The pattern to match.
Yields:
All possible substitution and position pairs.
Raises:
ValueError:
If the subject is not constant.
]
if <ast.UnaryOp object at 0x7da207f00b80> begin[:]
<ast.Raise object at 0x7da207f02e30>
for taget[tuple[[<ast.Name object at 0x7da207f01750>, <ast.Name object at 0x7da207f03250>]]] in starred[call[name[preorder_iter_with_position], parameter[name[subject]]]] begin[:]
if call[name[match_head], parameter[name[child], name[pattern]]] begin[:]
for taget[name[subst]] in starred[call[name[match], parameter[name[child], name[pattern]]]] begin[:]
<ast.Yield object at 0x7da207f01030>
|
keyword[def] identifier[match_anywhere] ( identifier[subject] : identifier[Expression] , identifier[pattern] : identifier[Pattern] )-> identifier[Iterator] [ identifier[Tuple] [ identifier[Substitution] , identifier[Tuple] [ identifier[int] ,...]]]:
literal[string]
keyword[if] keyword[not] identifier[is_constant] ( identifier[subject] ):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[for] identifier[child] , identifier[pos] keyword[in] identifier[preorder_iter_with_position] ( identifier[subject] ):
keyword[if] identifier[match_head] ( identifier[child] , identifier[pattern] ):
keyword[for] identifier[subst] keyword[in] identifier[match] ( identifier[child] , identifier[pattern] ):
keyword[yield] identifier[subst] , identifier[pos]
|
def match_anywhere(subject: Expression, pattern: Pattern) -> Iterator[Tuple[Substitution, Tuple[int, ...]]]:
"""Tries to match the given *pattern* to the any subexpression of the given *subject*.
Yields each match in form of a substitution and a position tuple.
The position is a tuple of indices, e.g. the empty tuple refers to the *subject* itself,
:code:`(0, )` refers to the first child (operand) of the subject, :code:`(0, 0)` to the first child of
the first child etc.
Parameters:
subject:
A subject to match.
pattern:
The pattern to match.
Yields:
All possible substitution and position pairs.
Raises:
ValueError:
If the subject is not constant.
"""
if not is_constant(subject):
raise ValueError('The subject for matching must be constant.') # depends on [control=['if'], data=[]]
for (child, pos) in preorder_iter_with_position(subject):
if match_head(child, pattern):
for subst in match(child, pattern):
yield (subst, pos) # depends on [control=['for'], data=['subst']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
|
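`preorder_iter_with_position` is not shown in this row; per the docstring it must walk the subject depth-first, yielding `(subexpression, position)` pairs with `()` for the root and `(0,)`, `(0, 0)`, ... for descendants. A plausible sketch over nested tuples illustrates the position convention:

```python
def preorder_iter_with_position(expr, pos=()):
    # Hypothetical stand-in: yield the node, then recurse into children
    # (modelled as tuple elements), extending the index path.
    yield expr, pos
    if isinstance(expr, tuple):
        for i, child in enumerate(expr):
            for item in preorder_iter_with_position(child, pos + (i,)):
                yield item

for node, pos in preorder_iter_with_position(('f', ('g', 'x'))):
    print(pos, node)
# ()     ('f', ('g', 'x'))
# (0,)   f
# (1,)   ('g', 'x')
# (1, 0) g
# (1, 1) x
```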
def register(self, entry_point):
"""Register an extension
:param str entry_point: extension to register (entry point syntax).
:raise: ValueError if already registered.
"""
if entry_point in self.registered_extensions:
raise ValueError('Extension already registered')
ep = EntryPoint.parse(entry_point)
if ep.name in self.names():
raise ValueError('An extension with the same name already exists')
ext = self._load_one_plugin(ep, False, (), {}, False)
self.extensions.append(ext)
if self._extensions_by_name is not None:
self._extensions_by_name[ext.name] = ext
self.registered_extensions.insert(0, entry_point)
|
def function[register, parameter[self, entry_point]]:
constant[Register an extension
:param str entry_point: extension to register (entry point syntax).
:raise: ValueError if already registered.
]
if compare[name[entry_point] in name[self].registered_extensions] begin[:]
<ast.Raise object at 0x7da1b1d39c00>
variable[ep] assign[=] call[name[EntryPoint].parse, parameter[name[entry_point]]]
if compare[name[ep].name in call[name[self].names, parameter[]]] begin[:]
<ast.Raise object at 0x7da1b1d392d0>
variable[ext] assign[=] call[name[self]._load_one_plugin, parameter[name[ep], constant[False], tuple[[]], dictionary[[], []], constant[False]]]
call[name[self].extensions.append, parameter[name[ext]]]
if compare[name[self]._extensions_by_name is_not constant[None]] begin[:]
call[name[self]._extensions_by_name][name[ext].name] assign[=] name[ext]
call[name[self].registered_extensions.insert, parameter[constant[0], name[entry_point]]]
|
keyword[def] identifier[register] ( identifier[self] , identifier[entry_point] ):
literal[string]
keyword[if] identifier[entry_point] keyword[in] identifier[self] . identifier[registered_extensions] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[ep] = identifier[EntryPoint] . identifier[parse] ( identifier[entry_point] )
keyword[if] identifier[ep] . identifier[name] keyword[in] identifier[self] . identifier[names] ():
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[ext] = identifier[self] . identifier[_load_one_plugin] ( identifier[ep] , keyword[False] ,(),{}, keyword[False] )
identifier[self] . identifier[extensions] . identifier[append] ( identifier[ext] )
keyword[if] identifier[self] . identifier[_extensions_by_name] keyword[is] keyword[not] keyword[None] :
identifier[self] . identifier[_extensions_by_name] [ identifier[ext] . identifier[name] ]= identifier[ext]
identifier[self] . identifier[registered_extensions] . identifier[insert] ( literal[int] , identifier[entry_point] )
|
def register(self, entry_point):
"""Register an extension
:param str entry_point: extension to register (entry point syntax).
:raise: ValueError if already registered.
"""
if entry_point in self.registered_extensions:
raise ValueError('Extension already registered') # depends on [control=['if'], data=[]]
ep = EntryPoint.parse(entry_point)
if ep.name in self.names():
raise ValueError('An extension with the same name already exists') # depends on [control=['if'], data=[]]
ext = self._load_one_plugin(ep, False, (), {}, False)
self.extensions.append(ext)
if self._extensions_by_name is not None:
self._extensions_by_name[ext.name] = ext # depends on [control=['if'], data=[]]
self.registered_extensions.insert(0, entry_point)
|
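`EntryPoint.parse` accepts the same `name = module:attribute` syntax used in `setup.py`, and the method above guards against double registration both by the raw string and by the parsed name. The parse step alone (the module path is illustrative and never imported here):

```python
from pkg_resources import EntryPoint

ep = EntryPoint.parse('csv = myapp.formats:CSVFormatter')
print(ep.name)         # csv
print(ep.module_name)  # myapp.formats
# ep.load() would import myapp.formats and return its CSVFormatter attribute.
```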
def insert_or_replace_entity(self, entity):
'''
Adds an insert or replace entity operation to the batch. See
:func:`~azure.storage.table.tableservice.TableService.insert_or_replace_entity` for more
information on insert or replace operations.
The operation will not be executed until the batch is committed.
:param entity:
The entity to insert or replace. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: dict or :class:`~azure.storage.table.models.Entity`
'''
request = _insert_or_replace_entity(entity, self._require_encryption, self._key_encryption_key,
self._encryption_resolver)
self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request)
|
def function[insert_or_replace_entity, parameter[self, entity]]:
constant[
Adds an insert or replace entity operation to the batch. See
:func:`~azure.storage.table.tableservice.TableService.insert_or_replace_entity` for more
information on insert or replace operations.
The operation will not be executed until the batch is committed.
:param entity:
The entity to insert or replace. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: dict or :class:`~azure.storage.table.models.Entity`
]
variable[request] assign[=] call[name[_insert_or_replace_entity], parameter[name[entity], name[self]._require_encryption, name[self]._key_encryption_key, name[self]._encryption_resolver]]
call[name[self]._add_to_batch, parameter[call[name[entity]][constant[PartitionKey]], call[name[entity]][constant[RowKey]], name[request]]]
|
keyword[def] identifier[insert_or_replace_entity] ( identifier[self] , identifier[entity] ):
literal[string]
identifier[request] = identifier[_insert_or_replace_entity] ( identifier[entity] , identifier[self] . identifier[_require_encryption] , identifier[self] . identifier[_key_encryption_key] ,
identifier[self] . identifier[_encryption_resolver] )
identifier[self] . identifier[_add_to_batch] ( identifier[entity] [ literal[string] ], identifier[entity] [ literal[string] ], identifier[request] )
|
def insert_or_replace_entity(self, entity):
"""
Adds an insert or replace entity operation to the batch. See
:func:`~azure.storage.table.tableservice.TableService.insert_or_replace_entity` for more
information on insert or replace operations.
The operation will not be executed until the batch is committed.
:param entity:
The entity to insert or replace. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: dict or :class:`~azure.storage.table.models.Entity`
"""
request = _insert_or_replace_entity(entity, self._require_encryption, self._key_encryption_key, self._encryption_resolver)
self._add_to_batch(entity['PartitionKey'], entity['RowKey'], request)
|
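Per the docstring, a plain dict is accepted as long as it carries both required keys. A hedged sketch of the call (`batch` stands in for whatever object exposes this method; the surrounding class is not shown in this row):

```python
entity = {
    'PartitionKey': 'tasks',  # required
    'RowKey': '0001',         # required
    'description': 'take out the trash',
}
# batch.insert_or_replace_entity(entity) only queues the upsert;
# nothing is sent until the batch itself is committed.
```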
def _send_acceptance(self, enrollment, password, event):
"""Send an acceptance mail to an open enrolment"""
self.log('Sending acceptance status mail to user')
if password != "":
password_hint = '\n\nPS: Your new password is ' + password + ' - please change it after your first login!'
acceptance_text = self.config.acceptance_mail + password_hint
else:
acceptance_text = self.config.acceptance_mail
self._send_mail(self.config.acceptance_subject, acceptance_text, enrollment, event)
|
def function[_send_acceptance, parameter[self, enrollment, password, event]]:
constant[Send an acceptance mail to an open enrollment]
call[name[self].log, parameter[constant[Sending acceptance status mail to user]]]
if compare[name[password] not_equal[!=] constant[]] begin[:]
variable[password_hint] assign[=] binary_operation[binary_operation[constant[
PS: Your new password is ] + name[password]] + constant[ - please change it after your first login!]]
variable[acceptance_text] assign[=] binary_operation[name[self].config.acceptance_mail + name[password_hint]]
call[name[self]._send_mail, parameter[name[self].config.acceptance_subject, name[acceptance_text], name[enrollment], name[event]]]
|
keyword[def] identifier[_send_acceptance] ( identifier[self] , identifier[enrollment] , identifier[password] , identifier[event] ):
literal[string]
identifier[self] . identifier[log] ( literal[string] )
keyword[if] identifier[password] != literal[string] :
identifier[password_hint] = literal[string] + identifier[password] + literal[string]
identifier[acceptance_text] = identifier[self] . identifier[config] . identifier[acceptance_mail] + identifier[password_hint]
keyword[else] :
identifier[acceptance_text] = identifier[self] . identifier[config] . identifier[acceptance_mail]
identifier[self] . identifier[_send_mail] ( identifier[self] . identifier[config] . identifier[acceptance_subject] , identifier[acceptance_text] , identifier[enrollment] , identifier[event] )
|
def _send_acceptance(self, enrollment, password, event):
"""Send an acceptance mail to an open enrolment"""
self.log('Sending acceptance status mail to user')
if password != '':
password_hint = '\n\nPS: Your new password is ' + password + ' - please change it after your first login!'
acceptance_text = self.config.acceptance_mail + password_hint # depends on [control=['if'], data=['password']]
else:
acceptance_text = self.config.acceptance_mail
self._send_mail(self.config.acceptance_subject, acceptance_text, enrollment, event)
|
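String comparisons like the password check above belong with `==`/`!=`, never `is`: `is` tests object identity, which is not guaranteed for equal string values (CPython 3.8+ even emits a SyntaxWarning for `is` against a literal). A quick demonstration:

```python
a = 'abcd'
b = ''.join(['ab', 'cd'])  # equal content, built at runtime
print(a == b)  # True  -> what the check needs
print(a is b)  # usually False: two distinct objects with equal content
```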
def atlasdb_get_zonefile_bits( zonefile_hash, con=None, path=None ):
"""
What bit(s) in a zonefile inventory does a zonefile hash correspond to?
Return their indexes in the bit field.
"""
with AtlasDBOpen(con=con, path=path) as dbcon:
sql = "SELECT inv_index FROM zonefiles WHERE zonefile_hash = ?;"
args = (zonefile_hash,)
cur = dbcon.cursor()
res = atlasdb_query_execute( cur, sql, args )
# NOTE: zero-indexed
ret = []
for r in res:
ret.append( r['inv_index'] - 1 )
return ret
|
def function[atlasdb_get_zonefile_bits, parameter[zonefile_hash, con, path]]:
constant[
What bit(s) in a zonefile inventory does a zonefile hash correspond to?
Return their indexes in the bit field.
]
with call[name[AtlasDBOpen], parameter[]] begin[:]
variable[sql] assign[=] constant[SELECT inv_index FROM zonefiles WHERE zonefile_hash = ?;]
variable[args] assign[=] tuple[[<ast.Name object at 0x7da18f00f3a0>]]
variable[cur] assign[=] call[name[dbcon].cursor, parameter[]]
variable[res] assign[=] call[name[atlasdb_query_execute], parameter[name[cur], name[sql], name[args]]]
variable[ret] assign[=] list[[]]
for taget[name[r]] in starred[name[res]] begin[:]
call[name[ret].append, parameter[binary_operation[call[name[r]][constant[inv_index]] - constant[1]]]]
return[name[ret]]
|
keyword[def] identifier[atlasdb_get_zonefile_bits] ( identifier[zonefile_hash] , identifier[con] = keyword[None] , identifier[path] = keyword[None] ):
literal[string]
keyword[with] identifier[AtlasDBOpen] ( identifier[con] = identifier[con] , identifier[path] = identifier[path] ) keyword[as] identifier[dbcon] :
identifier[sql] = literal[string]
identifier[args] =( identifier[zonefile_hash] ,)
identifier[cur] = identifier[dbcon] . identifier[cursor] ()
identifier[res] = identifier[atlasdb_query_execute] ( identifier[cur] , identifier[sql] , identifier[args] )
identifier[ret] =[]
keyword[for] identifier[r] keyword[in] identifier[res] :
identifier[ret] . identifier[append] ( identifier[r] [ literal[string] ]- literal[int] )
keyword[return] identifier[ret]
|
def atlasdb_get_zonefile_bits(zonefile_hash, con=None, path=None):
"""
What bit(s) in a zonefile inventory does a zonefile hash correspond to?
Return their indexes in the bit field.
"""
with AtlasDBOpen(con=con, path=path) as dbcon:
sql = 'SELECT inv_index FROM zonefiles WHERE zonefile_hash = ?;'
args = (zonefile_hash,)
cur = dbcon.cursor()
res = atlasdb_query_execute(cur, sql, args)
# NOTE: zero-indexed
ret = []
for r in res:
ret.append(r['inv_index'] - 1) # depends on [control=['for'], data=['r']] # depends on [control=['with'], data=['dbcon']]
return ret
|
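The query binds `zonefile_hash` through the DB-API `?` placeholder rather than string formatting, which keeps the hash out of the SQL text. The same pattern against an in-memory SQLite database (schema and rows are made up for the demo):

```python
import sqlite3

con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE zonefiles (inv_index INTEGER, zonefile_hash TEXT)')
con.executemany('INSERT INTO zonefiles VALUES (?, ?)',
                [(1, 'abc'), (2, 'def'), (3, 'abc')])

sql = 'SELECT inv_index FROM zonefiles WHERE zonefile_hash = ?;'
cur = con.cursor()
# NOTE: stored indexes are 1-based; subtract 1 for bit positions.
bits = [row[0] - 1 for row in cur.execute(sql, ('abc',))]
print(bits)  # [0, 2]
```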
def etree_to_dict(t, trim=True, **kw):
u"""Converts an lxml.etree object to Python dict.
>>> etree_to_dict(etree.Element('root'))
{'root': None}
:param etree.Element t: lxml tree to convert
:returns d: a dict representing the lxml tree ``t``
:rtype: dict
"""
d = {t.tag: {} if t.attrib else None}
children = list(t)
etree_to_dict_w_args = partial(etree_to_dict, trim=trim, **kw)
if children:
dd = defaultdict(list)
d = {t.tag: {}}
for dc in map(etree_to_dict_w_args, children):
for k, v in dc.iteritems():
# do not add Comment instance to the key
if k is not etree.Comment:
dd[k].append(v)
d[t.tag] = {k: v[0] if len(v) == 1 else v for k, v in dd.iteritems()}
if t.attrib:
d[t.tag].update(('@' + k, v) for k, v in t.attrib.iteritems())
if trim and t.text:
t.text = t.text.strip()
if t.text:
if t.tag is etree.Comment and not kw.get('without_comments'):
# adds a comments node
d['#comments'] = t.text
elif children or t.attrib:
d[t.tag]['#text'] = t.text
else:
d[t.tag] = t.text
return d
|
def function[etree_to_dict, parameter[t, trim]]:
constant[Converts an lxml.etree object to Python dict.
>>> etree_to_dict(etree.Element('root'))
{'root': None}
:param etree.Element t: lxml tree to convert
:returns d: a dict representing the lxml tree ``t``
:rtype: dict
]
variable[d] assign[=] dictionary[[<ast.Attribute object at 0x7da1b27752d0>], [<ast.IfExp object at 0x7da1b2776710>]]
variable[children] assign[=] call[name[list], parameter[name[t]]]
variable[etree_to_dict_w_args] assign[=] call[name[partial], parameter[name[etree_to_dict]]]
if name[children] begin[:]
variable[dd] assign[=] call[name[defaultdict], parameter[name[list]]]
variable[d] assign[=] dictionary[[<ast.Attribute object at 0x7da1b2775180>], [<ast.Dict object at 0x7da1b27769b0>]]
for taget[name[dc]] in starred[call[name[map], parameter[name[etree_to_dict_w_args], name[children]]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da1b2776cb0>, <ast.Name object at 0x7da1b2776020>]]] in starred[call[name[dc].iteritems, parameter[]]] begin[:]
if compare[name[k] is_not name[etree].Comment] begin[:]
call[call[name[dd]][name[k]].append, parameter[name[v]]]
call[name[d]][name[t].tag] assign[=] <ast.DictComp object at 0x7da1b2777d30>
if name[t].attrib begin[:]
call[call[name[d]][name[t].tag].update, parameter[<ast.GeneratorExp object at 0x7da1b2775c30>]]
if <ast.BoolOp object at 0x7da1b2775a20> begin[:]
name[t].text assign[=] call[name[t].text.strip, parameter[]]
if name[t].text begin[:]
if <ast.BoolOp object at 0x7da1b2774d30> begin[:]
call[name[d]][constant[#comments]] assign[=] name[t].text
return[name[d]]
|
keyword[def] identifier[etree_to_dict] ( identifier[t] , identifier[trim] = keyword[True] ,** identifier[kw] ):
literal[string]
identifier[d] ={ identifier[t] . identifier[tag] :{} keyword[if] identifier[t] . identifier[attrib] keyword[else] keyword[None] }
identifier[children] = identifier[list] ( identifier[t] )
identifier[etree_to_dict_w_args] = identifier[partial] ( identifier[etree_to_dict] , identifier[trim] = identifier[trim] ,** identifier[kw] )
keyword[if] identifier[children] :
identifier[dd] = identifier[defaultdict] ( identifier[list] )
identifier[d] ={ identifier[t] . identifier[tag] :{}}
keyword[for] identifier[dc] keyword[in] identifier[map] ( identifier[etree_to_dict_w_args] , identifier[children] ):
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[dc] . identifier[iteritems] ():
keyword[if] identifier[k] keyword[is] keyword[not] identifier[etree] . identifier[Comment] :
identifier[dd] [ identifier[k] ]. identifier[append] ( identifier[v] )
identifier[d] [ identifier[t] . identifier[tag] ]={ identifier[k] : identifier[v] [ literal[int] ] keyword[if] identifier[len] ( identifier[v] )== literal[int] keyword[else] identifier[v] keyword[for] identifier[k] , identifier[v] keyword[in] identifier[dd] . identifier[iteritems] ()}
keyword[if] identifier[t] . identifier[attrib] :
identifier[d] [ identifier[t] . identifier[tag] ]. identifier[update] (( literal[string] + identifier[k] , identifier[v] ) keyword[for] identifier[k] , identifier[v] keyword[in] identifier[t] . identifier[attrib] . identifier[iteritems] ())
keyword[if] identifier[trim] keyword[and] identifier[t] . identifier[text] :
identifier[t] . identifier[text] = identifier[t] . identifier[text] . identifier[strip] ()
keyword[if] identifier[t] . identifier[text] :
keyword[if] identifier[t] . identifier[tag] keyword[is] identifier[etree] . identifier[Comment] keyword[and] keyword[not] identifier[kw] . identifier[get] ( literal[string] ):
identifier[d] [ literal[string] ]= identifier[t] . identifier[text]
keyword[elif] identifier[children] keyword[or] identifier[t] . identifier[attrib] :
identifier[d] [ identifier[t] . identifier[tag] ][ literal[string] ]= identifier[t] . identifier[text]
keyword[else] :
identifier[d] [ identifier[t] . identifier[tag] ]= identifier[t] . identifier[text]
keyword[return] identifier[d]
|
def etree_to_dict(t, trim=True, **kw):
u"""Converts an lxml.etree object to Python dict.
>>> etree_to_dict(etree.Element('root'))
{'root': None}
:param etree.Element t: lxml tree to convert
:returns d: a dict representing the lxml tree ``t``
:rtype: dict
"""
d = {t.tag: {} if t.attrib else None}
children = list(t)
etree_to_dict_w_args = partial(etree_to_dict, trim=trim, **kw)
if children:
dd = defaultdict(list)
d = {t.tag: {}}
for dc in map(etree_to_dict_w_args, children):
for (k, v) in dc.iteritems():
# do not add Comment instance to the key
if k is not etree.Comment:
dd[k].append(v) # depends on [control=['if'], data=['k']] # depends on [control=['for'], data=[]] # depends on [control=['for'], data=['dc']]
d[t.tag] = {k: v[0] if len(v) == 1 else v for (k, v) in dd.iteritems()} # depends on [control=['if'], data=[]]
if t.attrib:
d[t.tag].update((('@' + k, v) for (k, v) in t.attrib.iteritems())) # depends on [control=['if'], data=[]]
if trim and t.text:
t.text = t.text.strip() # depends on [control=['if'], data=[]]
if t.text:
if t.tag is etree.Comment and (not kw.get('without_comments')):
# adds a comments node
d['#comments'] = t.text # depends on [control=['if'], data=[]]
elif children or t.attrib:
d[t.tag]['#text'] = t.text # depends on [control=['if'], data=[]]
else:
d[t.tag] = t.text # depends on [control=['if'], data=[]]
return d
|
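The `.iteritems()` calls mark `etree_to_dict` as Python 2 code (a Python 3 port would use `.items()`). Under Python 2 with `lxml` installed and the `etree_to_dict` above in scope, attributes land under `'@'`-prefixed keys, text under `'#text'`, and repeated children collapse into a list; the expected shapes are shown as comments:

```python
from lxml import etree

print(etree_to_dict(etree.XML('<root a="1">hi</root>')))
# -> {'root': {'@a': '1', '#text': 'hi'}}

print(etree_to_dict(etree.XML('<root><x>1</x><x>2</x></root>')))
# -> {'root': {'x': ['1', '2']}}
```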
def wet_records_from_file_obj(f, take_ownership=False):
"""Iterate through records in WET file object."""
while True:
record = WETRecord.read(f)
if record is None:
break
if not record.url:
continue
yield record
if take_ownership:
f.close()
|
def function[wet_records_from_file_obj, parameter[f, take_ownership]]:
constant[Iterate through records in WET file object.]
while constant[True] begin[:]
variable[record] assign[=] call[name[WETRecord].read, parameter[name[f]]]
if compare[name[record] is constant[None]] begin[:]
break
if <ast.UnaryOp object at 0x7da1b2059c00> begin[:]
continue
<ast.Yield object at 0x7da1b2059990>
if name[take_ownership] begin[:]
call[name[f].close, parameter[]]
|
keyword[def] identifier[wet_records_from_file_obj] ( identifier[f] , identifier[take_ownership] = keyword[False] ):
literal[string]
keyword[while] keyword[True] :
identifier[record] = identifier[WETRecord] . identifier[read] ( identifier[f] )
keyword[if] identifier[record] keyword[is] keyword[None] :
keyword[break]
keyword[if] keyword[not] identifier[record] . identifier[url] :
keyword[continue]
keyword[yield] identifier[record]
keyword[if] identifier[take_ownership] :
identifier[f] . identifier[close] ()
|
def wet_records_from_file_obj(f, take_ownership=False):
"""Iterate through records in WET file object."""
while True:
record = WETRecord.read(f)
if record is None:
break # depends on [control=['if'], data=[]]
if not record.url:
continue # depends on [control=['if'], data=[]]
yield record # depends on [control=['while'], data=[]]
if take_ownership:
f.close() # depends on [control=['if'], data=[]]
|
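Because the body is a generator, the `take_ownership` close only runs once the generator is exhausted (or explicitly closed), not when the function is called. A stand-in with the same control flow makes the timing visible (`WETRecord` itself is not shown in this row):

```python
import io

def records(f, take_ownership=False):
    while True:
        record = f.readline().strip()
        if not record:  # loosely mimics the 'record is None' break
            break
        yield record
    if take_ownership:
        f.close()

f = io.StringIO('a\nb\n\n')
gen = records(f, take_ownership=True)
print(next(gen))  # 'a' -- f is still open here
print(list(gen))  # ['b'] -- exhaustion triggers f.close()
print(f.closed)   # True
```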
def read(self, n):
"""Read n bytes.
Returns exactly n bytes of data unless the underlying raw IO
stream reaches EOF.
"""
buf = self._read_buf
pos = self._read_pos
end = pos + n
if end <= len(buf):
# Fast path: the data to read is fully buffered.
self._read_pos += n
return self._update_pos(buf[pos:end])
# Slow path: read from the stream until enough bytes are read,
# or until an EOF occurs or until read() would block.
wanted = max(self.buffer_size, n)
while len(buf) < end:
chunk = self.raw.read(wanted)
if not chunk:
break
buf += chunk
self._read_buf = buf[end:] # Save the extra data in the buffer.
self._read_pos = 0
return self._update_pos(buf[pos:end])
|
def function[read, parameter[self, n]]:
constant[Read n bytes.
Returns exactly n bytes of data unless the underlying raw IO
stream reaches EOF.
]
variable[buf] assign[=] name[self]._read_buf
variable[pos] assign[=] name[self]._read_pos
variable[end] assign[=] binary_operation[name[pos] + name[n]]
if compare[name[end] less_or_equal[<=] call[name[len], parameter[name[buf]]]] begin[:]
<ast.AugAssign object at 0x7da1b0505870>
return[call[name[self]._update_pos, parameter[call[name[buf]][<ast.Slice object at 0x7da1b0505b10>]]]]
variable[wanted] assign[=] call[name[max], parameter[name[self].buffer_size, name[n]]]
while compare[call[name[len], parameter[name[buf]]] less[<] name[end]] begin[:]
variable[chunk] assign[=] call[name[self].raw.read, parameter[name[wanted]]]
if <ast.UnaryOp object at 0x7da1b0506680> begin[:]
break
<ast.AugAssign object at 0x7da1b05065f0>
name[self]._read_buf assign[=] call[name[buf]][<ast.Slice object at 0x7da1b05058a0>]
name[self]._read_pos assign[=] constant[0]
return[call[name[self]._update_pos, parameter[call[name[buf]][<ast.Slice object at 0x7da1b0505240>]]]]
|
keyword[def] identifier[read] ( identifier[self] , identifier[n] ):
literal[string]
identifier[buf] = identifier[self] . identifier[_read_buf]
identifier[pos] = identifier[self] . identifier[_read_pos]
identifier[end] = identifier[pos] + identifier[n]
keyword[if] identifier[end] <= identifier[len] ( identifier[buf] ):
identifier[self] . identifier[_read_pos] += identifier[n]
keyword[return] identifier[self] . identifier[_update_pos] ( identifier[buf] [ identifier[pos] : identifier[end] ])
identifier[wanted] = identifier[max] ( identifier[self] . identifier[buffer_size] , identifier[n] )
keyword[while] identifier[len] ( identifier[buf] )< identifier[end] :
identifier[chunk] = identifier[self] . identifier[raw] . identifier[read] ( identifier[wanted] )
keyword[if] keyword[not] identifier[chunk] :
keyword[break]
identifier[buf] += identifier[chunk]
identifier[self] . identifier[_read_buf] = identifier[buf] [ identifier[end] :]
identifier[self] . identifier[_read_pos] = literal[int]
keyword[return] identifier[self] . identifier[_update_pos] ( identifier[buf] [ identifier[pos] : identifier[end] ])
|
def read(self, n):
"""Read n bytes.
Returns exactly n bytes of data unless the underlying raw IO
stream reaches EOF.
"""
buf = self._read_buf
pos = self._read_pos
end = pos + n
if end <= len(buf):
# Fast path: the data to read is fully buffered.
self._read_pos += n
return self._update_pos(buf[pos:end]) # depends on [control=['if'], data=['end']]
# Slow path: read from the stream until enough bytes are read,
# or until an EOF occurs or until read() would block.
wanted = max(self.buffer_size, n)
while len(buf) < end:
chunk = self.raw.read(wanted)
if not chunk:
break # depends on [control=['if'], data=[]]
buf += chunk # depends on [control=['while'], data=[]]
self._read_buf = buf[end:] # Save the extra data in the buffer.
self._read_pos = 0
return self._update_pos(buf[pos:end])
|
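The method serves from `_read_buf` when possible (fast path) and otherwise refills from `self.raw` in chunks of at least `buffer_size` bytes (slow path). A minimal harness exercises both paths; `raw`, `buffer_size`, and the identity `_update_pos` are assumptions, since the surrounding class is not shown:

```python
import io

class BufferedDemo(object):
    def __init__(self, raw, buffer_size=6):
        self.raw = raw
        self.buffer_size = buffer_size
        self._read_buf = b''
        self._read_pos = 0

    def _update_pos(self, data):
        return data  # the real class presumably also advances a file offset

    def read(self, n):
        buf, pos = self._read_buf, self._read_pos
        end = pos + n
        if end <= len(buf):            # fast path: fully buffered
            self._read_pos += n
            return self._update_pos(buf[pos:end])
        wanted = max(self.buffer_size, n)
        while len(buf) < end:          # slow path: refill until enough or EOF
            chunk = self.raw.read(wanted)
            if not chunk:
                break
            buf += chunk
        self._read_buf = buf[end:]     # keep the surplus for later calls
        self._read_pos = 0
        return self._update_pos(buf[pos:end])

r = BufferedDemo(io.BytesIO(b'abcdefgh'))
print(r.read(4))  # b'abcd' (slow path: one 6-byte refill)
print(r.read(2))  # b'ef'   (fast path: served from the leftover buffer)
```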
def ipv4_lstrip_zeros(address):
"""
The function to strip leading zeros in each octet of an IPv4 address.
Args:
address (:obj:`str`): An IPv4 address.
Returns:
str: The modified IPv4 address.
"""
# Split the octets.
obj = address.strip().split('.')
for x, y in enumerate(obj):
# Strip leading zeros. Split / here in case CIDR is attached.
obj[x] = y.split('/')[0].lstrip('0')
if obj[x] in ['', None]:
obj[x] = '0'
return '.'.join(obj)
|
def function[ipv4_lstrip_zeros, parameter[address]]:
constant[
The function to strip leading zeros in each octet of an IPv4 address.
Args:
address (:obj:`str`): An IPv4 address.
Returns:
str: The modified IPv4 address.
]
variable[obj] assign[=] call[call[name[address].strip, parameter[]].split, parameter[constant[.]]]
for taget[tuple[[<ast.Name object at 0x7da20cabeb30>, <ast.Name object at 0x7da20cabfa90>]]] in starred[call[name[enumerate], parameter[name[obj]]]] begin[:]
call[name[obj]][name[x]] assign[=] call[call[call[name[y].split, parameter[constant[/]]]][constant[0]].lstrip, parameter[constant[0]]]
if compare[call[name[obj]][name[x]] in list[[<ast.Constant object at 0x7da18c4cc3a0>, <ast.Constant object at 0x7da18c4cda50>]]] begin[:]
call[name[obj]][name[x]] assign[=] constant[0]
return[call[constant[.].join, parameter[name[obj]]]]
|
keyword[def] identifier[ipv4_lstrip_zeros] ( identifier[address] ):
literal[string]
identifier[obj] = identifier[address] . identifier[strip] (). identifier[split] ( literal[string] )
keyword[for] identifier[x] , identifier[y] keyword[in] identifier[enumerate] ( identifier[obj] ):
identifier[obj] [ identifier[x] ]= identifier[y] . identifier[split] ( literal[string] )[ literal[int] ]. identifier[lstrip] ( literal[string] )
keyword[if] identifier[obj] [ identifier[x] ] keyword[in] [ literal[string] , keyword[None] ]:
identifier[obj] [ identifier[x] ]= literal[string]
keyword[return] literal[string] . identifier[join] ( identifier[obj] )
|
def ipv4_lstrip_zeros(address):
"""
The function to strip leading zeros in each octet of an IPv4 address.
Args:
address (:obj:`str`): An IPv4 address.
Returns:
str: The modified IPv4 address.
"""
# Split the octets.
obj = address.strip().split('.')
for (x, y) in enumerate(obj):
# Strip leading zeros. Split / here in case CIDR is attached.
obj[x] = y.split('/')[0].lstrip('0')
if obj[x] in ['', None]:
obj[x] = '0' # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return '.'.join(obj)
|
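Each octet is normalized independently, and an all-zero octet falls back to `'0'`. Note that only `split('/')[0]` of each octet survives, so a CIDR suffix on the last octet is silently discarded. Illustrative calls, assuming the `ipv4_lstrip_zeros` above is in scope:

```python
print(ipv4_lstrip_zeros('010.001.000.001'))     # '10.1.0.1'
print(ipv4_lstrip_zeros(' 192.168.000.010 '))   # '192.168.0.10'
print(ipv4_lstrip_zeros('010.001.000.000/24'))  # '10.1.0.0' -- '/24' is dropped
```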
def get_options(argv):
"""Called to parse the given list as command-line arguments.
:returns:
an options object as returned by argparse.
"""
arg_parser = make_arg_parser()
options, unknown = arg_parser.parse_known_args(argv)
if unknown:
arg_parser.print_help()
raise exceptions.UnknownArguments(
"unknown args: {0!r}".format(unknown))
options.print_help = arg_parser.print_help
return options
|
def function[get_options, parameter[argv]]:
constant[Called to parse the given list as command-line arguments.
:returns:
an options object as returned by argparse.
]
variable[arg_parser] assign[=] call[name[make_arg_parser], parameter[]]
<ast.Tuple object at 0x7da1b0f596f0> assign[=] call[name[arg_parser].parse_known_args, parameter[name[argv]]]
if name[unknown] begin[:]
call[name[arg_parser].print_help, parameter[]]
<ast.Raise object at 0x7da1b0f591b0>
name[options].print_help assign[=] name[arg_parser].print_help
return[name[options]]
|
keyword[def] identifier[get_options] ( identifier[argv] ):
literal[string]
identifier[arg_parser] = identifier[make_arg_parser] ()
identifier[options] , identifier[unknown] = identifier[arg_parser] . identifier[parse_known_args] ( identifier[argv] )
keyword[if] identifier[unknown] :
identifier[arg_parser] . identifier[print_help] ()
keyword[raise] identifier[exceptions] . identifier[UnknownArguments] (
literal[string] . identifier[format] ( identifier[unknown] ))
identifier[options] . identifier[print_help] = identifier[arg_parser] . identifier[print_help]
keyword[return] identifier[options]
|
def get_options(argv):
"""Called to parse the given list as command-line arguments.
:returns:
an options object as returned by argparse.
"""
arg_parser = make_arg_parser()
(options, unknown) = arg_parser.parse_known_args(argv)
if unknown:
arg_parser.print_help()
raise exceptions.UnknownArguments('unknown args: {0!r}'.format(unknown)) # depends on [control=['if'], data=[]]
options.print_help = arg_parser.print_help
return options
|
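`parse_known_args` splits `argv` into recognized options and leftovers; the wrapper above turns leftovers into an error and pins `print_help` onto the options object. With a hypothetical `make_arg_parser` (the real factory is not shown in this row):

```python
import argparse

def make_arg_parser():
    parser = argparse.ArgumentParser()  # hypothetical stand-in
    parser.add_argument('--verbose', action='store_true')
    return parser

options, unknown = make_arg_parser().parse_known_args(['--verbose', '--bogus'])
print(options.verbose)  # True
print(unknown)          # ['--bogus'] -> get_options() would raise UnknownArguments
```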
def is_on(self):
"""
Get sensor state.
Assume offline or open (worst case).
"""
return self.status not in (CONST.STATUS_OFF, CONST.STATUS_OFFLINE,
CONST.STATUS_CLOSED, CONST.STATUS_OPEN)
|
def function[is_on, parameter[self]]:
constant[
Get sensor state.
Assume offline or open (worst case).
]
return[compare[name[self].status <ast.NotIn object at 0x7da2590d7190> tuple[[<ast.Attribute object at 0x7da18f00fc10>, <ast.Attribute object at 0x7da18f00f520>, <ast.Attribute object at 0x7da18f00f340>, <ast.Attribute object at 0x7da18f00f940>]]]]
|
keyword[def] identifier[is_on] ( identifier[self] ):
literal[string]
keyword[return] identifier[self] . identifier[status] keyword[not] keyword[in] ( identifier[CONST] . identifier[STATUS_OFF] , identifier[CONST] . identifier[STATUS_OFFLINE] ,
identifier[CONST] . identifier[STATUS_CLOSED] , identifier[CONST] . identifier[STATUS_OPEN] )
|
def is_on(self):
"""
Get sensor state.
Assume offline or open (worst case).
"""
return self.status not in (CONST.STATUS_OFF, CONST.STATUS_OFFLINE, CONST.STATUS_CLOSED, CONST.STATUS_OPEN)
|
def _convert_parsed_show_output(parsed_data):
'''
Convert matching string values to lists/dictionaries.
:param dict parsed_data: The text of the command output that needs to be parsed.
:return: A dictionary containing the modified configuration data.
:rtype: dict
'''
# Match lines like "main: xenial [snapshot]" or "test [local]".
source_pattern = re.compile(r'(?:(?P<component>\S+):)?\s*(?P<name>\S+)\s+\[(?P<type>\S+)\]')
sources = list()
if 'architectures' in parsed_data:
parsed_data['architectures'] = [item.strip() for item in parsed_data['architectures'].split()]
parsed_data['architectures'] = sorted(parsed_data['architectures'])
for source in parsed_data.get('sources', []):
# Retain the key/value of only the matching named groups.
matches = source_pattern.search(source)
if matches:
groups = matches.groupdict()
sources.append({key: groups[key] for key in groups if groups[key]})
if sources:
parsed_data['sources'] = sources
return parsed_data
|
def function[_convert_parsed_show_output, parameter[parsed_data]]:
constant[
Convert matching string values to lists/dictionaries.
:param dict parsed_data: The text of the command output that needs to be parsed.
:return: A dictionary containing the modified configuration data.
:rtype: dict
]
variable[source_pattern] assign[=] call[name[re].compile, parameter[constant[(?:(?P<component>\S+):)?\s*(?P<name>\S+)\s+\[(?P<type>\S+)\]]]]
variable[sources] assign[=] call[name[list], parameter[]]
if compare[constant[architectures] in name[parsed_data]] begin[:]
call[name[parsed_data]][constant[architectures]] assign[=] <ast.ListComp object at 0x7da18f58d6f0>
call[name[parsed_data]][constant[architectures]] assign[=] call[name[sorted], parameter[call[name[parsed_data]][constant[architectures]]]]
for taget[name[source]] in starred[call[name[parsed_data].get, parameter[constant[sources], list[[]]]]] begin[:]
variable[matches] assign[=] call[name[source_pattern].search, parameter[name[source]]]
if name[matches] begin[:]
variable[groups] assign[=] call[name[matches].groupdict, parameter[]]
call[name[sources].append, parameter[<ast.DictComp object at 0x7da18f58f5b0>]]
if name[sources] begin[:]
call[name[parsed_data]][constant[sources]] assign[=] name[sources]
return[name[parsed_data]]
|
keyword[def] identifier[_convert_parsed_show_output] ( identifier[parsed_data] ):
literal[string]
identifier[source_pattern] = identifier[re] . identifier[compile] ( literal[string] )
identifier[sources] = identifier[list] ()
keyword[if] literal[string] keyword[in] identifier[parsed_data] :
identifier[parsed_data] [ literal[string] ]=[ identifier[item] . identifier[strip] () keyword[for] identifier[item] keyword[in] identifier[parsed_data] [ literal[string] ]. identifier[split] ()]
identifier[parsed_data] [ literal[string] ]= identifier[sorted] ( identifier[parsed_data] [ literal[string] ])
keyword[for] identifier[source] keyword[in] identifier[parsed_data] . identifier[get] ( literal[string] ,[]):
identifier[matches] = identifier[source_pattern] . identifier[search] ( identifier[source] )
keyword[if] identifier[matches] :
identifier[groups] = identifier[matches] . identifier[groupdict] ()
identifier[sources] . identifier[append] ({ identifier[key] : identifier[groups] [ identifier[key] ] keyword[for] identifier[key] keyword[in] identifier[groups] keyword[if] identifier[groups] [ identifier[key] ]})
keyword[if] identifier[sources] :
identifier[parsed_data] [ literal[string] ]= identifier[sources]
keyword[return] identifier[parsed_data]
|
def _convert_parsed_show_output(parsed_data):
"""
Convert matching string values to lists/dictionaries.
:param dict parsed_data: The text of the command output that needs to be parsed.
:return: A dictionary containing the modified configuration data.
:rtype: dict
"""
# Match lines like "main: xenial [snapshot]" or "test [local]".
source_pattern = re.compile('(?:(?P<component>\\S+):)?\\s*(?P<name>\\S+)\\s+\\[(?P<type>\\S+)\\]')
sources = list()
if 'architectures' in parsed_data:
parsed_data['architectures'] = [item.strip() for item in parsed_data['architectures'].split()]
parsed_data['architectures'] = sorted(parsed_data['architectures']) # depends on [control=['if'], data=['parsed_data']]
for source in parsed_data.get('sources', []):
# Retain the key/value of only the matching named groups.
matches = source_pattern.search(source)
if matches:
groups = matches.groupdict()
sources.append({key: groups[key] for key in groups if groups[key]}) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['source']]
if sources:
parsed_data['sources'] = sources # depends on [control=['if'], data=[]]
return parsed_data
|
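Because the `component` group is optional and `groupdict` reports unmatched groups as `None`, the final dict comprehension drops the prefix from entries that lack it. Running the pattern on the two example lines from the comment:

```python
import re

source_pattern = re.compile(r'(?:(?P<component>\S+):)?\s*(?P<name>\S+)\s+\[(?P<type>\S+)\]')

for line in ('main: xenial [snapshot]', 'test [local]'):
    groups = source_pattern.search(line).groupdict()
    print({key: groups[key] for key in groups if groups[key]})
# {'component': 'main', 'name': 'xenial', 'type': 'snapshot'}
# {'name': 'test', 'type': 'local'}
```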
def extend(self, step):
"""
Adds the data from another STObject to this object.
Args:
step: another STObject being added after the current one in time.
"""
self.timesteps.extend(step.timesteps)
self.masks.extend(step.masks)
self.x.extend(step.x)
self.y.extend(step.y)
self.i.extend(step.i)
self.j.extend(step.j)
self.end_time = step.end_time
self.times = np.arange(self.start_time, self.end_time + self.step, self.step)
self.u = np.concatenate((self.u, step.u))
self.v = np.concatenate((self.v, step.v))
for attr in self.attributes.keys():
if attr in step.attributes.keys():
self.attributes[attr].extend(step.attributes[attr])
|
def function[extend, parameter[self, step]]:
constant[
Adds the data from another STObject to this object.
Args:
step: another STObject being added after the current one in time.
]
call[name[self].timesteps.extend, parameter[name[step].timesteps]]
call[name[self].masks.extend, parameter[name[step].masks]]
call[name[self].x.extend, parameter[name[step].x]]
call[name[self].y.extend, parameter[name[step].y]]
call[name[self].i.extend, parameter[name[step].i]]
call[name[self].j.extend, parameter[name[step].j]]
name[self].end_time assign[=] name[step].end_time
name[self].times assign[=] call[name[np].arange, parameter[name[self].start_time, binary_operation[name[self].end_time + name[self].step], name[self].step]]
name[self].u assign[=] call[name[np].concatenate, parameter[tuple[[<ast.Attribute object at 0x7da1b0e4ddb0>, <ast.Attribute object at 0x7da1b0e4f940>]]]]
name[self].v assign[=] call[name[np].concatenate, parameter[tuple[[<ast.Attribute object at 0x7da1b0e4fac0>, <ast.Attribute object at 0x7da1b0e4d0c0>]]]]
for taget[name[attr]] in starred[call[name[self].attributes.keys, parameter[]]] begin[:]
if compare[name[attr] in call[name[step].attributes.keys, parameter[]]] begin[:]
call[call[name[self].attributes][name[attr]].extend, parameter[call[name[step].attributes][name[attr]]]]
|
keyword[def] identifier[extend] ( identifier[self] , identifier[step] ):
literal[string]
identifier[self] . identifier[timesteps] . identifier[extend] ( identifier[step] . identifier[timesteps] )
identifier[self] . identifier[masks] . identifier[extend] ( identifier[step] . identifier[masks] )
identifier[self] . identifier[x] . identifier[extend] ( identifier[step] . identifier[x] )
identifier[self] . identifier[y] . identifier[extend] ( identifier[step] . identifier[y] )
identifier[self] . identifier[i] . identifier[extend] ( identifier[step] . identifier[i] )
identifier[self] . identifier[j] . identifier[extend] ( identifier[step] . identifier[j] )
identifier[self] . identifier[end_time] = identifier[step] . identifier[end_time]
identifier[self] . identifier[times] = identifier[np] . identifier[arange] ( identifier[self] . identifier[start_time] , identifier[self] . identifier[end_time] + identifier[self] . identifier[step] , identifier[self] . identifier[step] )
identifier[self] . identifier[u] = identifier[np] . identifier[concatenate] (( identifier[self] . identifier[u] , identifier[step] . identifier[u] ))
identifier[self] . identifier[v] = identifier[np] . identifier[concatenate] (( identifier[self] . identifier[v] , identifier[step] . identifier[v] ))
keyword[for] identifier[attr] keyword[in] identifier[self] . identifier[attributes] . identifier[keys] ():
keyword[if] identifier[attr] keyword[in] identifier[step] . identifier[attributes] . identifier[keys] ():
identifier[self] . identifier[attributes] [ identifier[attr] ]. identifier[extend] ( identifier[step] . identifier[attributes] [ identifier[attr] ])
|
def extend(self, step):
"""
Adds the data from another STObject to this object.
Args:
step: another STObject being added after the current one in time.
"""
self.timesteps.extend(step.timesteps)
self.masks.extend(step.masks)
self.x.extend(step.x)
self.y.extend(step.y)
self.i.extend(step.i)
self.j.extend(step.j)
self.end_time = step.end_time
self.times = np.arange(self.start_time, self.end_time + self.step, self.step)
self.u = np.concatenate((self.u, step.u))
self.v = np.concatenate((self.v, step.v))
for attr in self.attributes.keys():
if attr in step.attributes.keys():
self.attributes[attr].extend(step.attributes[attr]) # depends on [control=['if'], data=['attr']] # depends on [control=['for'], data=['attr']]
|
def GetHasherClasses(cls, hasher_names=None):
"""Retrieves the registered hashers.
Args:
hasher_names (list[str]): names of the hashers to retrieve.
Yields:
tuple: containing:
      str: hasher name
type: next hasher class.
"""
for hasher_name, hasher_class in iter(cls._hasher_classes.items()):
if not hasher_names or hasher_name in hasher_names:
yield hasher_name, hasher_class
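A hedged usage sketch; `HashersManager` is an assumed name for the registry class exposing this classmethod:

# Iterate only the named hashers; pass no names to get every registered one.
for hasher_name, hasher_class in HashersManager.GetHasherClasses(['md5', 'sha256']):
    print(hasher_name, hasher_class)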
|
def function[GetHasherClasses, parameter[cls, hasher_names]]:
constant[Retrieves the registered hashers.
Args:
hasher_names (list[str]): names of the hashers to retrieve.
Yields:
tuple: containing:
      str: hasher name
type: next hasher class.
]
for taget[tuple[[<ast.Name object at 0x7da18dc9b730>, <ast.Name object at 0x7da18dc98cd0>]]] in starred[call[name[iter], parameter[call[name[cls]._hasher_classes.items, parameter[]]]]] begin[:]
if <ast.BoolOp object at 0x7da18dc988b0> begin[:]
<ast.Yield object at 0x7da18dc98d00>
|
keyword[def] identifier[GetHasherClasses] ( identifier[cls] , identifier[hasher_names] = keyword[None] ):
literal[string]
keyword[for] identifier[hasher_name] , identifier[hasher_class] keyword[in] identifier[iter] ( identifier[cls] . identifier[_hasher_classes] . identifier[items] ()):
keyword[if] keyword[not] identifier[hasher_names] keyword[or] identifier[hasher_name] keyword[in] identifier[hasher_names] :
keyword[yield] identifier[hasher_name] , identifier[hasher_class]
|
def GetHasherClasses(cls, hasher_names=None):
"""Retrieves the registered hashers.
Args:
hasher_names (list[str]): names of the hashers to retrieve.
Yields:
tuple: containing:
      str: hasher name
type: next hasher class.
"""
for (hasher_name, hasher_class) in iter(cls._hasher_classes.items()):
if not hasher_names or hasher_name in hasher_names:
yield (hasher_name, hasher_class) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
|
def _on_new_data_received(self, data: bytes):
"""
Gets called whenever we get a whole new XML element from kik's servers.
:param data: The data received (bytes)
"""
if data == b' ':
# Happens every half hour. Disconnect after 10th time. Some kind of keep-alive? Let's send it back.
self.loop.call_soon_threadsafe(self.connection.send_raw_data, b' ')
return
xml_element = BeautifulSoup(data.decode(), features='xml')
xml_element = next(iter(xml_element)) if len(xml_element) > 0 else xml_element
# choose the handler based on the XML tag name
if xml_element.name == "k":
self._handle_received_k_element(xml_element)
if xml_element.name == "iq":
self._handle_received_iq_element(xml_element)
elif xml_element.name == "message":
self._handle_xmpp_message(xml_element)
elif xml_element.name == 'stc':
self.callback.on_captcha_received(login.CaptchaElement(xml_element))
|
def function[_on_new_data_received, parameter[self, data]]:
constant[
Gets called whenever we get a whole new XML element from kik's servers.
:param data: The data received (bytes)
]
if compare[name[data] equal[==] constant[b' ']] begin[:]
call[name[self].loop.call_soon_threadsafe, parameter[name[self].connection.send_raw_data, constant[b' ']]]
return[None]
variable[xml_element] assign[=] call[name[BeautifulSoup], parameter[call[name[data].decode, parameter[]]]]
variable[xml_element] assign[=] <ast.IfExp object at 0x7da20e961e70>
if compare[name[xml_element].name equal[==] constant[k]] begin[:]
call[name[self]._handle_received_k_element, parameter[name[xml_element]]]
if compare[name[xml_element].name equal[==] constant[iq]] begin[:]
call[name[self]._handle_received_iq_element, parameter[name[xml_element]]]
|
keyword[def] identifier[_on_new_data_received] ( identifier[self] , identifier[data] : identifier[bytes] ):
literal[string]
keyword[if] identifier[data] == literal[string] :
identifier[self] . identifier[loop] . identifier[call_soon_threadsafe] ( identifier[self] . identifier[connection] . identifier[send_raw_data] , literal[string] )
keyword[return]
identifier[xml_element] = identifier[BeautifulSoup] ( identifier[data] . identifier[decode] (), identifier[features] = literal[string] )
identifier[xml_element] = identifier[next] ( identifier[iter] ( identifier[xml_element] )) keyword[if] identifier[len] ( identifier[xml_element] )> literal[int] keyword[else] identifier[xml_element]
keyword[if] identifier[xml_element] . identifier[name] == literal[string] :
identifier[self] . identifier[_handle_received_k_element] ( identifier[xml_element] )
keyword[if] identifier[xml_element] . identifier[name] == literal[string] :
identifier[self] . identifier[_handle_received_iq_element] ( identifier[xml_element] )
keyword[elif] identifier[xml_element] . identifier[name] == literal[string] :
identifier[self] . identifier[_handle_xmpp_message] ( identifier[xml_element] )
keyword[elif] identifier[xml_element] . identifier[name] == literal[string] :
identifier[self] . identifier[callback] . identifier[on_captcha_received] ( identifier[login] . identifier[CaptchaElement] ( identifier[xml_element] ))
|
def _on_new_data_received(self, data: bytes):
"""
Gets called whenever we get a whole new XML element from kik's servers.
:param data: The data received (bytes)
"""
if data == b' ':
# Happens every half hour. Disconnect after 10th time. Some kind of keep-alive? Let's send it back.
self.loop.call_soon_threadsafe(self.connection.send_raw_data, b' ')
return # depends on [control=['if'], data=[]]
xml_element = BeautifulSoup(data.decode(), features='xml')
xml_element = next(iter(xml_element)) if len(xml_element) > 0 else xml_element
# choose the handler based on the XML tag name
if xml_element.name == 'k':
self._handle_received_k_element(xml_element) # depends on [control=['if'], data=[]]
if xml_element.name == 'iq':
self._handle_received_iq_element(xml_element) # depends on [control=['if'], data=[]]
elif xml_element.name == 'message':
self._handle_xmpp_message(xml_element) # depends on [control=['if'], data=[]]
elif xml_element.name == 'stc':
self.callback.on_captcha_received(login.CaptchaElement(xml_element)) # depends on [control=['if'], data=[]]
|
def process(self, document):
"""Processing a group of tasks."""
self.logger.info("Processing group of tasks (parallel=%s)", self.get_parallel_mode())
self.pipeline.data.env_list[2] = {}
output, shells = [], []
result = Adapter({'success': True, 'output': []})
for task_entry in document:
key, entry = list(task_entry.items())[0]
if (not self.parallel or key == 'env') and len(shells) > 0:
result = Adapter(self.process_shells(shells))
output += result.output
shells = []
if not result.success:
break
if key == 'env':
self.pipeline.data.env_list[2].update(entry)
elif key in ['shell', 'docker(container)', 'docker(image)', 'python',
'packer', 'ansible(simple)']:
self.prepare_shell_data(shells, key, entry)
if result.success:
result = Adapter(self.process_shells(shells))
output += result.output
self.event.delegate(result.success)
return {'success': result.success, 'output': output}
|
def function[process, parameter[self, document]]:
constant[Processing a group of tasks.]
call[name[self].logger.info, parameter[constant[Processing group of tasks (parallel=%s)], call[name[self].get_parallel_mode, parameter[]]]]
call[name[self].pipeline.data.env_list][constant[2]] assign[=] dictionary[[], []]
<ast.Tuple object at 0x7da20c795390> assign[=] tuple[[<ast.List object at 0x7da20c794cd0>, <ast.List object at 0x7da20c7959c0>]]
variable[result] assign[=] call[name[Adapter], parameter[dictionary[[<ast.Constant object at 0x7da20c795c90>, <ast.Constant object at 0x7da20c795d50>], [<ast.Constant object at 0x7da20c795e70>, <ast.List object at 0x7da20c794b80>]]]]
for taget[name[task_entry]] in starred[name[document]] begin[:]
<ast.Tuple object at 0x7da20c794370> assign[=] call[call[name[list], parameter[call[name[task_entry].items, parameter[]]]]][constant[0]]
if <ast.BoolOp object at 0x7da2054a7eb0> begin[:]
variable[result] assign[=] call[name[Adapter], parameter[call[name[self].process_shells, parameter[name[shells]]]]]
<ast.AugAssign object at 0x7da2054a64a0>
variable[shells] assign[=] list[[]]
if <ast.UnaryOp object at 0x7da2054a7dc0> begin[:]
break
if compare[name[key] equal[==] constant[env]] begin[:]
call[call[name[self].pipeline.data.env_list][constant[2]].update, parameter[name[entry]]]
if name[result].success begin[:]
variable[result] assign[=] call[name[Adapter], parameter[call[name[self].process_shells, parameter[name[shells]]]]]
<ast.AugAssign object at 0x7da2054a5870>
call[name[self].event.delegate, parameter[name[result].success]]
return[dictionary[[<ast.Constant object at 0x7da2054a4340>, <ast.Constant object at 0x7da2054a6e60>], [<ast.Attribute object at 0x7da2054a6d70>, <ast.Name object at 0x7da2054a4be0>]]]
|
keyword[def] identifier[process] ( identifier[self] , identifier[document] ):
literal[string]
identifier[self] . identifier[logger] . identifier[info] ( literal[string] , identifier[self] . identifier[get_parallel_mode] ())
identifier[self] . identifier[pipeline] . identifier[data] . identifier[env_list] [ literal[int] ]={}
identifier[output] , identifier[shells] =[],[]
identifier[result] = identifier[Adapter] ({ literal[string] : keyword[True] , literal[string] :[]})
keyword[for] identifier[task_entry] keyword[in] identifier[document] :
identifier[key] , identifier[entry] = identifier[list] ( identifier[task_entry] . identifier[items] ())[ literal[int] ]
keyword[if] ( keyword[not] identifier[self] . identifier[parallel] keyword[or] identifier[key] == literal[string] ) keyword[and] identifier[len] ( identifier[shells] )> literal[int] :
identifier[result] = identifier[Adapter] ( identifier[self] . identifier[process_shells] ( identifier[shells] ))
identifier[output] += identifier[result] . identifier[output]
identifier[shells] =[]
keyword[if] keyword[not] identifier[result] . identifier[success] :
keyword[break]
keyword[if] identifier[key] == literal[string] :
identifier[self] . identifier[pipeline] . identifier[data] . identifier[env_list] [ literal[int] ]. identifier[update] ( identifier[entry] )
keyword[elif] identifier[key] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] ]:
identifier[self] . identifier[prepare_shell_data] ( identifier[shells] , identifier[key] , identifier[entry] )
keyword[if] identifier[result] . identifier[success] :
identifier[result] = identifier[Adapter] ( identifier[self] . identifier[process_shells] ( identifier[shells] ))
identifier[output] += identifier[result] . identifier[output]
identifier[self] . identifier[event] . identifier[delegate] ( identifier[result] . identifier[success] )
keyword[return] { literal[string] : identifier[result] . identifier[success] , literal[string] : identifier[output] }
|
def process(self, document):
"""Processing a group of tasks."""
self.logger.info('Processing group of tasks (parallel=%s)', self.get_parallel_mode())
self.pipeline.data.env_list[2] = {}
(output, shells) = ([], [])
result = Adapter({'success': True, 'output': []})
for task_entry in document:
(key, entry) = list(task_entry.items())[0]
if (not self.parallel or key == 'env') and len(shells) > 0:
result = Adapter(self.process_shells(shells))
output += result.output
shells = []
if not result.success:
break # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
if key == 'env':
self.pipeline.data.env_list[2].update(entry) # depends on [control=['if'], data=[]]
elif key in ['shell', 'docker(container)', 'docker(image)', 'python', 'packer', 'ansible(simple)']:
self.prepare_shell_data(shells, key, entry) # depends on [control=['if'], data=['key']] # depends on [control=['for'], data=['task_entry']]
if result.success:
result = Adapter(self.process_shells(shells))
output += result.output # depends on [control=['if'], data=[]]
self.event.delegate(result.success)
return {'success': result.success, 'output': output}
|
def gen_cert_request(filepath, keyfile, config, silent=False):
"""
generate certificate request
:param filepath: file path to the certificate request
    :param keyfile: file path to the private key
    :param config: file path to the openssl configuration file
    :param silent: whether to suppress output
"""
message = 'generate ssl certificate request'
cmd = (
'openssl req -new -key {} -out {} -subj "{}"'
' -extensions v3_req -config {}').format(
keyfile, filepath, SUBJECT, config)
call_openssl(cmd, message, silent)
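A short usage sketch with illustrative paths; the key and config files are assumed to exist already:

# Writes a CSR to /tmp/server.csr by shelling out to openssl via call_openssl.
gen_cert_request('/tmp/server.csr', '/tmp/server.key', '/tmp/openssl.cnf', silent=True)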
|
def function[gen_cert_request, parameter[filepath, keyfile, config, silent]]:
constant[
generate certificate request
:param filepath: file path to the certificate request
    :param keyfile: file path to the private key
    :param config: file path to the openssl configuration file
    :param silent: whether to suppress output
]
variable[message] assign[=] constant[generate ssl certificate request]
variable[cmd] assign[=] call[constant[openssl req -new -key {} -out {} -subj "{}" -extensions v3_req -config {}].format, parameter[name[keyfile], name[filepath], name[SUBJECT], name[config]]]
call[name[call_openssl], parameter[name[cmd], name[message], name[silent]]]
|
keyword[def] identifier[gen_cert_request] ( identifier[filepath] , identifier[keyfile] , identifier[config] , identifier[silent] = keyword[False] ):
literal[string]
identifier[message] = literal[string]
identifier[cmd] =(
literal[string]
literal[string] ). identifier[format] (
identifier[keyfile] , identifier[filepath] , identifier[SUBJECT] , identifier[config] )
identifier[call_openssl] ( identifier[cmd] , identifier[message] , identifier[silent] )
|
def gen_cert_request(filepath, keyfile, config, silent=False):
"""
generate certificate request
:param filepath: file path to the certificate request
    :param keyfile: file path to the private key
    :param config: file path to the openssl configuration file
    :param silent: whether to suppress output
"""
message = 'generate ssl certificate request'
cmd = 'openssl req -new -key {} -out {} -subj "{}" -extensions v3_req -config {}'.format(keyfile, filepath, SUBJECT, config)
call_openssl(cmd, message, silent)
|
def mel(sr, n_dft, n_mels=128, fmin=0.0, fmax=None, htk=False, norm=1):
"""[np] create a filterbank matrix to combine stft bins into mel-frequency bins
    uses the Slaney-style mel scale (following Librosa)
    n_mels: number of mel bands
fmin : lowest frequency [Hz]
fmax : highest frequency [Hz]
If `None`, use `sr / 2.0`
"""
return librosa.filters.mel(sr=sr, n_fft=n_dft, n_mels=n_mels,
fmin=fmin, fmax=fmax,
htk=htk, norm=norm).astype(K.floatx())
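A quick sketch of the returned shape for 22.05 kHz audio and a 2048-point DFT; librosa's filterbank has shape (n_mels, 1 + n_dft // 2):

fb = mel(sr=22050, n_dft=2048, n_mels=128, fmin=0.0, fmax=8000.0)
print(fb.shape)  # (128, 1025)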
|
def function[mel, parameter[sr, n_dft, n_mels, fmin, fmax, htk, norm]]:
constant[[np] create a filterbank matrix to combine stft bins into mel-frequency bins
    uses the Slaney-style mel scale (following Librosa)
    n_mels: number of mel bands
fmin : lowest frequency [Hz]
fmax : highest frequency [Hz]
If `None`, use `sr / 2.0`
]
return[call[call[name[librosa].filters.mel, parameter[]].astype, parameter[call[name[K].floatx, parameter[]]]]]
|
keyword[def] identifier[mel] ( identifier[sr] , identifier[n_dft] , identifier[n_mels] = literal[int] , identifier[fmin] = literal[int] , identifier[fmax] = keyword[None] , identifier[htk] = keyword[False] , identifier[norm] = literal[int] ):
literal[string]
keyword[return] identifier[librosa] . identifier[filters] . identifier[mel] ( identifier[sr] = identifier[sr] , identifier[n_fft] = identifier[n_dft] , identifier[n_mels] = identifier[n_mels] ,
identifier[fmin] = identifier[fmin] , identifier[fmax] = identifier[fmax] ,
identifier[htk] = identifier[htk] , identifier[norm] = identifier[norm] ). identifier[astype] ( identifier[K] . identifier[floatx] ())
|
def mel(sr, n_dft, n_mels=128, fmin=0.0, fmax=None, htk=False, norm=1):
"""[np] create a filterbank matrix to combine stft bins into mel-frequency bins
    uses the Slaney-style mel scale (following Librosa)
    n_mels: number of mel bands
fmin : lowest frequency [Hz]
fmax : highest frequency [Hz]
If `None`, use `sr / 2.0`
"""
return librosa.filters.mel(sr=sr, n_fft=n_dft, n_mels=n_mels, fmin=fmin, fmax=fmax, htk=htk, norm=norm).astype(K.floatx())
|
def import_admin_credentials(self, username, password):
"""
Imports the KDC Account Manager credentials needed by Cloudera
Manager to create kerberos principals needed by CDH services.
@param username Username of the Account Manager. Full name including the Kerberos
realm must be specified.
@param password Password for the Account Manager.
@return: Information about the submitted command.
@since API v7
"""
return self._cmd('importAdminCredentials', params=dict(username=username, password=password))
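A hedged usage sketch; `cm` stands in for a Cloudera Manager API handle (the name is an assumption), and the username must include the Kerberos realm:

# Returns information about the submitted command, which can then be polled.
cmd = cm.import_admin_credentials('cm-admin@EXAMPLE.COM', 's3cret')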
|
def function[import_admin_credentials, parameter[self, username, password]]:
constant[
Imports the KDC Account Manager credentials needed by Cloudera
Manager to create kerberos principals needed by CDH services.
@param username Username of the Account Manager. Full name including the Kerberos
realm must be specified.
@param password Password for the Account Manager.
@return: Information about the submitted command.
@since API v7
]
return[call[name[self]._cmd, parameter[constant[importAdminCredentials]]]]
|
keyword[def] identifier[import_admin_credentials] ( identifier[self] , identifier[username] , identifier[password] ):
literal[string]
keyword[return] identifier[self] . identifier[_cmd] ( literal[string] , identifier[params] = identifier[dict] ( identifier[username] = identifier[username] , identifier[password] = identifier[password] ))
|
def import_admin_credentials(self, username, password):
"""
Imports the KDC Account Manager credentials needed by Cloudera
Manager to create kerberos principals needed by CDH services.
@param username Username of the Account Manager. Full name including the Kerberos
realm must be specified.
@param password Password for the Account Manager.
@return: Information about the submitted command.
@since API v7
"""
return self._cmd('importAdminCredentials', params=dict(username=username, password=password))
|
def estimate(s1, s2):
"""
    Estimate the spatial relationship by
examining the position of the bounding boxes.
Parameters
----------
s1 : HandwrittenData
s2 : HandwrittenData
Returns
-------
dict of probabilities
{'bottom': 0.1,
'subscript': 0.2,
'right': 0.3,
'superscript': 0.3,
'top': 0.1}
"""
s1bb = s1.get_bounding_box()
s2bb = s2.get_bounding_box()
total_area = ((s2bb['maxx'] - s2bb['minx']+1) *
(s2bb['maxy'] - s2bb['miny']+1))
total_area = float(total_area)
top_area = 0.0
superscript_area = 0.0
right_area = 0.0
subscript_area = 0.0
bottom_area = 0.0
# bottom
if s2bb['maxy'] > s1bb['maxy'] and s2bb['minx'] < s1bb['maxx']:
miny = max(s2bb['miny'], s1bb['maxy'])
maxy = s2bb['maxy']
minx = max(s2bb['minx'], s1bb['minx'])
maxx = min(s2bb['maxx'], s1bb['maxx'])
bottom_area = float((maxx-minx)*(maxy-miny))
# Subscript
if s2bb['maxy'] > s1bb['maxy'] and s2bb['maxx'] > s1bb['maxx']:
miny = max(s2bb['miny'], s1bb['maxy'])
maxy = s2bb['maxy']
minx = max(s2bb['minx'], s1bb['maxx'])
maxx = s2bb['maxx']
subscript_area = (maxx-minx)*(maxy-miny)
# right
if s2bb['miny'] < s1bb['maxy'] and s2bb['maxy'] > s1bb['miny'] \
and s2bb['maxx'] > s1bb['maxx']:
miny = max(s1bb['miny'], s2bb['miny'])
maxy = min(s1bb['maxy'], s2bb['maxy'])
minx = max(s1bb['maxx'], s2bb['minx'])
maxx = s2bb['maxx']
right_area = (maxx-minx)*(maxy-miny)
# superscript
if s2bb['miny'] < s1bb['miny'] and s2bb['maxx'] > s1bb['maxx']:
miny = s2bb['miny']
maxy = min(s1bb['miny'], s2bb['maxy'])
minx = max(s1bb['maxx'], s2bb['minx'])
maxx = s2bb['maxx']
superscript_area = (maxx-minx)*(maxy-miny)
# top
if s2bb['miny'] < s1bb['miny'] and s2bb['minx'] < s1bb['maxx']:
miny = s2bb['miny']
maxy = min(s1bb['miny'], s2bb['maxy'])
minx = max(s1bb['minx'], s2bb['minx'])
maxx = min(s1bb['maxx'], s2bb['maxx'])
top_area = (maxx-minx)*(maxy-miny)
return {'bottom': bottom_area/total_area,
'subscript': subscript_area/total_area,
'right': right_area/total_area,
'superscript': superscript_area/total_area,
'top': top_area/total_area}
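A worked sketch, not from the source: with `s2` strictly below `s1` and overlapping it in x, all overlap falls in the bottom region. The stub class is hypothetical and only mimics get_bounding_box():

class _Stub:
    def __init__(self, bb):
        self._bb = bb
    def get_bounding_box(self):
        return self._bb

s1 = _Stub({'minx': 0, 'maxx': 10, 'miny': 0, 'maxy': 10})
s2 = _Stub({'minx': 2, 'maxx': 8, 'miny': 12, 'maxy': 18})
print(estimate(s1, s2)['bottom'])  # 36/49; every other direction is 0.0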
|
def function[estimate, parameter[s1, s2]]:
constant[
    Estimate the spatial relationship by
examining the position of the bounding boxes.
Parameters
----------
s1 : HandwrittenData
s2 : HandwrittenData
Returns
-------
dict of probabilities
{'bottom': 0.1,
'subscript': 0.2,
'right': 0.3,
'superscript': 0.3,
'top': 0.1}
]
variable[s1bb] assign[=] call[name[s1].get_bounding_box, parameter[]]
variable[s2bb] assign[=] call[name[s2].get_bounding_box, parameter[]]
variable[total_area] assign[=] binary_operation[binary_operation[binary_operation[call[name[s2bb]][constant[maxx]] - call[name[s2bb]][constant[minx]]] + constant[1]] * binary_operation[binary_operation[call[name[s2bb]][constant[maxy]] - call[name[s2bb]][constant[miny]]] + constant[1]]]
variable[total_area] assign[=] call[name[float], parameter[name[total_area]]]
variable[top_area] assign[=] constant[0.0]
variable[superscript_area] assign[=] constant[0.0]
variable[right_area] assign[=] constant[0.0]
variable[subscript_area] assign[=] constant[0.0]
variable[bottom_area] assign[=] constant[0.0]
if <ast.BoolOp object at 0x7da1b281b610> begin[:]
variable[miny] assign[=] call[name[max], parameter[call[name[s2bb]][constant[miny]], call[name[s1bb]][constant[maxy]]]]
variable[maxy] assign[=] call[name[s2bb]][constant[maxy]]
variable[minx] assign[=] call[name[max], parameter[call[name[s2bb]][constant[minx]], call[name[s1bb]][constant[minx]]]]
variable[maxx] assign[=] call[name[min], parameter[call[name[s2bb]][constant[maxx]], call[name[s1bb]][constant[maxx]]]]
variable[bottom_area] assign[=] call[name[float], parameter[binary_operation[binary_operation[name[maxx] - name[minx]] * binary_operation[name[maxy] - name[miny]]]]]
if <ast.BoolOp object at 0x7da1b2818880> begin[:]
variable[miny] assign[=] call[name[max], parameter[call[name[s2bb]][constant[miny]], call[name[s1bb]][constant[maxy]]]]
variable[maxy] assign[=] call[name[s2bb]][constant[maxy]]
variable[minx] assign[=] call[name[max], parameter[call[name[s2bb]][constant[minx]], call[name[s1bb]][constant[maxx]]]]
variable[maxx] assign[=] call[name[s2bb]][constant[maxx]]
variable[subscript_area] assign[=] binary_operation[binary_operation[name[maxx] - name[minx]] * binary_operation[name[maxy] - name[miny]]]
if <ast.BoolOp object at 0x7da1b281bc40> begin[:]
variable[miny] assign[=] call[name[max], parameter[call[name[s1bb]][constant[miny]], call[name[s2bb]][constant[miny]]]]
variable[maxy] assign[=] call[name[min], parameter[call[name[s1bb]][constant[maxy]], call[name[s2bb]][constant[maxy]]]]
variable[minx] assign[=] call[name[max], parameter[call[name[s1bb]][constant[maxx]], call[name[s2bb]][constant[minx]]]]
variable[maxx] assign[=] call[name[s2bb]][constant[maxx]]
variable[right_area] assign[=] binary_operation[binary_operation[name[maxx] - name[minx]] * binary_operation[name[maxy] - name[miny]]]
if <ast.BoolOp object at 0x7da1b2819990> begin[:]
variable[miny] assign[=] call[name[s2bb]][constant[miny]]
variable[maxy] assign[=] call[name[min], parameter[call[name[s1bb]][constant[miny]], call[name[s2bb]][constant[maxy]]]]
variable[minx] assign[=] call[name[max], parameter[call[name[s1bb]][constant[maxx]], call[name[s2bb]][constant[minx]]]]
variable[maxx] assign[=] call[name[s2bb]][constant[maxx]]
variable[superscript_area] assign[=] binary_operation[binary_operation[name[maxx] - name[minx]] * binary_operation[name[maxy] - name[miny]]]
if <ast.BoolOp object at 0x7da1b281b160> begin[:]
variable[miny] assign[=] call[name[s2bb]][constant[miny]]
variable[maxy] assign[=] call[name[min], parameter[call[name[s1bb]][constant[miny]], call[name[s2bb]][constant[maxy]]]]
variable[minx] assign[=] call[name[max], parameter[call[name[s1bb]][constant[minx]], call[name[s2bb]][constant[minx]]]]
variable[maxx] assign[=] call[name[min], parameter[call[name[s1bb]][constant[maxx]], call[name[s2bb]][constant[maxx]]]]
variable[top_area] assign[=] binary_operation[binary_operation[name[maxx] - name[minx]] * binary_operation[name[maxy] - name[miny]]]
return[dictionary[[<ast.Constant object at 0x7da1b2872110>, <ast.Constant object at 0x7da1b2873c40>, <ast.Constant object at 0x7da1b2871a50>, <ast.Constant object at 0x7da1b2872aa0>, <ast.Constant object at 0x7da1b2872680>], [<ast.BinOp object at 0x7da1b28709a0>, <ast.BinOp object at 0x7da1b2871cc0>, <ast.BinOp object at 0x7da1b28706d0>, <ast.BinOp object at 0x7da1b2872d10>, <ast.BinOp object at 0x7da1b2870670>]]]
|
keyword[def] identifier[estimate] ( identifier[s1] , identifier[s2] ):
literal[string]
identifier[s1bb] = identifier[s1] . identifier[get_bounding_box] ()
identifier[s2bb] = identifier[s2] . identifier[get_bounding_box] ()
identifier[total_area] =(( identifier[s2bb] [ literal[string] ]- identifier[s2bb] [ literal[string] ]+ literal[int] )*
( identifier[s2bb] [ literal[string] ]- identifier[s2bb] [ literal[string] ]+ literal[int] ))
identifier[total_area] = identifier[float] ( identifier[total_area] )
identifier[top_area] = literal[int]
identifier[superscript_area] = literal[int]
identifier[right_area] = literal[int]
identifier[subscript_area] = literal[int]
identifier[bottom_area] = literal[int]
keyword[if] identifier[s2bb] [ literal[string] ]> identifier[s1bb] [ literal[string] ] keyword[and] identifier[s2bb] [ literal[string] ]< identifier[s1bb] [ literal[string] ]:
identifier[miny] = identifier[max] ( identifier[s2bb] [ literal[string] ], identifier[s1bb] [ literal[string] ])
identifier[maxy] = identifier[s2bb] [ literal[string] ]
identifier[minx] = identifier[max] ( identifier[s2bb] [ literal[string] ], identifier[s1bb] [ literal[string] ])
identifier[maxx] = identifier[min] ( identifier[s2bb] [ literal[string] ], identifier[s1bb] [ literal[string] ])
identifier[bottom_area] = identifier[float] (( identifier[maxx] - identifier[minx] )*( identifier[maxy] - identifier[miny] ))
keyword[if] identifier[s2bb] [ literal[string] ]> identifier[s1bb] [ literal[string] ] keyword[and] identifier[s2bb] [ literal[string] ]> identifier[s1bb] [ literal[string] ]:
identifier[miny] = identifier[max] ( identifier[s2bb] [ literal[string] ], identifier[s1bb] [ literal[string] ])
identifier[maxy] = identifier[s2bb] [ literal[string] ]
identifier[minx] = identifier[max] ( identifier[s2bb] [ literal[string] ], identifier[s1bb] [ literal[string] ])
identifier[maxx] = identifier[s2bb] [ literal[string] ]
identifier[subscript_area] =( identifier[maxx] - identifier[minx] )*( identifier[maxy] - identifier[miny] )
keyword[if] identifier[s2bb] [ literal[string] ]< identifier[s1bb] [ literal[string] ] keyword[and] identifier[s2bb] [ literal[string] ]> identifier[s1bb] [ literal[string] ] keyword[and] identifier[s2bb] [ literal[string] ]> identifier[s1bb] [ literal[string] ]:
identifier[miny] = identifier[max] ( identifier[s1bb] [ literal[string] ], identifier[s2bb] [ literal[string] ])
identifier[maxy] = identifier[min] ( identifier[s1bb] [ literal[string] ], identifier[s2bb] [ literal[string] ])
identifier[minx] = identifier[max] ( identifier[s1bb] [ literal[string] ], identifier[s2bb] [ literal[string] ])
identifier[maxx] = identifier[s2bb] [ literal[string] ]
identifier[right_area] =( identifier[maxx] - identifier[minx] )*( identifier[maxy] - identifier[miny] )
keyword[if] identifier[s2bb] [ literal[string] ]< identifier[s1bb] [ literal[string] ] keyword[and] identifier[s2bb] [ literal[string] ]> identifier[s1bb] [ literal[string] ]:
identifier[miny] = identifier[s2bb] [ literal[string] ]
identifier[maxy] = identifier[min] ( identifier[s1bb] [ literal[string] ], identifier[s2bb] [ literal[string] ])
identifier[minx] = identifier[max] ( identifier[s1bb] [ literal[string] ], identifier[s2bb] [ literal[string] ])
identifier[maxx] = identifier[s2bb] [ literal[string] ]
identifier[superscript_area] =( identifier[maxx] - identifier[minx] )*( identifier[maxy] - identifier[miny] )
keyword[if] identifier[s2bb] [ literal[string] ]< identifier[s1bb] [ literal[string] ] keyword[and] identifier[s2bb] [ literal[string] ]< identifier[s1bb] [ literal[string] ]:
identifier[miny] = identifier[s2bb] [ literal[string] ]
identifier[maxy] = identifier[min] ( identifier[s1bb] [ literal[string] ], identifier[s2bb] [ literal[string] ])
identifier[minx] = identifier[max] ( identifier[s1bb] [ literal[string] ], identifier[s2bb] [ literal[string] ])
identifier[maxx] = identifier[min] ( identifier[s1bb] [ literal[string] ], identifier[s2bb] [ literal[string] ])
identifier[top_area] =( identifier[maxx] - identifier[minx] )*( identifier[maxy] - identifier[miny] )
keyword[return] { literal[string] : identifier[bottom_area] / identifier[total_area] ,
literal[string] : identifier[subscript_area] / identifier[total_area] ,
literal[string] : identifier[right_area] / identifier[total_area] ,
literal[string] : identifier[superscript_area] / identifier[total_area] ,
literal[string] : identifier[top_area] / identifier[total_area] }
|
def estimate(s1, s2):
"""
    Estimate the spatial relationship by
examining the position of the bounding boxes.
Parameters
----------
s1 : HandwrittenData
s2 : HandwrittenData
Returns
-------
dict of probabilities
{'bottom': 0.1,
'subscript': 0.2,
'right': 0.3,
'superscript': 0.3,
'top': 0.1}
"""
s1bb = s1.get_bounding_box()
s2bb = s2.get_bounding_box()
total_area = (s2bb['maxx'] - s2bb['minx'] + 1) * (s2bb['maxy'] - s2bb['miny'] + 1)
total_area = float(total_area)
top_area = 0.0
superscript_area = 0.0
right_area = 0.0
subscript_area = 0.0
bottom_area = 0.0
# bottom
if s2bb['maxy'] > s1bb['maxy'] and s2bb['minx'] < s1bb['maxx']:
miny = max(s2bb['miny'], s1bb['maxy'])
maxy = s2bb['maxy']
minx = max(s2bb['minx'], s1bb['minx'])
maxx = min(s2bb['maxx'], s1bb['maxx'])
bottom_area = float((maxx - minx) * (maxy - miny)) # depends on [control=['if'], data=[]]
# Subscript
if s2bb['maxy'] > s1bb['maxy'] and s2bb['maxx'] > s1bb['maxx']:
miny = max(s2bb['miny'], s1bb['maxy'])
maxy = s2bb['maxy']
minx = max(s2bb['minx'], s1bb['maxx'])
maxx = s2bb['maxx']
subscript_area = (maxx - minx) * (maxy - miny) # depends on [control=['if'], data=[]]
# right
if s2bb['miny'] < s1bb['maxy'] and s2bb['maxy'] > s1bb['miny'] and (s2bb['maxx'] > s1bb['maxx']):
miny = max(s1bb['miny'], s2bb['miny'])
maxy = min(s1bb['maxy'], s2bb['maxy'])
minx = max(s1bb['maxx'], s2bb['minx'])
maxx = s2bb['maxx']
right_area = (maxx - minx) * (maxy - miny) # depends on [control=['if'], data=[]]
# superscript
if s2bb['miny'] < s1bb['miny'] and s2bb['maxx'] > s1bb['maxx']:
miny = s2bb['miny']
maxy = min(s1bb['miny'], s2bb['maxy'])
minx = max(s1bb['maxx'], s2bb['minx'])
maxx = s2bb['maxx']
superscript_area = (maxx - minx) * (maxy - miny) # depends on [control=['if'], data=[]]
# top
if s2bb['miny'] < s1bb['miny'] and s2bb['minx'] < s1bb['maxx']:
miny = s2bb['miny']
maxy = min(s1bb['miny'], s2bb['maxy'])
minx = max(s1bb['minx'], s2bb['minx'])
maxx = min(s1bb['maxx'], s2bb['maxx'])
top_area = (maxx - minx) * (maxy - miny) # depends on [control=['if'], data=[]]
return {'bottom': bottom_area / total_area, 'subscript': subscript_area / total_area, 'right': right_area / total_area, 'superscript': superscript_area / total_area, 'top': top_area / total_area}
|
def name_parts(self):
"""Works with PartialNameMixin.clear_dict to set NONE and ANY
values."""
default = PartialMixin.ANY
return ([(k, default, True)
for k, _, _ in PartitionName._name_parts]
+
[(k, default, True)
for k, _, _ in Name._generated_names]
)
|
def function[name_parts, parameter[self]]:
constant[Works with PartialNameMixin.clear_dict to set NONE and ANY
values.]
variable[default] assign[=] name[PartialMixin].ANY
return[binary_operation[<ast.ListComp object at 0x7da2041d9f60> + <ast.ListComp object at 0x7da2041d9ba0>]]
|
keyword[def] identifier[name_parts] ( identifier[self] ):
literal[string]
identifier[default] = identifier[PartialMixin] . identifier[ANY]
keyword[return] ([( identifier[k] , identifier[default] , keyword[True] )
keyword[for] identifier[k] , identifier[_] , identifier[_] keyword[in] identifier[PartitionName] . identifier[_name_parts] ]
+
[( identifier[k] , identifier[default] , keyword[True] )
keyword[for] identifier[k] , identifier[_] , identifier[_] keyword[in] identifier[Name] . identifier[_generated_names] ]
)
|
def name_parts(self):
"""Works with PartialNameMixin.clear_dict to set NONE and ANY
values."""
default = PartialMixin.ANY
return [(k, default, True) for (k, _, _) in PartitionName._name_parts] + [(k, default, True) for (k, _, _) in Name._generated_names]
|
def _write_jpy_config(target_dir=None, install_dir=None):
"""
Write out a well-formed jpyconfig.properties file for easier Java
integration in a given location.
"""
if not target_dir:
target_dir = _build_dir()
args = [sys.executable,
os.path.join(target_dir, 'jpyutil.py'),
'--jvm_dll', jvm_dll_file,
'--java_home', jdk_home_dir,
'--log_level', 'DEBUG',
'--req_java',
'--req_py']
if install_dir:
args.append('--install_dir')
args.append(install_dir)
log.info('Writing jpy configuration to %s using install_dir %s' % (target_dir, install_dir))
return subprocess.call(args)
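A short sketch, assuming the module-level jvm_dll_file and jdk_home_dir globals are already set and the build dir contains jpyutil.py:

# Returns the exit code of the jpyutil.py subprocess; install_dir is illustrative.
rc = _write_jpy_config(install_dir='/opt/myapp')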
|
def function[_write_jpy_config, parameter[target_dir, install_dir]]:
constant[
Write out a well-formed jpyconfig.properties file for easier Java
integration in a given location.
]
if <ast.UnaryOp object at 0x7da204623970> begin[:]
variable[target_dir] assign[=] call[name[_build_dir], parameter[]]
variable[args] assign[=] list[[<ast.Attribute object at 0x7da204622ad0>, <ast.Call object at 0x7da204621ab0>, <ast.Constant object at 0x7da1b26ada80>, <ast.Name object at 0x7da1b26ac0d0>, <ast.Constant object at 0x7da1b26affa0>, <ast.Name object at 0x7da18f8103d0>, <ast.Constant object at 0x7da18f812740>, <ast.Constant object at 0x7da204622170>, <ast.Constant object at 0x7da204621600>, <ast.Constant object at 0x7da204622e00>]]
if name[install_dir] begin[:]
call[name[args].append, parameter[constant[--install_dir]]]
call[name[args].append, parameter[name[install_dir]]]
call[name[log].info, parameter[binary_operation[constant[Writing jpy configuration to %s using install_dir %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da204622f80>, <ast.Name object at 0x7da204621b10>]]]]]
return[call[name[subprocess].call, parameter[name[args]]]]
|
keyword[def] identifier[_write_jpy_config] ( identifier[target_dir] = keyword[None] , identifier[install_dir] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[target_dir] :
identifier[target_dir] = identifier[_build_dir] ()
identifier[args] =[ identifier[sys] . identifier[executable] ,
identifier[os] . identifier[path] . identifier[join] ( identifier[target_dir] , literal[string] ),
literal[string] , identifier[jvm_dll_file] ,
literal[string] , identifier[jdk_home_dir] ,
literal[string] , literal[string] ,
literal[string] ,
literal[string] ]
keyword[if] identifier[install_dir] :
identifier[args] . identifier[append] ( literal[string] )
identifier[args] . identifier[append] ( identifier[install_dir] )
identifier[log] . identifier[info] ( literal[string] %( identifier[target_dir] , identifier[install_dir] ))
keyword[return] identifier[subprocess] . identifier[call] ( identifier[args] )
|
def _write_jpy_config(target_dir=None, install_dir=None):
"""
Write out a well-formed jpyconfig.properties file for easier Java
integration in a given location.
"""
if not target_dir:
target_dir = _build_dir() # depends on [control=['if'], data=[]]
args = [sys.executable, os.path.join(target_dir, 'jpyutil.py'), '--jvm_dll', jvm_dll_file, '--java_home', jdk_home_dir, '--log_level', 'DEBUG', '--req_java', '--req_py']
if install_dir:
args.append('--install_dir')
args.append(install_dir) # depends on [control=['if'], data=[]]
log.info('Writing jpy configuration to %s using install_dir %s' % (target_dir, install_dir))
return subprocess.call(args)
|
def cached(f):
"""
Cache decorator for functions taking one or more arguments.
:param f: The function to be cached.
:return: The cached value.
"""
cache = f.cache = {}
@functools.wraps(f)
def decorator(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = f(*args, **kwargs)
return cache[key]
return decorator
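A quick usage sketch showing the memoization effect:

@cached
def slow_square(n):
    print('computing', n)
    return n * n

slow_square(4)  # prints 'computing 4' and returns 16
slow_square(4)  # served from slow_square.cache, nothing is printed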
|
def function[cached, parameter[f]]:
constant[
Cache decorator for functions taking one or more arguments.
:param f: The function to be cached.
:return: The cached value.
]
variable[cache] assign[=] dictionary[[], []]
def function[decorator, parameter[]]:
variable[key] assign[=] binary_operation[call[name[str], parameter[name[args]]] + call[name[str], parameter[name[kwargs]]]]
if compare[name[key] <ast.NotIn object at 0x7da2590d7190> name[cache]] begin[:]
call[name[cache]][name[key]] assign[=] call[name[f], parameter[<ast.Starred object at 0x7da18f58cd90>]]
return[call[name[cache]][name[key]]]
return[name[decorator]]
|
keyword[def] identifier[cached] ( identifier[f] ):
literal[string]
identifier[cache] = identifier[f] . identifier[cache] ={}
@ identifier[functools] . identifier[wraps] ( identifier[f] )
keyword[def] identifier[decorator] (* identifier[args] ,** identifier[kwargs] ):
identifier[key] = identifier[str] ( identifier[args] )+ identifier[str] ( identifier[kwargs] )
keyword[if] identifier[key] keyword[not] keyword[in] identifier[cache] :
identifier[cache] [ identifier[key] ]= identifier[f] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[cache] [ identifier[key] ]
keyword[return] identifier[decorator]
|
def cached(f):
"""
Cache decorator for functions taking one or more arguments.
:param f: The function to be cached.
:return: The cached value.
"""
cache = f.cache = {}
@functools.wraps(f)
def decorator(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = f(*args, **kwargs) # depends on [control=['if'], data=['key', 'cache']]
return cache[key]
return decorator
|
def write(self, message, flush=True):
"""
Function: write
Summary: write method on the default stream
Examples: >>> stream.write('message')
'message'
Attributes:
@param (message): str-like content to send on stream
@param (flush) default=True: flush the stdout after write
Returns: None
"""
self.stream.write(message)
if flush:
self.stream.flush()
|
def function[write, parameter[self, message, flush]]:
constant[
Function: write
Summary: write method on the default stream
Examples: >>> stream.write('message')
'message'
Attributes:
@param (message): str-like content to send on stream
@param (flush) default=True: flush the stdout after write
Returns: None
]
call[name[self].stream.write, parameter[name[message]]]
if name[flush] begin[:]
call[name[self].stream.flush, parameter[]]
|
keyword[def] identifier[write] ( identifier[self] , identifier[message] , identifier[flush] = keyword[True] ):
literal[string]
identifier[self] . identifier[stream] . identifier[write] ( identifier[message] )
keyword[if] identifier[flush] :
identifier[self] . identifier[stream] . identifier[flush] ()
|
def write(self, message, flush=True):
"""
Function: write
Summary: write method on the default stream
Examples: >>> stream.write('message')
'message'
Attributes:
@param (message): str-like content to send on stream
@param (flush) default=True: flush the stdout after write
Returns: None
"""
self.stream.write(message)
if flush:
self.stream.flush() # depends on [control=['if'], data=[]]
|
def check_bucket_exists(self, bucket: str) -> bool:
"""
Checks if bucket with specified name exists.
:param bucket: the bucket to be checked.
:return: true if specified bucket exists.
"""
bucket_obj = self.gcp_client.bucket(bucket) # type: Bucket
return bucket_obj.exists()
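A hedged sketch; `store` stands in for an instance holding a google-cloud-storage client as gcp_client (the name is an assumption):

if not store.check_bucket_exists('my-staging-bucket'):
    raise RuntimeError('bucket does not exist')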
|
def function[check_bucket_exists, parameter[self, bucket]]:
constant[
Checks if bucket with specified name exists.
:param bucket: the bucket to be checked.
:return: true if specified bucket exists.
]
variable[bucket_obj] assign[=] call[name[self].gcp_client.bucket, parameter[name[bucket]]]
return[call[name[bucket_obj].exists, parameter[]]]
|
keyword[def] identifier[check_bucket_exists] ( identifier[self] , identifier[bucket] : identifier[str] )-> identifier[bool] :
literal[string]
identifier[bucket_obj] = identifier[self] . identifier[gcp_client] . identifier[bucket] ( identifier[bucket] )
keyword[return] identifier[bucket_obj] . identifier[exists] ()
|
def check_bucket_exists(self, bucket: str) -> bool:
"""
Checks if bucket with specified name exists.
:param bucket: the bucket to be checked.
:return: true if specified bucket exists.
"""
bucket_obj = self.gcp_client.bucket(bucket) # type: Bucket
return bucket_obj.exists()
|
def collection_callback(result=None):
"""
:type result: opendnp3.CommandPointResult
"""
print("Header: {0} | Index: {1} | State: {2} | Status: {3}".format(
result.headerIndex,
result.index,
opendnp3.CommandPointStateToString(result.state),
opendnp3.CommandStatusToString(result.status)
))
|
def function[collection_callback, parameter[result]]:
constant[
:type result: opendnp3.CommandPointResult
]
call[name[print], parameter[call[constant[Header: {0} | Index: {1} | State: {2} | Status: {3}].format, parameter[name[result].headerIndex, name[result].index, call[name[opendnp3].CommandPointStateToString, parameter[name[result].state]], call[name[opendnp3].CommandStatusToString, parameter[name[result].status]]]]]]
|
keyword[def] identifier[collection_callback] ( identifier[result] = keyword[None] ):
literal[string]
identifier[print] ( literal[string] . identifier[format] (
identifier[result] . identifier[headerIndex] ,
identifier[result] . identifier[index] ,
identifier[opendnp3] . identifier[CommandPointStateToString] ( identifier[result] . identifier[state] ),
identifier[opendnp3] . identifier[CommandStatusToString] ( identifier[result] . identifier[status] )
))
|
def collection_callback(result=None):
"""
:type result: opendnp3.CommandPointResult
"""
print('Header: {0} | Index: {1} | State: {2} | Status: {3}'.format(result.headerIndex, result.index, opendnp3.CommandPointStateToString(result.state), opendnp3.CommandStatusToString(result.status)))
|
def print_stmt(self, print_loc, stmt):
"""
(2.6-2.7)
print_stmt: 'print' ( [ test (',' test)* [','] ] |
'>>' test [ (',' test)+ [','] ] )
"""
stmt.keyword_loc = print_loc
if stmt.loc is None:
stmt.loc = print_loc
else:
stmt.loc = print_loc.join(stmt.loc)
return stmt
|
def function[print_stmt, parameter[self, print_loc, stmt]]:
constant[
(2.6-2.7)
print_stmt: 'print' ( [ test (',' test)* [','] ] |
'>>' test [ (',' test)+ [','] ] )
]
name[stmt].keyword_loc assign[=] name[print_loc]
if compare[name[stmt].loc is constant[None]] begin[:]
name[stmt].loc assign[=] name[print_loc]
return[name[stmt]]
|
keyword[def] identifier[print_stmt] ( identifier[self] , identifier[print_loc] , identifier[stmt] ):
literal[string]
identifier[stmt] . identifier[keyword_loc] = identifier[print_loc]
keyword[if] identifier[stmt] . identifier[loc] keyword[is] keyword[None] :
identifier[stmt] . identifier[loc] = identifier[print_loc]
keyword[else] :
identifier[stmt] . identifier[loc] = identifier[print_loc] . identifier[join] ( identifier[stmt] . identifier[loc] )
keyword[return] identifier[stmt]
|
def print_stmt(self, print_loc, stmt):
"""
(2.6-2.7)
print_stmt: 'print' ( [ test (',' test)* [','] ] |
'>>' test [ (',' test)+ [','] ] )
"""
stmt.keyword_loc = print_loc
if stmt.loc is None:
stmt.loc = print_loc # depends on [control=['if'], data=[]]
else:
stmt.loc = print_loc.join(stmt.loc)
return stmt
|
def _iter_ns_range(self):
"""Iterates over self._ns_range, delegating to self._iter_key_range()."""
while True:
if self._current_key_range is None:
query = self._ns_range.make_datastore_query()
namespace_result = query.Get(1)
if not namespace_result:
break
namespace = namespace_result[0].name() or ""
self._current_key_range = key_range.KeyRange(
namespace=namespace, _app=self._ns_range.app)
yield ALLOW_CHECKPOINT
for key, o in self._iter_key_range(
copy.deepcopy(self._current_key_range)):
# The caller must consume yielded values so advancing the KeyRange
# before yielding is safe.
self._current_key_range.advance(key)
yield o
if (self._ns_range.is_single_namespace or
self._current_key_range.namespace == self._ns_range.namespace_end):
break
self._ns_range = self._ns_range.with_start_after(
self._current_key_range.namespace)
self._current_key_range = None
|
def function[_iter_ns_range, parameter[self]]:
constant[Iterates over self._ns_range, delegating to self._iter_key_range().]
while constant[True] begin[:]
if compare[name[self]._current_key_range is constant[None]] begin[:]
variable[query] assign[=] call[name[self]._ns_range.make_datastore_query, parameter[]]
variable[namespace_result] assign[=] call[name[query].Get, parameter[constant[1]]]
if <ast.UnaryOp object at 0x7da20c6e6d70> begin[:]
break
variable[namespace] assign[=] <ast.BoolOp object at 0x7da1b056cb20>
name[self]._current_key_range assign[=] call[name[key_range].KeyRange, parameter[]]
<ast.Yield object at 0x7da1b056e680>
for taget[tuple[[<ast.Name object at 0x7da1b056f370>, <ast.Name object at 0x7da1b056c370>]]] in starred[call[name[self]._iter_key_range, parameter[call[name[copy].deepcopy, parameter[name[self]._current_key_range]]]]] begin[:]
call[name[self]._current_key_range.advance, parameter[name[key]]]
<ast.Yield object at 0x7da1b056cb50>
if <ast.BoolOp object at 0x7da1b056dd80> begin[:]
break
name[self]._ns_range assign[=] call[name[self]._ns_range.with_start_after, parameter[name[self]._current_key_range.namespace]]
name[self]._current_key_range assign[=] constant[None]
|
keyword[def] identifier[_iter_ns_range] ( identifier[self] ):
literal[string]
keyword[while] keyword[True] :
keyword[if] identifier[self] . identifier[_current_key_range] keyword[is] keyword[None] :
identifier[query] = identifier[self] . identifier[_ns_range] . identifier[make_datastore_query] ()
identifier[namespace_result] = identifier[query] . identifier[Get] ( literal[int] )
keyword[if] keyword[not] identifier[namespace_result] :
keyword[break]
identifier[namespace] = identifier[namespace_result] [ literal[int] ]. identifier[name] () keyword[or] literal[string]
identifier[self] . identifier[_current_key_range] = identifier[key_range] . identifier[KeyRange] (
identifier[namespace] = identifier[namespace] , identifier[_app] = identifier[self] . identifier[_ns_range] . identifier[app] )
keyword[yield] identifier[ALLOW_CHECKPOINT]
keyword[for] identifier[key] , identifier[o] keyword[in] identifier[self] . identifier[_iter_key_range] (
identifier[copy] . identifier[deepcopy] ( identifier[self] . identifier[_current_key_range] )):
identifier[self] . identifier[_current_key_range] . identifier[advance] ( identifier[key] )
keyword[yield] identifier[o]
keyword[if] ( identifier[self] . identifier[_ns_range] . identifier[is_single_namespace] keyword[or]
identifier[self] . identifier[_current_key_range] . identifier[namespace] == identifier[self] . identifier[_ns_range] . identifier[namespace_end] ):
keyword[break]
identifier[self] . identifier[_ns_range] = identifier[self] . identifier[_ns_range] . identifier[with_start_after] (
identifier[self] . identifier[_current_key_range] . identifier[namespace] )
identifier[self] . identifier[_current_key_range] = keyword[None]
|
def _iter_ns_range(self):
"""Iterates over self._ns_range, delegating to self._iter_key_range()."""
while True:
if self._current_key_range is None:
query = self._ns_range.make_datastore_query()
namespace_result = query.Get(1)
if not namespace_result:
break # depends on [control=['if'], data=[]]
namespace = namespace_result[0].name() or ''
self._current_key_range = key_range.KeyRange(namespace=namespace, _app=self._ns_range.app)
yield ALLOW_CHECKPOINT # depends on [control=['if'], data=[]]
for (key, o) in self._iter_key_range(copy.deepcopy(self._current_key_range)):
# The caller must consume yielded values so advancing the KeyRange
# before yielding is safe.
self._current_key_range.advance(key)
yield o # depends on [control=['for'], data=[]]
if self._ns_range.is_single_namespace or self._current_key_range.namespace == self._ns_range.namespace_end:
break # depends on [control=['if'], data=[]]
self._ns_range = self._ns_range.with_start_after(self._current_key_range.namespace)
self._current_key_range = None # depends on [control=['while'], data=[]]
|
def _rolling_lstsq(x, y):
"""Finds solution for the rolling case. Matrix formulation."""
if x.ndim == 2:
# Treat everything as 3d and avoid AxisError on .swapaxes(1, 2) below
# This means an original input of:
# array([0., 1., 2., 3., 4., 5., 6.])
# becomes:
# array([[[0.],
# [1.],
# [2.],
# [3.]],
#
# [[1.],
# [2.],
# ...
x = x[:, :, None]
elif x.ndim <= 1:
        raise np.AxisError("x should have ndim >= 2")
return np.squeeze(
np.matmul(
np.linalg.inv(np.matmul(x.swapaxes(1, 2), x)),
np.matmul(x.swapaxes(1, 2), np.atleast_3d(y)),
)
)
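A worked sketch, not from the source: two stacked windows of four observations with a single feature each, so one coefficient comes back per window:

import numpy as np

x = np.arange(8, dtype=float).reshape(2, 4, 1)  # (windows, observations, features)
y = 3.0 * x[:, :, 0]                            # (windows, observations)
print(_rolling_lstsq(x, y))  # ~[3. 3.]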
|
def function[_rolling_lstsq, parameter[x, y]]:
constant[Finds solution for the rolling case. Matrix formulation.]
if compare[name[x].ndim equal[==] constant[2]] begin[:]
variable[x] assign[=] call[name[x]][tuple[[<ast.Slice object at 0x7da1b078ffd0>, <ast.Slice object at 0x7da1b078e500>, <ast.Constant object at 0x7da1b078c100>]]]
return[call[name[np].squeeze, parameter[call[name[np].matmul, parameter[call[name[np].linalg.inv, parameter[call[name[np].matmul, parameter[call[name[x].swapaxes, parameter[constant[1], constant[2]]], name[x]]]]], call[name[np].matmul, parameter[call[name[x].swapaxes, parameter[constant[1], constant[2]]], call[name[np].atleast_3d, parameter[name[y]]]]]]]]]]
|
keyword[def] identifier[_rolling_lstsq] ( identifier[x] , identifier[y] ):
literal[string]
keyword[if] identifier[x] . identifier[ndim] == literal[int] :
identifier[x] = identifier[x] [:,:, keyword[None] ]
keyword[elif] identifier[x] . identifier[ndim] <= literal[int] :
keyword[raise] identifier[np] . identifier[AxisError] ( literal[string] )
keyword[return] identifier[np] . identifier[squeeze] (
identifier[np] . identifier[matmul] (
identifier[np] . identifier[linalg] . identifier[inv] ( identifier[np] . identifier[matmul] ( identifier[x] . identifier[swapaxes] ( literal[int] , literal[int] ), identifier[x] )),
identifier[np] . identifier[matmul] ( identifier[x] . identifier[swapaxes] ( literal[int] , literal[int] ), identifier[np] . identifier[atleast_3d] ( identifier[y] )),
)
)
|
def _rolling_lstsq(x, y):
"""Finds solution for the rolling case. Matrix formulation."""
if x.ndim == 2: # Treat everything as 3d and avoid AxisError on .swapaxes(1, 2) below
# This means an original input of:
# array([0., 1., 2., 3., 4., 5., 6.])
# becomes:
# array([[[0.],
# [1.],
# [2.],
# [3.]],
#
# [[1.],
# [2.],
# ...
x = x[:, :, None] # depends on [control=['if'], data=[]]
elif x.ndim <= 1:
        raise np.AxisError('x should have ndim >= 2') # depends on [control=['if'], data=[]]
return np.squeeze(np.matmul(np.linalg.inv(np.matmul(x.swapaxes(1, 2), x)), np.matmul(x.swapaxes(1, 2), np.atleast_3d(y))))
|
def create_instance(self, plugin_type, plugin_name, **instance_kwargs):
"""Create and return an instance of the given plugin."""
plugin_type = self._get_plugin_type(plugin_type)
return plugin_type.create_instance(plugin_name, **instance_kwargs)
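A hedged usage sketch against a plugin-manager instance; `pm` and the type/name strings are assumptions:

# Resolves the plugin type, then instantiates the named plugin with the kwargs.
parser = pm.create_instance('parser', 'json', strict=True)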
|
def function[create_instance, parameter[self, plugin_type, plugin_name]]:
constant[Create and return an instance of the given plugin.]
variable[plugin_type] assign[=] call[name[self]._get_plugin_type, parameter[name[plugin_type]]]
return[call[name[plugin_type].create_instance, parameter[name[plugin_name]]]]
|
keyword[def] identifier[create_instance] ( identifier[self] , identifier[plugin_type] , identifier[plugin_name] ,** identifier[instance_kwargs] ):
literal[string]
identifier[plugin_type] = identifier[self] . identifier[_get_plugin_type] ( identifier[plugin_type] )
keyword[return] identifier[plugin_type] . identifier[create_instance] ( identifier[plugin_name] ,** identifier[instance_kwargs] )
|
def create_instance(self, plugin_type, plugin_name, **instance_kwargs):
"""Create and return an instance of the given plugin."""
plugin_type = self._get_plugin_type(plugin_type)
return plugin_type.create_instance(plugin_name, **instance_kwargs)
|
def cmd_tcpflags(ip, port, flags, rflags, verbose):
"""Send TCP packets with different flags and tell what responses receives.
It can be used to analyze how the different TCP/IP stack implementations
and configurations responds to packet with various flag combinations.
Example:
\b
# habu.tcpflags www.portantier.com
S -> SA
FS -> SA
FA -> R
SA -> R
By default, the command sends all possible flag combinations. You can
    specify which flags must always be present (reducing the number of
possible combinations), with the option '-f'.
Also, you can specify which flags you want to be present on the response
packets to show, with the option '-r'.
With the next command, you see all the possible combinations that have
    the FIN (F) flag set and generate a response that contains the RST (R)
flag.
Example:
\b
# habu.tcpflags -f F -r R www.portantier.com
FPA -> R
FSPA -> R
FAU -> R
"""
conf.verb = False
pkts = IP(dst=ip) / TCP(flags=(0, 255), dport=port)
out = "{:>8} -> {:<8}"
for pkt in pkts:
if not flags or all(i in pkt.sprintf(r"%TCP.flags%") for i in flags):
ans = sr1(pkt, timeout=0.2)
if ans:
if not rflags or all(i in ans.sprintf(r"%TCP.flags%") for i in rflags):
print(out.format(pkt.sprintf(r"%TCP.flags%"), ans.sprintf(r"%TCP.flags%")))
return True
|
def function[cmd_tcpflags, parameter[ip, port, flags, rflags, verbose]]:
    constant[Send TCP packets with different flags and tell what responses it receives.
    It can be used to analyze how different TCP/IP stack implementations
    and configurations respond to packets with various flag combinations.
Example:
# habu.tcpflags www.portantier.com
S -> SA
FS -> SA
FA -> R
SA -> R
By default, the command sends all possible flag combinations. You can
    specify which flags must always be present (reducing the number of
possible combinations), with the option '-f'.
Also, you can specify which flags you want to be present on the response
packets to show, with the option '-r'.
With the next command, you see all the possible combinations that have
    the FIN (F) flag set and generate a response that contains the RST (R)
flag.
Example:
# habu.tcpflags -f F -r R www.portantier.com
FPA -> R
FSPA -> R
FAU -> R
]
name[conf].verb assign[=] constant[False]
variable[pkts] assign[=] binary_operation[call[name[IP], parameter[]] / call[name[TCP], parameter[]]]
variable[out] assign[=] constant[{:>8} -> {:<8}]
for taget[name[pkt]] in starred[name[pkts]] begin[:]
if <ast.BoolOp object at 0x7da1b2263d30> begin[:]
variable[ans] assign[=] call[name[sr1], parameter[name[pkt]]]
if name[ans] begin[:]
if <ast.BoolOp object at 0x7da1b22eb520> begin[:]
call[name[print], parameter[call[name[out].format, parameter[call[name[pkt].sprintf, parameter[constant[%TCP.flags%]]], call[name[ans].sprintf, parameter[constant[%TCP.flags%]]]]]]]
return[constant[True]]
|
keyword[def] identifier[cmd_tcpflags] ( identifier[ip] , identifier[port] , identifier[flags] , identifier[rflags] , identifier[verbose] ):
literal[string]
identifier[conf] . identifier[verb] = keyword[False]
identifier[pkts] = identifier[IP] ( identifier[dst] = identifier[ip] )/ identifier[TCP] ( identifier[flags] =( literal[int] , literal[int] ), identifier[dport] = identifier[port] )
identifier[out] = literal[string]
keyword[for] identifier[pkt] keyword[in] identifier[pkts] :
keyword[if] keyword[not] identifier[flags] keyword[or] identifier[all] ( identifier[i] keyword[in] identifier[pkt] . identifier[sprintf] ( literal[string] ) keyword[for] identifier[i] keyword[in] identifier[flags] ):
identifier[ans] = identifier[sr1] ( identifier[pkt] , identifier[timeout] = literal[int] )
keyword[if] identifier[ans] :
keyword[if] keyword[not] identifier[rflags] keyword[or] identifier[all] ( identifier[i] keyword[in] identifier[ans] . identifier[sprintf] ( literal[string] ) keyword[for] identifier[i] keyword[in] identifier[rflags] ):
identifier[print] ( identifier[out] . identifier[format] ( identifier[pkt] . identifier[sprintf] ( literal[string] ), identifier[ans] . identifier[sprintf] ( literal[string] )))
keyword[return] keyword[True]
|
def cmd_tcpflags(ip, port, flags, rflags, verbose):
"""Send TCP packets with different flags and tell what responses receives.
It can be used to analyze how the different TCP/IP stack implementations
and configurations responds to packet with various flag combinations.
Example:
\x08
# habu.tcpflags www.portantier.com
S -> SA
FS -> SA
FA -> R
SA -> R
By default, the command sends all possible flag combinations. You can
specify which flags must ever be present (reducing the quantity of
possible combinations), with the option '-f'.
Also, you can specify which flags you want to be present on the response
packets to show, with the option '-r'.
With the next command, you see all the possible combinations that have
the FIN (F) flag set and generates a response that contains the RST (R)
flag.
Example:
\x08
# habu.tcpflags -f F -r R www.portantier.com
FPA -> R
FSPA -> R
FAU -> R
"""
conf.verb = False
pkts = IP(dst=ip) / TCP(flags=(0, 255), dport=port)
out = '{:>8} -> {:<8}'
for pkt in pkts:
if not flags or all((i in pkt.sprintf('%TCP.flags%') for i in flags)):
ans = sr1(pkt, timeout=0.2)
if ans:
if not rflags or all((i in ans.sprintf('%TCP.flags%') for i in rflags)):
print(out.format(pkt.sprintf('%TCP.flags%'), ans.sprintf('%TCP.flags%'))) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['pkt']]
return True
|
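To make the filtering in cmd_tcpflags concrete, here is the flag-matching step in isolation. It assumes scapy is installed; the destination host and port are illustrative. Iterating over a TCP layer built with flags=(0, 255) yields one packet per flag combination, and the all(...) test mirrors the '-f' option.
# Flag filtering in isolation (scapy assumed; destination is illustrative):
from scapy.all import IP, TCP

pkts = IP(dst='192.0.2.1') / TCP(flags=(0, 255), dport=80)  # 256 flag combinations
wanted = 'F'                                 # like '-f F': keep combos containing FIN
for pkt in pkts:
    sent = pkt.sprintf(r'%TCP.flags%')
    if all(f in sent for f in wanted):
        print(sent)   # F, FS, FA, FP, ... every combination containing FIN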
def svg_to_path(file_obj, file_type=None):
"""
Load an SVG file into a Path2D object.
Parameters
-----------
file_obj : open file object
Contains SVG data
file_type: None
Not used
Returns
-----------
loaded : dict
With kwargs for Path2D constructor
"""
def element_transform(e, max_depth=100):
"""
Find a transformation matrix for an XML element.
"""
matrices = []
current = e
for i in range(max_depth):
if 'transform' in current.attrib:
mat = transform_to_matrices(current.attrib['transform'])
matrices.extend(mat)
# cached[current] = mat
current = current.getparent()
if current is None:
break
if len(matrices) == 0:
return np.eye(3)
elif len(matrices) == 1:
return matrices[0]
else:
return util.multi_dot(matrices[::-1])
# first parse the XML
xml = etree.fromstring(file_obj.read())
# store paths and transforms as
# (path string, 3x3 matrix)
paths = []
# store every path element
for element in xml.iter('{*}path'):
paths.append((element.attrib['d'],
element_transform(element)))
return _svg_path_convert(paths)
|
def function[svg_to_path, parameter[file_obj, file_type]]:
constant[
Load an SVG file into a Path2D object.
Parameters
-----------
file_obj : open file object
Contains SVG data
file_type: None
Not used
Returns
-----------
loaded : dict
With kwargs for Path2D constructor
]
def function[element_transform, parameter[e, max_depth]]:
constant[
Find a transformation matrix for an XML element.
]
variable[matrices] assign[=] list[[]]
variable[current] assign[=] name[e]
for taget[name[i]] in starred[call[name[range], parameter[name[max_depth]]]] begin[:]
if compare[constant[transform] in name[current].attrib] begin[:]
variable[mat] assign[=] call[name[transform_to_matrices], parameter[call[name[current].attrib][constant[transform]]]]
call[name[matrices].extend, parameter[name[mat]]]
variable[current] assign[=] call[name[current].getparent, parameter[]]
if compare[name[current] is constant[None]] begin[:]
break
if compare[call[name[len], parameter[name[matrices]]] equal[==] constant[0]] begin[:]
return[call[name[np].eye, parameter[constant[3]]]]
variable[xml] assign[=] call[name[etree].fromstring, parameter[call[name[file_obj].read, parameter[]]]]
variable[paths] assign[=] list[[]]
for taget[name[element]] in starred[call[name[xml].iter, parameter[constant[{*}path]]]] begin[:]
call[name[paths].append, parameter[tuple[[<ast.Subscript object at 0x7da204565f00>, <ast.Call object at 0x7da2045648b0>]]]]
return[call[name[_svg_path_convert], parameter[name[paths]]]]
|
keyword[def] identifier[svg_to_path] ( identifier[file_obj] , identifier[file_type] = keyword[None] ):
literal[string]
keyword[def] identifier[element_transform] ( identifier[e] , identifier[max_depth] = literal[int] ):
literal[string]
identifier[matrices] =[]
identifier[current] = identifier[e]
keyword[for] identifier[i] keyword[in] identifier[range] ( identifier[max_depth] ):
keyword[if] literal[string] keyword[in] identifier[current] . identifier[attrib] :
identifier[mat] = identifier[transform_to_matrices] ( identifier[current] . identifier[attrib] [ literal[string] ])
identifier[matrices] . identifier[extend] ( identifier[mat] )
identifier[current] = identifier[current] . identifier[getparent] ()
keyword[if] identifier[current] keyword[is] keyword[None] :
keyword[break]
keyword[if] identifier[len] ( identifier[matrices] )== literal[int] :
keyword[return] identifier[np] . identifier[eye] ( literal[int] )
keyword[elif] identifier[len] ( identifier[matrices] )== literal[int] :
keyword[return] identifier[matrices] [ literal[int] ]
keyword[else] :
keyword[return] identifier[util] . identifier[multi_dot] ( identifier[matrices] [::- literal[int] ])
identifier[xml] = identifier[etree] . identifier[fromstring] ( identifier[file_obj] . identifier[read] ())
identifier[paths] =[]
keyword[for] identifier[element] keyword[in] identifier[xml] . identifier[iter] ( literal[string] ):
identifier[paths] . identifier[append] (( identifier[element] . identifier[attrib] [ literal[string] ],
identifier[element_transform] ( identifier[element] )))
keyword[return] identifier[_svg_path_convert] ( identifier[paths] )
|
def svg_to_path(file_obj, file_type=None):
"""
Load an SVG file into a Path2D object.
Parameters
-----------
file_obj : open file object
Contains SVG data
file_type: None
Not used
Returns
-----------
loaded : dict
With kwargs for Path2D constructor
"""
def element_transform(e, max_depth=100):
"""
Find a transformation matrix for an XML element.
"""
matrices = []
current = e
for i in range(max_depth):
if 'transform' in current.attrib:
mat = transform_to_matrices(current.attrib['transform'])
matrices.extend(mat) # depends on [control=['if'], data=[]]
# cached[current] = mat
current = current.getparent()
if current is None:
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
if len(matrices) == 0:
return np.eye(3) # depends on [control=['if'], data=[]]
elif len(matrices) == 1:
return matrices[0] # depends on [control=['if'], data=[]]
else:
return util.multi_dot(matrices[::-1])
# first parse the XML
xml = etree.fromstring(file_obj.read())
# store paths and transforms as
# (path string, 3x3 matrix)
paths = []
# store every path element
for element in xml.iter('{*}path'):
paths.append((element.attrib['d'], element_transform(element))) # depends on [control=['for'], data=['element']]
return _svg_path_convert(paths)
|
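A brief usage sketch for svg_to_path; the file name is illustrative, and the Path2D constructor is assumed to live in the surrounding library.
# Usage sketch (file name illustrative):
with open('drawing.svg', 'rb') as f:
    kwargs = svg_to_path(f)    # dict of Path2D constructor kwargs
# path = Path2D(**kwargs)     # constructor assumed available in the host library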
def _parse_status(data, cast_type):
"""
Parses a STATUS message and returns a CastStatus object.
:type data: dict
:param cast_type: Type of Chromecast.
:rtype: CastStatus
"""
data = data.get('status', {})
volume_data = data.get('volume', {})
try:
app_data = data['applications'][0]
except KeyError:
app_data = {}
is_audio = cast_type in (CAST_TYPE_AUDIO, CAST_TYPE_GROUP)
status = CastStatus(
data.get('isActiveInput', None if is_audio else False),
data.get('isStandBy', None if is_audio else True),
volume_data.get('level', 1.0),
volume_data.get('muted', False),
app_data.get(APP_ID),
app_data.get('displayName'),
[item['name'] for item in app_data.get('namespaces', [])],
app_data.get(SESSION_ID),
app_data.get('transportId'),
app_data.get('statusText', '')
)
return status
|
def function[_parse_status, parameter[data, cast_type]]:
constant[
Parses a STATUS message and returns a CastStatus object.
:type data: dict
:param cast_type: Type of Chromecast.
:rtype: CastStatus
]
variable[data] assign[=] call[name[data].get, parameter[constant[status], dictionary[[], []]]]
variable[volume_data] assign[=] call[name[data].get, parameter[constant[volume], dictionary[[], []]]]
<ast.Try object at 0x7da18ede44f0>
variable[is_audio] assign[=] compare[name[cast_type] in tuple[[<ast.Name object at 0x7da18ede4880>, <ast.Name object at 0x7da18ede5ae0>]]]
variable[status] assign[=] call[name[CastStatus], parameter[call[name[data].get, parameter[constant[isActiveInput], <ast.IfExp object at 0x7da18ede6c80>]], call[name[data].get, parameter[constant[isStandBy], <ast.IfExp object at 0x7da18ede51b0>]], call[name[volume_data].get, parameter[constant[level], constant[1.0]]], call[name[volume_data].get, parameter[constant[muted], constant[False]]], call[name[app_data].get, parameter[name[APP_ID]]], call[name[app_data].get, parameter[constant[displayName]]], <ast.ListComp object at 0x7da207f99ff0>, call[name[app_data].get, parameter[name[SESSION_ID]]], call[name[app_data].get, parameter[constant[transportId]]], call[name[app_data].get, parameter[constant[statusText], constant[]]]]]
return[name[status]]
|
keyword[def] identifier[_parse_status] ( identifier[data] , identifier[cast_type] ):
literal[string]
identifier[data] = identifier[data] . identifier[get] ( literal[string] ,{})
identifier[volume_data] = identifier[data] . identifier[get] ( literal[string] ,{})
keyword[try] :
identifier[app_data] = identifier[data] [ literal[string] ][ literal[int] ]
keyword[except] identifier[KeyError] :
identifier[app_data] ={}
identifier[is_audio] = identifier[cast_type] keyword[in] ( identifier[CAST_TYPE_AUDIO] , identifier[CAST_TYPE_GROUP] )
identifier[status] = identifier[CastStatus] (
identifier[data] . identifier[get] ( literal[string] , keyword[None] keyword[if] identifier[is_audio] keyword[else] keyword[False] ),
identifier[data] . identifier[get] ( literal[string] , keyword[None] keyword[if] identifier[is_audio] keyword[else] keyword[True] ),
identifier[volume_data] . identifier[get] ( literal[string] , literal[int] ),
identifier[volume_data] . identifier[get] ( literal[string] , keyword[False] ),
identifier[app_data] . identifier[get] ( identifier[APP_ID] ),
identifier[app_data] . identifier[get] ( literal[string] ),
[ identifier[item] [ literal[string] ] keyword[for] identifier[item] keyword[in] identifier[app_data] . identifier[get] ( literal[string] ,[])],
identifier[app_data] . identifier[get] ( identifier[SESSION_ID] ),
identifier[app_data] . identifier[get] ( literal[string] ),
identifier[app_data] . identifier[get] ( literal[string] , literal[string] )
)
keyword[return] identifier[status]
|
def _parse_status(data, cast_type):
"""
Parses a STATUS message and returns a CastStatus object.
:type data: dict
:param cast_type: Type of Chromecast.
:rtype: CastStatus
"""
data = data.get('status', {})
volume_data = data.get('volume', {})
try:
app_data = data['applications'][0] # depends on [control=['try'], data=[]]
except KeyError:
app_data = {} # depends on [control=['except'], data=[]]
is_audio = cast_type in (CAST_TYPE_AUDIO, CAST_TYPE_GROUP)
status = CastStatus(data.get('isActiveInput', None if is_audio else False), data.get('isStandBy', None if is_audio else True), volume_data.get('level', 1.0), volume_data.get('muted', False), app_data.get(APP_ID), app_data.get('displayName'), [item['name'] for item in app_data.get('namespaces', [])], app_data.get(SESSION_ID), app_data.get('transportId'), app_data.get('statusText', ''))
return status
|
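For reference, a sketch of the message shape _parse_status expects. Every field value below is invented, and the key names behind the APP_ID and SESSION_ID constants are assumed to be 'appId' and 'sessionId'.
# Illustrative STATUS payload (all values invented for the sketch):
data = {'status': {
    'volume': {'level': 0.5, 'muted': False},
    'applications': [{
        'appId': 'CC1AD845',                    # assumed value of APP_ID
        'displayName': 'Default Media Receiver',
        'namespaces': [{'name': 'urn:x-cast:com.google.cast.media'}],
        'sessionId': 'abc', 'transportId': 'abc', 'statusText': 'Ready',
    }],
}}
status = _parse_status(data, CAST_TYPE_CHROMECAST)  # cast-type constant assumed in scope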
def parse_rosters(self):
"""
Parse the home and away game rosters
:returns: ``self`` on success, ``None`` otherwise
"""
lx_doc = self.html_doc()
if not self.__blocks:
self.__pl_blocks(lx_doc)
for t in ['home', 'away']:
self.rosters[t] = self.__clean_pl_block(self.__blocks[t])
return self if self.rosters else None
|
def function[parse_rosters, parameter[self]]:
constant[
Parse the home and away game rosters
:returns: ``self`` on success, ``None`` otherwise
]
variable[lx_doc] assign[=] call[name[self].html_doc, parameter[]]
if <ast.UnaryOp object at 0x7da1b0e0e800> begin[:]
call[name[self].__pl_blocks, parameter[name[lx_doc]]]
for taget[name[t]] in starred[list[[<ast.Constant object at 0x7da1b0e0e950>, <ast.Constant object at 0x7da1b0e0d600>]]] begin[:]
call[name[self].rosters][name[t]] assign[=] call[name[self].__clean_pl_block, parameter[call[name[self].__blocks][name[t]]]]
return[<ast.IfExp object at 0x7da1b0e0dcc0>]
|
keyword[def] identifier[parse_rosters] ( identifier[self] ):
literal[string]
identifier[lx_doc] = identifier[self] . identifier[html_doc] ()
keyword[if] keyword[not] identifier[self] . identifier[__blocks] :
identifier[self] . identifier[__pl_blocks] ( identifier[lx_doc] )
keyword[for] identifier[t] keyword[in] [ literal[string] , literal[string] ]:
identifier[self] . identifier[rosters] [ identifier[t] ]= identifier[self] . identifier[__clean_pl_block] ( identifier[self] . identifier[__blocks] [ identifier[t] ])
keyword[return] identifier[self] keyword[if] identifier[self] . identifier[rosters] keyword[else] keyword[None]
|
def parse_rosters(self):
"""
Parse the home and away game rosters
:returns: ``self`` on success, ``None`` otherwise
"""
lx_doc = self.html_doc()
if not self.__blocks:
self.__pl_blocks(lx_doc) # depends on [control=['if'], data=[]]
for t in ['home', 'away']:
self.rosters[t] = self.__clean_pl_block(self.__blocks[t]) # depends on [control=['for'], data=['t']]
return self if self.rosters else None
|
def compile_latex_text(input_text, dpath=None, fname=None, verbose=True,
move=True, nest_in_doc=None, title=None,
preamb_extra=None):
r"""
CommandLine:
python -m utool.util_latex --test-compile_latex_text --show
Ignore:
pdflatex -shell-escape --synctex=-1 -src-specials -interaction=nonstopmode\
~/code/ibeis/tmptex/latex_formatter_temp.tex
Example1:
>>> # DISABLE_DOCTEST
>>> from utool.util_latex import * # NOQA
>>> import utool as ut
>>> verbose = True
>>> #dpath = '/home/joncrall/code/ibeis/aidchallenge'
>>> dpath = dirname(ut.grab_test_imgpath())
>>> #ut.vd(dpath)
>>> orig_fpaths = ut.list_images(dpath, fullpath=True)
>>> figure_str = ut.get_latex_figure_str(orig_fpaths, width_str='2.4in', nCols=2)
>>> input_text = figure_str
>>> pdf_fpath = ut.compile_latex_text(input_text, dpath=dpath,
>>> verbose=verbose)
>>> output_pdf_fpath = ut.compress_pdf(pdf_fpath)
>>> print(pdf_fpath)
>>> ut.quit_if_noshow()
>>> ut.startfile(pdf_fpath)
"""
import utool as ut
if verbose:
print('[ut] compile_latex_text')
if nest_in_doc is None:
nest_in_doc = 'documentclass' not in input_text
if nest_in_doc:
text = make_full_document(input_text, title=title,
preamb_extra=preamb_extra)
if not dpath:
dpath = os.getcwd()
if fname is None:
fname = 'temp_latex'
# Create temporary work directly
work_dpath = join(dpath, '.tmptex')
ut.ensuredir(work_dpath, verbose=verbose > 1)
fname_tex = ut.ensure_ext(fname, '.tex')
fname_pdf = ut.ensure_ext(fname, '.pdf')
tex_fpath = join(work_dpath, fname_tex)
pdf_fpath_output = join(work_dpath, fname_pdf)
ut.write_to(tex_fpath, text)
with ut.ChdirContext(work_dpath, verbose=verbose > 1):
# print(text)
args = ' '.join([
'lualatex', '-shell-escape', '--synctex=-1', '-src-specials',
'-interaction=nonstopmode', tex_fpath
])
info = ut.cmd2(args, verbose=verbose > 1)
if not ut.checkpath(pdf_fpath_output, verbose=verbose > 1):
print('Error compiling LaTeX')
ut.print_code(text, 'latex')
print(info['out'])
raise RuntimeError('latex failed ')
if move:
pdf_fpath = join(dpath, fname_pdf)
ut.move(pdf_fpath_output, pdf_fpath, verbose=verbose > 1)
else:
pdf_fpath = pdf_fpath_output
return pdf_fpath
|
def function[compile_latex_text, parameter[input_text, dpath, fname, verbose, move, nest_in_doc, title, preamb_extra]]:
constant[
CommandLine:
python -m utool.util_latex --test-compile_latex_text --show
Ignore:
pdflatex -shell-escape --synctex=-1 -src-specials -interaction=nonstopmode\
~/code/ibeis/tmptex/latex_formatter_temp.tex
Example1:
>>> # DISABLE_DOCTEST
>>> from utool.util_latex import * # NOQA
>>> import utool as ut
>>> verbose = True
>>> #dpath = '/home/joncrall/code/ibeis/aidchallenge'
>>> dpath = dirname(ut.grab_test_imgpath())
>>> #ut.vd(dpath)
>>> orig_fpaths = ut.list_images(dpath, fullpath=True)
>>> figure_str = ut.get_latex_figure_str(orig_fpaths, width_str='2.4in', nCols=2)
>>> input_text = figure_str
>>> pdf_fpath = ut.compile_latex_text(input_text, dpath=dpath,
>>> verbose=verbose)
>>> output_pdf_fpath = ut.compress_pdf(pdf_fpath)
>>> print(pdf_fpath)
>>> ut.quit_if_noshow()
>>> ut.startfile(pdf_fpath)
]
import module[utool] as alias[ut]
if name[verbose] begin[:]
call[name[print], parameter[constant[[ut] compile_latex_text]]]
if compare[name[nest_in_doc] is constant[None]] begin[:]
variable[nest_in_doc] assign[=] compare[constant[documentclass] <ast.NotIn object at 0x7da2590d7190> name[input_text]]
if name[nest_in_doc] begin[:]
variable[text] assign[=] call[name[make_full_document], parameter[name[input_text]]]
if <ast.UnaryOp object at 0x7da1b246b5b0> begin[:]
variable[dpath] assign[=] call[name[os].getcwd, parameter[]]
if compare[name[fname] is constant[None]] begin[:]
variable[fname] assign[=] constant[temp_latex]
variable[work_dpath] assign[=] call[name[join], parameter[name[dpath], constant[.tmptex]]]
call[name[ut].ensuredir, parameter[name[work_dpath]]]
variable[fname_tex] assign[=] call[name[ut].ensure_ext, parameter[name[fname], constant[.tex]]]
variable[fname_pdf] assign[=] call[name[ut].ensure_ext, parameter[name[fname], constant[.pdf]]]
variable[tex_fpath] assign[=] call[name[join], parameter[name[work_dpath], name[fname_tex]]]
variable[pdf_fpath_output] assign[=] call[name[join], parameter[name[work_dpath], name[fname_pdf]]]
call[name[ut].write_to, parameter[name[tex_fpath], name[text]]]
with call[name[ut].ChdirContext, parameter[name[work_dpath]]] begin[:]
variable[args] assign[=] call[constant[ ].join, parameter[list[[<ast.Constant object at 0x7da1b24e57e0>, <ast.Constant object at 0x7da1b24e4550>, <ast.Constant object at 0x7da1b24e4160>, <ast.Constant object at 0x7da1b24e4d90>, <ast.Constant object at 0x7da1b24e7910>, <ast.Name object at 0x7da1b24e5060>]]]]
variable[info] assign[=] call[name[ut].cmd2, parameter[name[args]]]
if <ast.UnaryOp object at 0x7da1b24e6aa0> begin[:]
call[name[print], parameter[constant[Error compiling LaTeX]]]
call[name[ut].print_code, parameter[name[text], constant[latex]]]
call[name[print], parameter[call[name[info]][constant[out]]]]
<ast.Raise object at 0x7da1b25381c0>
if name[move] begin[:]
variable[pdf_fpath] assign[=] call[name[join], parameter[name[dpath], name[fname_pdf]]]
call[name[ut].move, parameter[name[pdf_fpath_output], name[pdf_fpath]]]
return[name[pdf_fpath]]
|
keyword[def] identifier[compile_latex_text] ( identifier[input_text] , identifier[dpath] = keyword[None] , identifier[fname] = keyword[None] , identifier[verbose] = keyword[True] ,
identifier[move] = keyword[True] , identifier[nest_in_doc] = keyword[None] , identifier[title] = keyword[None] ,
identifier[preamb_extra] = keyword[None] ):
literal[string]
keyword[import] identifier[utool] keyword[as] identifier[ut]
keyword[if] identifier[verbose] :
identifier[print] ( literal[string] )
keyword[if] identifier[nest_in_doc] keyword[is] keyword[None] :
identifier[nest_in_doc] = literal[string] keyword[not] keyword[in] identifier[input_text]
keyword[if] identifier[nest_in_doc] :
identifier[text] = identifier[make_full_document] ( identifier[input_text] , identifier[title] = identifier[title] ,
identifier[preamb_extra] = identifier[preamb_extra] )
keyword[if] keyword[not] identifier[dpath] :
identifier[dpath] = identifier[os] . identifier[getcwd] ()
keyword[if] identifier[fname] keyword[is] keyword[None] :
identifier[fname] = literal[string]
identifier[work_dpath] = identifier[join] ( identifier[dpath] , literal[string] )
identifier[ut] . identifier[ensuredir] ( identifier[work_dpath] , identifier[verbose] = identifier[verbose] > literal[int] )
identifier[fname_tex] = identifier[ut] . identifier[ensure_ext] ( identifier[fname] , literal[string] )
identifier[fname_pdf] = identifier[ut] . identifier[ensure_ext] ( identifier[fname] , literal[string] )
identifier[tex_fpath] = identifier[join] ( identifier[work_dpath] , identifier[fname_tex] )
identifier[pdf_fpath_output] = identifier[join] ( identifier[work_dpath] , identifier[fname_pdf] )
identifier[ut] . identifier[write_to] ( identifier[tex_fpath] , identifier[text] )
keyword[with] identifier[ut] . identifier[ChdirContext] ( identifier[work_dpath] , identifier[verbose] = identifier[verbose] > literal[int] ):
identifier[args] = literal[string] . identifier[join] ([
literal[string] , literal[string] , literal[string] , literal[string] ,
literal[string] , identifier[tex_fpath]
])
identifier[info] = identifier[ut] . identifier[cmd2] ( identifier[args] , identifier[verbose] = identifier[verbose] > literal[int] )
keyword[if] keyword[not] identifier[ut] . identifier[checkpath] ( identifier[pdf_fpath_output] , identifier[verbose] = identifier[verbose] > literal[int] ):
identifier[print] ( literal[string] )
identifier[ut] . identifier[print_code] ( identifier[text] , literal[string] )
identifier[print] ( identifier[info] [ literal[string] ])
keyword[raise] identifier[RuntimeError] ( literal[string] )
keyword[if] identifier[move] :
identifier[pdf_fpath] = identifier[join] ( identifier[dpath] , identifier[fname_pdf] )
identifier[ut] . identifier[move] ( identifier[pdf_fpath_output] , identifier[pdf_fpath] , identifier[verbose] = identifier[verbose] > literal[int] )
keyword[else] :
identifier[pdf_fpath] = identifier[pdf_fpath_output]
keyword[return] identifier[pdf_fpath]
|
def compile_latex_text(input_text, dpath=None, fname=None, verbose=True, move=True, nest_in_doc=None, title=None, preamb_extra=None):
"""
CommandLine:
python -m utool.util_latex --test-compile_latex_text --show
Ignore:
pdflatex -shell-escape --synctex=-1 -src-specials -interaction=nonstopmode\\
~/code/ibeis/tmptex/latex_formatter_temp.tex
Example1:
>>> # DISABLE_DOCTEST
>>> from utool.util_latex import * # NOQA
>>> import utool as ut
>>> verbose = True
>>> #dpath = '/home/joncrall/code/ibeis/aidchallenge'
>>> dpath = dirname(ut.grab_test_imgpath())
>>> #ut.vd(dpath)
>>> orig_fpaths = ut.list_images(dpath, fullpath=True)
>>> figure_str = ut.get_latex_figure_str(orig_fpaths, width_str='2.4in', nCols=2)
>>> input_text = figure_str
>>> pdf_fpath = ut.compile_latex_text(input_text, dpath=dpath,
>>> verbose=verbose)
>>> output_pdf_fpath = ut.compress_pdf(pdf_fpath)
>>> print(pdf_fpath)
>>> ut.quit_if_noshow()
>>> ut.startfile(pdf_fpath)
"""
import utool as ut
if verbose:
print('[ut] compile_latex_text') # depends on [control=['if'], data=[]]
if nest_in_doc is None:
nest_in_doc = 'documentclass' not in input_text # depends on [control=['if'], data=['nest_in_doc']]
if nest_in_doc:
text = make_full_document(input_text, title=title, preamb_extra=preamb_extra) # depends on [control=['if'], data=[]]
if not dpath:
dpath = os.getcwd() # depends on [control=['if'], data=[]]
if fname is None:
fname = 'temp_latex' # depends on [control=['if'], data=['fname']]
# Create temporary work directly
work_dpath = join(dpath, '.tmptex')
ut.ensuredir(work_dpath, verbose=verbose > 1)
fname_tex = ut.ensure_ext(fname, '.tex')
fname_pdf = ut.ensure_ext(fname, '.pdf')
tex_fpath = join(work_dpath, fname_tex)
pdf_fpath_output = join(work_dpath, fname_pdf)
ut.write_to(tex_fpath, text)
with ut.ChdirContext(work_dpath, verbose=verbose > 1):
# print(text)
args = ' '.join(['lualatex', '-shell-escape', '--synctex=-1', '-src-specials', '-interaction=nonstopmode', tex_fpath])
info = ut.cmd2(args, verbose=verbose > 1)
if not ut.checkpath(pdf_fpath_output, verbose=verbose > 1):
print('Error compiling LaTeX')
ut.print_code(text, 'latex')
print(info['out'])
raise RuntimeError('latex failed ') # depends on [control=['if'], data=[]] # depends on [control=['with'], data=[]]
if move:
pdf_fpath = join(dpath, fname_pdf)
ut.move(pdf_fpath_output, pdf_fpath, verbose=verbose > 1) # depends on [control=['if'], data=[]]
else:
pdf_fpath = pdf_fpath_output
return pdf_fpath
|
def _eval_script(redis, script_id, *keys, **kwargs):
"""Tries to call ``EVALSHA`` with the `hash` and then, if it fails, calls
regular ``EVAL`` with the `script`.
"""
args = kwargs.pop('args', ())
if kwargs:
raise TypeError("Unexpected keyword arguments %s" % kwargs.keys())
try:
return redis.evalsha(SCRIPTS[script_id], len(keys), *keys + args)
except NoScriptError:
logger.info("%s not cached.", SCRIPTS[script_id + 2])
return redis.eval(SCRIPTS[script_id + 1], len(keys), *keys + args)
|
def function[_eval_script, parameter[redis, script_id]]:
constant[Tries to call ``EVALSHA`` with the `hash` and then, if it fails, calls
regular ``EVAL`` with the `script`.
]
variable[args] assign[=] call[name[kwargs].pop, parameter[constant[args], tuple[[]]]]
if name[kwargs] begin[:]
<ast.Raise object at 0x7da18bcc9a80>
<ast.Try object at 0x7da207f00b50>
|
keyword[def] identifier[_eval_script] ( identifier[redis] , identifier[script_id] ,* identifier[keys] ,** identifier[kwargs] ):
literal[string]
identifier[args] = identifier[kwargs] . identifier[pop] ( literal[string] ,())
keyword[if] identifier[kwargs] :
keyword[raise] identifier[TypeError] ( literal[string] % identifier[kwargs] . identifier[keys] ())
keyword[try] :
keyword[return] identifier[redis] . identifier[evalsha] ( identifier[SCRIPTS] [ identifier[script_id] ], identifier[len] ( identifier[keys] ),* identifier[keys] + identifier[args] )
keyword[except] identifier[NoScriptError] :
identifier[logger] . identifier[info] ( literal[string] , identifier[SCRIPTS] [ identifier[script_id] + literal[int] ])
keyword[return] identifier[redis] . identifier[eval] ( identifier[SCRIPTS] [ identifier[script_id] + literal[int] ], identifier[len] ( identifier[keys] ),* identifier[keys] + identifier[args] )
|
def _eval_script(redis, script_id, *keys, **kwargs):
"""Tries to call ``EVALSHA`` with the `hash` and then, if it fails, calls
regular ``EVAL`` with the `script`.
"""
args = kwargs.pop('args', ())
if kwargs:
raise TypeError('Unexpected keyword arguments %s' % kwargs.keys()) # depends on [control=['if'], data=[]]
try:
return redis.evalsha(SCRIPTS[script_id], len(keys), *keys + args) # depends on [control=['try'], data=[]]
except NoScriptError:
logger.info('%s not cached.', SCRIPTS[script_id + 2])
return redis.eval(SCRIPTS[script_id + 1], len(keys), *keys + args) # depends on [control=['except'], data=[]]
|
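The EVALSHA-then-EVAL fallback that _eval_script wraps can be shown standalone. This is a minimal sketch with redis-py, assuming a running server; the script source and key are illustrative.
# Standalone version of the same fallback pattern (redis-py assumed):
import redis
from redis.exceptions import NoScriptError

r = redis.Redis()
source = "return redis.call('GET', KEYS[1])"
sha = r.script_load(source)          # cache the script and get its SHA1
try:
    val = r.evalsha(sha, 1, 'mykey')
except NoScriptError:                # e.g. the server restarted and lost the cache
    val = r.eval(source, 1, 'mykey')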
def underline(self, text, indent=4):
"""Underline a given text"""
length = len(text)
indentation = (' ' * indent)
return indentation + text + '\n' + indentation + ('-' * length)
|
def function[underline, parameter[self, text, indent]]:
constant[Underline a given text]
variable[length] assign[=] call[name[len], parameter[name[text]]]
variable[indentation] assign[=] binary_operation[constant[ ] * name[indent]]
return[binary_operation[binary_operation[binary_operation[binary_operation[name[indentation] + name[text]] + constant[
]] + name[indentation]] + binary_operation[constant[-] * name[length]]]]
|
keyword[def] identifier[underline] ( identifier[self] , identifier[text] , identifier[indent] = literal[int] ):
literal[string]
identifier[length] = identifier[len] ( identifier[text] )
identifier[indentation] =( literal[string] * identifier[indent] )
keyword[return] identifier[indentation] + identifier[text] + literal[string] + identifier[indentation] +( literal[string] * identifier[length] )
|
def underline(self, text, indent=4):
"""Underline a given text"""
length = len(text)
indentation = ' ' * indent
return indentation + text + '\n' + indentation + '-' * length
|
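A quick check of the formatting that underline produces; this standalone snippet mirrors the method body with illustrative inputs.
# Mirrors the method body (inputs illustrative):
text, indent = 'Summary', 2
print(' ' * indent + text + '\n' + ' ' * indent + '-' * len(text))
#   Summary
#   -------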
def threaded(
name: typing.Callable[..., typing.Any], daemon: bool = False, started: bool = False
) -> typing.Callable[..., threading.Thread]:
"""Overload: Call decorator without arguments."""
|
def function[threaded, parameter[name, daemon, started]]:
constant[Overload: Call decorator without arguments.]
|
keyword[def] identifier[threaded] (
identifier[name] : identifier[typing] . identifier[Callable] [..., identifier[typing] . identifier[Any] ], identifier[daemon] : identifier[bool] = keyword[False] , identifier[started] : identifier[bool] = keyword[False]
)-> identifier[typing] . identifier[Callable] [..., identifier[threading] . identifier[Thread] ]:
literal[string]
|
def threaded(name: typing.Callable[..., typing.Any], daemon: bool=False, started: bool=False) -> typing.Callable[..., threading.Thread]:
"""Overload: Call decorator without arguments."""
|
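Only the typing overload of threaded appears in this row, so the following is an assumption about what the full decorator presumably does: wrap the callable so each call returns a threading.Thread.
# Sketch of the presumed full implementation (not taken from the source):
import threading, typing

def threaded_sketch(func: typing.Callable[..., typing.Any],
                    daemon: bool = False, started: bool = False):
    def wrapper(*args, **kwargs) -> threading.Thread:
        t = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=daemon)
        if started:
            t.start()
        return t
    return wrapper

@threaded_sketch
def worker(n):
    print('working on', n)

worker(42).start()   # started=False by default, so the caller starts the thread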
def gif(gif_id, api_key=GIPHY_PUBLIC_KEY, strict=False):
"""
Shorthand for creating a Giphy api wrapper with the given api key
and then calling the gif method.
"""
return Giphy(api_key=api_key, strict=strict).gif(gif_id)
|
def function[gif, parameter[gif_id, api_key, strict]]:
constant[
Shorthand for creating a Giphy api wrapper with the given api key
and then calling the gif method.
]
return[call[call[name[Giphy], parameter[]].gif, parameter[name[gif_id]]]]
|
keyword[def] identifier[gif] ( identifier[gif_id] , identifier[api_key] = identifier[GIPHY_PUBLIC_KEY] , identifier[strict] = keyword[False] ):
literal[string]
keyword[return] identifier[Giphy] ( identifier[api_key] = identifier[api_key] , identifier[strict] = identifier[strict] ). identifier[gif] ( identifier[gif_id] )
|
def gif(gif_id, api_key=GIPHY_PUBLIC_KEY, strict=False):
"""
Shorthand for creating a Giphy api wrapper with the given api key
and then calling the gif method.
"""
return Giphy(api_key=api_key, strict=strict).gif(gif_id)
|
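A usage sketch for the gif shorthand; the gif id is illustrative, the call needs network access, and the attribute on the returned object is an assumption.
# Usage sketch (gif id illustrative; requires network access):
image = gif('feqkVgjJpYtjy', api_key=GIPHY_PUBLIC_KEY)
print(image.url)   # attribute name assumed on the wrapper's result object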
def contextual_log_handler(context, path, log_obj=None, level=logging.DEBUG,
formatter=None):
"""Add a short-lived log with a contextmanager for cleanup.
Args:
context (scriptworker.context.Context): the scriptworker context
path (str): the path to the log file to create
log_obj (logging.Logger): the log object to modify. If None, use
``scriptworker.log.log``. Defaults to None.
level (int, optional): the logging level. Defaults to logging.DEBUG.
formatter (logging.Formatter, optional): the logging formatter. If None,
defaults to ``logging.Formatter(fmt=fmt)``. Default is None.
Yields:
None: but cleans up the handler afterwards.
"""
log_obj = log_obj or log
formatter = formatter or logging.Formatter(
fmt=context.config['log_fmt'],
datefmt=context.config['log_datefmt'],
)
parent_path = os.path.dirname(path)
makedirs(parent_path)
contextual_handler = logging.FileHandler(path, encoding='utf-8')
contextual_handler.setLevel(level)
contextual_handler.setFormatter(formatter)
log_obj.addHandler(contextual_handler)
yield
contextual_handler.close()
log_obj.removeHandler(contextual_handler)
|
def function[contextual_log_handler, parameter[context, path, log_obj, level, formatter]]:
constant[Add a short-lived log with a contextmanager for cleanup.
Args:
context (scriptworker.context.Context): the scriptworker context
path (str): the path to the log file to create
log_obj (logging.Logger): the log object to modify. If None, use
``scriptworker.log.log``. Defaults to None.
level (int, optional): the logging level. Defaults to logging.DEBUG.
formatter (logging.Formatter, optional): the logging formatter. If None,
defaults to ``logging.Formatter(fmt=fmt)``. Default is None.
Yields:
None: but cleans up the handler afterwards.
]
variable[log_obj] assign[=] <ast.BoolOp object at 0x7da18bcc8550>
variable[formatter] assign[=] <ast.BoolOp object at 0x7da18bcc82b0>
variable[parent_path] assign[=] call[name[os].path.dirname, parameter[name[path]]]
call[name[makedirs], parameter[name[parent_path]]]
variable[contextual_handler] assign[=] call[name[logging].FileHandler, parameter[name[path]]]
call[name[contextual_handler].setLevel, parameter[name[level]]]
call[name[contextual_handler].setFormatter, parameter[name[formatter]]]
call[name[log_obj].addHandler, parameter[name[contextual_handler]]]
<ast.Yield object at 0x7da18bccb6a0>
call[name[contextual_handler].close, parameter[]]
call[name[log_obj].removeHandler, parameter[name[contextual_handler]]]
|
keyword[def] identifier[contextual_log_handler] ( identifier[context] , identifier[path] , identifier[log_obj] = keyword[None] , identifier[level] = identifier[logging] . identifier[DEBUG] ,
identifier[formatter] = keyword[None] ):
literal[string]
identifier[log_obj] = identifier[log_obj] keyword[or] identifier[log]
identifier[formatter] = identifier[formatter] keyword[or] identifier[logging] . identifier[Formatter] (
identifier[fmt] = identifier[context] . identifier[config] [ literal[string] ],
identifier[datefmt] = identifier[context] . identifier[config] [ literal[string] ],
)
identifier[parent_path] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[path] )
identifier[makedirs] ( identifier[parent_path] )
identifier[contextual_handler] = identifier[logging] . identifier[FileHandler] ( identifier[path] , identifier[encoding] = literal[string] )
identifier[contextual_handler] . identifier[setLevel] ( identifier[level] )
identifier[contextual_handler] . identifier[setFormatter] ( identifier[formatter] )
identifier[log_obj] . identifier[addHandler] ( identifier[contextual_handler] )
keyword[yield]
identifier[contextual_handler] . identifier[close] ()
identifier[log_obj] . identifier[removeHandler] ( identifier[contextual_handler] )
|
def contextual_log_handler(context, path, log_obj=None, level=logging.DEBUG, formatter=None):
"""Add a short-lived log with a contextmanager for cleanup.
Args:
context (scriptworker.context.Context): the scriptworker context
path (str): the path to the log file to create
log_obj (logging.Logger): the log object to modify. If None, use
``scriptworker.log.log``. Defaults to None.
level (int, optional): the logging level. Defaults to logging.DEBUG.
formatter (logging.Formatter, optional): the logging formatter. If None,
defaults to ``logging.Formatter(fmt=fmt)``. Default is None.
Yields:
None: but cleans up the handler afterwards.
"""
log_obj = log_obj or log
formatter = formatter or logging.Formatter(fmt=context.config['log_fmt'], datefmt=context.config['log_datefmt'])
parent_path = os.path.dirname(path)
makedirs(parent_path)
contextual_handler = logging.FileHandler(path, encoding='utf-8')
contextual_handler.setLevel(level)
contextual_handler.setFormatter(formatter)
log_obj.addHandler(contextual_handler)
yield
contextual_handler.close()
log_obj.removeHandler(contextual_handler)
|
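A usage sketch of the short-lived handler, assuming the function is wrapped with contextlib.contextmanager as its yield-based body suggests; the context object, its config keys, and the path are illustrative.
# Usage sketch (context and path illustrative):
import logging
log = logging.getLogger('scriptworker_demo')   # illustrative logger name
with contextual_log_handler(context, '/tmp/task/live.log',
                            log_obj=log, level=logging.INFO):
    log.info('captured in the temporary file handler')
# on exit the handler is closed and removed from the logger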
def add_meta(self, **metadict):
"""Add meta information to a Symbol.
Parameters
----------
metadict
Attributes are passed as keywords, with their
associated values as strings. For meta attributes with spaces,
use an unpacked dict.
"""
objs = object_session(self)
for attr,val in metadict.iteritems():
newmeta = SymbolMeta(self, attr, val)
self.meta.append(newmeta)
objs.commit()
|
def function[add_meta, parameter[self]]:
constant[Add meta information to a Symbol.
Parameters
----------
metadict
Attributes are passed as keywords, with their
associated values as strings. For meta attributes with spaces,
use an unpacked dict.
]
variable[objs] assign[=] call[name[object_session], parameter[name[self]]]
for taget[tuple[[<ast.Name object at 0x7da18dc9a6b0>, <ast.Name object at 0x7da18dc99b70>]]] in starred[call[name[metadict].iteritems, parameter[]]] begin[:]
variable[newmeta] assign[=] call[name[SymbolMeta], parameter[name[self], name[attr], name[val]]]
call[name[self].meta.append, parameter[name[newmeta]]]
call[name[objs].commit, parameter[]]
|
keyword[def] identifier[add_meta] ( identifier[self] ,** identifier[metadict] ):
literal[string]
identifier[objs] = identifier[object_session] ( identifier[self] )
keyword[for] identifier[attr] , identifier[val] keyword[in] identifier[metadict] . identifier[iteritems] ():
identifier[newmeta] = identifier[SymbolMeta] ( identifier[self] , identifier[attr] , identifier[val] )
identifier[self] . identifier[meta] . identifier[append] ( identifier[newmeta] )
identifier[objs] . identifier[commit] ()
|
def add_meta(self, **metadict):
"""Add meta information to a Symbol.
Parameters
----------
metadict
Attributes are passed as keywords, with their
associated values as strings. For meta attributes with spaces,
use an unpacked dict.
"""
objs = object_session(self)
for (attr, val) in metadict.iteritems():
newmeta = SymbolMeta(self, attr, val)
self.meta.append(newmeta) # depends on [control=['for'], data=[]]
objs.commit()
|
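To make the docstring's tip concrete, two illustrative calls on a symbol object; the second uses an unpacked dict so attribute names may contain spaces.
# Illustrative calls (symbol object assumed; Python 2 given iteritems()):
sym.add_meta(source='bloomberg', units='USD')
# attribute names with spaces go through an unpacked dict, per the docstring:
sym.add_meta(**{'data provider': 'internal', 'last reviewed': '2015-01-01'})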
def nxos_api_show(commands, raw_text=True, **kwargs):
'''
.. versionadded:: 2019.2.0
Execute one or more show (non-configuration) commands.
commands
The commands to be executed.
raw_text: ``True``
Whether to return raw text or structured data.
CLI Example:
.. code-block:: bash
salt '*' napalm.nxos_api_show 'show version'
salt '*' napalm.nxos_api_show 'show bgp sessions' 'show processes' raw_text=False
'''
nxos_api_kwargs = pyeapi_nxos_api_args(**kwargs)
return __salt__['nxos_api.show'](commands,
raw_text=raw_text,
**nxos_api_kwargs)
|
def function[nxos_api_show, parameter[commands, raw_text]]:
constant[
.. versionadded:: 2019.2.0
Execute one or more show (non-configuration) commands.
commands
The commands to be executed.
raw_text: ``True``
Whether to return raw text or structured data.
CLI Example:
.. code-block:: bash
salt '*' napalm.nxos_api_show 'show version'
salt '*' napalm.nxos_api_show 'show bgp sessions' 'show processes' raw_text=False
]
variable[nxos_api_kwargs] assign[=] call[name[pyeapi_nxos_api_args], parameter[]]
return[call[call[name[__salt__]][constant[nxos_api.show]], parameter[name[commands]]]]
|
keyword[def] identifier[nxos_api_show] ( identifier[commands] , identifier[raw_text] = keyword[True] ,** identifier[kwargs] ):
literal[string]
identifier[nxos_api_kwargs] = identifier[pyeapi_nxos_api_args] (** identifier[kwargs] )
keyword[return] identifier[__salt__] [ literal[string] ]( identifier[commands] ,
identifier[raw_text] = identifier[raw_text] ,
** identifier[nxos_api_kwargs] )
|
def nxos_api_show(commands, raw_text=True, **kwargs):
"""
.. versionadded:: 2019.2.0
Execute one or more show (non-configuration) commands.
commands
The commands to be executed.
raw_text: ``True``
Whether to return raw text or structured data.
CLI Example:
.. code-block:: bash
salt '*' napalm.nxos_api_show 'show version'
salt '*' napalm.nxos_api_show 'show bgp sessions' 'show processes' raw_text=False
"""
nxos_api_kwargs = pyeapi_nxos_api_args(**kwargs)
return __salt__['nxos_api.show'](commands, raw_text=raw_text, **nxos_api_kwargs)
|
def median(nums):
"""Return median.
With numbers sorted by value, the median is the middle value (if there is
an odd number of values) or the arithmetic mean of the two middle values
(if there is an even number of values).
Cf. https://en.wikipedia.org/wiki/Median
Parameters
----------
nums : list
A series of numbers
Returns
-------
int or float
The median of nums
Examples
--------
>>> median([1, 2, 3])
2
>>> median([1, 2, 3, 4])
2.5
>>> median([1, 2, 2, 4])
2
"""
nums = sorted(nums)
mag = len(nums)
if mag % 2:
mag = int((mag - 1) / 2)
return nums[mag]
mag = int(mag / 2)
med = (nums[mag - 1] + nums[mag]) / 2
return med if not med.is_integer() else int(med)
|
def function[median, parameter[nums]]:
constant[Return median.
With numbers sorted by value, the median is the middle value (if there is
an odd number of values) or the arithmetic mean of the two middle values
(if there is an even number of values).
Cf. https://en.wikipedia.org/wiki/Median
Parameters
----------
nums : list
A series of numbers
Returns
-------
int or float
The median of nums
Examples
--------
>>> median([1, 2, 3])
2
>>> median([1, 2, 3, 4])
2.5
>>> median([1, 2, 2, 4])
2
]
variable[nums] assign[=] call[name[sorted], parameter[name[nums]]]
variable[mag] assign[=] call[name[len], parameter[name[nums]]]
if binary_operation[name[mag] <ast.Mod object at 0x7da2590d6920> constant[2]] begin[:]
variable[mag] assign[=] call[name[int], parameter[binary_operation[binary_operation[name[mag] - constant[1]] / constant[2]]]]
return[call[name[nums]][name[mag]]]
variable[mag] assign[=] call[name[int], parameter[binary_operation[name[mag] / constant[2]]]]
variable[med] assign[=] binary_operation[binary_operation[call[name[nums]][binary_operation[name[mag] - constant[1]]] + call[name[nums]][name[mag]]] / constant[2]]
return[<ast.IfExp object at 0x7da1b0142200>]
|
keyword[def] identifier[median] ( identifier[nums] ):
literal[string]
identifier[nums] = identifier[sorted] ( identifier[nums] )
identifier[mag] = identifier[len] ( identifier[nums] )
keyword[if] identifier[mag] % literal[int] :
identifier[mag] = identifier[int] (( identifier[mag] - literal[int] )/ literal[int] )
keyword[return] identifier[nums] [ identifier[mag] ]
identifier[mag] = identifier[int] ( identifier[mag] / literal[int] )
identifier[med] =( identifier[nums] [ identifier[mag] - literal[int] ]+ identifier[nums] [ identifier[mag] ])/ literal[int]
keyword[return] identifier[med] keyword[if] keyword[not] identifier[med] . identifier[is_integer] () keyword[else] identifier[int] ( identifier[med] )
|
def median(nums):
"""Return median.
With numbers sorted by value, the median is the middle value (if there is
an odd number of values) or the arithmetic mean of the two middle values
(if there is an even number of values).
Cf. https://en.wikipedia.org/wiki/Median
Parameters
----------
nums : list
A series of numbers
Returns
-------
int or float
The median of nums
Examples
--------
>>> median([1, 2, 3])
2
>>> median([1, 2, 3, 4])
2.5
>>> median([1, 2, 2, 4])
2
"""
nums = sorted(nums)
mag = len(nums)
if mag % 2:
mag = int((mag - 1) / 2)
return nums[mag] # depends on [control=['if'], data=[]]
mag = int(mag / 2)
med = (nums[mag - 1] + nums[mag]) / 2
return med if not med.is_integer() else int(med)
|
def get_repository(self, entity_cls):
"""Return a repository object configured with a live connection"""
model_cls = self.get_model(entity_cls)
return DictRepository(self, entity_cls, model_cls)
|
def function[get_repository, parameter[self, entity_cls]]:
constant[Return a repository object configured with a live connection]
variable[model_cls] assign[=] call[name[self].get_model, parameter[name[entity_cls]]]
return[call[name[DictRepository], parameter[name[self], name[entity_cls], name[model_cls]]]]
|
keyword[def] identifier[get_repository] ( identifier[self] , identifier[entity_cls] ):
literal[string]
identifier[model_cls] = identifier[self] . identifier[get_model] ( identifier[entity_cls] )
keyword[return] identifier[DictRepository] ( identifier[self] , identifier[entity_cls] , identifier[model_cls] )
|
def get_repository(self, entity_cls):
"""Return a repository object configured with a live connection"""
model_cls = self.get_model(entity_cls)
return DictRepository(self, entity_cls, model_cls)
|
def set_plot_theme(theme):
"""Set the plotting parameters to a predefined theme"""
if theme.lower() in ['paraview', 'pv']:
rcParams['background'] = PV_BACKGROUND
rcParams['cmap'] = 'coolwarm'
rcParams['font']['family'] = 'arial'
rcParams['font']['label_size'] = 16
rcParams['show_edges'] = False
elif theme.lower() in ['document', 'doc', 'paper', 'report']:
rcParams['background'] = 'white'
rcParams['cmap'] = 'viridis'
rcParams['font']['size'] = 18
rcParams['font']['title_size'] = 18
rcParams['font']['label_size'] = 18
rcParams['font']['color'] = 'black'
rcParams['show_edges'] = False
rcParams['color'] = 'tan'
rcParams['outline_color'] = 'black'
elif theme.lower() in ['night', 'dark']:
rcParams['background'] = 'black'
rcParams['cmap'] = 'viridis'
rcParams['font']['color'] = 'white'
rcParams['show_edges'] = False
rcParams['color'] = 'tan'
rcParams['outline_color'] = 'white'
elif theme.lower() in ['default']:
for k,v in DEFAULT_THEME.items():
rcParams[k] = v
|
def function[set_plot_theme, parameter[theme]]:
constant[Set the plotting parameters to a predefined theme]
if compare[call[name[theme].lower, parameter[]] in list[[<ast.Constant object at 0x7da20e960df0>, <ast.Constant object at 0x7da20e9612d0>]]] begin[:]
call[name[rcParams]][constant[background]] assign[=] name[PV_BACKGROUND]
call[name[rcParams]][constant[cmap]] assign[=] constant[coolwarm]
call[call[name[rcParams]][constant[font]]][constant[family]] assign[=] constant[arial]
call[call[name[rcParams]][constant[font]]][constant[label_size]] assign[=] constant[16]
call[name[rcParams]][constant[show_edges]] assign[=] constant[False]
|
keyword[def] identifier[set_plot_theme] ( identifier[theme] ):
literal[string]
keyword[if] identifier[theme] . identifier[lower] () keyword[in] [ literal[string] , literal[string] ]:
identifier[rcParams] [ literal[string] ]= identifier[PV_BACKGROUND]
identifier[rcParams] [ literal[string] ]= literal[string]
identifier[rcParams] [ literal[string] ][ literal[string] ]= literal[string]
identifier[rcParams] [ literal[string] ][ literal[string] ]= literal[int]
identifier[rcParams] [ literal[string] ]= keyword[False]
keyword[elif] identifier[theme] . identifier[lower] () keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] ]:
identifier[rcParams] [ literal[string] ]= literal[string]
identifier[rcParams] [ literal[string] ]= literal[string]
identifier[rcParams] [ literal[string] ][ literal[string] ]= literal[int]
identifier[rcParams] [ literal[string] ][ literal[string] ]= literal[int]
identifier[rcParams] [ literal[string] ][ literal[string] ]= literal[int]
identifier[rcParams] [ literal[string] ][ literal[string] ]= literal[string]
identifier[rcParams] [ literal[string] ]= keyword[False]
identifier[rcParams] [ literal[string] ]= literal[string]
identifier[rcParams] [ literal[string] ]= literal[string]
keyword[elif] identifier[theme] . identifier[lower] () keyword[in] [ literal[string] , literal[string] ]:
identifier[rcParams] [ literal[string] ]= literal[string]
identifier[rcParams] [ literal[string] ]= literal[string]
identifier[rcParams] [ literal[string] ][ literal[string] ]= literal[string]
identifier[rcParams] [ literal[string] ]= keyword[False]
identifier[rcParams] [ literal[string] ]= literal[string]
identifier[rcParams] [ literal[string] ]= literal[string]
keyword[elif] identifier[theme] . identifier[lower] () keyword[in] [ literal[string] ]:
keyword[for] identifier[k] , identifier[v] keyword[in] identifier[DEFAULT_THEME] . identifier[items] ():
identifier[rcParams] [ identifier[k] ]= identifier[v]
|
def set_plot_theme(theme):
"""Set the plotting parameters to a predefined theme"""
if theme.lower() in ['paraview', 'pv']:
rcParams['background'] = PV_BACKGROUND
rcParams['cmap'] = 'coolwarm'
rcParams['font']['family'] = 'arial'
rcParams['font']['label_size'] = 16
rcParams['show_edges'] = False # depends on [control=['if'], data=[]]
elif theme.lower() in ['document', 'doc', 'paper', 'report']:
rcParams['background'] = 'white'
rcParams['cmap'] = 'viridis'
rcParams['font']['size'] = 18
rcParams['font']['title_size'] = 18
rcParams['font']['label_size'] = 18
rcParams['font']['color'] = 'black'
rcParams['show_edges'] = False
rcParams['color'] = 'tan'
rcParams['outline_color'] = 'black' # depends on [control=['if'], data=[]]
elif theme.lower() in ['night', 'dark']:
rcParams['background'] = 'black'
rcParams['cmap'] = 'viridis'
rcParams['font']['color'] = 'white'
rcParams['show_edges'] = False
rcParams['color'] = 'tan'
rcParams['outline_color'] = 'white' # depends on [control=['if'], data=[]]
elif theme.lower() in ['default']:
for (k, v) in DEFAULT_THEME.items():
rcParams[k] = v # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
|
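Since rcParams is module-level, calling set_plot_theme mutates the global plotting defaults in place; a small usage sketch:
# Themes mutate the module-level rcParams in place:
set_plot_theme('document')    # white background, viridis, larger fonts
assert rcParams['background'] == 'white'
set_plot_theme('default')     # restore the values saved in DEFAULT_THEME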
def add_user(self, attrs):
"""add a user"""
ldap_client = self._bind()
# encoding crap
attrs_srt = self.attrs_pretreatment(attrs)
attrs_srt[self._byte_p2('objectClass')] = self.objectclasses
# construct its DN
dn = \
self._byte_p2(self.dn_user_attr) + \
self._byte_p2('=') + \
self._byte_p2(ldap.dn.escape_dn_chars(
attrs[self.dn_user_attr]
)
) + \
self._byte_p2(',') + \
self._byte_p2(self.userdn)
# generate the ldif and add the user with add_s
ldif = modlist.addModlist(attrs_srt)
try:
ldap_client.add_s(dn, ldif)
except ldap.ALREADY_EXISTS as e:
raise UserAlreadyExists(attrs[self.key], self.backend_name)
except Exception as e:
ldap_client.unbind_s()
self._exception_handler(e)
ldap_client.unbind_s()
|
def function[add_user, parameter[self, attrs]]:
constant[add a user]
variable[ldap_client] assign[=] call[name[self]._bind, parameter[]]
variable[attrs_srt] assign[=] call[name[self].attrs_pretreatment, parameter[name[attrs]]]
call[name[attrs_srt]][call[name[self]._byte_p2, parameter[constant[objectClass]]]] assign[=] name[self].objectclasses
variable[dn] assign[=] binary_operation[binary_operation[binary_operation[binary_operation[call[name[self]._byte_p2, parameter[name[self].dn_user_attr]] + call[name[self]._byte_p2, parameter[constant[=]]]] + call[name[self]._byte_p2, parameter[call[name[ldap].dn.escape_dn_chars, parameter[call[name[attrs]][name[self].dn_user_attr]]]]]] + call[name[self]._byte_p2, parameter[constant[,]]]] + call[name[self]._byte_p2, parameter[name[self].userdn]]]
variable[ldif] assign[=] call[name[modlist].addModlist, parameter[name[attrs_srt]]]
<ast.Try object at 0x7da20c6c50f0>
call[name[ldap_client].unbind_s, parameter[]]
|
keyword[def] identifier[add_user] ( identifier[self] , identifier[attrs] ):
literal[string]
identifier[ldap_client] = identifier[self] . identifier[_bind] ()
identifier[attrs_srt] = identifier[self] . identifier[attrs_pretreatment] ( identifier[attrs] )
identifier[attrs_srt] [ identifier[self] . identifier[_byte_p2] ( literal[string] )]= identifier[self] . identifier[objectclasses]
identifier[dn] = identifier[self] . identifier[_byte_p2] ( identifier[self] . identifier[dn_user_attr] )+ identifier[self] . identifier[_byte_p2] ( literal[string] )+ identifier[self] . identifier[_byte_p2] ( identifier[ldap] . identifier[dn] . identifier[escape_dn_chars] (
identifier[attrs] [ identifier[self] . identifier[dn_user_attr] ]
)
)+ identifier[self] . identifier[_byte_p2] ( literal[string] )+ identifier[self] . identifier[_byte_p2] ( identifier[self] . identifier[userdn] )
identifier[ldif] = identifier[modlist] . identifier[addModlist] ( identifier[attrs_srt] )
keyword[try] :
identifier[ldap_client] . identifier[add_s] ( identifier[dn] , identifier[ldif] )
keyword[except] identifier[ldap] . identifier[ALREADY_EXISTS] keyword[as] identifier[e] :
keyword[raise] identifier[UserAlreadyExists] ( identifier[attrs] [ identifier[self] . identifier[key] ], identifier[self] . identifier[backend_name] )
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[ldap_client] . identifier[unbind_s] ()
identifier[self] . identifier[_exception_handler] ( identifier[e] )
identifier[ldap_client] . identifier[unbind_s] ()
|
def add_user(self, attrs):
"""add a user"""
ldap_client = self._bind()
# encoding crap
attrs_srt = self.attrs_pretreatment(attrs)
attrs_srt[self._byte_p2('objectClass')] = self.objectclasses
# construct its DN
dn = self._byte_p2(self.dn_user_attr) + self._byte_p2('=') + self._byte_p2(ldap.dn.escape_dn_chars(attrs[self.dn_user_attr])) + self._byte_p2(',') + self._byte_p2(self.userdn)
# generate the ldif and add the user with add_s
ldif = modlist.addModlist(attrs_srt)
try:
ldap_client.add_s(dn, ldif) # depends on [control=['try'], data=[]]
except ldap.ALREADY_EXISTS as e:
raise UserAlreadyExists(attrs[self.key], self.backend_name) # depends on [control=['except'], data=[]]
except Exception as e:
ldap_client.unbind_s()
self._exception_handler(e) # depends on [control=['except'], data=['e']]
ldap_client.unbind_s()
|
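An illustrative attrs dict for add_user; the keys depend on the configured schema, and 'uid' is assumed to be dn_user_attr, so the resulting DN is uid=jdoe,<userdn>.
# Illustrative attrs (schema-dependent; 'uid' assumed to be dn_user_attr):
attrs = {
    'uid': 'jdoe',
    'cn': 'John Doe',
    'sn': 'Doe',
    'userPassword': '{SSHA}...',   # hash elided on purpose
}
backend.add_user(attrs)            # raises UserAlreadyExists on a duplicate entry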
def handle_machines(changeset):
"""Populate the change set with addMachines changes."""
machines = sorted(changeset.bundle.get('machines', {}).items())
for machine_name, machine in machines:
if machine is None:
# We allow the machine value to be unset in the YAML.
machine = {}
record_id = 'addMachines-{}'.format(changeset.next_action())
changeset.send({
'id': record_id,
'method': 'addMachines',
'args': [
{
'series': machine.get('series', ''),
'constraints': machine.get('constraints', ''),
},
],
'requires': [],
})
changeset.machines_added[str(machine_name)] = record_id
if 'annotations' in machine:
changeset.send({
'id': 'setAnnotations-{}'.format(changeset.next_action()),
'method': 'setAnnotations',
'args': [
'${}'.format(record_id),
'machine',
machine['annotations'],
],
'requires': [record_id],
})
return handle_relations
|
def function[handle_machines, parameter[changeset]]:
constant[Populate the change set with addMachines changes.]
variable[machines] assign[=] call[name[sorted], parameter[call[call[name[changeset].bundle.get, parameter[constant[machines], dictionary[[], []]]].items, parameter[]]]]
for taget[tuple[[<ast.Name object at 0x7da1b0a60880>, <ast.Name object at 0x7da1b0a60ac0>]]] in starred[name[machines]] begin[:]
if compare[name[machine] is constant[None]] begin[:]
variable[machine] assign[=] dictionary[[], []]
variable[record_id] assign[=] call[constant[addMachines-{}].format, parameter[call[name[changeset].next_action, parameter[]]]]
call[name[changeset].send, parameter[dictionary[[<ast.Constant object at 0x7da1b0a63370>, <ast.Constant object at 0x7da1b0a61510>, <ast.Constant object at 0x7da1b0a61420>, <ast.Constant object at 0x7da1b0a60370>], [<ast.Name object at 0x7da1b0a613c0>, <ast.Constant object at 0x7da1b0a61570>, <ast.List object at 0x7da1b0a616f0>, <ast.List object at 0x7da1b0a636a0>]]]]
call[name[changeset].machines_added][call[name[str], parameter[name[machine_name]]]] assign[=] name[record_id]
if compare[constant[annotations] in name[machine]] begin[:]
call[name[changeset].send, parameter[dictionary[[<ast.Constant object at 0x7da1b0a63c40>, <ast.Constant object at 0x7da1b0a633d0>, <ast.Constant object at 0x7da1b0a639a0>, <ast.Constant object at 0x7da1b0a63af0>], [<ast.Call object at 0x7da1b0a630a0>, <ast.Constant object at 0x7da1b0a602e0>, <ast.List object at 0x7da1b0a63d30>, <ast.List object at 0x7da1b0a629e0>]]]]
return[name[handle_relations]]
|
keyword[def] identifier[handle_machines] ( identifier[changeset] ):
literal[string]
identifier[machines] = identifier[sorted] ( identifier[changeset] . identifier[bundle] . identifier[get] ( literal[string] ,{}). identifier[items] ())
keyword[for] identifier[machine_name] , identifier[machine] keyword[in] identifier[machines] :
keyword[if] identifier[machine] keyword[is] keyword[None] :
identifier[machine] ={}
identifier[record_id] = literal[string] . identifier[format] ( identifier[changeset] . identifier[next_action] ())
identifier[changeset] . identifier[send] ({
literal[string] : identifier[record_id] ,
literal[string] : literal[string] ,
literal[string] :[
{
literal[string] : identifier[machine] . identifier[get] ( literal[string] , literal[string] ),
literal[string] : identifier[machine] . identifier[get] ( literal[string] , literal[string] ),
},
],
literal[string] :[],
})
identifier[changeset] . identifier[machines_added] [ identifier[str] ( identifier[machine_name] )]= identifier[record_id]
keyword[if] literal[string] keyword[in] identifier[machine] :
identifier[changeset] . identifier[send] ({
literal[string] : literal[string] . identifier[format] ( identifier[changeset] . identifier[next_action] ()),
literal[string] : literal[string] ,
literal[string] :[
literal[string] . identifier[format] ( identifier[record_id] ),
literal[string] ,
identifier[machine] [ literal[string] ],
],
literal[string] :[ identifier[record_id] ],
})
keyword[return] identifier[handle_relations]
|
def handle_machines(changeset):
"""Populate the change set with addMachines changes."""
machines = sorted(changeset.bundle.get('machines', {}).items())
for (machine_name, machine) in machines:
if machine is None:
# We allow the machine value to be unset in the YAML.
machine = {} # depends on [control=['if'], data=['machine']]
record_id = 'addMachines-{}'.format(changeset.next_action())
changeset.send({'id': record_id, 'method': 'addMachines', 'args': [{'series': machine.get('series', ''), 'constraints': machine.get('constraints', '')}], 'requires': []})
changeset.machines_added[str(machine_name)] = record_id
if 'annotations' in machine:
changeset.send({'id': 'setAnnotations-{}'.format(changeset.next_action()), 'method': 'setAnnotations', 'args': ['${}'.format(record_id), 'machine', machine['annotations']], 'requires': [record_id]}) # depends on [control=['if'], data=['machine']] # depends on [control=['for'], data=[]]
return handle_relations
|
def register_intent_parser(self, intent_parser, domain=0):
"""
    Register an intent parser with a domain.
    Args:
        intent_parser(intent): The intent parser you wish to register.
        domain(str): a string representing the domain you wish to register
            the intent parser to.
"""
if domain not in self.domains:
self.register_domain(domain=domain)
self.domains[domain].register_intent_parser(
intent_parser=intent_parser)
|
def function[register_intent_parser, parameter[self, intent_parser, domain]]:
constant[
    Register an intent parser with a domain.
    Args:
        intent_parser(intent): The intent parser you wish to register.
        domain(str): a string representing the domain you wish to register
            the intent parser to.
]
if compare[name[domain] <ast.NotIn object at 0x7da2590d7190> name[self].domains] begin[:]
call[name[self].register_domain, parameter[]]
call[call[name[self].domains][name[domain]].register_intent_parser, parameter[]]
|
keyword[def] identifier[register_intent_parser] ( identifier[self] , identifier[intent_parser] , identifier[domain] = literal[int] ):
literal[string]
keyword[if] identifier[domain] keyword[not] keyword[in] identifier[self] . identifier[domains] :
identifier[self] . identifier[register_domain] ( identifier[domain] = identifier[domain] )
identifier[self] . identifier[domains] [ identifier[domain] ]. identifier[register_intent_parser] (
identifier[intent_parser] = identifier[intent_parser] )
|
def register_intent_parser(self, intent_parser, domain=0):
"""
    Register an intent parser with a domain.
    Args:
        intent_parser(intent): The intent parser you wish to register.
        domain(str): a string representing the domain you wish to register
            the intent parser to.
"""
if domain not in self.domains:
self.register_domain(domain=domain) # depends on [control=['if'], data=['domain']]
self.domains[domain].register_intent_parser(intent_parser=intent_parser)
|
def get_email_message(self, message_uid, message_type="text/plain"):
"""
Fetch contents of email.
Args:
message_uid (int): IMAP Message UID number.
Kwargs:
        message_type: Can be 'text/plain' or 'text/html'
"""
self._mail.select("inbox")
result = self._mail.uid('fetch', message_uid, "(RFC822)")
msg = email.message_from_string(result[1][0][1])
try:
# Try to handle as multipart message first.
for part in msg.walk():
if part.get_content_type() == message_type:
return part.get_payload(decode=True)
except:
# handle as plain text email
return msg.get_payload(decode=True)
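# Hedged usage sketch (editor addition, not part of the original source):
# `mail_helper` and the UID are illustrative stand-ins for an instance of
# the class defining get_email_message and a real IMAP message UID.
html_body = mail_helper.get_email_message(message_uid="42",
                                          message_type="text/html")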
|
def function[get_email_message, parameter[self, message_uid, message_type]]:
constant[
Fetch contents of email.
Args:
message_uid (int): IMAP Message UID number.
Kwargs:
        message_type: Can be 'text/plain' or 'text/html'
]
call[name[self]._mail.select, parameter[constant[inbox]]]
variable[result] assign[=] call[name[self]._mail.uid, parameter[constant[fetch], name[message_uid], constant[(RFC822)]]]
variable[msg] assign[=] call[name[email].message_from_string, parameter[call[call[call[name[result]][constant[1]]][constant[0]]][constant[1]]]]
<ast.Try object at 0x7da1b11db4f0>
|
keyword[def] identifier[get_email_message] ( identifier[self] , identifier[message_uid] , identifier[message_type] = literal[string] ):
literal[string]
identifier[self] . identifier[_mail] . identifier[select] ( literal[string] )
identifier[result] = identifier[self] . identifier[_mail] . identifier[uid] ( literal[string] , identifier[message_uid] , literal[string] )
identifier[msg] = identifier[email] . identifier[message_from_string] ( identifier[result] [ literal[int] ][ literal[int] ][ literal[int] ])
keyword[try] :
keyword[for] identifier[part] keyword[in] identifier[msg] . identifier[walk] ():
keyword[if] identifier[part] . identifier[get_content_type] ()== identifier[message_type] :
keyword[return] identifier[part] . identifier[get_payload] ( identifier[decode] = keyword[True] )
keyword[except] :
keyword[return] identifier[msg] . identifier[get_payload] ( identifier[decode] = keyword[True] )
|
def get_email_message(self, message_uid, message_type='text/plain'):
"""
Fetch contents of email.
Args:
message_uid (int): IMAP Message UID number.
Kwargs:
        message_type: Can be 'text/plain' or 'text/html'
"""
self._mail.select('inbox')
result = self._mail.uid('fetch', message_uid, '(RFC822)')
msg = email.message_from_string(result[1][0][1])
try:
# Try to handle as multipart message first.
for part in msg.walk():
if part.get_content_type() == message_type:
return part.get_payload(decode=True) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['part']] # depends on [control=['try'], data=[]]
except:
# handle as plain text email
return msg.get_payload(decode=True) # depends on [control=['except'], data=[]]
|
def get_command_arg_list(self, command_name: str, to_parse: Union[Statement, str],
preserve_quotes: bool) -> Tuple[Statement, List[str]]:
"""
Called by the argument_list and argparse wrappers to retrieve just the arguments being
passed to their do_* methods as a list.
:param command_name: name of the command being run
:param to_parse: what is being passed to the do_* method. It can be one of two types:
1. An already parsed Statement
2. An argument string in cases where a do_* method is explicitly called
e.g.: Calling do_help('alias create') would cause to_parse to be 'alias create'
In this case, the string will be converted to a Statement and returned along
with the argument list.
:param preserve_quotes: if True, then quotes will not be stripped from the arguments
:return: A tuple containing:
The Statement used to retrieve the arguments
The argument list
"""
# Check if to_parse needs to be converted to a Statement
if not isinstance(to_parse, Statement):
to_parse = self.parse(command_name + ' ' + to_parse, expand=False)
if preserve_quotes:
return to_parse, to_parse.arg_list
else:
return to_parse, to_parse.argv[1:]
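# Hedged usage sketch (editor addition): `parser` stands in for an instance
# of the class that owns get_command_arg_list; the command string is made up.
statement, args = parser.get_command_arg_list('alias',
                                              'create ls "!ls -al"',
                                              preserve_quotes=True)
# `statement` is the parsed Statement; `args` keeps the original quoting.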
|
def function[get_command_arg_list, parameter[self, command_name, to_parse, preserve_quotes]]:
constant[
Called by the argument_list and argparse wrappers to retrieve just the arguments being
passed to their do_* methods as a list.
:param command_name: name of the command being run
:param to_parse: what is being passed to the do_* method. It can be one of two types:
1. An already parsed Statement
2. An argument string in cases where a do_* method is explicitly called
e.g.: Calling do_help('alias create') would cause to_parse to be 'alias create'
In this case, the string will be converted to a Statement and returned along
with the argument list.
:param preserve_quotes: if True, then quotes will not be stripped from the arguments
:return: A tuple containing:
The Statement used to retrieve the arguments
The argument list
]
if <ast.UnaryOp object at 0x7da1b1e8f0d0> begin[:]
variable[to_parse] assign[=] call[name[self].parse, parameter[binary_operation[binary_operation[name[command_name] + constant[ ]] + name[to_parse]]]]
if name[preserve_quotes] begin[:]
return[tuple[[<ast.Name object at 0x7da1b1e8de10>, <ast.Attribute object at 0x7da1b1e8f040>]]]
|
keyword[def] identifier[get_command_arg_list] ( identifier[self] , identifier[command_name] : identifier[str] , identifier[to_parse] : identifier[Union] [ identifier[Statement] , identifier[str] ],
identifier[preserve_quotes] : identifier[bool] )-> identifier[Tuple] [ identifier[Statement] , identifier[List] [ identifier[str] ]]:
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[to_parse] , identifier[Statement] ):
identifier[to_parse] = identifier[self] . identifier[parse] ( identifier[command_name] + literal[string] + identifier[to_parse] , identifier[expand] = keyword[False] )
keyword[if] identifier[preserve_quotes] :
keyword[return] identifier[to_parse] , identifier[to_parse] . identifier[arg_list]
keyword[else] :
keyword[return] identifier[to_parse] , identifier[to_parse] . identifier[argv] [ literal[int] :]
|
def get_command_arg_list(self, command_name: str, to_parse: Union[Statement, str], preserve_quotes: bool) -> Tuple[Statement, List[str]]:
"""
Called by the argument_list and argparse wrappers to retrieve just the arguments being
passed to their do_* methods as a list.
:param command_name: name of the command being run
:param to_parse: what is being passed to the do_* method. It can be one of two types:
1. An already parsed Statement
2. An argument string in cases where a do_* method is explicitly called
e.g.: Calling do_help('alias create') would cause to_parse to be 'alias create'
In this case, the string will be converted to a Statement and returned along
with the argument list.
:param preserve_quotes: if True, then quotes will not be stripped from the arguments
:return: A tuple containing:
The Statement used to retrieve the arguments
The argument list
"""
# Check if to_parse needs to be converted to a Statement
if not isinstance(to_parse, Statement):
to_parse = self.parse(command_name + ' ' + to_parse, expand=False) # depends on [control=['if'], data=[]]
if preserve_quotes:
return (to_parse, to_parse.arg_list) # depends on [control=['if'], data=[]]
else:
return (to_parse, to_parse.argv[1:])
|
def split_calls(func):
"""
    Decorator to split up server calls for methods using url parameters,
    due to the length limitation of the URI in Apache (8190 bytes by default).
"""
def wrapper(*args, **kwargs):
#The size limit is 8190 bytes minus url and api to call
#For example (https://cmsweb-testbed.cern.ch:8443/dbs/prod/global/filechildren), so 192 bytes should be safe.
size_limit = 8000
encoded_url = urllib.urlencode(kwargs)
if len(encoded_url) > size_limit:
for key, value in kwargs.iteritems():
                ###only one (the first) list at a time is split;
###currently only file lists are supported
if key in ('logical_file_name', 'block_name', 'lumi_list', 'run_num') and isinstance(value, list):
ret_val = []
for splitted_param in list_parameter_splitting(data=dict(kwargs), #make a copy, since it is manipulated
key=key,
size_limit=size_limit):
try:
ret_val.extend(func(*args, **splitted_param))
                        except (TypeError, AttributeError):  # update function calls do not return lists
ret_val= []
return ret_val
            raise dbsClientException("Invalid input",
                                     "The length of the urlencoded parameters to API %s \
exceeds %s bytes and cannot be split." % (func.__name__, size_limit))
else:
return func(*args, **kwargs)
return wrapper
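# Hedged usage sketch (editor addition): FakeApi is illustrative, not the
# real DBS client; oversized lists additionally rely on the module's
# list_parameter_splitting helper once the url-encoded size exceeds 8000.
class FakeApi(object):
    @split_calls
    def listFileParents(self, **kwargs):
        return [len(kwargs.get('logical_file_name', []))]

api = FakeApi()
# a small payload passes straight through as a single call
print(api.listFileParents(logical_file_name=['/store/f1.root']))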
|
def function[split_calls, parameter[func]]:
constant[
    Decorator to split up server calls for methods using url parameters,
    due to the length limitation of the URI in Apache (8190 bytes by default).
]
def function[wrapper, parameter[]]:
variable[size_limit] assign[=] constant[8000]
variable[encoded_url] assign[=] call[name[urllib].urlencode, parameter[name[kwargs]]]
if compare[call[name[len], parameter[name[encoded_url]]] greater[>] name[size_limit]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da18bc71990>, <ast.Name object at 0x7da18bc73a30>]]] in starred[call[name[kwargs].iteritems, parameter[]]] begin[:]
if <ast.BoolOp object at 0x7da18bc70040> begin[:]
variable[ret_val] assign[=] list[[]]
for taget[name[splitted_param]] in starred[call[name[list_parameter_splitting], parameter[]]] begin[:]
<ast.Try object at 0x7da18bc72dd0>
return[name[ret_val]]
<ast.Raise object at 0x7da18bc71690>
return[name[wrapper]]
|
keyword[def] identifier[split_calls] ( identifier[func] ):
literal[string]
keyword[def] identifier[wrapper] (* identifier[args] ,** identifier[kwargs] ):
identifier[size_limit] = literal[int]
identifier[encoded_url] = identifier[urllib] . identifier[urlencode] ( identifier[kwargs] )
keyword[if] identifier[len] ( identifier[encoded_url] )> identifier[size_limit] :
keyword[for] identifier[key] , identifier[value] keyword[in] identifier[kwargs] . identifier[iteritems] ():
keyword[if] identifier[key] keyword[in] ( literal[string] , literal[string] , literal[string] , literal[string] ) keyword[and] identifier[isinstance] ( identifier[value] , identifier[list] ):
identifier[ret_val] =[]
keyword[for] identifier[splitted_param] keyword[in] identifier[list_parameter_splitting] ( identifier[data] = identifier[dict] ( identifier[kwargs] ),
identifier[key] = identifier[key] ,
identifier[size_limit] = identifier[size_limit] ):
keyword[try] :
identifier[ret_val] . identifier[extend] ( identifier[func] (* identifier[args] ,** identifier[splitted_param] ))
keyword[except] ( identifier[TypeError] , identifier[AttributeError] ):
identifier[ret_val] =[]
keyword[return] identifier[ret_val]
keyword[raise] identifier[dbsClientException] ( literal[string] ,
literal[string] %( identifier[func] . identifier[__name__] , identifier[size_limit] ))
keyword[else] :
keyword[return] identifier[func] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[wrapper]
|
def split_calls(func):
"""
    Decorator to split up server calls for methods using url parameters,
    due to the length limitation of the URI in Apache (8190 bytes by default).
"""
def wrapper(*args, **kwargs):
#The size limit is 8190 bytes minus url and api to call
#For example (https://cmsweb-testbed.cern.ch:8443/dbs/prod/global/filechildren), so 192 bytes should be safe.
size_limit = 8000
encoded_url = urllib.urlencode(kwargs)
if len(encoded_url) > size_limit:
for (key, value) in kwargs.iteritems():
                ###only one (the first) list at a time is split;
###currently only file lists are supported
if key in ('logical_file_name', 'block_name', 'lumi_list', 'run_num') and isinstance(value, list):
ret_val = []
for splitted_param in list_parameter_splitting(data=dict(kwargs), key=key, size_limit=size_limit): #make a copy, since it is manipulated
try:
ret_val.extend(func(*args, **splitted_param)) # depends on [control=['try'], data=[]]
                        except (TypeError, AttributeError):  # update function calls do not return lists
ret_val = [] # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['splitted_param']]
return ret_val # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
            raise dbsClientException('Invalid input', 'The length of the urlencoded parameters to API %s exceeds %s bytes and cannot be split.' % (func.__name__, size_limit)) # depends on [control=['if'], data=['size_limit']]
else:
return func(*args, **kwargs)
return wrapper
|
def get_config(cls, service, config=None):
    """Get the configuration of the specified rate limiter
:param str service:
rate limiter name
:param config:
optional global rate limiters configuration.
If not specified, then use rate limiters configuration
specified at application level
"""
config = config or cls.get_configs()
return config[service]
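# Hedged usage sketch (editor addition): the limiter name and settings are
# illustrative, and we assume get_config is exposed as a classmethod on the
# (unshown) RateLimiters class.
configs = {'twitter': {'max_calls': 15, 'period': 900}}
twitter_cfg = RateLimiters.get_config('twitter', config=configs)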
|
def function[get_config, parameter[cls, service, config]]:
    constant[Get the configuration of the specified rate limiter
:param str service:
rate limiter name
:param config:
optional global rate limiters configuration.
If not specified, then use rate limiters configuration
specified at application level
]
variable[config] assign[=] <ast.BoolOp object at 0x7da2041da2f0>
return[call[name[config]][name[service]]]
|
keyword[def] identifier[get_config] ( identifier[cls] , identifier[service] , identifier[config] = keyword[None] ):
literal[string]
identifier[config] = identifier[config] keyword[or] identifier[cls] . identifier[get_configs] ()
keyword[return] identifier[config] [ identifier[service] ]
|
def get_config(cls, service, config=None):
    """Get the configuration of the specified rate limiter
:param str service:
rate limiter name
:param config:
optional global rate limiters configuration.
If not specified, then use rate limiters configuration
specified at application level
"""
config = config or cls.get_configs()
return config[service]
|
def find_formatters(path, silent=True):
"""
Returns a list of formatter classes which would accept the file given by *path*. When no classes
could be found and *silent* is *True*, an empty list is returned. Otherwise, an exception is
raised.
"""
formatters = [f for f in six.itervalues(FormatterRegister.formatters) if f.accepts(path)]
if formatters or silent:
return formatters
else:
raise Exception("cannot find formatter for path '{}'".format(path))
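# Hedged usage sketch (editor addition): which classes are returned depends
# on what has been registered in FormatterRegister.
candidates = find_formatters('results/report.json')
if not candidates:
    print('no formatter accepts this path')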
|
def function[find_formatters, parameter[path, silent]]:
constant[
Returns a list of formatter classes which would accept the file given by *path*. When no classes
could be found and *silent* is *True*, an empty list is returned. Otherwise, an exception is
raised.
]
variable[formatters] assign[=] <ast.ListComp object at 0x7da1b05ee7a0>
if <ast.BoolOp object at 0x7da1b05ecfd0> begin[:]
return[name[formatters]]
|
keyword[def] identifier[find_formatters] ( identifier[path] , identifier[silent] = keyword[True] ):
literal[string]
identifier[formatters] =[ identifier[f] keyword[for] identifier[f] keyword[in] identifier[six] . identifier[itervalues] ( identifier[FormatterRegister] . identifier[formatters] ) keyword[if] identifier[f] . identifier[accepts] ( identifier[path] )]
keyword[if] identifier[formatters] keyword[or] identifier[silent] :
keyword[return] identifier[formatters]
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[path] ))
|
def find_formatters(path, silent=True):
"""
Returns a list of formatter classes which would accept the file given by *path*. When no classes
could be found and *silent* is *True*, an empty list is returned. Otherwise, an exception is
raised.
"""
formatters = [f for f in six.itervalues(FormatterRegister.formatters) if f.accepts(path)]
if formatters or silent:
return formatters # depends on [control=['if'], data=[]]
else:
raise Exception("cannot find formatter for path '{}'".format(path))
|
def current_user(self):
"""Retrieve the user ID of the current user talking to your bot.
This is mostly useful inside of a Python object macro to get the user
ID of the person who caused the object macro to be invoked (i.e. to
set a variable for that user from within the object).
This will return ``None`` if used outside of the context of getting a
reply (the value is unset at the end of the ``reply()`` method).
"""
if self._brain._current_user is None:
# They're doing it wrong.
self._warn("current_user() is meant to be used from within a Python object macro!")
return self._brain._current_user
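# Hedged usage sketch (editor addition): inside a Python object macro the
# interpreter instance (here `rs`) is passed in, so the macro can ask who
# triggered it; the macro signature is assumed, not taken from this source.
def whoami_macro(rs, args):
    return "your user id is {}".format(rs.current_user())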
|
def function[current_user, parameter[self]]:
constant[Retrieve the user ID of the current user talking to your bot.
This is mostly useful inside of a Python object macro to get the user
ID of the person who caused the object macro to be invoked (i.e. to
set a variable for that user from within the object).
This will return ``None`` if used outside of the context of getting a
reply (the value is unset at the end of the ``reply()`` method).
]
if compare[name[self]._brain._current_user is constant[None]] begin[:]
call[name[self]._warn, parameter[constant[current_user() is meant to be used from within a Python object macro!]]]
return[name[self]._brain._current_user]
|
keyword[def] identifier[current_user] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[_brain] . identifier[_current_user] keyword[is] keyword[None] :
identifier[self] . identifier[_warn] ( literal[string] )
keyword[return] identifier[self] . identifier[_brain] . identifier[_current_user]
|
def current_user(self):
"""Retrieve the user ID of the current user talking to your bot.
This is mostly useful inside of a Python object macro to get the user
ID of the person who caused the object macro to be invoked (i.e. to
set a variable for that user from within the object).
This will return ``None`` if used outside of the context of getting a
reply (the value is unset at the end of the ``reply()`` method).
"""
if self._brain._current_user is None:
# They're doing it wrong.
self._warn('current_user() is meant to be used from within a Python object macro!') # depends on [control=['if'], data=[]]
return self._brain._current_user
|
def tower_layout(graph, height='freeenergy', scale=None, center=None, dim=2):
"""
Position all nodes of graph stacked on top of each other.
Parameters
----------
graph : `networkx.Graph` or `list` of nodes
A position will be assigned to every node in graph.
height : `str` or `None`, optional
The node attribute that holds the numerical value used for the node
height. This defaults to ``'freeenergy'``. If `None`, all node heights
are set to zero.
scale : number, optional
Scale factor for positions.
center : array-like, optional
Coordinate pair around which to center the layout. Default is the
origin.
dim : `int`
Dimension of layout. If `dim` > 2, the remaining dimensions are set to
zero in the returned positions.
Returns
-------
pos : mapping
A mapping of positions keyed by node.
Examples
--------
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.atoms import create_data, read_cclib
>>> from pyrrole.drawing import tower_layout
>>> data = create_data(
... read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"),
... read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)"))
>>> digraph = (ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
... .to_digraph())
>>> layout = tower_layout(digraph)
>>> layout['AcOH(g)']
array([ 0. , -228.56450866])
Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes:
>>> layout = tower_layout(digraph, scale=1)
>>> layout['AcOH(g)'][1] <= 1.
True
"""
    # TODO: private functions of other packages should not be used.
graph, center = _nx.drawing.layout._process_params(graph, center, dim)
num_nodes = len(graph)
if num_nodes == 0:
return {}
elif num_nodes == 1:
return {_nx.utils.arbitrary_element(graph): center}
paddims = max(0, (dim - 2))
if height is None:
y = _np.zeros(len(graph))
else:
y = _np.array([data for node, data in graph.nodes(data=height)])
pos_arr = _np.column_stack([_np.zeros((num_nodes, 1)), y,
_np.zeros((num_nodes, paddims))])
if scale is not None:
pos_arr = _nx.drawing.layout.rescale_layout(pos_arr,
scale=scale) + center
pos = dict(zip(graph, pos_arr))
# TODO: make test
return pos
|
def function[tower_layout, parameter[graph, height, scale, center, dim]]:
constant[
Position all nodes of graph stacked on top of each other.
Parameters
----------
graph : `networkx.Graph` or `list` of nodes
A position will be assigned to every node in graph.
height : `str` or `None`, optional
The node attribute that holds the numerical value used for the node
height. This defaults to ``'freeenergy'``. If `None`, all node heights
are set to zero.
scale : number, optional
Scale factor for positions.
center : array-like, optional
Coordinate pair around which to center the layout. Default is the
origin.
dim : `int`
Dimension of layout. If `dim` > 2, the remaining dimensions are set to
zero in the returned positions.
Returns
-------
pos : mapping
A mapping of positions keyed by node.
Examples
--------
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.atoms import create_data, read_cclib
>>> from pyrrole.drawing import tower_layout
>>> data = create_data(
... read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"),
... read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)"))
>>> digraph = (ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
... .to_digraph())
>>> layout = tower_layout(digraph)
>>> layout['AcOH(g)']
array([ 0. , -228.56450866])
Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes:
>>> layout = tower_layout(digraph, scale=1)
>>> layout['AcOH(g)'][1] <= 1.
True
]
<ast.Tuple object at 0x7da1b0a21ea0> assign[=] call[name[_nx].drawing.layout._process_params, parameter[name[graph], name[center], name[dim]]]
variable[num_nodes] assign[=] call[name[len], parameter[name[graph]]]
if compare[name[num_nodes] equal[==] constant[0]] begin[:]
return[dictionary[[], []]]
variable[paddims] assign[=] call[name[max], parameter[constant[0], binary_operation[name[dim] - constant[2]]]]
if compare[name[height] is constant[None]] begin[:]
variable[y] assign[=] call[name[_np].zeros, parameter[call[name[len], parameter[name[graph]]]]]
variable[pos_arr] assign[=] call[name[_np].column_stack, parameter[list[[<ast.Call object at 0x7da1b0bd6b30>, <ast.Name object at 0x7da1b0bd4220>, <ast.Call object at 0x7da1b0bd4940>]]]]
if compare[name[scale] is_not constant[None]] begin[:]
variable[pos_arr] assign[=] binary_operation[call[name[_nx].drawing.layout.rescale_layout, parameter[name[pos_arr]]] + name[center]]
variable[pos] assign[=] call[name[dict], parameter[call[name[zip], parameter[name[graph], name[pos_arr]]]]]
return[name[pos]]
|
keyword[def] identifier[tower_layout] ( identifier[graph] , identifier[height] = literal[string] , identifier[scale] = keyword[None] , identifier[center] = keyword[None] , identifier[dim] = literal[int] ):
literal[string]
identifier[graph] , identifier[center] = identifier[_nx] . identifier[drawing] . identifier[layout] . identifier[_process_params] ( identifier[graph] , identifier[center] , identifier[dim] )
identifier[num_nodes] = identifier[len] ( identifier[graph] )
keyword[if] identifier[num_nodes] == literal[int] :
keyword[return] {}
keyword[elif] identifier[num_nodes] == literal[int] :
keyword[return] { identifier[_nx] . identifier[utils] . identifier[arbitrary_element] ( identifier[graph] ): identifier[center] }
identifier[paddims] = identifier[max] ( literal[int] ,( identifier[dim] - literal[int] ))
keyword[if] identifier[height] keyword[is] keyword[None] :
identifier[y] = identifier[_np] . identifier[zeros] ( identifier[len] ( identifier[graph] ))
keyword[else] :
identifier[y] = identifier[_np] . identifier[array] ([ identifier[data] keyword[for] identifier[node] , identifier[data] keyword[in] identifier[graph] . identifier[nodes] ( identifier[data] = identifier[height] )])
identifier[pos_arr] = identifier[_np] . identifier[column_stack] ([ identifier[_np] . identifier[zeros] (( identifier[num_nodes] , literal[int] )), identifier[y] ,
identifier[_np] . identifier[zeros] (( identifier[num_nodes] , identifier[paddims] ))])
keyword[if] identifier[scale] keyword[is] keyword[not] keyword[None] :
identifier[pos_arr] = identifier[_nx] . identifier[drawing] . identifier[layout] . identifier[rescale_layout] ( identifier[pos_arr] ,
identifier[scale] = identifier[scale] )+ identifier[center]
identifier[pos] = identifier[dict] ( identifier[zip] ( identifier[graph] , identifier[pos_arr] ))
keyword[return] identifier[pos]
|
def tower_layout(graph, height='freeenergy', scale=None, center=None, dim=2):
"""
Position all nodes of graph stacked on top of each other.
Parameters
----------
graph : `networkx.Graph` or `list` of nodes
A position will be assigned to every node in graph.
height : `str` or `None`, optional
The node attribute that holds the numerical value used for the node
height. This defaults to ``'freeenergy'``. If `None`, all node heights
are set to zero.
scale : number, optional
Scale factor for positions.
center : array-like, optional
Coordinate pair around which to center the layout. Default is the
origin.
dim : `int`
Dimension of layout. If `dim` > 2, the remaining dimensions are set to
zero in the returned positions.
Returns
-------
pos : mapping
A mapping of positions keyed by node.
Examples
--------
>>> from pyrrole import ChemicalSystem
>>> from pyrrole.atoms import create_data, read_cclib
>>> from pyrrole.drawing import tower_layout
>>> data = create_data(
... read_cclib("data/acetate/acetic_acid.out", "AcOH(g)"),
... read_cclib("data/acetate/acetic_acid@water.out", "AcOH(aq)"))
>>> digraph = (ChemicalSystem("AcOH(g) <=> AcOH(aq)", data)
... .to_digraph())
>>> layout = tower_layout(digraph)
>>> layout['AcOH(g)']
array([ 0. , -228.56450866])
Passing ``scale=1`` means scaling positions to ``(-1, 1)`` in all axes:
>>> layout = tower_layout(digraph, scale=1)
>>> layout['AcOH(g)'][1] <= 1.
True
"""
    # TODO: private functions of other packages should not be used.
(graph, center) = _nx.drawing.layout._process_params(graph, center, dim)
num_nodes = len(graph)
if num_nodes == 0:
return {} # depends on [control=['if'], data=[]]
elif num_nodes == 1:
return {_nx.utils.arbitrary_element(graph): center} # depends on [control=['if'], data=[]]
paddims = max(0, dim - 2)
if height is None:
y = _np.zeros(len(graph)) # depends on [control=['if'], data=[]]
else:
y = _np.array([data for (node, data) in graph.nodes(data=height)])
pos_arr = _np.column_stack([_np.zeros((num_nodes, 1)), y, _np.zeros((num_nodes, paddims))])
if scale is not None:
pos_arr = _nx.drawing.layout.rescale_layout(pos_arr, scale=scale) + center # depends on [control=['if'], data=['scale']]
pos = dict(zip(graph, pos_arr))
# TODO: make test
return pos
|
def do_unalias(self, arg):
"""unalias name
Delete the specified alias.
"""
args = arg.split()
if len(args) == 0: return
if args[0] in self.aliases:
del self.aliases[args[0]]
|
def function[do_unalias, parameter[self, arg]]:
constant[unalias name
Delete the specified alias.
]
variable[args] assign[=] call[name[arg].split, parameter[]]
if compare[call[name[len], parameter[name[args]]] equal[==] constant[0]] begin[:]
return[None]
if compare[call[name[args]][constant[0]] in name[self].aliases] begin[:]
<ast.Delete object at 0x7da1b0e301f0>
|
keyword[def] identifier[do_unalias] ( identifier[self] , identifier[arg] ):
literal[string]
identifier[args] = identifier[arg] . identifier[split] ()
keyword[if] identifier[len] ( identifier[args] )== literal[int] : keyword[return]
keyword[if] identifier[args] [ literal[int] ] keyword[in] identifier[self] . identifier[aliases] :
keyword[del] identifier[self] . identifier[aliases] [ identifier[args] [ literal[int] ]]
|
def do_unalias(self, arg):
"""unalias name
Delete the specified alias.
"""
args = arg.split()
if len(args) == 0:
return # depends on [control=['if'], data=[]]
if args[0] in self.aliases:
del self.aliases[args[0]] # depends on [control=['if'], data=[]]
|
def add_sched_block_instance(self, config_dict):
"""Add Scheduling Block to the database.
Args:
config_dict (dict): SBI configuration
"""
# Get schema for validation
schema = self._get_schema()
LOG.debug('Adding SBI with config: %s', config_dict)
# Validates the schema
validate(config_dict, schema)
# Add status field and value to the data
updated_block = self._add_status(config_dict)
# Splitting into different names and fields before
# adding to the database
scheduling_block_data, processing_block_data = \
self._split_sched_block_instance(updated_block)
# Adding Scheduling block instance with id
name = "scheduling_block:" + updated_block["id"]
self._db.set_specified_values(name, scheduling_block_data)
        # Add an event to the scheduling block event list to notify
# of a new scheduling block being added to the db.
self._db.push_event(self.scheduling_event_name,
updated_block["status"],
updated_block["id"])
# Adding Processing block with id
for value in processing_block_data:
name = ("scheduling_block:" + updated_block["id"] +
":processing_block:" + value['id'])
self._db.set_specified_values(name, value)
            # Add an event to the processing block event list to notify
# of a new processing block being added to the db.
self._db.push_event(self.processing_event_name,
value["status"],
value["id"])
|
def function[add_sched_block_instance, parameter[self, config_dict]]:
constant[Add Scheduling Block to the database.
Args:
config_dict (dict): SBI configuration
]
variable[schema] assign[=] call[name[self]._get_schema, parameter[]]
call[name[LOG].debug, parameter[constant[Adding SBI with config: %s], name[config_dict]]]
call[name[validate], parameter[name[config_dict], name[schema]]]
variable[updated_block] assign[=] call[name[self]._add_status, parameter[name[config_dict]]]
<ast.Tuple object at 0x7da18bc70a30> assign[=] call[name[self]._split_sched_block_instance, parameter[name[updated_block]]]
variable[name] assign[=] binary_operation[constant[scheduling_block:] + call[name[updated_block]][constant[id]]]
call[name[self]._db.set_specified_values, parameter[name[name], name[scheduling_block_data]]]
call[name[self]._db.push_event, parameter[name[self].scheduling_event_name, call[name[updated_block]][constant[status]], call[name[updated_block]][constant[id]]]]
for taget[name[value]] in starred[name[processing_block_data]] begin[:]
variable[name] assign[=] binary_operation[binary_operation[binary_operation[constant[scheduling_block:] + call[name[updated_block]][constant[id]]] + constant[:processing_block:]] + call[name[value]][constant[id]]]
call[name[self]._db.set_specified_values, parameter[name[name], name[value]]]
call[name[self]._db.push_event, parameter[name[self].processing_event_name, call[name[value]][constant[status]], call[name[value]][constant[id]]]]
|
keyword[def] identifier[add_sched_block_instance] ( identifier[self] , identifier[config_dict] ):
literal[string]
identifier[schema] = identifier[self] . identifier[_get_schema] ()
identifier[LOG] . identifier[debug] ( literal[string] , identifier[config_dict] )
identifier[validate] ( identifier[config_dict] , identifier[schema] )
identifier[updated_block] = identifier[self] . identifier[_add_status] ( identifier[config_dict] )
identifier[scheduling_block_data] , identifier[processing_block_data] = identifier[self] . identifier[_split_sched_block_instance] ( identifier[updated_block] )
identifier[name] = literal[string] + identifier[updated_block] [ literal[string] ]
identifier[self] . identifier[_db] . identifier[set_specified_values] ( identifier[name] , identifier[scheduling_block_data] )
identifier[self] . identifier[_db] . identifier[push_event] ( identifier[self] . identifier[scheduling_event_name] ,
identifier[updated_block] [ literal[string] ],
identifier[updated_block] [ literal[string] ])
keyword[for] identifier[value] keyword[in] identifier[processing_block_data] :
identifier[name] =( literal[string] + identifier[updated_block] [ literal[string] ]+
literal[string] + identifier[value] [ literal[string] ])
identifier[self] . identifier[_db] . identifier[set_specified_values] ( identifier[name] , identifier[value] )
identifier[self] . identifier[_db] . identifier[push_event] ( identifier[self] . identifier[processing_event_name] ,
identifier[value] [ literal[string] ],
identifier[value] [ literal[string] ])
|
def add_sched_block_instance(self, config_dict):
"""Add Scheduling Block to the database.
Args:
config_dict (dict): SBI configuration
"""
# Get schema for validation
schema = self._get_schema()
LOG.debug('Adding SBI with config: %s', config_dict)
# Validates the schema
validate(config_dict, schema)
# Add status field and value to the data
updated_block = self._add_status(config_dict)
# Splitting into different names and fields before
# adding to the database
(scheduling_block_data, processing_block_data) = self._split_sched_block_instance(updated_block)
# Adding Scheduling block instance with id
name = 'scheduling_block:' + updated_block['id']
self._db.set_specified_values(name, scheduling_block_data)
    # Add an event to the scheduling block event list to notify
# of a new scheduling block being added to the db.
self._db.push_event(self.scheduling_event_name, updated_block['status'], updated_block['id'])
# Adding Processing block with id
for value in processing_block_data:
name = 'scheduling_block:' + updated_block['id'] + ':processing_block:' + value['id']
self._db.set_specified_values(name, value)
        # Add an event to the processing block event list to notify
# of a new processing block being added to the db.
self._db.push_event(self.processing_event_name, value['status'], value['id']) # depends on [control=['for'], data=['value']]
|
def fetch(self, method, url, data=None, expected_status_code=None):
        """Prepare the headers, encode the data, call the API and return
        the response it provides
"""
kwargs = self.prepare_request(method, url, data)
log.debug(json.dumps(kwargs))
response = getattr(requests, method.lower())(url, **kwargs)
log.debug(json.dumps(response.content))
if response.status_code >= 400:
response.raise_for_status()
if (expected_status_code
and response.status_code != expected_status_code):
raise NotExpectedStatusCode(self._get_error_reason(response))
return response
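# Hedged usage sketch (editor addition): `client` stands in for an instance
# of the class owning fetch(); the URL is illustrative.
response = client.fetch('GET', 'https://api.example.com/v1/items',
                        expected_status_code=200)
payload = response.json()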
|
def function[fetch, parameter[self, method, url, data, expected_status_code]]:
    constant[Prepare the headers, encode the data, call the API and return
    the response it provides
]
variable[kwargs] assign[=] call[name[self].prepare_request, parameter[name[method], name[url], name[data]]]
call[name[log].debug, parameter[call[name[json].dumps, parameter[name[kwargs]]]]]
variable[response] assign[=] call[call[name[getattr], parameter[name[requests], call[name[method].lower, parameter[]]]], parameter[name[url]]]
call[name[log].debug, parameter[call[name[json].dumps, parameter[name[response].content]]]]
if compare[name[response].status_code greater_or_equal[>=] constant[400]] begin[:]
call[name[response].raise_for_status, parameter[]]
if <ast.BoolOp object at 0x7da18f00e320> begin[:]
<ast.Raise object at 0x7da18f00dcf0>
return[name[response]]
|
keyword[def] identifier[fetch] ( identifier[self] , identifier[method] , identifier[url] , identifier[data] = keyword[None] , identifier[expected_status_code] = keyword[None] ):
literal[string]
identifier[kwargs] = identifier[self] . identifier[prepare_request] ( identifier[method] , identifier[url] , identifier[data] )
identifier[log] . identifier[debug] ( identifier[json] . identifier[dumps] ( identifier[kwargs] ))
identifier[response] = identifier[getattr] ( identifier[requests] , identifier[method] . identifier[lower] ())( identifier[url] ,** identifier[kwargs] )
identifier[log] . identifier[debug] ( identifier[json] . identifier[dumps] ( identifier[response] . identifier[content] ))
keyword[if] identifier[response] . identifier[status_code] >= literal[int] :
identifier[response] . identifier[raise_for_status] ()
keyword[if] ( identifier[expected_status_code]
keyword[and] identifier[response] . identifier[status_code] != identifier[expected_status_code] ):
keyword[raise] identifier[NotExpectedStatusCode] ( identifier[self] . identifier[_get_error_reason] ( identifier[response] ))
keyword[return] identifier[response]
|
def fetch(self, method, url, data=None, expected_status_code=None):
        """Prepare the headers, encode the data, call the API and return
        the response it provides
"""
kwargs = self.prepare_request(method, url, data)
log.debug(json.dumps(kwargs))
response = getattr(requests, method.lower())(url, **kwargs)
log.debug(json.dumps(response.content))
if response.status_code >= 400:
response.raise_for_status() # depends on [control=['if'], data=[]]
if expected_status_code and response.status_code != expected_status_code:
raise NotExpectedStatusCode(self._get_error_reason(response)) # depends on [control=['if'], data=[]]
return response
|
def validate_empty_attributes(fully_qualified_name: str, spec: Dict[str, Any],
                              *attributes: str) -> List[EmptyAttributeError]:
    """ Validates that a set of attributes does not contain empty values """
return [
EmptyAttributeError(fully_qualified_name, spec, attribute)
for attribute in attributes
if not spec.get(attribute, None)
]
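# Hedged usage sketch (editor addition): the spec below is illustrative.
spec = {'name': 'daily_load', 'schedule': ''}
errors = validate_empty_attributes('pipelines.daily_load', spec,
                                   'name', 'schedule')
# -> one EmptyAttributeError for 'schedule'; 'name' is non-empty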
|
def function[validate_empty_attributes, parameter[fully_qualified_name, spec]]:
    constant[ Validates that a set of attributes does not contain empty values ]
return[<ast.ListComp object at 0x7da20c992f50>]
|
keyword[def] identifier[validate_empty_attributes] ( identifier[fully_qualified_name] : identifier[str] , identifier[spec] : identifier[Dict] [ identifier[str] , identifier[Any] ],
* identifier[attributes] : identifier[str] )-> identifier[List] [ identifier[EmptyAttributeError] ]:
literal[string]
keyword[return] [
identifier[EmptyAttributeError] ( identifier[fully_qualified_name] , identifier[spec] , identifier[attribute] )
keyword[for] identifier[attribute] keyword[in] identifier[attributes]
keyword[if] keyword[not] identifier[spec] . identifier[get] ( identifier[attribute] , keyword[None] )
]
|
def validate_empty_attributes(fully_qualified_name: str, spec: Dict[str, Any], *attributes: str) -> List[EmptyAttributeError]:
    """ Validates that a set of attributes does not contain empty values """
return [EmptyAttributeError(fully_qualified_name, spec, attribute) for attribute in attributes if not spec.get(attribute, None)]
|
def write(self, message):
"""
(coroutine)
Write a single message into the pipe.
"""
if self.done_f.done():
raise BrokenPipeError
try:
yield From(write_message_to_pipe(self.pipe_instance.pipe_handle, message))
except BrokenPipeError:
self.done_f.set_result(None)
raise
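# Hedged usage sketch (editor addition): a trollius-style coroutine matching
# the yield-From idiom used above; `pipe` is an instance owning write().
def send_greeting(pipe):
    try:
        yield From(pipe.write(b'hello'))
    except BrokenPipeError:
        pass  # peer disconnected; done_f has already been resolved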
|
def function[write, parameter[self, message]]:
constant[
(coroutine)
Write a single message into the pipe.
]
if call[name[self].done_f.done, parameter[]] begin[:]
<ast.Raise object at 0x7da1b206a530>
<ast.Try object at 0x7da1b206b700>
|
keyword[def] identifier[write] ( identifier[self] , identifier[message] ):
literal[string]
keyword[if] identifier[self] . identifier[done_f] . identifier[done] ():
keyword[raise] identifier[BrokenPipeError]
keyword[try] :
keyword[yield] identifier[From] ( identifier[write_message_to_pipe] ( identifier[self] . identifier[pipe_instance] . identifier[pipe_handle] , identifier[message] ))
keyword[except] identifier[BrokenPipeError] :
identifier[self] . identifier[done_f] . identifier[set_result] ( keyword[None] )
keyword[raise]
|
def write(self, message):
"""
(coroutine)
Write a single message into the pipe.
"""
if self.done_f.done():
raise BrokenPipeError # depends on [control=['if'], data=[]]
try:
yield From(write_message_to_pipe(self.pipe_instance.pipe_handle, message)) # depends on [control=['try'], data=[]]
except BrokenPipeError:
self.done_f.set_result(None)
raise # depends on [control=['except'], data=[]]
|
def get_info(brain_or_object, endpoint=None, complete=False):
"""Extract the data from the catalog brain or object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:param endpoint: The named URL endpoint for the root of the items
:type endpoint: str/unicode
:param complete: Flag to wake up the object and fetch all data
:type complete: bool
:returns: Data mapping for the object/catalog brain
:rtype: dict
"""
# also extract the brain data for objects
if not is_brain(brain_or_object):
brain_or_object = get_brain(brain_or_object)
if brain_or_object is None:
logger.warn("Couldn't find/fetch brain of {}".format(brain_or_object))
return {}
complete = True
# When querying uid catalog we have to be sure that we skip the objects
# used to relate two or more objects
if is_relationship_object(brain_or_object):
logger.warn("Skipping relationship object {}".format(repr(brain_or_object)))
return {}
# extract the data from the initial object with the proper adapter
info = IInfo(brain_or_object).to_dict()
# update with url info (always included)
url_info = get_url_info(brain_or_object, endpoint)
info.update(url_info)
# include the parent url info
parent = get_parent_info(brain_or_object)
info.update(parent)
# add the complete data of the object if requested
# -> requires to wake up the object if it is a catalog brain
if complete:
# ensure we have a full content object
obj = api.get_object(brain_or_object)
# get the compatible adapter
adapter = IInfo(obj)
# update the data set with the complete information
info.update(adapter.to_dict())
# update the data set with the workflow information
# -> only possible if `?complete=yes&workflow=yes`
if req.get_workflow(False):
info.update(get_workflow_info(obj))
# # add sharing data if the user requested it
# # -> only possible if `?complete=yes`
# if req.get_sharing(False):
# sharing = get_sharing_info(obj)
# info.update({"sharing": sharing})
return info
|
def function[get_info, parameter[brain_or_object, endpoint, complete]]:
constant[Extract the data from the catalog brain or object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:param endpoint: The named URL endpoint for the root of the items
:type endpoint: str/unicode
:param complete: Flag to wake up the object and fetch all data
:type complete: bool
:returns: Data mapping for the object/catalog brain
:rtype: dict
]
if <ast.UnaryOp object at 0x7da18bcc8250> begin[:]
variable[brain_or_object] assign[=] call[name[get_brain], parameter[name[brain_or_object]]]
if compare[name[brain_or_object] is constant[None]] begin[:]
call[name[logger].warn, parameter[call[constant[Couldn't find/fetch brain of {}].format, parameter[name[brain_or_object]]]]]
return[dictionary[[], []]]
variable[complete] assign[=] constant[True]
if call[name[is_relationship_object], parameter[name[brain_or_object]]] begin[:]
call[name[logger].warn, parameter[call[constant[Skipping relationship object {}].format, parameter[call[name[repr], parameter[name[brain_or_object]]]]]]]
return[dictionary[[], []]]
variable[info] assign[=] call[call[name[IInfo], parameter[name[brain_or_object]]].to_dict, parameter[]]
variable[url_info] assign[=] call[name[get_url_info], parameter[name[brain_or_object], name[endpoint]]]
call[name[info].update, parameter[name[url_info]]]
variable[parent] assign[=] call[name[get_parent_info], parameter[name[brain_or_object]]]
call[name[info].update, parameter[name[parent]]]
if name[complete] begin[:]
variable[obj] assign[=] call[name[api].get_object, parameter[name[brain_or_object]]]
variable[adapter] assign[=] call[name[IInfo], parameter[name[obj]]]
call[name[info].update, parameter[call[name[adapter].to_dict, parameter[]]]]
if call[name[req].get_workflow, parameter[constant[False]]] begin[:]
call[name[info].update, parameter[call[name[get_workflow_info], parameter[name[obj]]]]]
return[name[info]]
|
keyword[def] identifier[get_info] ( identifier[brain_or_object] , identifier[endpoint] = keyword[None] , identifier[complete] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[is_brain] ( identifier[brain_or_object] ):
identifier[brain_or_object] = identifier[get_brain] ( identifier[brain_or_object] )
keyword[if] identifier[brain_or_object] keyword[is] keyword[None] :
identifier[logger] . identifier[warn] ( literal[string] . identifier[format] ( identifier[brain_or_object] ))
keyword[return] {}
identifier[complete] = keyword[True]
keyword[if] identifier[is_relationship_object] ( identifier[brain_or_object] ):
identifier[logger] . identifier[warn] ( literal[string] . identifier[format] ( identifier[repr] ( identifier[brain_or_object] )))
keyword[return] {}
identifier[info] = identifier[IInfo] ( identifier[brain_or_object] ). identifier[to_dict] ()
identifier[url_info] = identifier[get_url_info] ( identifier[brain_or_object] , identifier[endpoint] )
identifier[info] . identifier[update] ( identifier[url_info] )
identifier[parent] = identifier[get_parent_info] ( identifier[brain_or_object] )
identifier[info] . identifier[update] ( identifier[parent] )
keyword[if] identifier[complete] :
identifier[obj] = identifier[api] . identifier[get_object] ( identifier[brain_or_object] )
identifier[adapter] = identifier[IInfo] ( identifier[obj] )
identifier[info] . identifier[update] ( identifier[adapter] . identifier[to_dict] ())
keyword[if] identifier[req] . identifier[get_workflow] ( keyword[False] ):
identifier[info] . identifier[update] ( identifier[get_workflow_info] ( identifier[obj] ))
keyword[return] identifier[info]
|
def get_info(brain_or_object, endpoint=None, complete=False):
"""Extract the data from the catalog brain or object
:param brain_or_object: A single catalog brain or content object
:type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
:param endpoint: The named URL endpoint for the root of the items
:type endpoint: str/unicode
:param complete: Flag to wake up the object and fetch all data
:type complete: bool
:returns: Data mapping for the object/catalog brain
:rtype: dict
"""
# also extract the brain data for objects
if not is_brain(brain_or_object):
brain_or_object = get_brain(brain_or_object)
if brain_or_object is None:
logger.warn("Couldn't find/fetch brain of {}".format(brain_or_object))
return {} # depends on [control=['if'], data=['brain_or_object']]
complete = True # depends on [control=['if'], data=[]]
# When querying uid catalog we have to be sure that we skip the objects
# used to relate two or more objects
if is_relationship_object(brain_or_object):
logger.warn('Skipping relationship object {}'.format(repr(brain_or_object)))
return {} # depends on [control=['if'], data=[]]
# extract the data from the initial object with the proper adapter
info = IInfo(brain_or_object).to_dict()
# update with url info (always included)
url_info = get_url_info(brain_or_object, endpoint)
info.update(url_info)
# include the parent url info
parent = get_parent_info(brain_or_object)
info.update(parent)
# add the complete data of the object if requested
# -> requires to wake up the object if it is a catalog brain
if complete:
# ensure we have a full content object
obj = api.get_object(brain_or_object)
# get the compatible adapter
adapter = IInfo(obj)
# update the data set with the complete information
info.update(adapter.to_dict())
# update the data set with the workflow information
# -> only possible if `?complete=yes&workflow=yes`
if req.get_workflow(False):
info.update(get_workflow_info(obj)) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
# # add sharing data if the user requested it
# # -> only possible if `?complete=yes`
# if req.get_sharing(False):
# sharing = get_sharing_info(obj)
# info.update({"sharing": sharing})
return info
|
def to_array(self):
"""
Serializes this ShippingAddress to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(ShippingAddress, self).to_array()
array['country_code'] = u(self.country_code) # py2: type unicode, py3: type str
array['state'] = u(self.state) # py2: type unicode, py3: type str
array['city'] = u(self.city) # py2: type unicode, py3: type str
array['street_line1'] = u(self.street_line1) # py2: type unicode, py3: type str
array['street_line2'] = u(self.street_line2) # py2: type unicode, py3: type str
array['post_code'] = u(self.post_code) # py2: type unicode, py3: type str
return array
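# Hedged usage sketch (editor addition): the constructor arguments are
# inferred from the serialized fields and may not match the real signature.
addr = ShippingAddress(country_code='DE', state='BE', city='Berlin',
                       street_line1='Unter den Linden 1', street_line2='',
                       post_code='10117')
print(addr.to_array()['city'])  # 'Berlin'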
|
def function[to_array, parameter[self]]:
constant[
Serializes this ShippingAddress to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
]
variable[array] assign[=] call[call[name[super], parameter[name[ShippingAddress], name[self]]].to_array, parameter[]]
call[name[array]][constant[country_code]] assign[=] call[name[u], parameter[name[self].country_code]]
call[name[array]][constant[state]] assign[=] call[name[u], parameter[name[self].state]]
call[name[array]][constant[city]] assign[=] call[name[u], parameter[name[self].city]]
call[name[array]][constant[street_line1]] assign[=] call[name[u], parameter[name[self].street_line1]]
call[name[array]][constant[street_line2]] assign[=] call[name[u], parameter[name[self].street_line2]]
call[name[array]][constant[post_code]] assign[=] call[name[u], parameter[name[self].post_code]]
return[name[array]]
|
keyword[def] identifier[to_array] ( identifier[self] ):
literal[string]
identifier[array] = identifier[super] ( identifier[ShippingAddress] , identifier[self] ). identifier[to_array] ()
identifier[array] [ literal[string] ]= identifier[u] ( identifier[self] . identifier[country_code] )
identifier[array] [ literal[string] ]= identifier[u] ( identifier[self] . identifier[state] )
identifier[array] [ literal[string] ]= identifier[u] ( identifier[self] . identifier[city] )
identifier[array] [ literal[string] ]= identifier[u] ( identifier[self] . identifier[street_line1] )
identifier[array] [ literal[string] ]= identifier[u] ( identifier[self] . identifier[street_line2] )
identifier[array] [ literal[string] ]= identifier[u] ( identifier[self] . identifier[post_code] )
keyword[return] identifier[array]
|
def to_array(self):
"""
Serializes this ShippingAddress to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(ShippingAddress, self).to_array()
array['country_code'] = u(self.country_code) # py2: type unicode, py3: type str
array['state'] = u(self.state) # py2: type unicode, py3: type str
array['city'] = u(self.city) # py2: type unicode, py3: type str
array['street_line1'] = u(self.street_line1) # py2: type unicode, py3: type str
array['street_line2'] = u(self.street_line2) # py2: type unicode, py3: type str
array['post_code'] = u(self.post_code) # py2: type unicode, py3: type str
return array
|
def delete_os_out_nwk(self, tenant_id, fw_dict, is_fw_virt=False):
        """Deletes the Openstack Out network and updates the DB. """
ret = True
tenant_name = fw_dict.get('tenant_name')
try:
ret = self._delete_os_nwk(tenant_id, tenant_name, "out",
is_fw_virt=is_fw_virt)
except Exception as exc:
LOG.error("Deletion of Out Openstack Network failed tenant "
"%(tenant)s, Exception %(exc)s",
{'tenant': tenant_id, 'exc': str(exc)})
ret = False
# Updating the FW DB
if ret:
res = fw_const.OS_OUT_NETWORK_DEL_SUCCESS
else:
res = fw_const.OS_OUT_NETWORK_DEL_FAIL
self.update_fw_db_result(tenant_id, os_status=res)
return ret
|
def function[delete_os_out_nwk, parameter[self, tenant_id, fw_dict, is_fw_virt]]:
    constant[Deletes the Openstack Out network and updates the DB. ]
variable[ret] assign[=] constant[True]
variable[tenant_name] assign[=] call[name[fw_dict].get, parameter[constant[tenant_name]]]
<ast.Try object at 0x7da1b1c62ef0>
if name[ret] begin[:]
variable[res] assign[=] name[fw_const].OS_OUT_NETWORK_DEL_SUCCESS
call[name[self].update_fw_db_result, parameter[name[tenant_id]]]
return[name[ret]]
|
keyword[def] identifier[delete_os_out_nwk] ( identifier[self] , identifier[tenant_id] , identifier[fw_dict] , identifier[is_fw_virt] = keyword[False] ):
literal[string]
identifier[ret] = keyword[True]
identifier[tenant_name] = identifier[fw_dict] . identifier[get] ( literal[string] )
keyword[try] :
identifier[ret] = identifier[self] . identifier[_delete_os_nwk] ( identifier[tenant_id] , identifier[tenant_name] , literal[string] ,
identifier[is_fw_virt] = identifier[is_fw_virt] )
keyword[except] identifier[Exception] keyword[as] identifier[exc] :
identifier[LOG] . identifier[error] ( literal[string]
literal[string] ,
{ literal[string] : identifier[tenant_id] , literal[string] : identifier[str] ( identifier[exc] )})
identifier[ret] = keyword[False]
keyword[if] identifier[ret] :
identifier[res] = identifier[fw_const] . identifier[OS_OUT_NETWORK_DEL_SUCCESS]
keyword[else] :
identifier[res] = identifier[fw_const] . identifier[OS_OUT_NETWORK_DEL_FAIL]
identifier[self] . identifier[update_fw_db_result] ( identifier[tenant_id] , identifier[os_status] = identifier[res] )
keyword[return] identifier[ret]
|
def delete_os_out_nwk(self, tenant_id, fw_dict, is_fw_virt=False):
        """Deletes the Openstack Out network and updates the DB. """
ret = True
tenant_name = fw_dict.get('tenant_name')
try:
ret = self._delete_os_nwk(tenant_id, tenant_name, 'out', is_fw_virt=is_fw_virt) # depends on [control=['try'], data=[]]
except Exception as exc:
LOG.error('Deletion of Out Openstack Network failed tenant %(tenant)s, Exception %(exc)s', {'tenant': tenant_id, 'exc': str(exc)})
ret = False # depends on [control=['except'], data=['exc']]
# Updating the FW DB
if ret:
res = fw_const.OS_OUT_NETWORK_DEL_SUCCESS # depends on [control=['if'], data=[]]
else:
res = fw_const.OS_OUT_NETWORK_DEL_FAIL
self.update_fw_db_result(tenant_id, os_status=res)
return ret
|
def transformed(self, t):
"""
Transforms an m-dimensional Rect using t, an nxn matrix that can
transform vectors in the form: [x, y, z, …, 1].
The Rect is padded to n dimensions.
"""
assert t.shape[0] == t.shape[1]
extra_dimensions = t.shape[0] - self.dimensions - 1
def transform(a):
return t.dot(np.concatenate(
(a, [0] * extra_dimensions, [1]),
axis=0
))[:self.dimensions]
return Rect(transform(self.mins), transform(self.maxes))
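# Hedged usage sketch (editor addition): assumes Rect(mins, maxes) wraps
# numpy arrays and exposes .dimensions. A 3x3 homogeneous matrix translates
# the 2-D unit square by (3, 4).
import numpy as np
t = np.array([[1.0, 0.0, 3.0],
              [0.0, 1.0, 4.0],
              [0.0, 0.0, 1.0]])
unit = Rect(np.array([0.0, 0.0]), np.array([1.0, 1.0]))
moved = unit.transformed(t)  # mins -> [3., 4.], maxes -> [4., 5.]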
|
def function[transformed, parameter[self, t]]:
constant[
Transforms an m-dimensional Rect using t, an nxn matrix that can
transform vectors in the form: [x, y, z, …, 1].
The Rect is padded to n dimensions.
]
assert[compare[call[name[t].shape][constant[0]] equal[==] call[name[t].shape][constant[1]]]]
variable[extra_dimensions] assign[=] binary_operation[binary_operation[call[name[t].shape][constant[0]] - name[self].dimensions] - constant[1]]
def function[transform, parameter[a]]:
return[call[call[name[t].dot, parameter[call[name[np].concatenate, parameter[tuple[[<ast.Name object at 0x7da18fe91120>, <ast.BinOp object at 0x7da18fe91690>, <ast.List object at 0x7da18fe91210>]]]]]]][<ast.Slice object at 0x7da18fe90490>]]
return[call[name[Rect], parameter[call[name[transform], parameter[name[self].mins]], call[name[transform], parameter[name[self].maxes]]]]]
|
keyword[def] identifier[transformed] ( identifier[self] , identifier[t] ):
literal[string]
keyword[assert] identifier[t] . identifier[shape] [ literal[int] ]== identifier[t] . identifier[shape] [ literal[int] ]
identifier[extra_dimensions] = identifier[t] . identifier[shape] [ literal[int] ]- identifier[self] . identifier[dimensions] - literal[int]
keyword[def] identifier[transform] ( identifier[a] ):
keyword[return] identifier[t] . identifier[dot] ( identifier[np] . identifier[concatenate] (
( identifier[a] ,[ literal[int] ]* identifier[extra_dimensions] ,[ literal[int] ]),
identifier[axis] = literal[int]
))[: identifier[self] . identifier[dimensions] ]
keyword[return] identifier[Rect] ( identifier[transform] ( identifier[self] . identifier[mins] ), identifier[transform] ( identifier[self] . identifier[maxes] ))
|
def transformed(self, t):
"""
Transforms an m-dimensional Rect using t, an nxn matrix that can
transform vectors in the form: [x, y, z, …, 1].
The Rect is padded to n dimensions.
"""
assert t.shape[0] == t.shape[1]
extra_dimensions = t.shape[0] - self.dimensions - 1
def transform(a):
return t.dot(np.concatenate((a, [0] * extra_dimensions, [1]), axis=0))[:self.dimensions]
return Rect(transform(self.mins), transform(self.maxes))
|
def agg_wt_avg(mat, min_wt=0.01, corr_metric='spearman'):
''' Aggregate a set of replicate profiles into a single signature using
a weighted average.
Args:
mat (pandas df): a matrix of replicate profiles, where the columns are
samples and the rows are features; columns correspond to the
replicates of a single perturbagen
min_wt (float): Minimum raw weight when calculating weighted average
corr_metric (string): Spearman or Pearson; the correlation method
Returns:
out_sig (pandas series): weighted average values
upper_tri_df (pandas df): the correlations between each profile that went into the signature
raw_weights (pandas series): weights before normalization
weights (pandas series): weights after normalization
'''
assert mat.shape[1] > 0, "mat is empty! mat: {}".format(mat)
if mat.shape[1] == 1:
out_sig = mat
upper_tri_df = None
raw_weights = None
weights = None
else:
assert corr_metric in ["spearman", "pearson"]
# Make correlation matrix column wise
corr_mat = mat.corr(method=corr_metric)
# Save the values in the upper triangle
upper_tri_df = get_upper_triangle(corr_mat)
# Calculate weight per replicate
raw_weights, weights = calculate_weights(corr_mat, min_wt)
# Apply weights to values
weighted_values = mat * weights
out_sig = weighted_values.sum(axis=1)
return out_sig, upper_tri_df, raw_weights, weights
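# A minimal sketch of the final weighting step in isolation; the weights here
# are made-up values, since get_upper_triangle and calculate_weights are
# defined elsewhere in this module and not shown.
import pandas as pd

mat = pd.DataFrame({'rep1': [1.0, 2.0], 'rep2': [1.2, 1.8], 'rep3': [5.0, 9.0]})
weights = pd.Series({'rep1': 0.45, 'rep2': 0.45, 'rep3': 0.10})  # hypothetical
out_sig = (mat * weights).sum(axis=1)  # row-wise weighted average per feature
print(out_sig)  # 0: 1.49, 1: 2.61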
|
def function[agg_wt_avg, parameter[mat, min_wt, corr_metric]]:
constant[ Aggregate a set of replicate profiles into a single signature using
a weighted average.
Args:
mat (pandas df): a matrix of replicate profiles, where the columns are
samples and the rows are features; columns correspond to the
replicates of a single perturbagen
min_wt (float): Minimum raw weight when calculating weighted average
corr_metric (string): Spearman or Pearson; the correlation method
Returns:
out_sig (pandas series): weighted average values
upper_tri_df (pandas df): the correlations between each profile that went into the signature
raw_weights (pandas series): weights before normalization
weights (pandas series): weights after normalization
]
assert[compare[call[name[mat].shape][constant[1]] greater[>] constant[0]]]
if compare[call[name[mat].shape][constant[1]] equal[==] constant[1]] begin[:]
variable[out_sig] assign[=] name[mat]
variable[upper_tri_df] assign[=] constant[None]
variable[raw_weights] assign[=] constant[None]
variable[weights] assign[=] constant[None]
return[tuple[[<ast.Name object at 0x7da20c76dab0>, <ast.Name object at 0x7da20c76fd90>, <ast.Name object at 0x7da20c76f2e0>, <ast.Name object at 0x7da20c76dcc0>]]]
|
keyword[def] identifier[agg_wt_avg] ( identifier[mat] , identifier[min_wt] = literal[int] , identifier[corr_metric] = literal[string] ):
literal[string]
keyword[assert] identifier[mat] . identifier[shape] [ literal[int] ]> literal[int] , literal[string] . identifier[format] ( identifier[mat] )
keyword[if] identifier[mat] . identifier[shape] [ literal[int] ]== literal[int] :
identifier[out_sig] = identifier[mat]
identifier[upper_tri_df] = keyword[None]
identifier[raw_weights] = keyword[None]
identifier[weights] = keyword[None]
keyword[else] :
keyword[assert] identifier[corr_metric] keyword[in] [ literal[string] , literal[string] ]
identifier[corr_mat] = identifier[mat] . identifier[corr] ( identifier[method] = identifier[corr_metric] )
identifier[upper_tri_df] = identifier[get_upper_triangle] ( identifier[corr_mat] )
identifier[raw_weights] , identifier[weights] = identifier[calculate_weights] ( identifier[corr_mat] , identifier[min_wt] )
identifier[weighted_values] = identifier[mat] * identifier[weights]
identifier[out_sig] = identifier[weighted_values] . identifier[sum] ( identifier[axis] = literal[int] )
keyword[return] identifier[out_sig] , identifier[upper_tri_df] , identifier[raw_weights] , identifier[weights]
|
def agg_wt_avg(mat, min_wt=0.01, corr_metric='spearman'):
""" Aggregate a set of replicate profiles into a single signature using
a weighted average.
Args:
mat (pandas df): a matrix of replicate profiles, where the columns are
samples and the rows are features; columns correspond to the
replicates of a single perturbagen
min_wt (float): Minimum raw weight when calculating weighted average
corr_metric (string): Spearman or Pearson; the correlation method
Returns:
out_sig (pandas series): weighted average values
upper_tri_df (pandas df): the correlations between each profile that went into the signature
raw_weights (pandas series): weights before normalization
weights (pandas series): weights after normalization
"""
assert mat.shape[1] > 0, 'mat is empty! mat: {}'.format(mat)
if mat.shape[1] == 1:
out_sig = mat
upper_tri_df = None
raw_weights = None
weights = None # depends on [control=['if'], data=[]]
else:
assert corr_metric in ['spearman', 'pearson']
# Make correlation matrix column wise
corr_mat = mat.corr(method=corr_metric)
# Save the values in the upper triangle
upper_tri_df = get_upper_triangle(corr_mat)
# Calculate weight per replicate
(raw_weights, weights) = calculate_weights(corr_mat, min_wt)
# Apply weights to values
weighted_values = mat * weights
out_sig = weighted_values.sum(axis=1)
return (out_sig, upper_tri_df, raw_weights, weights)
|
def make_flat_df(frames, return_addresses):
"""
Takes a list of dictionaries, each representing what is returned from the
model at a particular time, and creates a dataframe whose columns correspond
to the keys of `return addresses`
Parameters
----------
frames: list of dictionaries
each dictionary represents the result of a prticular time in the model
return_addresses: a dictionary,
keys will be column names of the resulting dataframe, and are what the
user passed in as 'return_columns'. Values are a tuple:
(py_name, {coords dictionary}) which tells us where to look for the value
to put in that specific column.
Returns
-------
"""
# Todo: could also try a list comprehension here, or parallel apply
visited = list(map(lambda x: visit_addresses(x, return_addresses), frames))
return pd.DataFrame(visited)
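# Equivalent shape in isolation: visit_addresses (defined elsewhere) resolves
# each frame into a {column_name: value} dict, and pandas stacks those dicts
# into rows. With already-flat frames the mapping step is the identity.
import pandas as pd

frames = [{'stock': 10, 'flow': 1}, {'stock': 11, 'flow': 1}]
print(pd.DataFrame(frames))  # two rows, columns 'stock' and 'flow'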
|
def function[make_flat_df, parameter[frames, return_addresses]]:
constant[
Takes a list of dictionaries, each representing what is returned from the
model at a particular time, and creates a dataframe whose columns correspond
to the keys of `return_addresses`.
Parameters
----------
frames: list of dictionaries
each dictionary represents the result of a particular time in the model
return_addresses: a dictionary,
keys will be column names of the resulting dataframe, and are what the
user passed in as 'return_columns'. Values are a tuple:
(py_name, {coords dictionary}) which tells us where to look for the value
to put in that specific column.
Returns
-------
pd.DataFrame
one row per frame, one column per key of `return_addresses`
]
variable[visited] assign[=] call[name[list], parameter[call[name[map], parameter[<ast.Lambda object at 0x7da20eb294b0>, name[frames]]]]]
return[call[name[pd].DataFrame, parameter[name[visited]]]]
|
keyword[def] identifier[make_flat_df] ( identifier[frames] , identifier[return_addresses] ):
literal[string]
identifier[visited] = identifier[list] ( identifier[map] ( keyword[lambda] identifier[x] : identifier[visit_addresses] ( identifier[x] , identifier[return_addresses] ), identifier[frames] ))
keyword[return] identifier[pd] . identifier[DataFrame] ( identifier[visited] )
|
def make_flat_df(frames, return_addresses):
"""
Takes a list of dictionaries, each representing what is returned from the
model at a particular time, and creates a dataframe whose columns correspond
to the keys of `return addresses`
Parameters
----------
frames: list of dictionaries
each dictionary represents the result of a prticular time in the model
return_addresses: a dictionary,
keys will be column names of the resulting dataframe, and are what the
user passed in as 'return_columns'. Values are a tuple:
(py_name, {coords dictionary}) which tells us where to look for the value
to put in that specific column.
Returns
-------
"""
# Todo: could also try a list comprehension here, or parallel apply
visited = list(map(lambda x: visit_addresses(x, return_addresses), frames))
return pd.DataFrame(visited)
|
def add_splash_ids(splash_mapping_file_pth, conn, db_type='sqlite'):
""" Add splash ids to database (in case stored in a different file to the msp files like for MoNA)
Example:
>>> from msp2db.db import get_connection
>>> from msp2db.parse import add_splash_ids
>>> conn = get_connection('sqlite', 'library.db')
>>> add_splash_ids('splash_mapping_file.csv', conn, db_type='sqlite')
Args:
splash_mapping_file_pth (str): Path to the splash mapping file (needs to be CSV format with no header row;
should contain two columns: the first the accession number, the second the splash,
e.g. AU100601, splash10-0a4i-1900000000-d2bc1c887f6f99ed0f74)
"""
# get dictionary of accession and library_spectra_meta_id
cursor = conn.cursor()
cursor.execute("SELECT id, accession FROM library_spectra_meta")
accession_d = {row[1]: row[0] for row in cursor}
if db_type == 'sqlite':
type_sign = '?'
else:
type_sign = '%s'
rows = []
c = 0
# loop through splash mapping file
with open(splash_mapping_file_pth, "r") as f:
for line in f:
c += 1
line = line.rstrip()
line_l = line.split(',')
accession = line_l[0]
splash = line_l[1]
try:
aid = accession_d[accession]
except KeyError as e:
print("can't find accession {}".format(accession))
continue
row = (splash, aid)
rows.append(row)
if c > 200:
print(row)
cursor.executemany("UPDATE library_spectra_meta SET splash = {t} WHERE id = {t} ".format(t=type_sign), rows)
conn.commit()
rows = []
c = 0
cursor.executemany("UPDATE library_spectra_meta SET splash = {t} WHERE id = {t} ".format(t=type_sign), rows)
conn.commit()
|
def function[add_splash_ids, parameter[splash_mapping_file_pth, conn, db_type]]:
constant[ Add splash ids to database (in case stored in a different file to the msp files like for MoNA)
Example:
>>> from msp2db.db import get_connection
>>> from msp2db.parse import add_splash_ids
>>> conn = get_connection('sqlite', 'library.db')
>>> add_splash_ids('splash_mapping_file.csv', conn, db_type='sqlite')
Args:
splash_mapping_file_pth (str): Path to the splash mapping file (needs to be CSV format with no header row;
should contain two columns: the first the accession number, the second the splash,
e.g. AU100601, splash10-0a4i-1900000000-d2bc1c887f6f99ed0f74)
]
variable[cursor] assign[=] call[name[conn].cursor, parameter[]]
call[name[cursor].execute, parameter[constant[SELECT id, accession FROM library_spectra_meta]]]
variable[accession_d] assign[=] <ast.DictComp object at 0x7da204623910>
if compare[name[db_type] equal[==] constant[sqlite]] begin[:]
variable[type_sign] assign[=] constant[?]
variable[rows] assign[=] list[[]]
variable[c] assign[=] constant[0]
with call[name[open], parameter[name[splash_mapping_file_pth], constant[r]]] begin[:]
for taget[name[line]] in starred[name[f]] begin[:]
<ast.AugAssign object at 0x7da204623190>
variable[line] assign[=] call[name[line].rstrip, parameter[]]
variable[line_l] assign[=] call[name[line].split, parameter[constant[,]]]
variable[accession] assign[=] call[name[line_l]][constant[0]]
variable[splash] assign[=] call[name[line_l]][constant[1]]
<ast.Try object at 0x7da204620250>
variable[row] assign[=] tuple[[<ast.Name object at 0x7da204621720>, <ast.Name object at 0x7da204620e20>]]
call[name[rows].append, parameter[name[row]]]
if compare[name[c] greater[>] constant[200]] begin[:]
call[name[print], parameter[name[row]]]
call[name[cursor].executemany, parameter[call[constant[UPDATE library_spectra_meta SET splash = {t} WHERE id = {t} ].format, parameter[]], name[rows]]]
call[name[conn].commit, parameter[]]
variable[rows] assign[=] list[[]]
variable[c] assign[=] constant[0]
call[name[cursor].executemany, parameter[call[constant[UPDATE library_spectra_meta SET splash = {t} WHERE id = {t} ].format, parameter[]], name[rows]]]
call[name[conn].commit, parameter[]]
|
keyword[def] identifier[add_splash_ids] ( identifier[splash_mapping_file_pth] , identifier[conn] , identifier[db_type] = literal[string] ):
literal[string]
identifier[cursor] = identifier[conn] . identifier[cursor] ()
identifier[cursor] . identifier[execute] ( literal[string] )
identifier[accession_d] ={ identifier[row] [ literal[int] ]: identifier[row] [ literal[int] ] keyword[for] identifier[row] keyword[in] identifier[cursor] }
keyword[if] identifier[db_type] == literal[string] :
identifier[type_sign] = literal[string]
keyword[else] :
identifier[type_sign] = literal[string]
identifier[rows] =[]
identifier[c] = literal[int]
keyword[with] identifier[open] ( identifier[splash_mapping_file_pth] , literal[string] ) keyword[as] identifier[f] :
keyword[for] identifier[line] keyword[in] identifier[f] :
identifier[c] += literal[int]
identifier[line] = identifier[line] . identifier[rstrip] ()
identifier[line_l] = identifier[line] . identifier[split] ( literal[string] )
identifier[accession] = identifier[line_l] [ literal[int] ]
identifier[splash] = identifier[line_l] [ literal[int] ]
keyword[try] :
identifier[aid] = identifier[accession_d] [ identifier[accession] ]
keyword[except] identifier[KeyError] keyword[as] identifier[e] :
identifier[print] ( literal[string] . identifier[format] ( identifier[accession] ))
keyword[continue]
identifier[row] =( identifier[splash] , identifier[aid] )
identifier[rows] . identifier[append] ( identifier[row] )
keyword[if] identifier[c] > literal[int] :
identifier[print] ( identifier[row] )
identifier[cursor] . identifier[executemany] ( literal[string] . identifier[format] ( identifier[t] = identifier[type_sign] ), identifier[rows] )
identifier[conn] . identifier[commit] ()
identifier[rows] =[]
identifier[c] = literal[int]
identifier[cursor] . identifier[executemany] ( literal[string] . identifier[format] ( identifier[t] = identifier[type_sign] ), identifier[rows] )
identifier[conn] . identifier[commit] ()
|
def add_splash_ids(splash_mapping_file_pth, conn, db_type='sqlite'):
""" Add splash ids to database (in case stored in a different file to the msp files like for MoNA)
Example:
>>> from msp2db.db import get_connection
>>> from msp2db.parse import add_splash_ids
>>> conn = get_connection('sqlite', 'library.db')
>>> add_splash_ids('splash_mapping_file.csv', conn, db_type='sqlite')
Args:
splash_mapping_file_pth (str): Path to the splash mapping file (needs to be CSV format with no header row;
should contain two columns: the first the accession number, the second the splash,
e.g. AU100601, splash10-0a4i-1900000000-d2bc1c887f6f99ed0f74)
"""
# get dictionary of accession and library_spectra_meta_id
cursor = conn.cursor()
cursor.execute('SELECT id, accession FROM library_spectra_meta')
accession_d = {row[1]: row[0] for row in cursor}
if db_type == 'sqlite':
type_sign = '?' # depends on [control=['if'], data=[]]
else:
type_sign = '%s'
rows = []
c = 0
# loop through splash mapping file
with open(splash_mapping_file_pth, 'r') as f:
for line in f:
c += 1
line = line.rstrip()
line_l = line.split(',')
accession = line_l[0]
splash = line_l[1]
try:
aid = accession_d[accession] # depends on [control=['try'], data=[]]
except KeyError as e:
print("can't find accession {}".format(accession))
continue # depends on [control=['except'], data=[]]
row = (splash, aid)
rows.append(row)
if c > 200:
print(row)
cursor.executemany('UPDATE library_spectra_meta SET splash = {t} WHERE id = {t} '.format(t=type_sign), rows)
conn.commit()
rows = []
c = 0 # depends on [control=['if'], data=['c']] # depends on [control=['for'], data=['line']] # depends on [control=['with'], data=['f']]
cursor.executemany('UPDATE library_spectra_meta SET splash = {t} WHERE id = {t} '.format(t=type_sign), rows)
conn.commit()
|
def timeout(seconds=None, use_signals=True, timeout_exception=TimeoutError, exception_message=None):
"""Add a timeout parameter to a function and return it.
:param seconds: optional time limit in seconds or fractions of a second. If None is passed, no timeout is applied.
This adds some flexibility to the usage: you can disable timing out depending on the settings.
:type seconds: float
:param use_signals: flag indicating whether signals should be used to time the function out, or the multiprocessing module.
When using multiprocessing, timeout granularity is limited to 10ths of a second.
:type use_signals: bool
:raises: TimeoutError if time limit is reached
It is illegal to pass anything other than a function as the first
parameter. The function is wrapped and returned to the caller.
"""
def decorate(function):
if not seconds:
return function
if use_signals:
def handler(signum, frame):
_raise_exception(timeout_exception, exception_message)
@wraps(function)
def new_function(*args, **kwargs):
new_seconds = kwargs.pop('timeout', seconds)
if new_seconds:
old = signal.signal(signal.SIGALRM, handler)
signal.setitimer(signal.ITIMER_REAL, new_seconds)
try:
return function(*args, **kwargs)
finally:
if new_seconds:
signal.setitimer(signal.ITIMER_REAL, 0)
signal.signal(signal.SIGALRM, old)
return new_function
else:
@wraps(function)
def new_function(*args, **kwargs):
timeout_wrapper = _Timeout(function, timeout_exception, exception_message, seconds)
return timeout_wrapper(*args, **kwargs)
return new_function
return decorate
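# Usage sketch: the signal-based path only works in the main thread on Unix.
# A per-call `timeout` kwarg overrides the decorator's default.
import time

@timeout(seconds=0.5)
def slow():
    time.sleep(2)

try:
    slow()  # SIGALRM fires after ~0.5s and raises TimeoutError
except TimeoutError:
    print('timed out')

# slow(timeout=None)  # would disable the alarm for this one call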
|
def function[timeout, parameter[seconds, use_signals, timeout_exception, exception_message]]:
constant[Add a timeout parameter to a function and return it.
:param seconds: optional time limit in seconds or fractions of a second. If None is passed, no timeout is applied.
This adds some flexibility to the usage: you can disable timing out depending on the settings.
:type seconds: float
:param use_signals: flag indicating whether signals should be used to time the function out, or the multiprocessing module.
When using multiprocessing, timeout granularity is limited to 10ths of a second.
:type use_signals: bool
:raises: TimeoutError if time limit is reached
It is illegal to pass anything other than a function as the first
parameter. The function is wrapped and returned to the caller.
]
def function[decorate, parameter[function]]:
if <ast.UnaryOp object at 0x7da20e955510> begin[:]
return[name[function]]
if name[use_signals] begin[:]
def function[handler, parameter[signum, frame]]:
call[name[_raise_exception], parameter[name[timeout_exception], name[exception_message]]]
def function[new_function, parameter[]]:
variable[new_seconds] assign[=] call[name[kwargs].pop, parameter[constant[timeout], name[seconds]]]
if name[new_seconds] begin[:]
variable[old] assign[=] call[name[signal].signal, parameter[name[signal].SIGALRM, name[handler]]]
call[name[signal].setitimer, parameter[name[signal].ITIMER_REAL, name[new_seconds]]]
<ast.Try object at 0x7da20e957250>
return[name[new_function]]
return[name[decorate]]
|
keyword[def] identifier[timeout] ( identifier[seconds] = keyword[None] , identifier[use_signals] = keyword[True] , identifier[timeout_exception] = identifier[TimeoutError] , identifier[exception_message] = keyword[None] ):
literal[string]
keyword[def] identifier[decorate] ( identifier[function] ):
keyword[if] keyword[not] identifier[seconds] :
keyword[return] identifier[function]
keyword[if] identifier[use_signals] :
keyword[def] identifier[handler] ( identifier[signum] , identifier[frame] ):
identifier[_raise_exception] ( identifier[timeout_exception] , identifier[exception_message] )
@ identifier[wraps] ( identifier[function] )
keyword[def] identifier[new_function] (* identifier[args] ,** identifier[kwargs] ):
identifier[new_seconds] = identifier[kwargs] . identifier[pop] ( literal[string] , identifier[seconds] )
keyword[if] identifier[new_seconds] :
identifier[old] = identifier[signal] . identifier[signal] ( identifier[signal] . identifier[SIGALRM] , identifier[handler] )
identifier[signal] . identifier[setitimer] ( identifier[signal] . identifier[ITIMER_REAL] , identifier[new_seconds] )
keyword[try] :
keyword[return] identifier[function] (* identifier[args] ,** identifier[kwargs] )
keyword[finally] :
keyword[if] identifier[new_seconds] :
identifier[signal] . identifier[setitimer] ( identifier[signal] . identifier[ITIMER_REAL] , literal[int] )
identifier[signal] . identifier[signal] ( identifier[signal] . identifier[SIGALRM] , identifier[old] )
keyword[return] identifier[new_function]
keyword[else] :
@ identifier[wraps] ( identifier[function] )
keyword[def] identifier[new_function] (* identifier[args] ,** identifier[kwargs] ):
identifier[timeout_wrapper] = identifier[_Timeout] ( identifier[function] , identifier[timeout_exception] , identifier[exception_message] , identifier[seconds] )
keyword[return] identifier[timeout_wrapper] (* identifier[args] ,** identifier[kwargs] )
keyword[return] identifier[new_function]
keyword[return] identifier[decorate]
|
def timeout(seconds=None, use_signals=True, timeout_exception=TimeoutError, exception_message=None):
"""Add a timeout parameter to a function and return it.
:param seconds: optional time limit in seconds or fractions of a second. If None is passed, no timeout is applied.
This adds some flexibility to the usage: you can disable timing out depending on the settings.
:type seconds: float
:param use_signals: flag indicating whether signals should be used to time the function out, or the multiprocessing module.
When using multiprocessing, timeout granularity is limited to 10ths of a second.
:type use_signals: bool
:raises: TimeoutError if time limit is reached
It is illegal to pass anything other than a function as the first
parameter. The function is wrapped and returned to the caller.
"""
def decorate(function):
if not seconds:
return function # depends on [control=['if'], data=[]]
if use_signals:
def handler(signum, frame):
_raise_exception(timeout_exception, exception_message)
@wraps(function)
def new_function(*args, **kwargs):
new_seconds = kwargs.pop('timeout', seconds)
if new_seconds:
old = signal.signal(signal.SIGALRM, handler)
signal.setitimer(signal.ITIMER_REAL, new_seconds) # depends on [control=['if'], data=[]]
try:
return function(*args, **kwargs) # depends on [control=['try'], data=[]]
finally:
if new_seconds:
signal.setitimer(signal.ITIMER_REAL, 0)
signal.signal(signal.SIGALRM, old) # depends on [control=['if'], data=[]]
return new_function # depends on [control=['if'], data=[]]
else:
@wraps(function)
def new_function(*args, **kwargs):
timeout_wrapper = _Timeout(function, timeout_exception, exception_message, seconds)
return timeout_wrapper(*args, **kwargs)
return new_function
return decorate
|
def _smooth_distribution(p, eps=0.0001):
"""Given a discrete distribution (may have not been normalized to 1),
smooth it by replacing zeros with eps multiplied by a scaling factor and taking the
corresponding amount off the non-zero values.
Ref: http://web.engr.illinois.edu/~hanj/cs412/bk3/KL-divergence.pdf
"""
is_zeros = (p == 0).astype(np.float32)
is_nonzeros = (p != 0).astype(np.float32)
n_zeros = is_zeros.sum()
n_nonzeros = p.size - n_zeros
if not n_nonzeros:
raise ValueError('The discrete probability distribution is malformed. All entries are 0.')
eps1 = eps * float(n_zeros) / float(n_nonzeros)
assert eps1 < 1.0, 'n_zeros=%d, n_nonzeros=%d, eps1=%f' % (n_zeros, n_nonzeros, eps1)
hist = p.astype(np.float32)
hist += eps * is_zeros + (-eps1) * is_nonzeros
assert (hist <= 0).sum() == 0
return hist
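# Worked example: p has two zeros and two non-zeros, so
# eps1 = eps * 2 / 2 = eps, each zero gains eps, each non-zero loses eps,
# and the total probability mass is preserved.
import numpy as np

p = np.array([0.0, 0.4, 0.0, 0.6])
smoothed = _smooth_distribution(p, eps=0.0001)
print(smoothed)        # zeros became 1e-04; non-zeros lost 1e-04 each
print(smoothed.sum())  # ~1.0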
|
def function[_smooth_distribution, parameter[p, eps]]:
constant[Given a discrete distribution (may not have been normalized to 1),
smooth it by replacing zeros with eps multiplied by a scaling factor and taking the
corresponding amount off the non-zero values.
Ref: http://web.engr.illinois.edu/~hanj/cs412/bk3/KL-divergence.pdf
]
variable[is_zeros] assign[=] call[compare[name[p] equal[==] constant[0]].astype, parameter[name[np].float32]]
variable[is_nonzeros] assign[=] call[compare[name[p] not_equal[!=] constant[0]].astype, parameter[name[np].float32]]
variable[n_zeros] assign[=] call[name[is_zeros].sum, parameter[]]
variable[n_nonzeros] assign[=] binary_operation[name[p].size - name[n_zeros]]
if <ast.UnaryOp object at 0x7da1b200aec0> begin[:]
<ast.Raise object at 0x7da1b2009b40>
variable[eps1] assign[=] binary_operation[binary_operation[name[eps] * call[name[float], parameter[name[n_zeros]]]] / call[name[float], parameter[name[n_nonzeros]]]]
assert[compare[name[eps1] less[<] constant[1.0]]]
variable[hist] assign[=] call[name[p].astype, parameter[name[np].float32]]
<ast.AugAssign object at 0x7da1b200aad0>
assert[compare[call[compare[name[hist] less_or_equal[<=] constant[0]].sum, parameter[]] equal[==] constant[0]]]
return[name[hist]]
|
keyword[def] identifier[_smooth_distribution] ( identifier[p] , identifier[eps] = literal[int] ):
literal[string]
identifier[is_zeros] =( identifier[p] == literal[int] ). identifier[astype] ( identifier[np] . identifier[float32] )
identifier[is_nonzeros] =( identifier[p] != literal[int] ). identifier[astype] ( identifier[np] . identifier[float32] )
identifier[n_zeros] = identifier[is_zeros] . identifier[sum] ()
identifier[n_nonzeros] = identifier[p] . identifier[size] - identifier[n_zeros]
keyword[if] keyword[not] identifier[n_nonzeros] :
keyword[raise] identifier[ValueError] ( literal[string] )
identifier[eps1] = identifier[eps] * identifier[float] ( identifier[n_zeros] )/ identifier[float] ( identifier[n_nonzeros] )
keyword[assert] identifier[eps1] < literal[int] , literal[string] %( identifier[n_zeros] , identifier[n_nonzeros] , identifier[eps1] )
identifier[hist] = identifier[p] . identifier[astype] ( identifier[np] . identifier[float32] )
identifier[hist] += identifier[eps] * identifier[is_zeros] +(- identifier[eps1] )* identifier[is_nonzeros]
keyword[assert] ( identifier[hist] <= literal[int] ). identifier[sum] ()== literal[int]
keyword[return] identifier[hist]
|
def _smooth_distribution(p, eps=0.0001):
"""Given a discrete distribution (may have not been normalized to 1),
smooth it by replacing zeros with eps multiplied by a scaling factor and taking the
corresponding amount off the non-zero values.
Ref: http://web.engr.illinois.edu/~hanj/cs412/bk3/KL-divergence.pdf
"""
is_zeros = (p == 0).astype(np.float32)
is_nonzeros = (p != 0).astype(np.float32)
n_zeros = is_zeros.sum()
n_nonzeros = p.size - n_zeros
if not n_nonzeros:
raise ValueError('The discrete probability distribution is malformed. All entries are 0.') # depends on [control=['if'], data=[]]
eps1 = eps * float(n_zeros) / float(n_nonzeros)
assert eps1 < 1.0, 'n_zeros=%d, n_nonzeros=%d, eps1=%f' % (n_zeros, n_nonzeros, eps1)
hist = p.astype(np.float32)
hist += eps * is_zeros + -eps1 * is_nonzeros
assert (hist <= 0).sum() == 0
return hist
|
def create_or_update(cls, video, language_code, metadata, file_data=None):
"""
Create or update Transcript object.
Arguments:
video (Video): Video for which transcript is going to be saved.
language_code (str): language code of the transcript to be created/updated
metadata (dict): A dict of properties to be overwritten
file_data (InMemoryUploadedFile): File data to be saved
Returns:
Returns a tuple of (video_transcript, created).
"""
try:
video_transcript = cls.objects.get(video=video, language_code=language_code)
retrieved = True
except cls.DoesNotExist:
video_transcript = cls(video=video, language_code=language_code)
retrieved = False
for prop, value in six.iteritems(metadata):
if prop in ['language_code', 'file_format', 'provider']:
setattr(video_transcript, prop, value)
transcript_name = metadata.get('file_name')
try:
if transcript_name:
video_transcript.transcript.name = transcript_name
elif file_data:
with closing(file_data) as transcript_file_data:
file_name = '{uuid}.{ext}'.format(uuid=uuid4().hex, ext=video_transcript.file_format)
video_transcript.transcript.save(file_name, transcript_file_data)
video_transcript.save()
except Exception:
logger.exception(
'[VAL] Transcript save failed to storage for video_id "%s" language code "%s"',
video.edx_video_id,
language_code
)
raise
return video_transcript, not retrieved
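# Hypothetical call (the model name VideoTranscript and the argument values
# are assumptions for illustration, not taken from this file):
# transcript, created = VideoTranscript.create_or_update(
#     video=video,  # an existing Video instance
#     language_code='en',
#     metadata={'file_format': 'srt', 'provider': 'Custom', 'file_name': None},
#     file_data=uploaded_file,  # e.g. an InMemoryUploadedFile; a fresh UUID name is generated
# )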
|
def function[create_or_update, parameter[cls, video, language_code, metadata, file_data]]:
constant[
Create or update Transcript object.
Arguments:
video (Video): Video for which transcript is going to be saved.
language_code (str): language code of the transcript to be created/updated
metadata (dict): A dict of properties to be overwritten
file_data (InMemoryUploadedFile): File data to be saved
Returns:
Returns a tuple of (video_transcript, created).
]
<ast.Try object at 0x7da1b0368040>
for taget[tuple[[<ast.Name object at 0x7da1b036b550>, <ast.Name object at 0x7da1b03681f0>]]] in starred[call[name[six].iteritems, parameter[name[metadata]]]] begin[:]
if compare[name[prop] in list[[<ast.Constant object at 0x7da1b05f97e0>, <ast.Constant object at 0x7da1b05f8490>, <ast.Constant object at 0x7da1b05fac20>]]] begin[:]
call[name[setattr], parameter[name[video_transcript], name[prop], name[value]]]
variable[transcript_name] assign[=] call[name[metadata].get, parameter[constant[file_name]]]
<ast.Try object at 0x7da1b05f8700>
return[tuple[[<ast.Name object at 0x7da1b05f85b0>, <ast.UnaryOp object at 0x7da1b05fbb50>]]]
|
keyword[def] identifier[create_or_update] ( identifier[cls] , identifier[video] , identifier[language_code] , identifier[metadata] , identifier[file_data] = keyword[None] ):
literal[string]
keyword[try] :
identifier[video_transcript] = identifier[cls] . identifier[objects] . identifier[get] ( identifier[video] = identifier[video] , identifier[language_code] = identifier[language_code] )
identifier[retrieved] = keyword[True]
keyword[except] identifier[cls] . identifier[DoesNotExist] :
identifier[video_transcript] = identifier[cls] ( identifier[video] = identifier[video] , identifier[language_code] = identifier[language_code] )
identifier[retrieved] = keyword[False]
keyword[for] identifier[prop] , identifier[value] keyword[in] identifier[six] . identifier[iteritems] ( identifier[metadata] ):
keyword[if] identifier[prop] keyword[in] [ literal[string] , literal[string] , literal[string] ]:
identifier[setattr] ( identifier[video_transcript] , identifier[prop] , identifier[value] )
identifier[transcript_name] = identifier[metadata] . identifier[get] ( literal[string] )
keyword[try] :
keyword[if] identifier[transcript_name] :
identifier[video_transcript] . identifier[transcript] . identifier[name] = identifier[transcript_name]
keyword[elif] identifier[file_data] :
keyword[with] identifier[closing] ( identifier[file_data] ) keyword[as] identifier[transcript_file_data] :
identifier[file_name] = literal[string] . identifier[format] ( identifier[uuid] = identifier[uuid4] (). identifier[hex] , identifier[ext] = identifier[video_transcript] . identifier[file_format] )
identifier[video_transcript] . identifier[transcript] . identifier[save] ( identifier[file_name] , identifier[transcript_file_data] )
identifier[video_transcript] . identifier[save] ()
keyword[except] identifier[Exception] :
identifier[logger] . identifier[exception] (
literal[string] ,
identifier[video] . identifier[edx_video_id] ,
identifier[language_code]
)
keyword[raise]
keyword[return] identifier[video_transcript] , keyword[not] identifier[retrieved]
|
def create_or_update(cls, video, language_code, metadata, file_data=None):
"""
Create or update Transcript object.
Arguments:
video (Video): Video for which transcript is going to be saved.
language_code (str): language code of the transcript to be created/updated
metadata (dict): A dict of properties to be overwritten
file_data (InMemoryUploadedFile): File data to be saved
Returns:
Returns a tuple of (video_transcript, created).
"""
try:
video_transcript = cls.objects.get(video=video, language_code=language_code)
retrieved = True # depends on [control=['try'], data=[]]
except cls.DoesNotExist:
video_transcript = cls(video=video, language_code=language_code)
retrieved = False # depends on [control=['except'], data=[]]
for (prop, value) in six.iteritems(metadata):
if prop in ['language_code', 'file_format', 'provider']:
setattr(video_transcript, prop, value) # depends on [control=['if'], data=['prop']] # depends on [control=['for'], data=[]]
transcript_name = metadata.get('file_name')
try:
if transcript_name:
video_transcript.transcript.name = transcript_name # depends on [control=['if'], data=[]]
elif file_data:
with closing(file_data) as transcript_file_data:
file_name = '{uuid}.{ext}'.format(uuid=uuid4().hex, ext=video_transcript.file_format)
video_transcript.transcript.save(file_name, transcript_file_data) # depends on [control=['with'], data=['transcript_file_data']] # depends on [control=['if'], data=[]]
video_transcript.save() # depends on [control=['try'], data=[]]
except Exception:
logger.exception('[VAL] Transcript save failed to storage for video_id "%s" language code "%s"', video.edx_video_id, language_code)
raise # depends on [control=['except'], data=[]]
return (video_transcript, not retrieved)
|
def lookup_attribute_chain(attrname, namespace):
"""
>>> attrname = funcname
>>> namespace = mod.__dict__
>>> import utool as ut
>>> globals_ = ut.util_inspect.__dict__
>>> attrname = 'KWReg.print_defaultkw'
"""
#subdict = meta_util_six.get_funcglobals(root_func)
subtup = attrname.split('.')
subdict = namespace
for attr in subtup[:-1]:
subdict = subdict[attr].__dict__
leaf_name = subtup[-1]
leaf_attr = subdict[leaf_name]
return leaf_attr
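# Runnable example: walking 'path.join' through a namespace dict. Every
# intermediate attribute must expose __dict__, which modules and classes do.
import os

namespace = {'path': os.path}
func = lookup_attribute_chain('path.join', namespace)
assert func is os.path.join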
|
def function[lookup_attribute_chain, parameter[attrname, namespace]]:
constant[
>>> attrname = funcname
>>> namespace = mod.__dict__
>>> import utool as ut
>>> globals_ = ut.util_inspect.__dict__
>>> attrname = 'KWReg.print_defaultkw'
]
variable[subtup] assign[=] call[name[attrname].split, parameter[constant[.]]]
variable[subdict] assign[=] name[namespace]
for taget[name[attr]] in starred[call[name[subtup]][<ast.Slice object at 0x7da1b24e2e60>]] begin[:]
variable[subdict] assign[=] call[name[subdict]][name[attr]].__dict__
variable[leaf_name] assign[=] call[name[subtup]][<ast.UnaryOp object at 0x7da1b24b5480>]
variable[leaf_attr] assign[=] call[name[subdict]][name[leaf_name]]
return[name[leaf_attr]]
|
keyword[def] identifier[lookup_attribute_chain] ( identifier[attrname] , identifier[namespace] ):
literal[string]
identifier[subtup] = identifier[attrname] . identifier[split] ( literal[string] )
identifier[subdict] = identifier[namespace]
keyword[for] identifier[attr] keyword[in] identifier[subtup] [:- literal[int] ]:
identifier[subdict] = identifier[subdict] [ identifier[attr] ]. identifier[__dict__]
identifier[leaf_name] = identifier[subtup] [- literal[int] ]
identifier[leaf_attr] = identifier[subdict] [ identifier[leaf_name] ]
keyword[return] identifier[leaf_attr]
|
def lookup_attribute_chain(attrname, namespace):
"""
>>> attrname = funcname
>>> namespace = mod.__dict__
>>> import utool as ut
>>> globals_ = ut.util_inspect.__dict__
>>> attrname = 'KWReg.print_defaultkw'
"""
#subdict = meta_util_six.get_funcglobals(root_func)
subtup = attrname.split('.')
subdict = namespace
for attr in subtup[:-1]:
subdict = subdict[attr].__dict__ # depends on [control=['for'], data=['attr']]
leaf_name = subtup[-1]
leaf_attr = subdict[leaf_name]
return leaf_attr
|
def reduceByWindow(self, reduceFunc, invReduceFunc, windowDuration, slideDuration):
"""
Return a new DStream in which each RDD has a single element generated by reducing all
elements in a sliding window over this DStream.
If `invReduceFunc` is not None, the reduction is done incrementally
using the old window's reduced value:
1. reduce the new values that entered the window (e.g., adding new counts)
2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)
This is more efficient than recomputing the window when `invReduceFunc` is None.
@param reduceFunc: associative and commutative reduce function
@param invReduceFunc: inverse reduce function of `reduceFunc`; such that for all y,
and invertible x:
`invReduceFunc(reduceFunc(x, y), x) = y`
@param windowDuration: width of the window; must be a multiple of this DStream's
batching interval
@param slideDuration: sliding interval of the window (i.e., the interval after which
the new DStream will generate RDDs); must be a multiple of this
DStream's batching interval
"""
keyed = self.map(lambda x: (1, x))
reduced = keyed.reduceByKeyAndWindow(reduceFunc, invReduceFunc,
windowDuration, slideDuration, 1)
return reduced.map(lambda kv: kv[1])
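# The incremental idea in plain Python: with an invertible reduce, the new
# window total is old_total + reduce(entering) - reduce(leaving), so each
# slide touches only the batches at the window edges, not every element.
old_window, new_window = [1, 2, 3], [2, 3, 4]  # slide by one batch
old_total = sum(old_window)
new_total = old_total + sum([4]) - sum([1])    # invReduceFunc plays the minus role
assert new_total == sum(new_window)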
|
def function[reduceByWindow, parameter[self, reduceFunc, invReduceFunc, windowDuration, slideDuration]]:
constant[
Return a new DStream in which each RDD has a single element generated by reducing all
elements in a sliding window over this DStream.
If `invReduceFunc` is not None, the reduction is done incrementally
using the old window's reduced value:
1. reduce the new values that entered the window (e.g., adding new counts)
2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)
This is more efficient than recomputing the window when `invReduceFunc` is None.
@param reduceFunc: associative and commutative reduce function
@param invReduceFunc: inverse reduce function of `reduceFunc`; such that for all y,
and invertible x:
`invReduceFunc(reduceFunc(x, y), x) = y`
@param windowDuration: width of the window; must be a multiple of this DStream's
batching interval
@param slideDuration: sliding interval of the window (i.e., the interval after which
the new DStream will generate RDDs); must be a multiple of this
DStream's batching interval
]
variable[keyed] assign[=] call[name[self].map, parameter[<ast.Lambda object at 0x7da20c993880>]]
variable[reduced] assign[=] call[name[keyed].reduceByKeyAndWindow, parameter[name[reduceFunc], name[invReduceFunc], name[windowDuration], name[slideDuration], constant[1]]]
return[call[name[reduced].map, parameter[<ast.Lambda object at 0x7da20c9913f0>]]]
|
keyword[def] identifier[reduceByWindow] ( identifier[self] , identifier[reduceFunc] , identifier[invReduceFunc] , identifier[windowDuration] , identifier[slideDuration] ):
literal[string]
identifier[keyed] = identifier[self] . identifier[map] ( keyword[lambda] identifier[x] :( literal[int] , identifier[x] ))
identifier[reduced] = identifier[keyed] . identifier[reduceByKeyAndWindow] ( identifier[reduceFunc] , identifier[invReduceFunc] ,
identifier[windowDuration] , identifier[slideDuration] , literal[int] )
keyword[return] identifier[reduced] . identifier[map] ( keyword[lambda] identifier[kv] : identifier[kv] [ literal[int] ])
|
def reduceByWindow(self, reduceFunc, invReduceFunc, windowDuration, slideDuration):
"""
Return a new DStream in which each RDD has a single element generated by reducing all
elements in a sliding window over this DStream.
If `invReduceFunc` is not None, the reduction is done incrementally
using the old window's reduced value:
1. reduce the new values that entered the window (e.g., adding new counts)
2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)
This is more efficient than recomputing the window when `invReduceFunc` is None.
@param reduceFunc: associative and commutative reduce function
@param invReduceFunc: inverse reduce function of `reduceFunc`; such that for all y,
and invertible x:
`invReduceFunc(reduceFunc(x, y), x) = y`
@param windowDuration: width of the window; must be a multiple of this DStream's
batching interval
@param slideDuration: sliding interval of the window (i.e., the interval after which
the new DStream will generate RDDs); must be a multiple of this
DStream's batching interval
"""
keyed = self.map(lambda x: (1, x))
reduced = keyed.reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowDuration, slideDuration, 1)
return reduced.map(lambda kv: kv[1])
|
def bucketize(src, key=None, value_transform=None, key_filter=None):
"""Group values in the *src* iterable by the value returned by *key*,
which defaults to :class:`bool`, grouping values by truthiness.
>>> bucketize(range(5))
{False: [0], True: [1, 2, 3, 4]}
>>> is_odd = lambda x: x % 2 == 1
>>> bucketize(range(5), is_odd)
{False: [0, 2, 4], True: [1, 3]}
Value lists are not deduplicated:
>>> bucketize([None, None, None, 'hello'])
{False: [None, None, None], True: ['hello']}
Bucketize into more than two groups:
>>> bucketize(range(10), lambda x: x % 3)
{0: [0, 3, 6, 9], 1: [1, 4, 7], 2: [2, 5, 8]}
``bucketize`` has a couple of advanced options useful in certain
cases. *value_transform* can be used to modify values as they are
added to buckets, and *key_filter* will allow excluding certain
buckets from being collected.
>>> bucketize(range(5), value_transform=lambda x: x*x)
{False: [0], True: [1, 4, 9, 16]}
>>> bucketize(range(10), key=lambda x: x % 3, key_filter=lambda k: k % 3 != 1)
{0: [0, 3, 6, 9], 2: [2, 5, 8]}
Note in some of these examples there were at most two keys, ``True`` and
``False``, and each key present has a list with at least one
item. See :func:`partition` for a version specialized for binary
use cases.
"""
if not is_iterable(src):
raise TypeError('expected an iterable')
if key is None:
key = bool
if not callable(key):
raise TypeError('expected callable key function')
if value_transform is None:
value_transform = lambda x: x
if not callable(value_transform):
raise TypeError('expected callable value transform function')
ret = {}
for val in src:
key_of_val = key(val)
if key_filter is None or key_filter(key_of_val):
ret.setdefault(key_of_val, []).append(value_transform(val))
return ret
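# The binary special case referenced above can be written as a thin wrapper;
# this is a sketch, not necessarily the library's own partition().
def partition(src, key=bool):
    buckets = bucketize(src, key=key)
    return buckets.get(True, []), buckets.get(False, [])

print(partition(range(5), key=lambda x: x % 2 == 0))  # ([0, 2, 4], [1, 3])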
|
def function[bucketize, parameter[src, key, value_transform, key_filter]]:
constant[Group values in the *src* iterable by the value returned by *key*,
which defaults to :class:`bool`, grouping values by truthiness.
>>> bucketize(range(5))
{False: [0], True: [1, 2, 3, 4]}
>>> is_odd = lambda x: x % 2 == 1
>>> bucketize(range(5), is_odd)
{False: [0, 2, 4], True: [1, 3]}
Value lists are not deduplicated:
>>> bucketize([None, None, None, 'hello'])
{False: [None, None, None], True: ['hello']}
Bucketize into more than two groups:
>>> bucketize(range(10), lambda x: x % 3)
{0: [0, 3, 6, 9], 1: [1, 4, 7], 2: [2, 5, 8]}
``bucketize`` has a couple of advanced options useful in certain
cases. *value_transform* can be used to modify values as they are
added to buckets, and *key_filter* will allow excluding certain
buckets from being collected.
>>> bucketize(range(5), value_transform=lambda x: x*x)
{False: [0], True: [1, 4, 9, 16]}
>>> bucketize(range(10), key=lambda x: x % 3, key_filter=lambda k: k % 3 != 1)
{0: [0, 3, 6, 9], 2: [2, 5, 8]}
Note in some of these examples there were at most two keys, ``True`` and
``False``, and each key present has a list with at least one
item. See :func:`partition` for a version specialized for binary
use cases.
]
if <ast.UnaryOp object at 0x7da2054a6a40> begin[:]
<ast.Raise object at 0x7da2054a5d20>
if compare[name[key] is constant[None]] begin[:]
variable[key] assign[=] name[bool]
if <ast.UnaryOp object at 0x7da2054a6d70> begin[:]
<ast.Raise object at 0x7da2054a5b40>
if compare[name[value_transform] is constant[None]] begin[:]
variable[value_transform] assign[=] <ast.Lambda object at 0x7da2054a6620>
if <ast.UnaryOp object at 0x7da2054a6740> begin[:]
<ast.Raise object at 0x7da2054a75b0>
variable[ret] assign[=] dictionary[[], []]
for taget[name[val]] in starred[name[src]] begin[:]
variable[key_of_val] assign[=] call[name[key], parameter[name[val]]]
if <ast.BoolOp object at 0x7da2054a6260> begin[:]
call[call[name[ret].setdefault, parameter[name[key_of_val], list[[]]]].append, parameter[call[name[value_transform], parameter[name[val]]]]]
return[name[ret]]
|
keyword[def] identifier[bucketize] ( identifier[src] , identifier[key] = keyword[None] , identifier[value_transform] = keyword[None] , identifier[key_filter] = keyword[None] ):
literal[string]
keyword[if] keyword[not] identifier[is_iterable] ( identifier[src] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[key] keyword[is] keyword[None] :
identifier[key] = identifier[bool]
keyword[if] keyword[not] identifier[callable] ( identifier[key] ):
keyword[raise] identifier[TypeError] ( literal[string] )
keyword[if] identifier[value_transform] keyword[is] keyword[None] :
identifier[value_transform] = keyword[lambda] identifier[x] : identifier[x]
keyword[if] keyword[not] identifier[callable] ( identifier[value_transform] ):
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[ret] ={}
keyword[for] identifier[val] keyword[in] identifier[src] :
identifier[key_of_val] = identifier[key] ( identifier[val] )
keyword[if] identifier[key_filter] keyword[is] keyword[None] keyword[or] identifier[key_filter] ( identifier[key_of_val] ):
identifier[ret] . identifier[setdefault] ( identifier[key_of_val] ,[]). identifier[append] ( identifier[value_transform] ( identifier[val] ))
keyword[return] identifier[ret]
|
def bucketize(src, key=None, value_transform=None, key_filter=None):
"""Group values in the *src* iterable by the value returned by *key*,
which defaults to :class:`bool`, grouping values by truthiness.
>>> bucketize(range(5))
{False: [0], True: [1, 2, 3, 4]}
>>> is_odd = lambda x: x % 2 == 1
>>> bucketize(range(5), is_odd)
{False: [0, 2, 4], True: [1, 3]}
Value lists are not deduplicated:
>>> bucketize([None, None, None, 'hello'])
{False: [None, None, None], True: ['hello']}
Bucketize into more than two groups:
>>> bucketize(range(10), lambda x: x % 3)
{0: [0, 3, 6, 9], 1: [1, 4, 7], 2: [2, 5, 8]}
``bucketize`` has a couple of advanced options useful in certain
cases. *value_transform* can be used to modify values as they are
added to buckets, and *key_filter* will allow excluding certain
buckets from being collected.
>>> bucketize(range(5), value_transform=lambda x: x*x)
{False: [0], True: [1, 4, 9, 16]}
>>> bucketize(range(10), key=lambda x: x % 3, key_filter=lambda k: k % 3 != 1)
{0: [0, 3, 6, 9], 2: [2, 5, 8]}
Note in some of these examples there were at most two keys, ``True`` and
``False``, and each key present has a list with at least one
item. See :func:`partition` for a version specialized for binary
use cases.
"""
if not is_iterable(src):
raise TypeError('expected an iterable') # depends on [control=['if'], data=[]]
if key is None:
key = bool # depends on [control=['if'], data=['key']]
if not callable(key):
raise TypeError('expected callable key function') # depends on [control=['if'], data=[]]
if value_transform is None:
value_transform = lambda x: x # depends on [control=['if'], data=['value_transform']]
if not callable(value_transform):
raise TypeError('expected callable value transform function') # depends on [control=['if'], data=[]]
ret = {}
for val in src:
key_of_val = key(val)
if key_filter is None or key_filter(key_of_val):
ret.setdefault(key_of_val, []).append(value_transform(val)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['val']]
return ret
|
def handlePortfolio(self, msg):
""" handle portfolio updates """
# log handler msg
self.log_msg("portfolio", msg)
# contract identifier
contract_tuple = self.contract_to_tuple(msg.contract)
contractString = self.contractString(contract_tuple)
# try creating the contract
self.registerContract(msg.contract)
# new account?
if msg.accountName not in self._portfolios.keys():
self._portfolios[msg.accountName] = {}
self._portfolios[msg.accountName][contractString] = {
"symbol": contractString,
"position": int(msg.position),
"marketPrice": float(msg.marketPrice),
"marketValue": float(msg.marketValue),
"averageCost": float(msg.averageCost),
"unrealizedPNL": float(msg.unrealizedPNL),
"realizedPNL": float(msg.realizedPNL),
"totalPNL": float(msg.realizedPNL) + float(msg.unrealizedPNL),
"account": msg.accountName
}
# fire callback
self.ibCallback(caller="handlePortfolio", msg=msg)
|
def function[handlePortfolio, parameter[self, msg]]:
constant[ handle portfolio updates ]
call[name[self].log_msg, parameter[constant[portfolio], name[msg]]]
variable[contract_tuple] assign[=] call[name[self].contract_to_tuple, parameter[name[msg].contract]]
variable[contractString] assign[=] call[name[self].contractString, parameter[name[contract_tuple]]]
call[name[self].registerContract, parameter[name[msg].contract]]
if compare[name[msg].accountName <ast.NotIn object at 0x7da2590d7190> call[name[self]._portfolios.keys, parameter[]]] begin[:]
call[name[self]._portfolios][name[msg].accountName] assign[=] dictionary[[], []]
call[call[name[self]._portfolios][name[msg].accountName]][name[contractString]] assign[=] dictionary[[<ast.Constant object at 0x7da1b18e07f0>, <ast.Constant object at 0x7da1b18e07c0>, <ast.Constant object at 0x7da1b18e04c0>, <ast.Constant object at 0x7da1b18e3f10>, <ast.Constant object at 0x7da1b18e08b0>, <ast.Constant object at 0x7da1b18e3e80>, <ast.Constant object at 0x7da1b18e0340>, <ast.Constant object at 0x7da1b18e3ee0>, <ast.Constant object at 0x7da1b18e05e0>], [<ast.Name object at 0x7da1b18e0670>, <ast.Call object at 0x7da1b18e3fa0>, <ast.Call object at 0x7da1b26ada50>, <ast.Call object at 0x7da1b26aded0>, <ast.Call object at 0x7da1b26adba0>, <ast.Call object at 0x7da1b26aceb0>, <ast.Call object at 0x7da1b26aefb0>, <ast.BinOp object at 0x7da1b26addb0>, <ast.Attribute object at 0x7da1b26ad120>]]
call[name[self].ibCallback, parameter[]]
|
keyword[def] identifier[handlePortfolio] ( identifier[self] , identifier[msg] ):
literal[string]
identifier[self] . identifier[log_msg] ( literal[string] , identifier[msg] )
identifier[contract_tuple] = identifier[self] . identifier[contract_to_tuple] ( identifier[msg] . identifier[contract] )
identifier[contractString] = identifier[self] . identifier[contractString] ( identifier[contract_tuple] )
identifier[self] . identifier[registerContract] ( identifier[msg] . identifier[contract] )
keyword[if] identifier[msg] . identifier[accountName] keyword[not] keyword[in] identifier[self] . identifier[_portfolios] . identifier[keys] ():
identifier[self] . identifier[_portfolios] [ identifier[msg] . identifier[accountName] ]={}
identifier[self] . identifier[_portfolios] [ identifier[msg] . identifier[accountName] ][ identifier[contractString] ]={
literal[string] : identifier[contractString] ,
literal[string] : identifier[int] ( identifier[msg] . identifier[position] ),
literal[string] : identifier[float] ( identifier[msg] . identifier[marketPrice] ),
literal[string] : identifier[float] ( identifier[msg] . identifier[marketValue] ),
literal[string] : identifier[float] ( identifier[msg] . identifier[averageCost] ),
literal[string] : identifier[float] ( identifier[msg] . identifier[unrealizedPNL] ),
literal[string] : identifier[float] ( identifier[msg] . identifier[realizedPNL] ),
literal[string] : identifier[float] ( identifier[msg] . identifier[realizedPNL] )+ identifier[float] ( identifier[msg] . identifier[unrealizedPNL] ),
literal[string] : identifier[msg] . identifier[accountName]
}
identifier[self] . identifier[ibCallback] ( identifier[caller] = literal[string] , identifier[msg] = identifier[msg] )
|
def handlePortfolio(self, msg):
""" handle portfolio updates """
# log handler msg
self.log_msg('portfolio', msg)
# contract identifier
contract_tuple = self.contract_to_tuple(msg.contract)
contractString = self.contractString(contract_tuple)
# try creating the contract
self.registerContract(msg.contract)
# new account?
if msg.accountName not in self._portfolios.keys():
self._portfolios[msg.accountName] = {} # depends on [control=['if'], data=[]]
self._portfolios[msg.accountName][contractString] = {'symbol': contractString, 'position': int(msg.position), 'marketPrice': float(msg.marketPrice), 'marketValue': float(msg.marketValue), 'averageCost': float(msg.averageCost), 'unrealizedPNL': float(msg.unrealizedPNL), 'realizedPNL': float(msg.realizedPNL), 'totalPNL': float(msg.realizedPNL) + float(msg.unrealizedPNL), 'account': msg.accountName}
# fire callback
self.ibCallback(caller='handlePortfolio', msg=msg)
|
def get_base_indentation(code, include_start=False):
"""Heuristically extracts the base indentation from the provided code.
Finds the smallest indentation following a newline not at the end of the
string.
"""
new_line_indentation = re_new_line_indentation[include_start].finditer(code)
new_line_indentation = tuple(m.groups(0)[0] for m in new_line_indentation)
if new_line_indentation:
return min(new_line_indentation, key=len)
else:
return ""
|
def function[get_base_indentation, parameter[code, include_start]]:
constant[Heuristically extracts the base indentation from the provided code.
Finds the smallest indentation following a newline not at the end of the
string.
]
variable[new_line_indentation] assign[=] call[call[name[re_new_line_indentation]][name[include_start]].finditer, parameter[name[code]]]
variable[new_line_indentation] assign[=] call[name[tuple], parameter[<ast.GeneratorExp object at 0x7da18f811bd0>]]
if name[new_line_indentation] begin[:]
return[call[name[min], parameter[name[new_line_indentation]]]]
|
keyword[def] identifier[get_base_indentation] ( identifier[code] , identifier[include_start] = keyword[False] ):
literal[string]
identifier[new_line_indentation] = identifier[re_new_line_indentation] [ identifier[include_start] ]. identifier[finditer] ( identifier[code] )
identifier[new_line_indentation] = identifier[tuple] ( identifier[m] . identifier[groups] ( literal[int] )[ literal[int] ] keyword[for] identifier[m] keyword[in] identifier[new_line_indentation] )
keyword[if] identifier[new_line_indentation] :
keyword[return] identifier[min] ( identifier[new_line_indentation] , identifier[key] = identifier[len] )
keyword[else] :
keyword[return] literal[string]
|
def get_base_indentation(code, include_start=False):
"""Heuristically extracts the base indentation from the provided code.
Finds the smallest indentation following a newline not at the end of the
string.
"""
new_line_indentation = re_new_line_indentation[include_start].finditer(code)
new_line_indentation = tuple((m.groups(0)[0] for m in new_line_indentation))
if new_line_indentation:
return min(new_line_indentation, key=len) # depends on [control=['if'], data=[]]
else:
return ''
|
def CompleteHuntIfExpirationTimeReached(hunt_obj):
"""Marks the hunt as complete if it's past its expiry time."""
# TODO(hanuszczak): This should not set the hunt state to `COMPLETED` but we
# should have a separate `EXPIRED` state instead and set that.
if (hunt_obj.hunt_state not in [
rdf_hunt_objects.Hunt.HuntState.STOPPED,
rdf_hunt_objects.Hunt.HuntState.COMPLETED
] and hunt_obj.expired):
StopHunt(hunt_obj.hunt_id, reason="Hunt completed.")
data_store.REL_DB.UpdateHuntObject(
hunt_obj.hunt_id, hunt_state=hunt_obj.HuntState.COMPLETED)
return data_store.REL_DB.ReadHuntObject(hunt_obj.hunt_id)
return hunt_obj
|
def function[CompleteHuntIfExpirationTimeReached, parameter[hunt_obj]]:
constant[Marks the hunt as complete if it's past its expiry time.]
if <ast.BoolOp object at 0x7da1b1b44940> begin[:]
call[name[StopHunt], parameter[name[hunt_obj].hunt_id]]
call[name[data_store].REL_DB.UpdateHuntObject, parameter[name[hunt_obj].hunt_id]]
return[call[name[data_store].REL_DB.ReadHuntObject, parameter[name[hunt_obj].hunt_id]]]
return[name[hunt_obj]]
|
keyword[def] identifier[CompleteHuntIfExpirationTimeReached] ( identifier[hunt_obj] ):
literal[string]
keyword[if] ( identifier[hunt_obj] . identifier[hunt_state] keyword[not] keyword[in] [
identifier[rdf_hunt_objects] . identifier[Hunt] . identifier[HuntState] . identifier[STOPPED] ,
identifier[rdf_hunt_objects] . identifier[Hunt] . identifier[HuntState] . identifier[COMPLETED]
] keyword[and] identifier[hunt_obj] . identifier[expired] ):
identifier[StopHunt] ( identifier[hunt_obj] . identifier[hunt_id] , identifier[reason] = literal[string] )
identifier[data_store] . identifier[REL_DB] . identifier[UpdateHuntObject] (
identifier[hunt_obj] . identifier[hunt_id] , identifier[hunt_state] = identifier[hunt_obj] . identifier[HuntState] . identifier[COMPLETED] )
keyword[return] identifier[data_store] . identifier[REL_DB] . identifier[ReadHuntObject] ( identifier[hunt_obj] . identifier[hunt_id] )
keyword[return] identifier[hunt_obj]
|
def CompleteHuntIfExpirationTimeReached(hunt_obj):
"""Marks the hunt as complete if it's past its expiry time."""
# TODO(hanuszczak): This should not set the hunt state to `COMPLETED` but we
# should have a separate `EXPIRED` state instead and set that.
if hunt_obj.hunt_state not in [rdf_hunt_objects.Hunt.HuntState.STOPPED, rdf_hunt_objects.Hunt.HuntState.COMPLETED] and hunt_obj.expired:
StopHunt(hunt_obj.hunt_id, reason='Hunt completed.')
data_store.REL_DB.UpdateHuntObject(hunt_obj.hunt_id, hunt_state=hunt_obj.HuntState.COMPLETED)
return data_store.REL_DB.ReadHuntObject(hunt_obj.hunt_id) # depends on [control=['if'], data=[]]
return hunt_obj
|
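A sketch of the intended call pattern, hedged because it assumes GRR server internals are in scope; `ReadHuntObject` is the same relational-DB call the function itself issues on the stop path:

# Hypothetical maintenance-loop usage: re-read the hunt, then let the
# helper stop it and mark it COMPLETED if its expiry time has passed.
hunt_obj = data_store.REL_DB.ReadHuntObject(hunt_id)
hunt_obj = CompleteHuntIfExpirationTimeReached(hunt_obj)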
def create_comment(self, comment_form):
"""Creates a new ``Comment``.
arg: comment_form (osid.commenting.CommentForm): the form for
this ``Comment``
return: (osid.commenting.Comment) - the new ``Comment``
raise: IllegalState - ``comment_form`` already used in a create
transaction
raise: InvalidArgument - one or more of the form elements is
invalid
raise: NullArgument - ``comment_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``comment_form`` did not originate from
``get_comment_form_for_create()``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.create_resource_template
collection = JSONClientValidated('commenting',
collection='Comment',
runtime=self._runtime)
if not isinstance(comment_form, ABCCommentForm):
        raise errors.InvalidArgument('argument type is not a CommentForm')
if comment_form.is_for_update():
raise errors.InvalidArgument('the CommentForm is for update only, not create')
try:
if self._forms[comment_form.get_id().get_identifier()] == CREATED:
raise errors.IllegalState('comment_form already used in a create transaction')
except KeyError:
raise errors.Unsupported('comment_form did not originate from this session')
if not comment_form.is_valid():
raise errors.InvalidArgument('one or more of the form elements is invalid')
insert_result = collection.insert_one(comment_form._my_map)
self._forms[comment_form.get_id().get_identifier()] = CREATED
result = objects.Comment(
osid_object_map=collection.find_one({'_id': insert_result.inserted_id}),
runtime=self._runtime,
proxy=self._proxy)
return result
|
def function[create_comment, parameter[self, comment_form]]:
constant[Creates a new ``Comment``.
arg: comment_form (osid.commenting.CommentForm): the form for
this ``Comment``
return: (osid.commenting.Comment) - the new ``Comment``
raise: IllegalState - ``comment_form`` already used in a create
transaction
raise: InvalidArgument - one or more of the form elements is
invalid
raise: NullArgument - ``comment_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``comment_form`` did not originate from
``get_comment_form_for_create()``
*compliance: mandatory -- This method must be implemented.*
]
variable[collection] assign[=] call[name[JSONClientValidated], parameter[constant[commenting]]]
if <ast.UnaryOp object at 0x7da1b0a7a920> begin[:]
<ast.Raise object at 0x7da1b0a7a800>
if call[name[comment_form].is_for_update, parameter[]] begin[:]
<ast.Raise object at 0x7da1b0a78910>
<ast.Try object at 0x7da1b0a7ab00>
if <ast.UnaryOp object at 0x7da1b0a78280> begin[:]
<ast.Raise object at 0x7da1b0a7a3b0>
variable[insert_result] assign[=] call[name[collection].insert_one, parameter[name[comment_form]._my_map]]
call[name[self]._forms][call[call[name[comment_form].get_id, parameter[]].get_identifier, parameter[]]] assign[=] name[CREATED]
variable[result] assign[=] call[name[objects].Comment, parameter[]]
return[name[result]]
|
keyword[def] identifier[create_comment] ( identifier[self] , identifier[comment_form] ):
literal[string]
identifier[collection] = identifier[JSONClientValidated] ( literal[string] ,
identifier[collection] = literal[string] ,
identifier[runtime] = identifier[self] . identifier[_runtime] )
keyword[if] keyword[not] identifier[isinstance] ( identifier[comment_form] , identifier[ABCCommentForm] ):
keyword[raise] identifier[errors] . identifier[InvalidArgument] ( literal[string] )
keyword[if] identifier[comment_form] . identifier[is_for_update] ():
keyword[raise] identifier[errors] . identifier[InvalidArgument] ( literal[string] )
keyword[try] :
keyword[if] identifier[self] . identifier[_forms] [ identifier[comment_form] . identifier[get_id] (). identifier[get_identifier] ()]== identifier[CREATED] :
keyword[raise] identifier[errors] . identifier[IllegalState] ( literal[string] )
keyword[except] identifier[KeyError] :
keyword[raise] identifier[errors] . identifier[Unsupported] ( literal[string] )
keyword[if] keyword[not] identifier[comment_form] . identifier[is_valid] ():
keyword[raise] identifier[errors] . identifier[InvalidArgument] ( literal[string] )
identifier[insert_result] = identifier[collection] . identifier[insert_one] ( identifier[comment_form] . identifier[_my_map] )
identifier[self] . identifier[_forms] [ identifier[comment_form] . identifier[get_id] (). identifier[get_identifier] ()]= identifier[CREATED]
identifier[result] = identifier[objects] . identifier[Comment] (
identifier[osid_object_map] = identifier[collection] . identifier[find_one] ({ literal[string] : identifier[insert_result] . identifier[inserted_id] }),
identifier[runtime] = identifier[self] . identifier[_runtime] ,
identifier[proxy] = identifier[self] . identifier[_proxy] )
keyword[return] identifier[result]
|
def create_comment(self, comment_form):
"""Creates a new ``Comment``.
arg: comment_form (osid.commenting.CommentForm): the form for
this ``Comment``
return: (osid.commenting.Comment) - the new ``Comment``
raise: IllegalState - ``comment_form`` already used in a create
transaction
raise: InvalidArgument - one or more of the form elements is
invalid
raise: NullArgument - ``comment_form`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``comment_form`` did not originate from
``get_comment_form_for_create()``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.create_resource_template
collection = JSONClientValidated('commenting', collection='Comment', runtime=self._runtime)
if not isinstance(comment_form, ABCCommentForm):
        raise errors.InvalidArgument('argument type is not a CommentForm') # depends on [control=['if'], data=[]]
if comment_form.is_for_update():
raise errors.InvalidArgument('the CommentForm is for update only, not create') # depends on [control=['if'], data=[]]
try:
if self._forms[comment_form.get_id().get_identifier()] == CREATED:
raise errors.IllegalState('comment_form already used in a create transaction') # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except KeyError:
raise errors.Unsupported('comment_form did not originate from this session') # depends on [control=['except'], data=[]]
if not comment_form.is_valid():
raise errors.InvalidArgument('one or more of the form elements is invalid') # depends on [control=['if'], data=[]]
insert_result = collection.insert_one(comment_form._my_map)
self._forms[comment_form.get_id().get_identifier()] = CREATED
result = objects.Comment(osid_object_map=collection.find_one({'_id': insert_result.inserted_id}), runtime=self._runtime, proxy=self._proxy)
return result
|
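Per the docstring, the form must come from `get_comment_form_for_create` on the same session. A hedged usage sketch (the `set_text` mutator is an assumption about the CommentForm API):

# Hypothetical flow: obtain a form, fill it in, create the comment.
form = session.get_comment_form_for_create(reference_id, [])
form.set_text('Looks good to me')  # assumed CommentForm setter
comment = session.create_comment(form)
print(comment.get_id())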
def _get_library_metadata(self, date_range):
"""
        Retrieve the libraries for the given date range; the stored date ranges are assumed not to overlap and
        to be CLOSED_CLOSED.
        At the moment the date range is mandatory.
"""
if date_range is None:
raise Exception("A date range must be provided")
if not (date_range.start and date_range.end):
raise Exception("The date range {0} must contain a start and end date".format(date_range))
start = date_range.start if date_range.start.tzinfo is not None else date_range.start.replace(tzinfo=mktz())
end = date_range.end if date_range.end.tzinfo is not None else date_range.end.replace(tzinfo=mktz())
query = {'$or': [{'start': {'$lte': start}, 'end': {'$gte': start}},
{'start': {'$gte': start}, 'end': {'$lte': end}},
{'start': {'$lte': end}, 'end': {'$gte': end}}]}
cursor = self._collection.find(query,
projection={'library_name': 1, 'start': 1, 'end': 1},
sort=[('start', pymongo.ASCENDING)])
results = []
for res in cursor:
start = res['start']
if date_range.start.tzinfo is not None and start.tzinfo is None:
start = start.replace(tzinfo=mktz("UTC")).astimezone(tz=date_range.start.tzinfo)
end = res['end']
if date_range.end.tzinfo is not None and end.tzinfo is None:
end = end.replace(tzinfo=mktz("UTC")).astimezone(tz=date_range.end.tzinfo)
results.append(TickStoreLibrary(res['library_name'], DateRange(start, end, CLOSED_CLOSED)))
return results
|
def function[_get_library_metadata, parameter[self, date_range]]:
constant[
    Retrieve the libraries for the given date range; the stored date ranges are assumed not to overlap and
    to be CLOSED_CLOSED.
    At the moment the date range is mandatory.
]
if compare[name[date_range] is constant[None]] begin[:]
<ast.Raise object at 0x7da20c7c8340>
if <ast.UnaryOp object at 0x7da20c7cb040> begin[:]
<ast.Raise object at 0x7da20c7c9bd0>
variable[start] assign[=] <ast.IfExp object at 0x7da20c7c8490>
variable[end] assign[=] <ast.IfExp object at 0x7da20c7c9780>
variable[query] assign[=] dictionary[[<ast.Constant object at 0x7da20c7c83a0>], [<ast.List object at 0x7da20c7c9d80>]]
variable[cursor] assign[=] call[name[self]._collection.find, parameter[name[query]]]
variable[results] assign[=] list[[]]
for taget[name[res]] in starred[name[cursor]] begin[:]
variable[start] assign[=] call[name[res]][constant[start]]
if <ast.BoolOp object at 0x7da20c7cad10> begin[:]
variable[start] assign[=] call[call[name[start].replace, parameter[]].astimezone, parameter[]]
variable[end] assign[=] call[name[res]][constant[end]]
if <ast.BoolOp object at 0x7da20c7caf50> begin[:]
variable[end] assign[=] call[call[name[end].replace, parameter[]].astimezone, parameter[]]
call[name[results].append, parameter[call[name[TickStoreLibrary], parameter[call[name[res]][constant[library_name]], call[name[DateRange], parameter[name[start], name[end], name[CLOSED_CLOSED]]]]]]]
return[name[results]]
|
keyword[def] identifier[_get_library_metadata] ( identifier[self] , identifier[date_range] ):
literal[string]
keyword[if] identifier[date_range] keyword[is] keyword[None] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[if] keyword[not] ( identifier[date_range] . identifier[start] keyword[and] identifier[date_range] . identifier[end] ):
keyword[raise] identifier[Exception] ( literal[string] . identifier[format] ( identifier[date_range] ))
identifier[start] = identifier[date_range] . identifier[start] keyword[if] identifier[date_range] . identifier[start] . identifier[tzinfo] keyword[is] keyword[not] keyword[None] keyword[else] identifier[date_range] . identifier[start] . identifier[replace] ( identifier[tzinfo] = identifier[mktz] ())
identifier[end] = identifier[date_range] . identifier[end] keyword[if] identifier[date_range] . identifier[end] . identifier[tzinfo] keyword[is] keyword[not] keyword[None] keyword[else] identifier[date_range] . identifier[end] . identifier[replace] ( identifier[tzinfo] = identifier[mktz] ())
identifier[query] ={ literal[string] :[{ literal[string] :{ literal[string] : identifier[start] }, literal[string] :{ literal[string] : identifier[start] }},
{ literal[string] :{ literal[string] : identifier[start] }, literal[string] :{ literal[string] : identifier[end] }},
{ literal[string] :{ literal[string] : identifier[end] }, literal[string] :{ literal[string] : identifier[end] }}]}
identifier[cursor] = identifier[self] . identifier[_collection] . identifier[find] ( identifier[query] ,
identifier[projection] ={ literal[string] : literal[int] , literal[string] : literal[int] , literal[string] : literal[int] },
identifier[sort] =[( literal[string] , identifier[pymongo] . identifier[ASCENDING] )])
identifier[results] =[]
keyword[for] identifier[res] keyword[in] identifier[cursor] :
identifier[start] = identifier[res] [ literal[string] ]
keyword[if] identifier[date_range] . identifier[start] . identifier[tzinfo] keyword[is] keyword[not] keyword[None] keyword[and] identifier[start] . identifier[tzinfo] keyword[is] keyword[None] :
identifier[start] = identifier[start] . identifier[replace] ( identifier[tzinfo] = identifier[mktz] ( literal[string] )). identifier[astimezone] ( identifier[tz] = identifier[date_range] . identifier[start] . identifier[tzinfo] )
identifier[end] = identifier[res] [ literal[string] ]
keyword[if] identifier[date_range] . identifier[end] . identifier[tzinfo] keyword[is] keyword[not] keyword[None] keyword[and] identifier[end] . identifier[tzinfo] keyword[is] keyword[None] :
identifier[end] = identifier[end] . identifier[replace] ( identifier[tzinfo] = identifier[mktz] ( literal[string] )). identifier[astimezone] ( identifier[tz] = identifier[date_range] . identifier[end] . identifier[tzinfo] )
identifier[results] . identifier[append] ( identifier[TickStoreLibrary] ( identifier[res] [ literal[string] ], identifier[DateRange] ( identifier[start] , identifier[end] , identifier[CLOSED_CLOSED] )))
keyword[return] identifier[results]
|
def _get_library_metadata(self, date_range):
"""
    Retrieve the libraries for the given date range; the stored date ranges are assumed not to overlap and
    to be CLOSED_CLOSED.
    At the moment the date range is mandatory.
"""
if date_range is None:
raise Exception('A date range must be provided') # depends on [control=['if'], data=[]]
if not (date_range.start and date_range.end):
raise Exception('The date range {0} must contain a start and end date'.format(date_range)) # depends on [control=['if'], data=[]]
start = date_range.start if date_range.start.tzinfo is not None else date_range.start.replace(tzinfo=mktz())
end = date_range.end if date_range.end.tzinfo is not None else date_range.end.replace(tzinfo=mktz())
query = {'$or': [{'start': {'$lte': start}, 'end': {'$gte': start}}, {'start': {'$gte': start}, 'end': {'$lte': end}}, {'start': {'$lte': end}, 'end': {'$gte': end}}]}
cursor = self._collection.find(query, projection={'library_name': 1, 'start': 1, 'end': 1}, sort=[('start', pymongo.ASCENDING)])
results = []
for res in cursor:
start = res['start']
if date_range.start.tzinfo is not None and start.tzinfo is None:
start = start.replace(tzinfo=mktz('UTC')).astimezone(tz=date_range.start.tzinfo) # depends on [control=['if'], data=[]]
end = res['end']
if date_range.end.tzinfo is not None and end.tzinfo is None:
end = end.replace(tzinfo=mktz('UTC')).astimezone(tz=date_range.end.tzinfo) # depends on [control=['if'], data=[]]
results.append(TickStoreLibrary(res['library_name'], DateRange(start, end, CLOSED_CLOSED))) # depends on [control=['for'], data=['res']]
return results
|
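The three `$or` clauses jointly implement the standard closed-interval overlap test; a plain-Python restatement for clarity (a sketch, not part of the module):

def overlaps(lib_start, lib_end, start, end):
    # A stored library matches when it straddles `start`, lies wholly
    # inside [start, end], or straddles `end`; together these reduce to
    # the usual interval-overlap condition below.
    return lib_start <= end and lib_end >= start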
def translate_array(array, lval, obj_count=1, arr_count=1):
"""array has to be any js array for example [1,2,3]
lval has to be name of this array.
Returns python code that adds lval to the PY scope it should be put before lval"""
array = array[1:-1]
array, obj_rep, obj_count = remove_objects(array, obj_count)
array, arr_rep, arr_count = remove_arrays(array, arr_count)
    #functions can also be defined in arrays; this caused many problems since in Python
    # functions can't be defined inside a literal
    # remove functions (they don't contain arrays or objects so can be translated easily)
# hoisted functions are treated like inline
array, hoisted, inline = functions.remove_functions(array, all_inline=True)
assert not hoisted
arr = []
# separate elements in array
for e in argsplit(array, ','):
# translate expressions in array PyJsLvalInline will not be translated!
e = exp_translator(e.replace('\n', ''))
arr.append(e if e else 'None')
arr = '%s = Js([%s])\n' % (lval, ','.join(arr))
#But we can have more code to add to define arrays/objects/functions defined inside this array
# translate nested objects:
# functions:
for nested_name, nested_info in inline.iteritems():
nested_block, nested_args = nested_info
new_def = FUNC_TRANSLATOR(nested_name, nested_block, nested_args)
arr = new_def + arr
for lval, obj in obj_rep.iteritems():
new_def, obj_count, arr_count = translate_object(
obj, lval, obj_count, arr_count)
# add object definition BEFORE array definition
arr = new_def + arr
for lval, obj in arr_rep.iteritems():
new_def, obj_count, arr_count = translate_array(
obj, lval, obj_count, arr_count)
# add object definition BEFORE array definition
arr = new_def + arr
return arr, obj_count, arr_count
|
def function[translate_array, parameter[array, lval, obj_count, arr_count]]:
constant[array has to be any JS array, for example [1,2,3].
lval has to be the name of this array.
Returns Python code that adds lval to the PY scope; it should be put before lval.]
variable[array] assign[=] call[name[array]][<ast.Slice object at 0x7da1b021ea70>]
<ast.Tuple object at 0x7da1b021c5e0> assign[=] call[name[remove_objects], parameter[name[array], name[obj_count]]]
<ast.Tuple object at 0x7da1b021c1c0> assign[=] call[name[remove_arrays], parameter[name[array], name[arr_count]]]
<ast.Tuple object at 0x7da1b021f610> assign[=] call[name[functions].remove_functions, parameter[name[array]]]
assert[<ast.UnaryOp object at 0x7da1b021e5f0>]
variable[arr] assign[=] list[[]]
for taget[name[e]] in starred[call[name[argsplit], parameter[name[array], constant[,]]]] begin[:]
variable[e] assign[=] call[name[exp_translator], parameter[call[name[e].replace, parameter[constant[
], constant[]]]]]
call[name[arr].append, parameter[<ast.IfExp object at 0x7da1b021c3a0>]]
variable[arr] assign[=] binary_operation[constant[%s = Js([%s])
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b021f4c0>, <ast.Call object at 0x7da1b021e7a0>]]]
for taget[tuple[[<ast.Name object at 0x7da1b021d150>, <ast.Name object at 0x7da1b021d5a0>]]] in starred[call[name[inline].iteritems, parameter[]]] begin[:]
<ast.Tuple object at 0x7da1b021cb20> assign[=] name[nested_info]
variable[new_def] assign[=] call[name[FUNC_TRANSLATOR], parameter[name[nested_name], name[nested_block], name[nested_args]]]
variable[arr] assign[=] binary_operation[name[new_def] + name[arr]]
for taget[tuple[[<ast.Name object at 0x7da1b021da20>, <ast.Name object at 0x7da1b021dab0>]]] in starred[call[name[obj_rep].iteritems, parameter[]]] begin[:]
<ast.Tuple object at 0x7da1b021d3c0> assign[=] call[name[translate_object], parameter[name[obj], name[lval], name[obj_count], name[arr_count]]]
variable[arr] assign[=] binary_operation[name[new_def] + name[arr]]
for taget[tuple[[<ast.Name object at 0x7da1b021c0d0>, <ast.Name object at 0x7da1b021f280>]]] in starred[call[name[arr_rep].iteritems, parameter[]]] begin[:]
<ast.Tuple object at 0x7da1b021c0a0> assign[=] call[name[translate_array], parameter[name[obj], name[lval], name[obj_count], name[arr_count]]]
variable[arr] assign[=] binary_operation[name[new_def] + name[arr]]
return[tuple[[<ast.Name object at 0x7da1b021fdf0>, <ast.Name object at 0x7da1b021d2a0>, <ast.Name object at 0x7da1b021c2e0>]]]
|
keyword[def] identifier[translate_array] ( identifier[array] , identifier[lval] , identifier[obj_count] = literal[int] , identifier[arr_count] = literal[int] ):
literal[string]
identifier[array] = identifier[array] [ literal[int] :- literal[int] ]
identifier[array] , identifier[obj_rep] , identifier[obj_count] = identifier[remove_objects] ( identifier[array] , identifier[obj_count] )
identifier[array] , identifier[arr_rep] , identifier[arr_count] = identifier[remove_arrays] ( identifier[array] , identifier[arr_count] )
identifier[array] , identifier[hoisted] , identifier[inline] = identifier[functions] . identifier[remove_functions] ( identifier[array] , identifier[all_inline] = keyword[True] )
keyword[assert] keyword[not] identifier[hoisted]
identifier[arr] =[]
keyword[for] identifier[e] keyword[in] identifier[argsplit] ( identifier[array] , literal[string] ):
identifier[e] = identifier[exp_translator] ( identifier[e] . identifier[replace] ( literal[string] , literal[string] ))
identifier[arr] . identifier[append] ( identifier[e] keyword[if] identifier[e] keyword[else] literal[string] )
identifier[arr] = literal[string] %( identifier[lval] , literal[string] . identifier[join] ( identifier[arr] ))
keyword[for] identifier[nested_name] , identifier[nested_info] keyword[in] identifier[inline] . identifier[iteritems] ():
identifier[nested_block] , identifier[nested_args] = identifier[nested_info]
identifier[new_def] = identifier[FUNC_TRANSLATOR] ( identifier[nested_name] , identifier[nested_block] , identifier[nested_args] )
identifier[arr] = identifier[new_def] + identifier[arr]
keyword[for] identifier[lval] , identifier[obj] keyword[in] identifier[obj_rep] . identifier[iteritems] ():
identifier[new_def] , identifier[obj_count] , identifier[arr_count] = identifier[translate_object] (
identifier[obj] , identifier[lval] , identifier[obj_count] , identifier[arr_count] )
identifier[arr] = identifier[new_def] + identifier[arr]
keyword[for] identifier[lval] , identifier[obj] keyword[in] identifier[arr_rep] . identifier[iteritems] ():
identifier[new_def] , identifier[obj_count] , identifier[arr_count] = identifier[translate_array] (
identifier[obj] , identifier[lval] , identifier[obj_count] , identifier[arr_count] )
identifier[arr] = identifier[new_def] + identifier[arr]
keyword[return] identifier[arr] , identifier[obj_count] , identifier[arr_count]
|
def translate_array(array, lval, obj_count=1, arr_count=1):
"""array has to be any js array for example [1,2,3]
lval has to be name of this array.
Returns python code that adds lval to the PY scope it should be put before lval"""
array = array[1:-1]
(array, obj_rep, obj_count) = remove_objects(array, obj_count)
(array, arr_rep, arr_count) = remove_arrays(array, arr_count)
    #functions can also be defined in arrays; this caused many problems since in Python
    # functions can't be defined inside a literal
    # remove functions (they don't contain arrays or objects so can be translated easily)
# hoisted functions are treated like inline
(array, hoisted, inline) = functions.remove_functions(array, all_inline=True)
assert not hoisted
arr = []
# separate elements in array
for e in argsplit(array, ','):
# translate expressions in array PyJsLvalInline will not be translated!
e = exp_translator(e.replace('\n', ''))
arr.append(e if e else 'None') # depends on [control=['for'], data=['e']]
arr = '%s = Js([%s])\n' % (lval, ','.join(arr))
#But we can have more code to add to define arrays/objects/functions defined inside this array
# translate nested objects:
# functions:
for (nested_name, nested_info) in inline.iteritems():
(nested_block, nested_args) = nested_info
new_def = FUNC_TRANSLATOR(nested_name, nested_block, nested_args)
arr = new_def + arr # depends on [control=['for'], data=[]]
for (lval, obj) in obj_rep.iteritems():
(new_def, obj_count, arr_count) = translate_object(obj, lval, obj_count, arr_count)
# add object definition BEFORE array definition
arr = new_def + arr # depends on [control=['for'], data=[]]
for (lval, obj) in arr_rep.iteritems():
(new_def, obj_count, arr_count) = translate_array(obj, lval, obj_count, arr_count)
# add object definition BEFORE array definition
arr = new_def + arr # depends on [control=['for'], data=[]]
return (arr, obj_count, arr_count)
|
def create_Dim(self, name,value):
'''
        Adds a dimension to the class.
:parameter name: dimension name
:parameter value: dimension value
'''
if not self._dimensions.has_key(name) :
self.message(3, 'Create dimension {0}:{1}'.format(name,value))
self._dimensions[name]=value
self._dimensions['_ndims']=len(self._dimensions) - 1
else :
self.message(3, 'Dimension {0} already exists'.format(name))
|
def function[create_Dim, parameter[self, name, value]]:
constant[
    Adds a dimension to the class.
:parameter name: dimension name
:parameter value: dimension value
]
if <ast.UnaryOp object at 0x7da1b0911270> begin[:]
call[name[self].message, parameter[constant[3], call[constant[Create dimension {0}:{1}].format, parameter[name[name], name[value]]]]]
call[name[self]._dimensions][name[name]] assign[=] name[value]
call[name[self]._dimensions][constant[_ndims]] assign[=] binary_operation[call[name[len], parameter[name[self]._dimensions]] - constant[1]]
|
keyword[def] identifier[create_Dim] ( identifier[self] , identifier[name] , identifier[value] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[_dimensions] . identifier[has_key] ( identifier[name] ):
identifier[self] . identifier[message] ( literal[int] , literal[string] . identifier[format] ( identifier[name] , identifier[value] ))
identifier[self] . identifier[_dimensions] [ identifier[name] ]= identifier[value]
identifier[self] . identifier[_dimensions] [ literal[string] ]= identifier[len] ( identifier[self] . identifier[_dimensions] )- literal[int]
keyword[else] :
identifier[self] . identifier[message] ( literal[int] , literal[string] . identifier[format] ( identifier[name] ))
|
def create_Dim(self, name, value):
"""
    Adds a dimension to the class.
:parameter name: dimension name
:parameter value: dimension value
"""
if not self._dimensions.has_key(name):
self.message(3, 'Create dimension {0}:{1}'.format(name, value))
self._dimensions[name] = value
self._dimensions['_ndims'] = len(self._dimensions) - 1 # depends on [control=['if'], data=[]]
else:
self.message(3, 'Dimension {0} already exists'.format(name))
|
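`dict.has_key` pins this method to Python 2. A minimal Python 2 host sketch (the class is hypothetical, stubbing only the `_dimensions` and `message` members the method touches):

class Grid(object):
    def __init__(self):
        self._dimensions = {'_ndims': 0}

    def message(self, level, text):
        print(text)

    create_Dim = create_Dim  # reuse the function above as a method

g = Grid()
g.create_Dim('time', 24)  # -> Create dimension time:24
g.create_Dim('time', 48)  # -> Dimension time already exists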
def _patched_pep257():
"""Monkey-patch pep257 after imports to avoid info logging."""
import pep257
if getattr(pep257, "log", None):
def _dummy(*args, **kwargs):
del args
del kwargs
old_log_info = pep257.log.info
pep257.log.info = _dummy # suppress(unused-attribute)
try:
yield
finally:
if getattr(pep257, "log", None):
pep257.log.info = old_log_info
|
def function[_patched_pep257, parameter[]]:
constant[Monkey-patch pep257 after imports to avoid info logging.]
import module[pep257]
if call[name[getattr], parameter[name[pep257], constant[log], constant[None]]] begin[:]
def function[_dummy, parameter[]]:
<ast.Delete object at 0x7da18f00e950>
<ast.Delete object at 0x7da18f00de10>
variable[old_log_info] assign[=] name[pep257].log.info
name[pep257].log.info assign[=] name[_dummy]
<ast.Try object at 0x7da18f00c7c0>
|
keyword[def] identifier[_patched_pep257] ():
literal[string]
keyword[import] identifier[pep257]
keyword[if] identifier[getattr] ( identifier[pep257] , literal[string] , keyword[None] ):
keyword[def] identifier[_dummy] (* identifier[args] ,** identifier[kwargs] ):
keyword[del] identifier[args]
keyword[del] identifier[kwargs]
identifier[old_log_info] = identifier[pep257] . identifier[log] . identifier[info]
identifier[pep257] . identifier[log] . identifier[info] = identifier[_dummy]
keyword[try] :
keyword[yield]
keyword[finally] :
keyword[if] identifier[getattr] ( identifier[pep257] , literal[string] , keyword[None] ):
identifier[pep257] . identifier[log] . identifier[info] = identifier[old_log_info]
|
def _patched_pep257():
"""Monkey-patch pep257 after imports to avoid info logging."""
import pep257
if getattr(pep257, 'log', None):
def _dummy(*args, **kwargs):
del args
del kwargs
old_log_info = pep257.log.info
pep257.log.info = _dummy # suppress(unused-attribute) # depends on [control=['if'], data=[]]
try:
yield # depends on [control=['try'], data=[]]
finally:
if getattr(pep257, 'log', None):
pep257.log.info = old_log_info # depends on [control=['if'], data=[]]
|
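The bare `yield` makes this a generator meant to be consumed as a context manager; presumably the surrounding module wraps it with `contextlib.contextmanager`. A hedged sketch under that assumption (requires the pep257 package to be installed):

import contextlib

with contextlib.contextmanager(_patched_pep257)():
    pass  # run pep257 checks here without info-level log noise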
def color(x, y):
"""triangles.
Colors:
- http://paletton.com/#uid=70l150klllletuehUpNoMgTsdcs shade 2
"""
if (x-4) > (y-4) and -(y-4) <= (x-4):
# right
return "#CDB95B"
elif (x-4) > (y-4) and -(y-4) > (x-4):
# top
return "#CD845B"
elif (x-4) <= (y-4) and -(y-4) <= (x-4):
# bottom
return "#57488E"
elif (x-4) <= (y-4) and -(y-4) > (x-4):
# left
return "#3B8772"
# should not happen
return "black"
|
def function[color, parameter[x, y]]:
constant[Pick the color of the triangle around (4, 4) that contains (x, y).
Colors:
- http://paletton.com/#uid=70l150klllletuehUpNoMgTsdcs shade 2
]
if <ast.BoolOp object at 0x7da18f812da0> begin[:]
return[constant[#CDB95B]]
return[constant[black]]
|
keyword[def] identifier[color] ( identifier[x] , identifier[y] ):
literal[string]
keyword[if] ( identifier[x] - literal[int] )>( identifier[y] - literal[int] ) keyword[and] -( identifier[y] - literal[int] )<=( identifier[x] - literal[int] ):
keyword[return] literal[string]
keyword[elif] ( identifier[x] - literal[int] )>( identifier[y] - literal[int] ) keyword[and] -( identifier[y] - literal[int] )>( identifier[x] - literal[int] ):
keyword[return] literal[string]
keyword[elif] ( identifier[x] - literal[int] )<=( identifier[y] - literal[int] ) keyword[and] -( identifier[y] - literal[int] )<=( identifier[x] - literal[int] ):
keyword[return] literal[string]
keyword[elif] ( identifier[x] - literal[int] )<=( identifier[y] - literal[int] ) keyword[and] -( identifier[y] - literal[int] )>( identifier[x] - literal[int] ):
keyword[return] literal[string]
keyword[return] literal[string]
|
def color(x, y):
"""triangles.
Colors:
- http://paletton.com/#uid=70l150klllletuehUpNoMgTsdcs shade 2
"""
if x - 4 > y - 4 and -(y - 4) <= x - 4:
# right
return '#CDB95B' # depends on [control=['if'], data=[]]
elif x - 4 > y - 4 and -(y - 4) > x - 4:
# top
return '#CD845B' # depends on [control=['if'], data=[]]
elif x - 4 <= y - 4 and -(y - 4) <= x - 4:
# bottom
return '#57488E' # depends on [control=['if'], data=[]]
elif x - 4 <= y - 4 and -(y - 4) > x - 4:
# left
return '#3B8772' # depends on [control=['if'], data=[]]
# should not happen
return 'black'
|
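The diagonals through the center (4, 4) split the plane into four triangles; a quick self-check with one point per region (y grows downward here, so 'top' means y < 4):

assert color(8, 4) == "#CDB95B"  # right
assert color(4, 0) == "#CD845B"  # top
assert color(4, 8) == "#57488E"  # bottom
assert color(0, 4) == "#3B8772"  # left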
def ReadConflicts(self, collection_link, feed_options=None):
"""Reads conflicts.
:param str collection_link:
The link to the document collection.
:param dict feed_options:
:return:
Query Iterable of Conflicts.
:rtype:
query_iterable.QueryIterable
"""
if feed_options is None:
feed_options = {}
return self.QueryConflicts(collection_link, None, feed_options)
|
def function[ReadConflicts, parameter[self, collection_link, feed_options]]:
constant[Reads conflicts.
:param str collection_link:
The link to the document collection.
:param dict feed_options:
:return:
Query Iterable of Conflicts.
:rtype:
query_iterable.QueryIterable
]
if compare[name[feed_options] is constant[None]] begin[:]
variable[feed_options] assign[=] dictionary[[], []]
return[call[name[self].QueryConflicts, parameter[name[collection_link], constant[None], name[feed_options]]]]
|
keyword[def] identifier[ReadConflicts] ( identifier[self] , identifier[collection_link] , identifier[feed_options] = keyword[None] ):
literal[string]
keyword[if] identifier[feed_options] keyword[is] keyword[None] :
identifier[feed_options] ={}
keyword[return] identifier[self] . identifier[QueryConflicts] ( identifier[collection_link] , keyword[None] , identifier[feed_options] )
|
def ReadConflicts(self, collection_link, feed_options=None):
"""Reads conflicts.
:param str collection_link:
The link to the document collection.
:param dict feed_options:
:return:
Query Iterable of Conflicts.
:rtype:
query_iterable.QueryIterable
"""
if feed_options is None:
feed_options = {} # depends on [control=['if'], data=['feed_options']]
return self.QueryConflicts(collection_link, None, feed_options)
|
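A hedged usage sketch (Azure DocumentDB client; the collection link follows the service's `dbs/<db>/colls/<coll>` convention):

for conflict in client.ReadConflicts('dbs/mydb/colls/mycoll'):
    print(conflict['id'])  # each conflict is a dict-like resource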
def dispatch(self, receiver):
''' Dispatch handling of this event to a receiver.
This method will invoke ``receiver._session_callback_added`` if
it exists.
'''
super(SessionCallbackAdded, self).dispatch(receiver)
if hasattr(receiver, '_session_callback_added'):
receiver._session_callback_added(self)
|
def function[dispatch, parameter[self, receiver]]:
constant[ Dispatch handling of this event to a receiver.
This method will invoke ``receiver._session_callback_added`` if
it exists.
]
call[call[name[super], parameter[name[SessionCallbackAdded], name[self]]].dispatch, parameter[name[receiver]]]
if call[name[hasattr], parameter[name[receiver], constant[_session_callback_added]]] begin[:]
call[name[receiver]._session_callback_added, parameter[name[self]]]
|
keyword[def] identifier[dispatch] ( identifier[self] , identifier[receiver] ):
literal[string]
identifier[super] ( identifier[SessionCallbackAdded] , identifier[self] ). identifier[dispatch] ( identifier[receiver] )
keyword[if] identifier[hasattr] ( identifier[receiver] , literal[string] ):
identifier[receiver] . identifier[_session_callback_added] ( identifier[self] )
|
def dispatch(self, receiver):
""" Dispatch handling of this event to a receiver.
This method will invoke ``receiver._session_callback_added`` if
it exists.
"""
super(SessionCallbackAdded, self).dispatch(receiver)
if hasattr(receiver, '_session_callback_added'):
receiver._session_callback_added(self) # depends on [control=['if'], data=[]]
|
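A minimal sketch of the receiver protocol this targets (the receiver class is hypothetical and the event constructor arguments are assumed): any object exposing `_session_callback_added` is notified; anything else is skipped by the hasattr guard.

class Receiver(object):
    def _session_callback_added(self, event):
        print('session callback added:', event)

event = SessionCallbackAdded(document, callback)  # assumed constructor args
event.dispatch(Receiver())  # invokes Receiver._session_callback_added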
def __write_filter_tmpl(html_tpl):
'''
    Write a filtered list template for each 'dic_' entry in SWITCH_DICS.
'''
out_dir = os.path.join(os.getcwd(), CRUD_PATH, 'list')
if os.path.exists(out_dir):
pass
else:
os.mkdir(out_dir)
# for var_name in VAR_NAMES:
for var_name, bl_val in SWITCH_DICS.items():
if var_name.startswith('dic_'):
            # Simplified here; subclass cases are not handled.
subdir = ''
outfile = os.path.join(out_dir, 'list' + '_' + var_name.split('_')[1] + '.html')
html_view_str_arr = []
# tview_var = eval('dic_vars.' + var_name)
for the_val in bl_val:
# sig = eval('html_vars.html_' + x)
sig = HTML_DICS['html_' + the_val]
if sig['type'] == 'select':
html_view_str_arr.append(__gen_select_filter('html_' + the_val))
with open(outfile, 'w') as outfileo:
outstr = minify(
html_tpl.replace(
'xxxxxx',
''.join(html_view_str_arr)
).replace(
'yyyyyy',
var_name.split('_')[1][:2]
).replace(
'ssssss',
subdir
).replace(
'kkkk',
KIND_DICS['kind_' + var_name.split('_')[-1]]
)
)
outfileo.write(outstr)
|
def function[__write_filter_tmpl, parameter[html_tpl]]:
constant[
    Write a filtered list template for each 'dic_' entry in SWITCH_DICS.
]
variable[out_dir] assign[=] call[name[os].path.join, parameter[call[name[os].getcwd, parameter[]], name[CRUD_PATH], constant[list]]]
if call[name[os].path.exists, parameter[name[out_dir]]] begin[:]
pass
for taget[tuple[[<ast.Name object at 0x7da1b0416200>, <ast.Name object at 0x7da1b04179d0>]]] in starred[call[name[SWITCH_DICS].items, parameter[]]] begin[:]
if call[name[var_name].startswith, parameter[constant[dic_]]] begin[:]
variable[subdir] assign[=] constant[]
variable[outfile] assign[=] call[name[os].path.join, parameter[name[out_dir], binary_operation[binary_operation[binary_operation[constant[list] + constant[_]] + call[call[name[var_name].split, parameter[constant[_]]]][constant[1]]] + constant[.html]]]]
variable[html_view_str_arr] assign[=] list[[]]
for taget[name[the_val]] in starred[name[bl_val]] begin[:]
variable[sig] assign[=] call[name[HTML_DICS]][binary_operation[constant[html_] + name[the_val]]]
if compare[call[name[sig]][constant[type]] equal[==] constant[select]] begin[:]
call[name[html_view_str_arr].append, parameter[call[name[__gen_select_filter], parameter[binary_operation[constant[html_] + name[the_val]]]]]]
with call[name[open], parameter[name[outfile], constant[w]]] begin[:]
variable[outstr] assign[=] call[name[minify], parameter[call[call[call[call[name[html_tpl].replace, parameter[constant[xxxxxx], call[constant[].join, parameter[name[html_view_str_arr]]]]].replace, parameter[constant[yyyyyy], call[call[call[name[var_name].split, parameter[constant[_]]]][constant[1]]][<ast.Slice object at 0x7da1b040d5a0>]]].replace, parameter[constant[ssssss], name[subdir]]].replace, parameter[constant[kkkk], call[name[KIND_DICS]][binary_operation[constant[kind_] + call[call[name[var_name].split, parameter[constant[_]]]][<ast.UnaryOp object at 0x7da1b040d0c0>]]]]]]]
call[name[outfileo].write, parameter[name[outstr]]]
|
keyword[def] identifier[__write_filter_tmpl] ( identifier[html_tpl] ):
literal[string]
identifier[out_dir] = identifier[os] . identifier[path] . identifier[join] ( identifier[os] . identifier[getcwd] (), identifier[CRUD_PATH] , literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[exists] ( identifier[out_dir] ):
keyword[pass]
keyword[else] :
identifier[os] . identifier[mkdir] ( identifier[out_dir] )
keyword[for] identifier[var_name] , identifier[bl_val] keyword[in] identifier[SWITCH_DICS] . identifier[items] ():
keyword[if] identifier[var_name] . identifier[startswith] ( literal[string] ):
identifier[subdir] = literal[string]
identifier[outfile] = identifier[os] . identifier[path] . identifier[join] ( identifier[out_dir] , literal[string] + literal[string] + identifier[var_name] . identifier[split] ( literal[string] )[ literal[int] ]+ literal[string] )
identifier[html_view_str_arr] =[]
keyword[for] identifier[the_val] keyword[in] identifier[bl_val] :
identifier[sig] = identifier[HTML_DICS] [ literal[string] + identifier[the_val] ]
keyword[if] identifier[sig] [ literal[string] ]== literal[string] :
identifier[html_view_str_arr] . identifier[append] ( identifier[__gen_select_filter] ( literal[string] + identifier[the_val] ))
keyword[with] identifier[open] ( identifier[outfile] , literal[string] ) keyword[as] identifier[outfileo] :
identifier[outstr] = identifier[minify] (
identifier[html_tpl] . identifier[replace] (
literal[string] ,
literal[string] . identifier[join] ( identifier[html_view_str_arr] )
). identifier[replace] (
literal[string] ,
identifier[var_name] . identifier[split] ( literal[string] )[ literal[int] ][: literal[int] ]
). identifier[replace] (
literal[string] ,
identifier[subdir]
). identifier[replace] (
literal[string] ,
identifier[KIND_DICS] [ literal[string] + identifier[var_name] . identifier[split] ( literal[string] )[- literal[int] ]]
)
)
identifier[outfileo] . identifier[write] ( identifier[outstr] )
|
def __write_filter_tmpl(html_tpl):
"""
    Write a filtered list template for each 'dic_' entry in SWITCH_DICS.
"""
out_dir = os.path.join(os.getcwd(), CRUD_PATH, 'list')
if os.path.exists(out_dir):
pass # depends on [control=['if'], data=[]]
else:
os.mkdir(out_dir)
# for var_name in VAR_NAMES:
for (var_name, bl_val) in SWITCH_DICS.items():
if var_name.startswith('dic_'):
            # Simplified here; subclass cases are not handled.
subdir = ''
outfile = os.path.join(out_dir, 'list' + '_' + var_name.split('_')[1] + '.html')
html_view_str_arr = []
# tview_var = eval('dic_vars.' + var_name)
for the_val in bl_val:
# sig = eval('html_vars.html_' + x)
sig = HTML_DICS['html_' + the_val]
if sig['type'] == 'select':
html_view_str_arr.append(__gen_select_filter('html_' + the_val)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['the_val']]
with open(outfile, 'w') as outfileo:
outstr = minify(html_tpl.replace('xxxxxx', ''.join(html_view_str_arr)).replace('yyyyyy', var_name.split('_')[1][:2]).replace('ssssss', subdir).replace('kkkk', KIND_DICS['kind_' + var_name.split('_')[-1]]))
outfileo.write(outstr) # depends on [control=['with'], data=['outfileo']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
|
def save_admin_log(build, **kwargs):
"""Saves an action to the admin log."""
message = kwargs.pop('message', None)
release = kwargs.pop('release', None)
run = kwargs.pop('run', None)
if not len(kwargs) == 1:
raise TypeError('Must specify a LOG_TYPE argument')
log_enum = kwargs.keys()[0]
log_type = getattr(models.AdminLog, log_enum.upper(), None)
if not log_type:
raise TypeError('Bad log_type argument: %s' % log_enum)
if current_user.is_anonymous():
user_id = None
else:
user_id = current_user.get_id()
log = models.AdminLog(
build_id=build.id,
log_type=log_type,
message=message,
user_id=user_id)
if release:
log.release_id = release.id
if run:
log.run_id = run.id
log.release_id = run.release_id
db.session.add(log)
|
def function[save_admin_log, parameter[build]]:
constant[Saves an action to the admin log.]
variable[message] assign[=] call[name[kwargs].pop, parameter[constant[message], constant[None]]]
variable[release] assign[=] call[name[kwargs].pop, parameter[constant[release], constant[None]]]
variable[run] assign[=] call[name[kwargs].pop, parameter[constant[run], constant[None]]]
if <ast.UnaryOp object at 0x7da18bc735b0> begin[:]
<ast.Raise object at 0x7da18bc73d90>
variable[log_enum] assign[=] call[call[name[kwargs].keys, parameter[]]][constant[0]]
variable[log_type] assign[=] call[name[getattr], parameter[name[models].AdminLog, call[name[log_enum].upper, parameter[]], constant[None]]]
if <ast.UnaryOp object at 0x7da18bc733a0> begin[:]
<ast.Raise object at 0x7da18bc71d80>
if call[name[current_user].is_anonymous, parameter[]] begin[:]
variable[user_id] assign[=] constant[None]
variable[log] assign[=] call[name[models].AdminLog, parameter[]]
if name[release] begin[:]
name[log].release_id assign[=] name[release].id
if name[run] begin[:]
name[log].run_id assign[=] name[run].id
name[log].release_id assign[=] name[run].release_id
call[name[db].session.add, parameter[name[log]]]
|
keyword[def] identifier[save_admin_log] ( identifier[build] ,** identifier[kwargs] ):
literal[string]
identifier[message] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
identifier[release] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
identifier[run] = identifier[kwargs] . identifier[pop] ( literal[string] , keyword[None] )
keyword[if] keyword[not] identifier[len] ( identifier[kwargs] )== literal[int] :
keyword[raise] identifier[TypeError] ( literal[string] )
identifier[log_enum] = identifier[kwargs] . identifier[keys] ()[ literal[int] ]
identifier[log_type] = identifier[getattr] ( identifier[models] . identifier[AdminLog] , identifier[log_enum] . identifier[upper] (), keyword[None] )
keyword[if] keyword[not] identifier[log_type] :
keyword[raise] identifier[TypeError] ( literal[string] % identifier[log_enum] )
keyword[if] identifier[current_user] . identifier[is_anonymous] ():
identifier[user_id] = keyword[None]
keyword[else] :
identifier[user_id] = identifier[current_user] . identifier[get_id] ()
identifier[log] = identifier[models] . identifier[AdminLog] (
identifier[build_id] = identifier[build] . identifier[id] ,
identifier[log_type] = identifier[log_type] ,
identifier[message] = identifier[message] ,
identifier[user_id] = identifier[user_id] )
keyword[if] identifier[release] :
identifier[log] . identifier[release_id] = identifier[release] . identifier[id]
keyword[if] identifier[run] :
identifier[log] . identifier[run_id] = identifier[run] . identifier[id]
identifier[log] . identifier[release_id] = identifier[run] . identifier[release_id]
identifier[db] . identifier[session] . identifier[add] ( identifier[log] )
|
def save_admin_log(build, **kwargs):
"""Saves an action to the admin log."""
message = kwargs.pop('message', None)
release = kwargs.pop('release', None)
run = kwargs.pop('run', None)
if not len(kwargs) == 1:
raise TypeError('Must specify a LOG_TYPE argument') # depends on [control=['if'], data=[]]
log_enum = kwargs.keys()[0]
log_type = getattr(models.AdminLog, log_enum.upper(), None)
if not log_type:
raise TypeError('Bad log_type argument: %s' % log_enum) # depends on [control=['if'], data=[]]
if current_user.is_anonymous():
user_id = None # depends on [control=['if'], data=[]]
else:
user_id = current_user.get_id()
log = models.AdminLog(build_id=build.id, log_type=log_type, message=message, user_id=user_id)
if release:
log.release_id = release.id # depends on [control=['if'], data=[]]
if run:
log.run_id = run.id
log.release_id = run.release_id # depends on [control=['if'], data=[]]
db.session.add(log)
|
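The single leftover keyword selects the log type: its upper-cased name must exist as an attribute on `models.AdminLog` (the names below are assumptions), and `kwargs.keys()[0]` implies Python 2. Hedged call-site sketches:

# Exactly one LOG_TYPE kwarg per call, after message/release/run are popped:
save_admin_log(build, created=True, message='Build created')
save_admin_log(build, run_approved=True, run=run)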
def get_user_stats_for_game(self, steamID, appID, format=None):
"""Request the user stats for a given game.
    steamID: The user's ID
appID: The app id
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid' : steamID, 'appid' : appID}
if format is not None:
parameters['format'] = format
url = self.create_request_url(self.interface, 'GetUserStatsForGame', 2,
parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
|
def function[get_user_stats_for_game, parameter[self, steamID, appID, format]]:
constant[Request the user stats for a given game.
steamID: The user's ID
appID: The app id
format: Return format. None defaults to json. (json, xml, vdf)
]
variable[parameters] assign[=] dictionary[[<ast.Constant object at 0x7da18c4ce9b0>, <ast.Constant object at 0x7da18c4ce5c0>], [<ast.Name object at 0x7da18c4cd690>, <ast.Name object at 0x7da18c4ccd90>]]
if compare[name[format] is_not constant[None]] begin[:]
call[name[parameters]][constant[format]] assign[=] name[format]
variable[url] assign[=] call[name[self].create_request_url, parameter[name[self].interface, constant[GetUserStatsForGame], constant[2], name[parameters]]]
variable[data] assign[=] call[name[self].retrieve_request, parameter[name[url]]]
return[call[name[self].return_data, parameter[name[data]]]]
|
keyword[def] identifier[get_user_stats_for_game] ( identifier[self] , identifier[steamID] , identifier[appID] , identifier[format] = keyword[None] ):
literal[string]
identifier[parameters] ={ literal[string] : identifier[steamID] , literal[string] : identifier[appID] }
keyword[if] identifier[format] keyword[is] keyword[not] keyword[None] :
identifier[parameters] [ literal[string] ]= identifier[format]
identifier[url] = identifier[self] . identifier[create_request_url] ( identifier[self] . identifier[interface] , literal[string] , literal[int] ,
identifier[parameters] )
identifier[data] = identifier[self] . identifier[retrieve_request] ( identifier[url] )
keyword[return] identifier[self] . identifier[return_data] ( identifier[data] , identifier[format] = identifier[format] )
|
def get_user_stats_for_game(self, steamID, appID, format=None):
"""Request the user stats for a given game.
    steamID: The user's ID
appID: The app id
format: Return format. None defaults to json. (json, xml, vdf)
"""
parameters = {'steamid': steamID, 'appid': appID}
if format is not None:
parameters['format'] = format # depends on [control=['if'], data=['format']]
url = self.create_request_url(self.interface, 'GetUserStatsForGame', 2, parameters)
data = self.retrieve_request(url)
return self.return_data(data, format=format)
|
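A hedged usage sketch (assumes a configured API wrapper instance with a Steam Web API key; 440 is Team Fortress 2's app id):

stats = api.get_user_stats_for_game('76561197960435530', 440)
stats_xml = api.get_user_stats_for_game('76561197960435530', 440, format='xml')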
def format_timedelta(td_object):
"""Format a timedelta object for display to users
Returns
-------
str
"""
def get_total_seconds(td):
# timedelta.total_seconds not in py2.6
return (td.microseconds +
(td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
seconds = int(get_total_seconds(td_object))
periods = [('year', 60*60*24*365),
('month', 60*60*24*30),
('day', 60*60*24),
('hour', 60*60),
('minute', 60),
('second', 1)]
strings = []
for period_name, period_seconds in periods:
if seconds > period_seconds:
period_value, seconds = divmod(seconds, period_seconds)
if period_value == 1:
strings.append("%s %s" % (period_value, period_name))
else:
strings.append("%s %ss" % (period_value, period_name))
return ", ".join(strings)
|
def function[format_timedelta, parameter[td_object]]:
constant[Format a timedelta object for display to users
Returns
-------
str
]
def function[get_total_seconds, parameter[td]]:
return[binary_operation[binary_operation[name[td].microseconds + binary_operation[binary_operation[name[td].seconds + binary_operation[binary_operation[name[td].days * constant[24]] * constant[3600]]] * constant[1000000.0]]] / constant[1000000.0]]]
variable[seconds] assign[=] call[name[int], parameter[call[name[get_total_seconds], parameter[name[td_object]]]]]
variable[periods] assign[=] list[[<ast.Tuple object at 0x7da1aff4f8b0>, <ast.Tuple object at 0x7da1aff4f9d0>, <ast.Tuple object at 0x7da1aff4c430>, <ast.Tuple object at 0x7da1aff4dd50>, <ast.Tuple object at 0x7da1aff4e980>, <ast.Tuple object at 0x7da1aff4eef0>]]
variable[strings] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da1aff4d8a0>, <ast.Name object at 0x7da1aff4c820>]]] in starred[name[periods]] begin[:]
if compare[name[seconds] greater[>] name[period_seconds]] begin[:]
<ast.Tuple object at 0x7da1aff4ed10> assign[=] call[name[divmod], parameter[name[seconds], name[period_seconds]]]
if compare[name[period_value] equal[==] constant[1]] begin[:]
call[name[strings].append, parameter[binary_operation[constant[%s %s] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1aff4de10>, <ast.Name object at 0x7da1aff4f130>]]]]]
return[call[constant[, ].join, parameter[name[strings]]]]
|
keyword[def] identifier[format_timedelta] ( identifier[td_object] ):
literal[string]
keyword[def] identifier[get_total_seconds] ( identifier[td] ):
keyword[return] ( identifier[td] . identifier[microseconds] +
( identifier[td] . identifier[seconds] + identifier[td] . identifier[days] * literal[int] * literal[int] )* literal[int] )/ literal[int]
identifier[seconds] = identifier[int] ( identifier[get_total_seconds] ( identifier[td_object] ))
identifier[periods] =[( literal[string] , literal[int] * literal[int] * literal[int] * literal[int] ),
( literal[string] , literal[int] * literal[int] * literal[int] * literal[int] ),
( literal[string] , literal[int] * literal[int] * literal[int] ),
( literal[string] , literal[int] * literal[int] ),
( literal[string] , literal[int] ),
( literal[string] , literal[int] )]
identifier[strings] =[]
keyword[for] identifier[period_name] , identifier[period_seconds] keyword[in] identifier[periods] :
keyword[if] identifier[seconds] > identifier[period_seconds] :
identifier[period_value] , identifier[seconds] = identifier[divmod] ( identifier[seconds] , identifier[period_seconds] )
keyword[if] identifier[period_value] == literal[int] :
identifier[strings] . identifier[append] ( literal[string] %( identifier[period_value] , identifier[period_name] ))
keyword[else] :
identifier[strings] . identifier[append] ( literal[string] %( identifier[period_value] , identifier[period_name] ))
keyword[return] literal[string] . identifier[join] ( identifier[strings] )
|
def format_timedelta(td_object):
"""Format a timedelta object for display to users
Returns
-------
str
"""
def get_total_seconds(td):
# timedelta.total_seconds not in py2.6
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1000000.0) / 1000000.0
seconds = int(get_total_seconds(td_object))
periods = [('year', 60 * 60 * 24 * 365), ('month', 60 * 60 * 24 * 30), ('day', 60 * 60 * 24), ('hour', 60 * 60), ('minute', 60), ('second', 1)]
strings = []
for (period_name, period_seconds) in periods:
if seconds > period_seconds:
(period_value, seconds) = divmod(seconds, period_seconds)
if period_value == 1:
strings.append('%s %s' % (period_value, period_name)) # depends on [control=['if'], data=['period_value']]
else:
strings.append('%s %ss' % (period_value, period_name)) # depends on [control=['if'], data=['seconds', 'period_seconds']] # depends on [control=['for'], data=[]]
return ', '.join(strings)
|
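An example of the output, plus a boundary quirk of the code above: a period is emitted only when `seconds` strictly exceeds its length, so exactly one minute renders as '60 seconds':

from datetime import timedelta

print(format_timedelta(timedelta(days=2, hours=5, minutes=3)))
# -> 2 days, 5 hours, 3 minutes
print(format_timedelta(timedelta(minutes=1)))
# -> 60 seconds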
def check_cluster(
cluster_config,
data_path,
java_home,
check_replicas,
batch_size,
minutes,
start_time,
end_time,
):
"""Check the integrity of the Kafka log files in a cluster.
start_time and end_time should be in the format specified
by TIME_FORMAT_REGEX.
:param data_path: the path to the log folder on the broker
:type data_path: str
:param java_home: the JAVA_HOME of the broker
:type java_home: str
:param check_replicas: also checks the replica files
:type check_replicas: bool
:param batch_size: the size of the batch
:type batch_size: int
:param minutes: check the files modified in the last N minutes
:type minutes: int
:param start_time: check the files modified after start_time
:type start_time: str
:param end_time: check the files modified before end_time
:type end_time: str
"""
brokers = get_broker_list(cluster_config)
broker_files = find_files(data_path, brokers, minutes, start_time, end_time)
if not check_replicas: # remove replicas
broker_files = filter_leader_files(cluster_config, broker_files)
processes = []
print("Starting {n} parallel processes".format(n=len(broker_files)))
try:
for broker, host, files in broker_files:
print(
" Broker: {host}, {n} files to check".format(
host=host,
n=len(files)),
)
p = Process(
name="dump_process_" + host,
target=check_files_on_host,
args=(java_home, host, files, batch_size),
)
p.start()
processes.append(p)
print("Processes running:")
for process in processes:
process.join()
except KeyboardInterrupt:
print("Terminating all processes")
for process in processes:
process.terminate()
process.join()
print("All processes terminated")
sys.exit(1)
|
def function[check_cluster, parameter[cluster_config, data_path, java_home, check_replicas, batch_size, minutes, start_time, end_time]]:
constant[Check the integrity of the Kafka log files in a cluster.
start_time and end_time should be in the format specified
by TIME_FORMAT_REGEX.
:param data_path: the path to the log folder on the broker
:type data_path: str
:param java_home: the JAVA_HOME of the broker
:type java_home: str
:param check_replicas: also checks the replica files
:type check_replicas: bool
:param batch_size: the size of the batch
:type batch_size: int
:param minutes: check the files modified in the last N minutes
:type minutes: int
:param start_time: check the files modified after start_time
:type start_time: str
:param end_time: check the files modified before end_time
:type end_time: str
]
variable[brokers] assign[=] call[name[get_broker_list], parameter[name[cluster_config]]]
variable[broker_files] assign[=] call[name[find_files], parameter[name[data_path], name[brokers], name[minutes], name[start_time], name[end_time]]]
if <ast.UnaryOp object at 0x7da1b07ad390> begin[:]
variable[broker_files] assign[=] call[name[filter_leader_files], parameter[name[cluster_config], name[broker_files]]]
variable[processes] assign[=] list[[]]
call[name[print], parameter[call[constant[Starting {n} parallel processes].format, parameter[]]]]
<ast.Try object at 0x7da1b07acac0>
|
keyword[def] identifier[check_cluster] (
identifier[cluster_config] ,
identifier[data_path] ,
identifier[java_home] ,
identifier[check_replicas] ,
identifier[batch_size] ,
identifier[minutes] ,
identifier[start_time] ,
identifier[end_time] ,
):
literal[string]
identifier[brokers] = identifier[get_broker_list] ( identifier[cluster_config] )
identifier[broker_files] = identifier[find_files] ( identifier[data_path] , identifier[brokers] , identifier[minutes] , identifier[start_time] , identifier[end_time] )
keyword[if] keyword[not] identifier[check_replicas] :
identifier[broker_files] = identifier[filter_leader_files] ( identifier[cluster_config] , identifier[broker_files] )
identifier[processes] =[]
identifier[print] ( literal[string] . identifier[format] ( identifier[n] = identifier[len] ( identifier[broker_files] )))
keyword[try] :
keyword[for] identifier[broker] , identifier[host] , identifier[files] keyword[in] identifier[broker_files] :
identifier[print] (
literal[string] . identifier[format] (
identifier[host] = identifier[host] ,
identifier[n] = identifier[len] ( identifier[files] )),
)
identifier[p] = identifier[Process] (
identifier[name] = literal[string] + identifier[host] ,
identifier[target] = identifier[check_files_on_host] ,
identifier[args] =( identifier[java_home] , identifier[host] , identifier[files] , identifier[batch_size] ),
)
identifier[p] . identifier[start] ()
identifier[processes] . identifier[append] ( identifier[p] )
identifier[print] ( literal[string] )
keyword[for] identifier[process] keyword[in] identifier[processes] :
identifier[process] . identifier[join] ()
keyword[except] identifier[KeyboardInterrupt] :
identifier[print] ( literal[string] )
keyword[for] identifier[process] keyword[in] identifier[processes] :
identifier[process] . identifier[terminate] ()
identifier[process] . identifier[join] ()
identifier[print] ( literal[string] )
identifier[sys] . identifier[exit] ( literal[int] )
|
def check_cluster(cluster_config, data_path, java_home, check_replicas, batch_size, minutes, start_time, end_time):
"""Check the integrity of the Kafka log files in a cluster.
start_time and end_time should be in the format specified
by TIME_FORMAT_REGEX.
:param data_path: the path to the log folder on the broker
:type data_path: str
:param java_home: the JAVA_HOME of the broker
:type java_home: str
:param check_replicas: also checks the replica files
:type check_replicas: bool
:param batch_size: the size of the batch
:type batch_size: int
:param minutes: check the files modified in the last N minutes
:type minutes: int
:param start_time: check the files modified after start_time
:type start_time: str
:param end_time: check the files modified before end_time
:type end_time: str
"""
brokers = get_broker_list(cluster_config)
broker_files = find_files(data_path, brokers, minutes, start_time, end_time)
if not check_replicas: # remove replicas
broker_files = filter_leader_files(cluster_config, broker_files) # depends on [control=['if'], data=[]]
processes = []
print('Starting {n} parallel processes'.format(n=len(broker_files)))
try:
for (broker, host, files) in broker_files:
print(' Broker: {host}, {n} files to check'.format(host=host, n=len(files)))
p = Process(name='dump_process_' + host, target=check_files_on_host, args=(java_home, host, files, batch_size))
p.start()
processes.append(p) # depends on [control=['for'], data=[]]
print('Processes running:')
for process in processes:
process.join() # depends on [control=['for'], data=['process']] # depends on [control=['try'], data=[]]
except KeyboardInterrupt:
print('Terminating all processes')
for process in processes:
process.terminate()
process.join() # depends on [control=['for'], data=['process']]
print('All processes terminated')
sys.exit(1) # depends on [control=['except'], data=[]]
|
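A hedged usage sketch for the check_cluster record above. load_cluster_config is a hypothetical stand-in for however the surrounding tool builds the object that get_broker_list() expects (a kafka-utils ClusterConfig in the original project); the paths and numbers are illustrative only.

# Sketch only: load_cluster_config is a hypothetical helper, not part of
# this record; it stands in for building the cluster_config object.
cluster_config = load_cluster_config('my-cluster')

# Check only leader log files modified in the last 30 minutes, reading
# 50 messages per batch on each broker.
check_cluster(
    cluster_config,
    data_path='/var/kafka/logs',
    java_home='/usr/lib/jvm/java-8-openjdk',
    check_replicas=False,
    batch_size=50,
    minutes=30,
    start_time=None,
    end_time=None,
)
|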
def _validate_ports_low_level(ports):
"""
Internal helper.
Validates the 'ports' argument to EphemeralOnionService or
EphemeralAuthenticatedOnionService returning None on success or
raising ValueError otherwise.
This only accepts the "list of strings" variants; some
higher-level APIs also allow lists of ints or lists of 2-tuples,
but those must be converted to strings before they get here.
"""
if not isinstance(ports, (list, tuple)):
raise ValueError("'ports' must be a list of strings")
if any([not isinstance(x, (six.text_type, str)) for x in ports]):
raise ValueError("'ports' must be a list of strings")
for port in ports:
_validate_single_port_string(port)
|
def function[_validate_ports_low_level, parameter[ports]]:
constant[
Internal helper.
Validates the 'ports' argument to EphemeralOnionService or
EphemeralAuthenticatedOnionService returning None on success or
raising ValueError otherwise.
This only accepts the "list of strings" variants; some
higher-level APIs also allow lists of ints or lists of 2-tuples,
but those must be converted to strings before they get here.
]
if <ast.UnaryOp object at 0x7da18bcc93c0> begin[:]
<ast.Raise object at 0x7da1b07a2590>
if call[name[any], parameter[<ast.ListComp object at 0x7da1b07a2530>]] begin[:]
<ast.Raise object at 0x7da1b07a0cd0>
for taget[name[port]] in starred[name[ports]] begin[:]
call[name[_validate_single_port_string], parameter[name[port]]]
|
keyword[def] identifier[_validate_ports_low_level] ( identifier[ports] ):
literal[string]
keyword[if] keyword[not] identifier[isinstance] ( identifier[ports] ,( identifier[list] , identifier[tuple] )):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[if] identifier[any] ([ keyword[not] identifier[isinstance] ( identifier[x] ,( identifier[six] . identifier[text_type] , identifier[str] )) keyword[for] identifier[x] keyword[in] identifier[ports] ]):
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[for] identifier[port] keyword[in] identifier[ports] :
identifier[_validate_single_port_string] ( identifier[port] )
|
def _validate_ports_low_level(ports):
"""
Internal helper.
Validates the 'ports' argument to EphemeralOnionService or
EphemeralAuthenticatedOnionService returning None on success or
raising ValueError otherwise.
This only accepts the "list of strings" variants; some
higher-level APIs also allow lists of ints or lists of 2-tuples,
but those must be converted to strings before they get here.
"""
if not isinstance(ports, (list, tuple)):
raise ValueError("'ports' must be a list of strings") # depends on [control=['if'], data=[]]
if any([not isinstance(x, (six.text_type, str)) for x in ports]):
raise ValueError("'ports' must be a list of strings") # depends on [control=['if'], data=[]]
for port in ports:
_validate_single_port_string(port) # depends on [control=['for'], data=['port']]
|
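A hedged example of the contract enforced by _validate_ports_low_level above. The grammar of each individual port string is checked by _validate_single_port_string, which is not part of this record, so the "80 127.0.0.1:8080" format shown here is an assumption based on txtorcon's usual onion-service port syntax.

# Valid: a list of strings (each string's format is validated separately).
_validate_ports_low_level(["80 127.0.0.1:8080"])  # returns None on success

# Invalid: a bare string is not a list of strings.
try:
    _validate_ports_low_level("80 127.0.0.1:8080")
except ValueError as err:
    print(err)  # 'ports' must be a list of strings

# Invalid: ints (or 2-tuples) must be converted to strings first.
try:
    _validate_ports_low_level([80, 443])
except ValueError as err:
    print(err)  # 'ports' must be a list of strings
|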
def send(self, cmd, **kwargs):
"""
send: string param=binary data ... -> None
When send is called with the proper arguments, an API command
will be written to the serial port for this XBee device
containing the proper instructions and data.
This method must be called with named arguments in accordance
with the api_command specification. Arguments matching all
field names other than those in reserved_names (like 'id' and
'order') should be given, unless they are of variable length
        ('None' in the specification; those are optional).
"""
# Pass through the keyword arguments
self._write(self._build_command(cmd, **kwargs))
|
def function[send, parameter[self, cmd]]:
constant[
send: string param=binary data ... -> None
When send is called with the proper arguments, an API command
will be written to the serial port for this XBee device
containing the proper instructions and data.
This method must be called with named arguments in accordance
with the api_command specification. Arguments matching all
field names other than those in reserved_names (like 'id' and
'order') should be given, unless they are of variable length
        ('None' in the specification; those are optional).
]
call[name[self]._write, parameter[call[name[self]._build_command, parameter[name[cmd]]]]]
|
keyword[def] identifier[send] ( identifier[self] , identifier[cmd] ,** identifier[kwargs] ):
literal[string]
identifier[self] . identifier[_write] ( identifier[self] . identifier[_build_command] ( identifier[cmd] ,** identifier[kwargs] ))
|
def send(self, cmd, **kwargs):
"""
send: string param=binary data ... -> None
When send is called with the proper arguments, an API command
will be written to the serial port for this XBee device
containing the proper instructions and data.
This method must be called with named arguments in accordance
with the api_command specification. Arguments matching all
field names other than those in reserved_names (like 'id' and
'order') should be given, unless they are of variable length
        ('None' in the specification; those are optional).
"""
# Pass through the keyword arguments
self._write(self._build_command(cmd, **kwargs))
|
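A hedged usage sketch for the send record above, assuming python-xbee's 'at' frame as the api_command: every keyword argument must match a field name from that specification. The serial port path and baud rate are placeholders.

# Sketch only: assumes the python-xbee package and pyserial are installed.
import serial
from xbee import XBee

ser = serial.Serial('/dev/ttyUSB0', 9600)
xbee = XBee(ser)

# Builds an 'at' API frame and writes it to the serial port; frame_id and
# command are fields of the 'at' entry in the api_command specification.
xbee.send('at', frame_id=b'\x01', command=b'MY')

ser.close()
|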
def movielens100k(data_set='movielens100k'):
"""Data set of movie ratings collected by the University of Minnesota and 'cleaned up' for use."""
if not data_available(data_set):
import zipfile
download_data(data_set)
dir_path = os.path.join(data_path, data_set)
zip = zipfile.ZipFile(os.path.join(dir_path, 'ml-100k.zip'), 'r')
for name in zip.namelist():
zip.extract(name, dir_path)
import pandas as pd
encoding = 'latin-1'
movie_path = os.path.join(data_path, 'movielens100k', 'ml-100k')
    items = pd.read_csv(os.path.join(movie_path, 'u.item'), index_col = 'index', header=None, sep='|', names=['index', 'title', 'date', 'empty', 'imdb_url', 'unknown', 'Action', 'Adventure', 'Animation', "Children's", 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western'], encoding=encoding)
users = pd.read_csv(os.path.join(movie_path, 'u.user'), index_col = 'index', header=None, sep='|', names=['index', 'age', 'sex', 'job', 'id'], encoding=encoding)
parts = ['u1.base', 'u1.test', 'u2.base', 'u2.test','u3.base', 'u3.test','u4.base', 'u4.test','u5.base', 'u5.test','ua.base', 'ua.test','ub.base', 'ub.test']
ratings = []
for part in parts:
rate_part = pd.read_csv(os.path.join(movie_path, part), index_col = 'index', header=None, sep='\t', names=['user', 'item', 'rating', 'index'], encoding=encoding)
rate_part['split'] = part
ratings.append(rate_part)
Y = pd.concat(ratings)
return data_details_return({'Y':Y, 'film_info':items, 'user_info':users, 'info': 'The Movielens 100k data'}, data_set)
|
def function[movielens100k, parameter[data_set]]:
constant[Data set of movie ratings collected by the University of Minnesota and 'cleaned up' for use.]
if <ast.UnaryOp object at 0x7da1b0fef2b0> begin[:]
import module[zipfile]
call[name[download_data], parameter[name[data_set]]]
variable[dir_path] assign[=] call[name[os].path.join, parameter[name[data_path], name[data_set]]]
variable[zip] assign[=] call[name[zipfile].ZipFile, parameter[call[name[os].path.join, parameter[name[dir_path], constant[ml-100k.zip]]], constant[r]]]
for taget[name[name]] in starred[call[name[zip].namelist, parameter[]]] begin[:]
call[name[zip].extract, parameter[name[name], name[dir_path]]]
import module[pandas] as alias[pd]
variable[encoding] assign[=] constant[latin-1]
variable[movie_path] assign[=] call[name[os].path.join, parameter[name[data_path], constant[movielens100k], constant[ml-100k]]]
variable[items] assign[=] call[name[pd].read_csv, parameter[call[name[os].path.join, parameter[name[movie_path], constant[u.item]]]]]
variable[users] assign[=] call[name[pd].read_csv, parameter[call[name[os].path.join, parameter[name[movie_path], constant[u.user]]]]]
variable[parts] assign[=] list[[<ast.Constant object at 0x7da1b0fec880>, <ast.Constant object at 0x7da1b0fecf10>, <ast.Constant object at 0x7da1b0fef460>, <ast.Constant object at 0x7da1b0fefc10>, <ast.Constant object at 0x7da1b0fed0c0>, <ast.Constant object at 0x7da1b0fec2e0>, <ast.Constant object at 0x7da1b0feeec0>, <ast.Constant object at 0x7da1b0fedfc0>, <ast.Constant object at 0x7da1b0fed960>, <ast.Constant object at 0x7da1b0fee6e0>, <ast.Constant object at 0x7da1b0feccd0>, <ast.Constant object at 0x7da1b0fef4c0>, <ast.Constant object at 0x7da1b0fed7b0>, <ast.Constant object at 0x7da1b0fee980>]]
variable[ratings] assign[=] list[[]]
for taget[name[part]] in starred[name[parts]] begin[:]
variable[rate_part] assign[=] call[name[pd].read_csv, parameter[call[name[os].path.join, parameter[name[movie_path], name[part]]]]]
call[name[rate_part]][constant[split]] assign[=] name[part]
call[name[ratings].append, parameter[name[rate_part]]]
variable[Y] assign[=] call[name[pd].concat, parameter[name[ratings]]]
return[call[name[data_details_return], parameter[dictionary[[<ast.Constant object at 0x7da1b0fef100>, <ast.Constant object at 0x7da1b0feff40>, <ast.Constant object at 0x7da1b0feded0>, <ast.Constant object at 0x7da1b0fec9d0>], [<ast.Name object at 0x7da1b0fedab0>, <ast.Name object at 0x7da1b0fef8e0>, <ast.Name object at 0x7da1b0fee7a0>, <ast.Constant object at 0x7da1b0fed450>]], name[data_set]]]]
|
keyword[def] identifier[movielens100k] ( identifier[data_set] = literal[string] ):
literal[string]
keyword[if] keyword[not] identifier[data_available] ( identifier[data_set] ):
keyword[import] identifier[zipfile]
identifier[download_data] ( identifier[data_set] )
identifier[dir_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , identifier[data_set] )
identifier[zip] = identifier[zipfile] . identifier[ZipFile] ( identifier[os] . identifier[path] . identifier[join] ( identifier[dir_path] , literal[string] ), literal[string] )
keyword[for] identifier[name] keyword[in] identifier[zip] . identifier[namelist] ():
identifier[zip] . identifier[extract] ( identifier[name] , identifier[dir_path] )
keyword[import] identifier[pandas] keyword[as] identifier[pd]
identifier[encoding] = literal[string]
identifier[movie_path] = identifier[os] . identifier[path] . identifier[join] ( identifier[data_path] , literal[string] , literal[string] )
identifier[items] = identifier[pd] . identifier[read_csv] ( identifier[os] . identifier[path] . identifier[join] ( identifier[movie_path] , literal[string] ), identifier[index_col] = literal[string] , identifier[header] = keyword[None] , identifier[sep] = literal[string] , identifier[names] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ], identifier[encoding] = identifier[encoding] )
identifier[users] = identifier[pd] . identifier[read_csv] ( identifier[os] . identifier[path] . identifier[join] ( identifier[movie_path] , literal[string] ), identifier[index_col] = literal[string] , identifier[header] = keyword[None] , identifier[sep] = literal[string] , identifier[names] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ], identifier[encoding] = identifier[encoding] )
identifier[parts] =[ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]
identifier[ratings] =[]
keyword[for] identifier[part] keyword[in] identifier[parts] :
identifier[rate_part] = identifier[pd] . identifier[read_csv] ( identifier[os] . identifier[path] . identifier[join] ( identifier[movie_path] , identifier[part] ), identifier[index_col] = literal[string] , identifier[header] = keyword[None] , identifier[sep] = literal[string] , identifier[names] =[ literal[string] , literal[string] , literal[string] , literal[string] ], identifier[encoding] = identifier[encoding] )
identifier[rate_part] [ literal[string] ]= identifier[part]
identifier[ratings] . identifier[append] ( identifier[rate_part] )
identifier[Y] = identifier[pd] . identifier[concat] ( identifier[ratings] )
keyword[return] identifier[data_details_return] ({ literal[string] : identifier[Y] , literal[string] : identifier[items] , literal[string] : identifier[users] , literal[string] : literal[string] }, identifier[data_set] )
|
def movielens100k(data_set='movielens100k'):
"""Data set of movie ratings collected by the University of Minnesota and 'cleaned up' for use."""
if not data_available(data_set):
import zipfile
download_data(data_set)
dir_path = os.path.join(data_path, data_set)
zip = zipfile.ZipFile(os.path.join(dir_path, 'ml-100k.zip'), 'r')
for name in zip.namelist():
zip.extract(name, dir_path) # depends on [control=['for'], data=['name']] # depends on [control=['if'], data=[]]
import pandas as pd
encoding = 'latin-1'
movie_path = os.path.join(data_path, 'movielens100k', 'ml-100k')
    items = pd.read_csv(os.path.join(movie_path, 'u.item'), index_col='index', header=None, sep='|', names=['index', 'title', 'date', 'empty', 'imdb_url', 'unknown', 'Action', 'Adventure', 'Animation', "Children's", 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy', 'Film-Noir', 'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western'], encoding=encoding)
users = pd.read_csv(os.path.join(movie_path, 'u.user'), index_col='index', header=None, sep='|', names=['index', 'age', 'sex', 'job', 'id'], encoding=encoding)
parts = ['u1.base', 'u1.test', 'u2.base', 'u2.test', 'u3.base', 'u3.test', 'u4.base', 'u4.test', 'u5.base', 'u5.test', 'ua.base', 'ua.test', 'ub.base', 'ub.test']
ratings = []
for part in parts:
rate_part = pd.read_csv(os.path.join(movie_path, part), index_col='index', header=None, sep='\t', names=['user', 'item', 'rating', 'index'], encoding=encoding)
rate_part['split'] = part
ratings.append(rate_part) # depends on [control=['for'], data=['part']]
Y = pd.concat(ratings)
return data_details_return({'Y': Y, 'film_info': items, 'user_info': users, 'info': 'The Movielens 100k data'}, data_set)
|
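A hedged usage sketch for the movielens100k record above. The dict keys come straight from the function's return statement; that the module-level helpers (data_available, download_data, data_path, data_details_return) resolve correctly is assumed.

# Sketch only: assumes this function's module (a pods/GPy-style datasets
# module) is importable and can fetch the data on first use.
data = movielens100k()

Y = data['Y']                          # all 100k ratings, tagged by split
train = Y[Y['split'] == 'u1.base']     # one of the canonical train/test splits
test = Y[Y['split'] == 'u1.test']

print(data['film_info'].head())        # per-movie metadata, incl. genre flags
print(data['user_info'].head())        # per-user age/sex/job
print('train: %d ratings, test: %d ratings' % (len(train), len(test)))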