code stringlengths 75 104k | code_sememe stringlengths 47 309k | token_type stringlengths 215 214k | code_dependency stringlengths 75 155k |
|---|---|---|---|
# Setter-style method for the IDD field `depth_may_average_ground_temperature`
# (unit: C per the docstring).  Coerces the input to float and stores it on a
# private attribute of the same name.
def depth_may_average_ground_temperature(self, value=None):
"""Corresponds to IDD Field `depth_may_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_may_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
# None denotes a missing value and is stored without any validation.
if value is not None:
try:
value = float(value)
except ValueError:
# Re-raise with a field-specific message so the caller can tell
# which IDD field rejected the input.
raise ValueError(
'value {} need to be of type float '
'for field `depth_may_average_ground_temperature`'.format(value))
self._depth_may_average_ground_temperature = value | def function[depth_may_average_ground_temperature, parameter[self, value]]:
constant[Corresponds to IDD Field `depth_may_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_may_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
]
if compare[name[value] is_not constant[None]] begin[:]
<ast.Try object at 0x7da20c6c5d50>
name[self]._depth_may_average_ground_temperature assign[=] name[value] | keyword[def] identifier[depth_may_average_ground_temperature] ( identifier[self] , identifier[value] = keyword[None] ):
literal[string]
keyword[if] identifier[value] keyword[is] keyword[not] keyword[None] :
keyword[try] :
identifier[value] = identifier[float] ( identifier[value] )
keyword[except] identifier[ValueError] :
keyword[raise] identifier[ValueError] (
literal[string]
literal[string] . identifier[format] ( identifier[value] ))
identifier[self] . identifier[_depth_may_average_ground_temperature] = identifier[value] | def depth_may_average_ground_temperature(self, value=None):
"""Corresponds to IDD Field `depth_may_average_ground_temperature`
Args:
value (float): value for IDD Field `depth_may_average_ground_temperature`
Unit: C
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
Raises:
ValueError: if `value` is not a valid value
"""
if value is not None:
try:
value = float(value) # depends on [control=['try'], data=[]]
except ValueError:
raise ValueError('value {} need to be of type float for field `depth_may_average_ground_temperature`'.format(value)) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['value']]
self._depth_may_average_ground_temperature = value |
# Elasticsearch security client method: creates an API key by issuing
# PUT /_security/api_key with the given request body.
def create_api_key(self, body, params=None):
"""
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html>`_
:arg body: The api key request to create an API key
:arg refresh: If `true` (the default) then refresh the affected shards
to make this operation visible to search, if `wait_for` then wait
for a refresh to make this operation visible to search, if `false`
then do nothing with refreshes., valid choices are: 'true', 'false',
'wait_for'
"""
# SKIP_IN_PATH (defined elsewhere in the module) presumably holds the
# sentinel "empty" values -- a missing body is rejected up front.
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
# The transport layer performs the actual HTTP round trip.
return self.transport.perform_request(
"PUT", "/_security/api_key", params=params, body=body
) | def function[create_api_key, parameter[self, body, params]]:
constant[
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html>`_
:arg body: The api key request to create an API key
:arg refresh: If `true` (the default) then refresh the affected shards
to make this operation visible to search, if `wait_for` then wait
for a refresh to make this operation visible to search, if `false`
then do nothing with refreshes., valid choices are: 'true', 'false',
'wait_for'
]
if compare[name[body] in name[SKIP_IN_PATH]] begin[:]
<ast.Raise object at 0x7da1b217a2c0>
return[call[name[self].transport.perform_request, parameter[constant[PUT], constant[/_security/api_key]]]] | keyword[def] identifier[create_api_key] ( identifier[self] , identifier[body] , identifier[params] = keyword[None] ):
literal[string]
keyword[if] identifier[body] keyword[in] identifier[SKIP_IN_PATH] :
keyword[raise] identifier[ValueError] ( literal[string] )
keyword[return] identifier[self] . identifier[transport] . identifier[perform_request] (
literal[string] , literal[string] , identifier[params] = identifier[params] , identifier[body] = identifier[body]
) | def create_api_key(self, body, params=None):
"""
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/security-api-create-api-key.html>`_
:arg body: The api key request to create an API key
:arg refresh: If `true` (the default) then refresh the affected shards
to make this operation visible to search, if `wait_for` then wait
for a refresh to make this operation visible to search, if `false`
then do nothing with refreshes., valid choices are: 'true', 'false',
'wait_for'
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.") # depends on [control=['if'], data=[]]
return self.transport.perform_request('PUT', '/_security/api_key', params=params, body=body) |
# Dialogflow GAPIC client method: fetches the Agent for a project.  The raw
# transport method is lazily wrapped with retry/timeout logic on first use and
# memoized in self._inner_api_calls.
def get_agent(self,
parent,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Retrieves the specified agent.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.AgentsClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> response = client.get_agent(parent)
Args:
parent (str): Required. The project that the agent to fetch is associated with.
Format: ``projects/<Project ID>``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types.Agent` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
# Defaults for retry/timeout come from the 'GetAgent' method config;
# the wrapped callable is cached so wrapping happens only once.
if 'get_agent' not in self._inner_api_calls:
self._inner_api_calls[
'get_agent'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_agent,
default_retry=self._method_configs['GetAgent'].retry,
default_timeout=self._method_configs['GetAgent'].timeout,
client_info=self._client_info,
)
# Build the protobuf request and invoke the wrapped call.
request = agent_pb2.GetAgentRequest(parent=parent, )
return self._inner_api_calls['get_agent'](
request, retry=retry, timeout=timeout, metadata=metadata) | def function[get_agent, parameter[self, parent, retry, timeout, metadata]]:
constant[
Retrieves the specified agent.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.AgentsClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> response = client.get_agent(parent)
Args:
parent (str): Required. The project that the agent to fetch is associated with.
Format: ``projects/<Project ID>``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types.Agent` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
]
if compare[constant[get_agent] <ast.NotIn object at 0x7da2590d7190> name[self]._inner_api_calls] begin[:]
call[name[self]._inner_api_calls][constant[get_agent]] assign[=] call[name[google].api_core.gapic_v1.method.wrap_method, parameter[name[self].transport.get_agent]]
variable[request] assign[=] call[name[agent_pb2].GetAgentRequest, parameter[]]
return[call[call[name[self]._inner_api_calls][constant[get_agent]], parameter[name[request]]]] | keyword[def] identifier[get_agent] ( identifier[self] ,
identifier[parent] ,
identifier[retry] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[timeout] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[metadata] = keyword[None] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_inner_api_calls] :
identifier[self] . identifier[_inner_api_calls] [
literal[string] ]= identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[wrap_method] (
identifier[self] . identifier[transport] . identifier[get_agent] ,
identifier[default_retry] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[retry] ,
identifier[default_timeout] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[timeout] ,
identifier[client_info] = identifier[self] . identifier[_client_info] ,
)
identifier[request] = identifier[agent_pb2] . identifier[GetAgentRequest] ( identifier[parent] = identifier[parent] ,)
keyword[return] identifier[self] . identifier[_inner_api_calls] [ literal[string] ](
identifier[request] , identifier[retry] = identifier[retry] , identifier[timeout] = identifier[timeout] , identifier[metadata] = identifier[metadata] ) | def get_agent(self, parent, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None):
"""
Retrieves the specified agent.
Example:
>>> import dialogflow_v2
>>>
>>> client = dialogflow_v2.AgentsClient()
>>>
>>> parent = client.project_path('[PROJECT]')
>>>
>>> response = client.get_agent(parent)
Args:
parent (str): Required. The project that the agent to fetch is associated with.
Format: ``projects/<Project ID>``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2.types.Agent` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_agent' not in self._inner_api_calls:
self._inner_api_calls['get_agent'] = google.api_core.gapic_v1.method.wrap_method(self.transport.get_agent, default_retry=self._method_configs['GetAgent'].retry, default_timeout=self._method_configs['GetAgent'].timeout, client_info=self._client_info) # depends on [control=['if'], data=[]]
request = agent_pb2.GetAgentRequest(parent=parent)
return self._inner_api_calls['get_agent'](request, retry=retry, timeout=timeout, metadata=metadata) |
# Plurality-insensitive word comparison.  Delegates to _plequal with each of
# the three pluralizers in turn.
def compare(self, word1, word2):
"""
compare word1 and word2 for equality regardless of plurality
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
"""
# Try noun, then verb, then adjective pluralisation; `or` yields the
# first truthy comparison code, or False when none of them match.
return (
self._plequal(word1, word2, self.plural_noun)
or self._plequal(word1, word2, self.plural_verb)
or self._plequal(word1, word2, self.plural_adj)
) | def function[compare, parameter[self, word1, word2]]:
constant[
compare word1 and word2 for equality regardless of plurality
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
]
return[<ast.BoolOp object at 0x7da1b12c85b0>] | keyword[def] identifier[compare] ( identifier[self] , identifier[word1] , identifier[word2] ):
literal[string]
keyword[return] (
identifier[self] . identifier[_plequal] ( identifier[word1] , identifier[word2] , identifier[self] . identifier[plural_noun] )
keyword[or] identifier[self] . identifier[_plequal] ( identifier[word1] , identifier[word2] , identifier[self] . identifier[plural_verb] )
keyword[or] identifier[self] . identifier[_plequal] ( identifier[word1] , identifier[word2] , identifier[self] . identifier[plural_adj] )
) | def compare(self, word1, word2):
"""
compare word1 and word2 for equality regardless of plurality
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
"""
return self._plequal(word1, word2, self.plural_noun) or self._plequal(word1, word2, self.plural_verb) or self._plequal(word1, word2, self.plural_adj) |
# Thin convenience wrapper: forwards to self.do() with the HTTP method
# fixed to 'POST'.
def post(self, resource, data=None, json=None):
"""Sends a POST request
Returns:
RTMResponse
"""
return self.do(resource, 'POST', data=data, json=json) | def function[post, parameter[self, resource, data, json]]:
constant[Sends a POST request
Returns:
RTMResponse
]
return[call[name[self].do, parameter[name[resource], constant[POST]]]] | keyword[def] identifier[post] ( identifier[self] , identifier[resource] , identifier[data] = keyword[None] , identifier[json] = keyword[None] ):
literal[string]
keyword[return] identifier[self] . identifier[do] ( identifier[resource] , literal[string] , identifier[data] = identifier[data] , identifier[json] = identifier[json] ) | def post(self, resource, data=None, json=None):
"""Sends a POST request
Returns:
RTMResponse
"""
return self.do(resource, 'POST', data=data, json=json) |
# Membership test for a point on an elliptic curve over the prime field p.
def contains_point(self, x, y):
"""
:param x: x coordinate of a point
:param y: y coordinate of a point
:returns: True if the point (x, y) is on the curve, False otherwise
"""
# (None, None) represents the point at infinity, which is on every curve.
if x is None and y is None:
return True
# Short Weierstrass equation: y^2 == x^3 + a*x + b (mod p).
return (y * y - (x * x * x + self._a * x + self._b)) % self._p == 0 | def function[contains_point, parameter[self, x, y]]:
constant[
:param x: x coordinate of a point
:param y: y coordinate of a point
:returns: True if the point (x, y) is on the curve, False otherwise
]
if <ast.BoolOp object at 0x7da1b1dde0e0> begin[:]
return[constant[True]]
return[compare[binary_operation[binary_operation[binary_operation[name[y] * name[y]] - binary_operation[binary_operation[binary_operation[binary_operation[name[x] * name[x]] * name[x]] + binary_operation[name[self]._a * name[x]]] + name[self]._b]] <ast.Mod object at 0x7da2590d6920> name[self]._p] equal[==] constant[0]]] | keyword[def] identifier[contains_point] ( identifier[self] , identifier[x] , identifier[y] ):
literal[string]
keyword[if] identifier[x] keyword[is] keyword[None] keyword[and] identifier[y] keyword[is] keyword[None] :
keyword[return] keyword[True]
keyword[return] ( identifier[y] * identifier[y] -( identifier[x] * identifier[x] * identifier[x] + identifier[self] . identifier[_a] * identifier[x] + identifier[self] . identifier[_b] ))% identifier[self] . identifier[_p] == literal[int] | def contains_point(self, x, y):
"""
:param x: x coordinate of a point
:param y: y coordinate of a point
:returns: True if the point (x, y) is on the curve, False otherwise
"""
if x is None and y is None:
return True # depends on [control=['if'], data=[]]
return (y * y - (x * x * x + self._a * x + self._b)) % self._p == 0 |
# Class-level registration hook for line scanners.  Mutates the class
# attributes `scanners` (list of callables) and `scanner_keys` (set).
def _scan(cls, result_key, scanner):
"""
Registers a `scanner` which is a function that will be called once per
logical line in a document. A scanners job is to evaluate the content
of the line and set a so-called `result_key` on the class to be
retrieved later by a rule.
"""
# Reject duplicate keys so two scanners cannot silently clobber each
# other's result.
if result_key in cls.scanner_keys:
raise ValueError("'%s' is already a registered scanner key" % result_key)
cls.scanners.append(scanner)
cls.scanner_keys.add(result_key) | def function[_scan, parameter[cls, result_key, scanner]]:
constant[
Registers a `scanner` which is a function that will be called once per
logical line in a document. A scanners job is to evaluate the content
of the line and set a so-called `result_key` on the class to be
retrieved later by a rule.
]
if compare[name[result_key] in name[cls].scanner_keys] begin[:]
<ast.Raise object at 0x7da1b184bd60>
call[name[cls].scanners.append, parameter[name[scanner]]]
call[name[cls].scanner_keys.add, parameter[name[result_key]]] | keyword[def] identifier[_scan] ( identifier[cls] , identifier[result_key] , identifier[scanner] ):
literal[string]
keyword[if] identifier[result_key] keyword[in] identifier[cls] . identifier[scanner_keys] :
keyword[raise] identifier[ValueError] ( literal[string] % identifier[result_key] )
identifier[cls] . identifier[scanners] . identifier[append] ( identifier[scanner] )
identifier[cls] . identifier[scanner_keys] . identifier[add] ( identifier[result_key] ) | def _scan(cls, result_key, scanner):
"""
Registers a `scanner` which is a function that will be called once per
logical line in a document. A scanners job is to evaluate the content
of the line and set a so-called `result_key` on the class to be
retrieved later by a rule.
"""
if result_key in cls.scanner_keys:
raise ValueError("'%s' is already a registered scanner key" % result_key) # depends on [control=['if'], data=['result_key']]
cls.scanners.append(scanner)
cls.scanner_keys.add(result_key) |
# Couchbase durability helper: folds the mutation tokens of one or more
# operation results into this state object's scan vector.
def add_results(self, *rvs, **kwargs):
"""
Changes the state to reflect the mutation which yielded the given
result.
In order to use the result, the `fetch_mutation_tokens` option must
have been specified in the connection string, _and_ the result
must have been successful.
:param rvs: One or more :class:`~.OperationResult` which have been
returned from mutations
:param quiet: Suppress errors if one of the results does not
contain a convertible state.
:return: `True` if the result was valid and added, `False` if not
added (and `quiet` was specified
:raise: :exc:`~.MissingTokenError` if `result` does not contain
a valid token
"""
# Calling with no results at all is always an error, regardless of quiet.
if not rvs:
raise MissingTokenError.pyexc(message='No results passed')
for rv in rvs:
# _mutinfo is the mutation token attached to a successful result;
# it is falsy when the result carries no usable token.
mi = rv._mutinfo
if not mi:
# quiet=True downgrades the missing-token error to a False return.
if kwargs.get('quiet'):
return False
raise MissingTokenError.pyexc(
message='Result does not contain token')
self._add_scanvec(mi)
return True | def function[add_results, parameter[self, rvs, kwargs]]:
constant[
Changes the state to reflect the mutation which yielded the given
result.
In order to use the result, the `fetch_mutation_tokens` option must
have been specified in the connection string, _and_ the result
must have been successful.
:param rvs: One or more :class:`~.OperationResult` which have been
returned from mutations
:param quiet: Suppress errors if one of the results does not
contain a convertible state.
:return: `True` if the result was valid and added, `False` if not
added (and `quiet` was specified
:raise: :exc:`~.MissingTokenError` if `result` does not contain
a valid token
]
if <ast.UnaryOp object at 0x7da20c7967d0> begin[:]
<ast.Raise object at 0x7da20c795330>
for taget[name[rv]] in starred[name[rvs]] begin[:]
variable[mi] assign[=] name[rv]._mutinfo
if <ast.UnaryOp object at 0x7da20c796560> begin[:]
if call[name[kwargs].get, parameter[constant[quiet]]] begin[:]
return[constant[False]]
<ast.Raise object at 0x7da18ede6830>
call[name[self]._add_scanvec, parameter[name[mi]]]
return[constant[True]] | keyword[def] identifier[add_results] ( identifier[self] ,* identifier[rvs] ,** identifier[kwargs] ):
literal[string]
keyword[if] keyword[not] identifier[rvs] :
keyword[raise] identifier[MissingTokenError] . identifier[pyexc] ( identifier[message] = literal[string] )
keyword[for] identifier[rv] keyword[in] identifier[rvs] :
identifier[mi] = identifier[rv] . identifier[_mutinfo]
keyword[if] keyword[not] identifier[mi] :
keyword[if] identifier[kwargs] . identifier[get] ( literal[string] ):
keyword[return] keyword[False]
keyword[raise] identifier[MissingTokenError] . identifier[pyexc] (
identifier[message] = literal[string] )
identifier[self] . identifier[_add_scanvec] ( identifier[mi] )
keyword[return] keyword[True] | def add_results(self, *rvs, **kwargs):
"""
Changes the state to reflect the mutation which yielded the given
result.
In order to use the result, the `fetch_mutation_tokens` option must
have been specified in the connection string, _and_ the result
must have been successful.
:param rvs: One or more :class:`~.OperationResult` which have been
returned from mutations
:param quiet: Suppress errors if one of the results does not
contain a convertible state.
:return: `True` if the result was valid and added, `False` if not
added (and `quiet` was specified
:raise: :exc:`~.MissingTokenError` if `result` does not contain
a valid token
"""
if not rvs:
raise MissingTokenError.pyexc(message='No results passed') # depends on [control=['if'], data=[]]
for rv in rvs:
mi = rv._mutinfo
if not mi:
if kwargs.get('quiet'):
return False # depends on [control=['if'], data=[]]
raise MissingTokenError.pyexc(message='Result does not contain token') # depends on [control=['if'], data=[]]
self._add_scanvec(mi) # depends on [control=['for'], data=['rv']]
return True |
# Normalizes the IOR 'file_mode' attribute into a set of concrete modes.
def file_mode(self):
"""onefile, fpp, or both"""
fms = self.attributes['file_mode']
eax = set()
# A single string is split shell-style so it may list several modes.
if isinstance(fms, six.string_types):
fms = shlex.split(fms)
for fm in fms:
# 'both' is shorthand that expands to the two concrete modes;
# the set deduplicates any repeats.
if fm == 'both':
eax.add('fpp')
eax.add('onefile')
elif fm in ['fpp', 'onefile']:
eax.add(fm)
else:
raise Exception('Invalid IOR file mode: ' + fm)
return eax | def function[file_mode, parameter[self]]:
constant[onefile, fpp, or both]
variable[fms] assign[=] call[name[self].attributes][constant[file_mode]]
variable[eax] assign[=] call[name[set], parameter[]]
if call[name[isinstance], parameter[name[fms], name[six].string_types]] begin[:]
variable[fms] assign[=] call[name[shlex].split, parameter[name[fms]]]
for taget[name[fm]] in starred[name[fms]] begin[:]
if compare[name[fm] equal[==] constant[both]] begin[:]
call[name[eax].add, parameter[constant[fpp]]]
call[name[eax].add, parameter[constant[onefile]]]
return[name[eax]] | keyword[def] identifier[file_mode] ( identifier[self] ):
literal[string]
identifier[fms] = identifier[self] . identifier[attributes] [ literal[string] ]
identifier[eax] = identifier[set] ()
keyword[if] identifier[isinstance] ( identifier[fms] , identifier[six] . identifier[string_types] ):
identifier[fms] = identifier[shlex] . identifier[split] ( identifier[fms] )
keyword[for] identifier[fm] keyword[in] identifier[fms] :
keyword[if] identifier[fm] == literal[string] :
identifier[eax] . identifier[add] ( literal[string] )
identifier[eax] . identifier[add] ( literal[string] )
keyword[elif] identifier[fm] keyword[in] [ literal[string] , literal[string] ]:
identifier[eax] . identifier[add] ( identifier[fm] )
keyword[else] :
keyword[raise] identifier[Exception] ( literal[string] + identifier[fm] )
keyword[return] identifier[eax] | def file_mode(self):
"""onefile, fpp, or both"""
fms = self.attributes['file_mode']
eax = set()
if isinstance(fms, six.string_types):
fms = shlex.split(fms) # depends on [control=['if'], data=[]]
for fm in fms:
if fm == 'both':
eax.add('fpp')
eax.add('onefile') # depends on [control=['if'], data=[]]
elif fm in ['fpp', 'onefile']:
eax.add(fm) # depends on [control=['if'], data=['fm']]
else:
raise Exception('Invalid IOR file mode: ' + fm) # depends on [control=['for'], data=['fm']]
return eax |
# AWS IAM client method: issues the DeleteAccessKey action.
def delete_access_key(self, access_key_id, user_name=None):
"""
Delete an access key associated with a user.
If the user_name is not specified, it is determined implicitly based
on the AWS Access Key ID used to sign the request.
:type access_key_id: string
:param access_key_id: The ID of the access key to be deleted.
:type user_name: string
:param user_name: The username of the user
"""
params = {'AccessKeyId' : access_key_id}
# UserName is optional; when omitted AWS infers the user from the
# credentials that signed the request.
if user_name:
params['UserName'] = user_name
return self.get_response('DeleteAccessKey', params) | def function[delete_access_key, parameter[self, access_key_id, user_name]]:
constant[
Delete an access key associated with a user.
If the user_name is not specified, it is determined implicitly based
on the AWS Access Key ID used to sign the request.
:type access_key_id: string
:param access_key_id: The ID of the access key to be deleted.
:type user_name: string
:param user_name: The username of the user
]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b2615000>], [<ast.Name object at 0x7da1b2615a20>]]
if name[user_name] begin[:]
call[name[params]][constant[UserName]] assign[=] name[user_name]
return[call[name[self].get_response, parameter[constant[DeleteAccessKey], name[params]]]] | keyword[def] identifier[delete_access_key] ( identifier[self] , identifier[access_key_id] , identifier[user_name] = keyword[None] ):
literal[string]
identifier[params] ={ literal[string] : identifier[access_key_id] }
keyword[if] identifier[user_name] :
identifier[params] [ literal[string] ]= identifier[user_name]
keyword[return] identifier[self] . identifier[get_response] ( literal[string] , identifier[params] ) | def delete_access_key(self, access_key_id, user_name=None):
"""
Delete an access key associated with a user.
If the user_name is not specified, it is determined implicitly based
on the AWS Access Key ID used to sign the request.
:type access_key_id: string
:param access_key_id: The ID of the access key to be deleted.
:type user_name: string
:param user_name: The username of the user
"""
params = {'AccessKeyId': access_key_id}
if user_name:
params['UserName'] = user_name # depends on [control=['if'], data=[]]
return self.get_response('DeleteAccessKey', params) |
def makeEquilibriumTable(out_filename, four_in_files, CRRA):
    '''
    Make the equilibrium statistics table for the paper, saving it as a tex file
    in the tables folder. Also makes a version for the slides that doesn't use
    the table environment, nor include the note at bottom.
    Parameters
    ----------
    out_filename : str
        Name of the file in which to save output (in the tables directory).
        Suffix .tex appended automatically.
    four_in_files: [str]
        A list with four csv files. 0) SOE frictionless 1) SOE Sticky 2) DSGE frictionless 3) DSGE sticky
    CRRA : float
        Coefficient of relative risk aversion
    Returns
    -------
    None
    '''
    # Read in statistics from the four files
    SOEfrictionless = np.genfromtxt(results_dir + four_in_files[0] + 'Results.csv', delimiter=',')
    SOEsticky = np.genfromtxt(results_dir + four_in_files[1] + 'Results.csv', delimiter=',')
    DSGEfrictionless = np.genfromtxt(results_dir + four_in_files[2] + 'Results.csv', delimiter=',')
    DSGEsticky = np.genfromtxt(results_dir + four_in_files[3] + 'Results.csv', delimiter=',')
    # Read in value at birth from the four files
    vBirth_SOE_F = np.genfromtxt(results_dir + four_in_files[0] + 'BirthValue.csv', delimiter=',')
    vBirth_SOE_S = np.genfromtxt(results_dir + four_in_files[1] + 'BirthValue.csv', delimiter=',')
    vBirth_DSGE_F = np.genfromtxt(results_dir + four_in_files[2] + 'BirthValue.csv', delimiter=',')
    vBirth_DSGE_S = np.genfromtxt(results_dir + four_in_files[3] + 'BirthValue.csv', delimiter=',')
    # Calculate the cost of stickiness in the SOE and DSGE models: the mean
    # consumption-equivalent welfare loss implied by the ratio of values at birth,
    # converted through the CRRA utility transformation.
    StickyCost_SOE = np.mean(1. - (vBirth_SOE_S/vBirth_SOE_F)**(1./(1.-CRRA)))
    StickyCost_DSGE = np.mean(1. - (vBirth_DSGE_S/vBirth_DSGE_F)**(1./(1.-CRRA)))
    # LaTeX preamble used only by the paper version of the table
    paper_top = "\\begin{minipage}{\\textwidth}\n"
    paper_top += " \\begin{table} \n"
    paper_top += "\caption{Equilibrium Statistics} \n"
    paper_top += "\label{table:Eqbm} \n"
    paper_top += "\\newsavebox{\EqbmBox} \n"
    paper_top += "\sbox{\EqbmBox}{ \n"
    paper_top += "\\newcommand{\EqDir}{\TablesDir/Eqbm} \n"
    slides_top = '\\begin{center} \n'
    # The tabular body is shared between the paper and slides versions
    main_table = "\\begin{tabular}{lllcccc} \n"
    main_table += "\\toprule \n"
    main_table += "&&& \multicolumn{2}{c}{SOE Model} & \multicolumn{2}{c}{HA-DSGE Model} \n"
    main_table += "\\\\ %\cline{4-5} \n"
    main_table += " &&& \multicolumn{1}{c}{Frictionless} & \multicolumn{1}{c}{Sticky} & \multicolumn{1}{c}{Frictionless} & \multicolumn{1}{c}{Sticky} \n"
    main_table += "\\\\ \\midrule \n"
    main_table += " \multicolumn{3}{l}{Means} \n"
    main_table += "%\\\\ & & $M$ \n"
    main_table += "%\\\\ & & $K$ \n"
    main_table += "\\\\ & & $A$ & {:.2f}".format(SOEfrictionless[0]) +" &{:.2f}".format(SOEsticky[0]) +" & {:.2f}".format(DSGEfrictionless[0]) +" & {:.2f}".format(DSGEsticky[0]) +" \n"
    main_table += "\\\\ & & $C$ & {:.2f}".format(SOEfrictionless[1]) +" &{:.2f}".format(SOEsticky[1]) +" & {:.2f}".format(DSGEfrictionless[1]) +" & {:.2f}".format(DSGEsticky[1]) +" \n"
    main_table += "\\\\ \\midrule \n"
    main_table += " \multicolumn{3}{l}{Standard Deviations} \n"
    main_table += "\\\\ & \multicolumn{4}{l}{Aggregate Time Series (`Macro')} \n"
    main_table += "%\\ & & $\Delta \log \mathbf{M}$ \n"
    main_table += "\\\\ & & $\log A $ & {:.3f}".format(SOEfrictionless[2]) +" & {:.3f}".format(SOEsticky[2]) +" & {:.3f}".format(DSGEfrictionless[2]) +" & {:.3f}".format(DSGEsticky[2]) +" \n"
    main_table += "\\\\ & & $\Delta \log \\CLevBF $ & {:.3f}".format(SOEfrictionless[3]) +" & {:.3f}".format(SOEsticky[3]) +" & {:.3f}".format(DSGEfrictionless[3]) +" & {:.3f}".format(DSGEsticky[3]) +" \n"
    main_table += "\\\\ & & $\Delta \log \\YLevBF $ & {:.3f}".format(SOEfrictionless[4]) +" & {:.3f}".format(SOEsticky[4]) +" & {:.3f}".format(DSGEfrictionless[4]) +" & {:.3f}".format(DSGEsticky[4]) +" \n"
    main_table += "\\\\ & \multicolumn{3}{l}{Individual Cross Sectional (`Micro')} \n"
    main_table += "\\\\ & & $\log \\aLevBF $ & {:.3f}".format(SOEfrictionless[6]) +" & {:.3f}".format(SOEsticky[6]) +" & {:.3f}".format(DSGEfrictionless[6]) +" & {:.3f}".format(DSGEsticky[6]) +" \n"
    main_table += "\\\\ & & $\log \\cLevBF $ & {:.3f}".format(SOEfrictionless[7]) +" & {:.3f}".format(SOEsticky[7]) +" & {:.3f}".format(DSGEfrictionless[7]) +" & {:.3f}".format(DSGEsticky[7]) +" \n"
    main_table += "\\\\ & & $\log p $ & {:.3f}".format(SOEfrictionless[8]) +" & {:.3f}".format(SOEsticky[8]) +" & {:.3f}".format(DSGEfrictionless[8]) +" & {:.3f}".format(DSGEsticky[8]) +" \n"
    main_table += "\\\\ & & $\log \\yLevBF | \\yLevBF > 0 $ & {:.3f}".format(SOEfrictionless[9]) +" & {:.3f}".format(SOEsticky[9]) +" & {:.3f}".format(DSGEfrictionless[9]) +" & {:.3f}".format(DSGEsticky[9]) +" \n"
    main_table += "\\\\ & & $\Delta \log \\cLevBF $ & {:.3f}".format(SOEfrictionless[11]) +" & {:.3f}".format(SOEsticky[11]) +" & {:.3f}".format(DSGEfrictionless[11]) +" & {:.3f}".format(DSGEsticky[11]) +" \n"
    main_table += " \n"
    main_table += " \n"
    main_table += "\\\\ \\midrule \multicolumn{3}{l}{Cost of Stickiness} \n"
    main_table += " & \multicolumn{2}{c}{" + mystr2(StickyCost_SOE) + "} \n"
    main_table += " & \multicolumn{2}{c}{" + mystr2(StickyCost_DSGE) + "} \n"
    main_table += "\\\\ \\bottomrule \n"
    main_table += " \end{tabular} \n"
    # Closing material (table note, box measurement) used only by the paper version
    paper_bot = " } \n "
    paper_bot += "\\usebox{\EqbmBox} \n"
    paper_bot += "\ifthenelse{\\boolean{StandAlone}}{\\newlength\TableWidth}{} \n"
    paper_bot += "\settowidth\TableWidth{\\usebox{\EqbmBox}} % Calculate width of table so notes will match \n"
    paper_bot += "\medskip\medskip \\vspace{0.0cm} \parbox{\TableWidth}{\\footnotesize\n"
    paper_bot += "\\textbf{Notes}: The cost of stickiness is calculated as the proportion by which the permanent income of a newborn frictionless consumer would need to be reduced in order to achieve the same reduction of expected value associated with forcing them to become a sticky expectations consumer.} \n"
    paper_bot += "\end{table}\n"
    paper_bot += "\end{minipage}\n"
    paper_bot += "\ifthenelse{\\boolean{StandAlone}}{\end{document}}{} \n"
    slides_bot = '\\end{center} \n'
    # Write the paper version; the with-statement closes the file automatically,
    # so no explicit f.close() is needed (the previous one was redundant).
    paper_output = paper_top + main_table + paper_bot
    with open(tables_dir + out_filename + '.tex','w') as f:
        f.write(paper_output)
    # Write the slides version (same tabular body, no table environment or note)
    slides_output = slides_top + main_table + slides_bot
    with open(tables_dir + out_filename + 'Slides.tex','w') as f:
        f.write(slides_output)
constant[
Make the equilibrium statistics table for the paper, saving it as a tex file
in the tables folder. Also makes a version for the slides that doesn't use
the table environment, nor include the note at bottom.
Parameters
----------
out_filename : str
Name of the file in which to save output (in the tables directory).
Suffix .tex appended automatically.
four_in_files: [str]
A list with four csv files. 0) SOE frictionless 1) SOE Sticky 2) DSGE frictionless 3) DSGE sticky
CRRA : float
Coefficient of relative risk aversion
Returns
-------
None
]
variable[SOEfrictionless] assign[=] call[name[np].genfromtxt, parameter[binary_operation[binary_operation[name[results_dir] + call[name[four_in_files]][constant[0]]] + constant[Results.csv]]]]
variable[SOEsticky] assign[=] call[name[np].genfromtxt, parameter[binary_operation[binary_operation[name[results_dir] + call[name[four_in_files]][constant[1]]] + constant[Results.csv]]]]
variable[DSGEfrictionless] assign[=] call[name[np].genfromtxt, parameter[binary_operation[binary_operation[name[results_dir] + call[name[four_in_files]][constant[2]]] + constant[Results.csv]]]]
variable[DSGEsticky] assign[=] call[name[np].genfromtxt, parameter[binary_operation[binary_operation[name[results_dir] + call[name[four_in_files]][constant[3]]] + constant[Results.csv]]]]
variable[vBirth_SOE_F] assign[=] call[name[np].genfromtxt, parameter[binary_operation[binary_operation[name[results_dir] + call[name[four_in_files]][constant[0]]] + constant[BirthValue.csv]]]]
variable[vBirth_SOE_S] assign[=] call[name[np].genfromtxt, parameter[binary_operation[binary_operation[name[results_dir] + call[name[four_in_files]][constant[1]]] + constant[BirthValue.csv]]]]
variable[vBirth_DSGE_F] assign[=] call[name[np].genfromtxt, parameter[binary_operation[binary_operation[name[results_dir] + call[name[four_in_files]][constant[2]]] + constant[BirthValue.csv]]]]
variable[vBirth_DSGE_S] assign[=] call[name[np].genfromtxt, parameter[binary_operation[binary_operation[name[results_dir] + call[name[four_in_files]][constant[3]]] + constant[BirthValue.csv]]]]
variable[StickyCost_SOE] assign[=] call[name[np].mean, parameter[binary_operation[constant[1.0] - binary_operation[binary_operation[name[vBirth_SOE_S] / name[vBirth_SOE_F]] ** binary_operation[constant[1.0] / binary_operation[constant[1.0] - name[CRRA]]]]]]]
variable[StickyCost_DSGE] assign[=] call[name[np].mean, parameter[binary_operation[constant[1.0] - binary_operation[binary_operation[name[vBirth_DSGE_S] / name[vBirth_DSGE_F]] ** binary_operation[constant[1.0] / binary_operation[constant[1.0] - name[CRRA]]]]]]]
variable[paper_top] assign[=] constant[\begin{minipage}{\textwidth}
]
<ast.AugAssign object at 0x7da1b084d390>
<ast.AugAssign object at 0x7da1b084d420>
<ast.AugAssign object at 0x7da1b084d4e0>
<ast.AugAssign object at 0x7da1b084d540>
<ast.AugAssign object at 0x7da1b084d5d0>
<ast.AugAssign object at 0x7da1b084e8c0>
variable[slides_top] assign[=] constant[\begin{center}
]
variable[main_table] assign[=] constant[\begin{tabular}{lllcccc}
]
<ast.AugAssign object at 0x7da1b084e740>
<ast.AugAssign object at 0x7da1b084e680>
<ast.AugAssign object at 0x7da1b084e530>
<ast.AugAssign object at 0x7da1b084e560>
<ast.AugAssign object at 0x7da1b084e710>
<ast.AugAssign object at 0x7da1b084ff70>
<ast.AugAssign object at 0x7da1b084d900>
<ast.AugAssign object at 0x7da1b084ddb0>
<ast.AugAssign object at 0x7da1b084eb00>
<ast.AugAssign object at 0x7da1b084e320>
<ast.AugAssign object at 0x7da1b084fdc0>
<ast.AugAssign object at 0x7da1b084e1d0>
<ast.AugAssign object at 0x7da1b084d780>
<ast.AugAssign object at 0x7da1b084edd0>
<ast.AugAssign object at 0x7da1b084ee30>
<ast.AugAssign object at 0x7da1b084ead0>
<ast.AugAssign object at 0x7da18ede6bf0>
<ast.AugAssign object at 0x7da18ede7730>
<ast.AugAssign object at 0x7da18ede69e0>
<ast.AugAssign object at 0x7da18c4cd720>
<ast.AugAssign object at 0x7da20c992980>
<ast.AugAssign object at 0x7da20c990d60>
<ast.AugAssign object at 0x7da20c990b20>
<ast.AugAssign object at 0x7da20c993c40>
<ast.AugAssign object at 0x7da20c9928f0>
<ast.AugAssign object at 0x7da20c991360>
<ast.AugAssign object at 0x7da20c993fa0>
<ast.AugAssign object at 0x7da2054a7bb0>
<ast.AugAssign object at 0x7da2054a5b10>
<ast.AugAssign object at 0x7da2054a7970>
variable[paper_bot] assign[=] constant[ }
]
<ast.AugAssign object at 0x7da2054a4250>
<ast.AugAssign object at 0x7da2054a4640>
<ast.AugAssign object at 0x7da2054a7310>
<ast.AugAssign object at 0x7da2054a4af0>
<ast.AugAssign object at 0x7da2054a47c0>
<ast.AugAssign object at 0x7da2054a4a30>
<ast.AugAssign object at 0x7da2054a7520>
<ast.AugAssign object at 0x7da2054a44f0>
variable[slides_bot] assign[=] constant[\end{center}
]
variable[paper_output] assign[=] binary_operation[binary_operation[name[paper_top] + name[main_table]] + name[paper_bot]]
with call[name[open], parameter[binary_operation[binary_operation[name[tables_dir] + name[out_filename]] + constant[.tex]], constant[w]]] begin[:]
call[name[f].write, parameter[name[paper_output]]]
call[name[f].close, parameter[]]
variable[slides_output] assign[=] binary_operation[binary_operation[name[slides_top] + name[main_table]] + name[slides_bot]]
with call[name[open], parameter[binary_operation[binary_operation[name[tables_dir] + name[out_filename]] + constant[Slides.tex]], constant[w]]] begin[:]
call[name[f].write, parameter[name[slides_output]]]
call[name[f].close, parameter[]] | keyword[def] identifier[makeEquilibriumTable] ( identifier[out_filename] , identifier[four_in_files] , identifier[CRRA] ):
literal[string]
identifier[SOEfrictionless] = identifier[np] . identifier[genfromtxt] ( identifier[results_dir] + identifier[four_in_files] [ literal[int] ]+ literal[string] , identifier[delimiter] = literal[string] )
identifier[SOEsticky] = identifier[np] . identifier[genfromtxt] ( identifier[results_dir] + identifier[four_in_files] [ literal[int] ]+ literal[string] , identifier[delimiter] = literal[string] )
identifier[DSGEfrictionless] = identifier[np] . identifier[genfromtxt] ( identifier[results_dir] + identifier[four_in_files] [ literal[int] ]+ literal[string] , identifier[delimiter] = literal[string] )
identifier[DSGEsticky] = identifier[np] . identifier[genfromtxt] ( identifier[results_dir] + identifier[four_in_files] [ literal[int] ]+ literal[string] , identifier[delimiter] = literal[string] )
identifier[vBirth_SOE_F] = identifier[np] . identifier[genfromtxt] ( identifier[results_dir] + identifier[four_in_files] [ literal[int] ]+ literal[string] , identifier[delimiter] = literal[string] )
identifier[vBirth_SOE_S] = identifier[np] . identifier[genfromtxt] ( identifier[results_dir] + identifier[four_in_files] [ literal[int] ]+ literal[string] , identifier[delimiter] = literal[string] )
identifier[vBirth_DSGE_F] = identifier[np] . identifier[genfromtxt] ( identifier[results_dir] + identifier[four_in_files] [ literal[int] ]+ literal[string] , identifier[delimiter] = literal[string] )
identifier[vBirth_DSGE_S] = identifier[np] . identifier[genfromtxt] ( identifier[results_dir] + identifier[four_in_files] [ literal[int] ]+ literal[string] , identifier[delimiter] = literal[string] )
identifier[StickyCost_SOE] = identifier[np] . identifier[mean] ( literal[int] -( identifier[vBirth_SOE_S] / identifier[vBirth_SOE_F] )**( literal[int] /( literal[int] - identifier[CRRA] )))
identifier[StickyCost_DSGE] = identifier[np] . identifier[mean] ( literal[int] -( identifier[vBirth_DSGE_S] / identifier[vBirth_DSGE_F] )**( literal[int] /( literal[int] - identifier[CRRA] )))
identifier[paper_top] = literal[string]
identifier[paper_top] += literal[string]
identifier[paper_top] += literal[string]
identifier[paper_top] += literal[string]
identifier[paper_top] += literal[string]
identifier[paper_top] += literal[string]
identifier[paper_top] += literal[string]
identifier[slides_top] = literal[string]
identifier[main_table] = literal[string]
identifier[main_table] += literal[string]
identifier[main_table] += literal[string]
identifier[main_table] += literal[string]
identifier[main_table] += literal[string]
identifier[main_table] += literal[string]
identifier[main_table] += literal[string]
identifier[main_table] += literal[string]
identifier[main_table] += literal[string]
identifier[main_table] += literal[string] . identifier[format] ( identifier[SOEfrictionless] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[SOEsticky] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[DSGEfrictionless] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[DSGEsticky] [ literal[int] ])+ literal[string]
identifier[main_table] += literal[string] . identifier[format] ( identifier[SOEfrictionless] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[SOEsticky] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[DSGEfrictionless] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[DSGEsticky] [ literal[int] ])+ literal[string]
identifier[main_table] += literal[string]
identifier[main_table] += literal[string]
identifier[main_table] += literal[string]
identifier[main_table] += literal[string]
identifier[main_table] += literal[string] . identifier[format] ( identifier[SOEfrictionless] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[SOEsticky] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[DSGEfrictionless] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[DSGEsticky] [ literal[int] ])+ literal[string]
identifier[main_table] += literal[string] . identifier[format] ( identifier[SOEfrictionless] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[SOEsticky] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[DSGEfrictionless] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[DSGEsticky] [ literal[int] ])+ literal[string]
identifier[main_table] += literal[string] . identifier[format] ( identifier[SOEfrictionless] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[SOEsticky] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[DSGEfrictionless] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[DSGEsticky] [ literal[int] ])+ literal[string]
identifier[main_table] += literal[string]
identifier[main_table] += literal[string] . identifier[format] ( identifier[SOEfrictionless] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[SOEsticky] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[DSGEfrictionless] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[DSGEsticky] [ literal[int] ])+ literal[string]
identifier[main_table] += literal[string] . identifier[format] ( identifier[SOEfrictionless] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[SOEsticky] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[DSGEfrictionless] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[DSGEsticky] [ literal[int] ])+ literal[string]
identifier[main_table] += literal[string] . identifier[format] ( identifier[SOEfrictionless] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[SOEsticky] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[DSGEfrictionless] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[DSGEsticky] [ literal[int] ])+ literal[string]
identifier[main_table] += literal[string] . identifier[format] ( identifier[SOEfrictionless] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[SOEsticky] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[DSGEfrictionless] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[DSGEsticky] [ literal[int] ])+ literal[string]
identifier[main_table] += literal[string] . identifier[format] ( identifier[SOEfrictionless] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[SOEsticky] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[DSGEfrictionless] [ literal[int] ])+ literal[string] . identifier[format] ( identifier[DSGEsticky] [ literal[int] ])+ literal[string]
identifier[main_table] += literal[string]
identifier[main_table] += literal[string]
identifier[main_table] += literal[string]
identifier[main_table] += literal[string] + identifier[mystr2] ( identifier[StickyCost_SOE] )+ literal[string]
identifier[main_table] += literal[string] + identifier[mystr2] ( identifier[StickyCost_DSGE] )+ literal[string]
identifier[main_table] += literal[string]
identifier[main_table] += literal[string]
identifier[paper_bot] = literal[string]
identifier[paper_bot] += literal[string]
identifier[paper_bot] += literal[string]
identifier[paper_bot] += literal[string]
identifier[paper_bot] += literal[string]
identifier[paper_bot] += literal[string]
identifier[paper_bot] += literal[string]
identifier[paper_bot] += literal[string]
identifier[paper_bot] += literal[string]
identifier[slides_bot] = literal[string]
identifier[paper_output] = identifier[paper_top] + identifier[main_table] + identifier[paper_bot]
keyword[with] identifier[open] ( identifier[tables_dir] + identifier[out_filename] + literal[string] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[paper_output] )
identifier[f] . identifier[close] ()
identifier[slides_output] = identifier[slides_top] + identifier[main_table] + identifier[slides_bot]
keyword[with] identifier[open] ( identifier[tables_dir] + identifier[out_filename] + literal[string] , literal[string] ) keyword[as] identifier[f] :
identifier[f] . identifier[write] ( identifier[slides_output] )
identifier[f] . identifier[close] () | def makeEquilibriumTable(out_filename, four_in_files, CRRA):
"""
Make the equilibrium statistics table for the paper, saving it as a tex file
in the tables folder. Also makes a version for the slides that doesn't use
the table environment, nor include the note at bottom.
Parameters
----------
out_filename : str
Name of the file in which to save output (in the tables directory).
Suffix .tex appended automatically.
four_in_files: [str]
A list with four csv files. 0) SOE frictionless 1) SOE Sticky 2) DSGE frictionless 3) DSGE sticky
CRRA : float
Coefficient of relative risk aversion
Returns
-------
None
"""
# Read in statistics from the four files
SOEfrictionless = np.genfromtxt(results_dir + four_in_files[0] + 'Results.csv', delimiter=',')
SOEsticky = np.genfromtxt(results_dir + four_in_files[1] + 'Results.csv', delimiter=',')
DSGEfrictionless = np.genfromtxt(results_dir + four_in_files[2] + 'Results.csv', delimiter=',')
DSGEsticky = np.genfromtxt(results_dir + four_in_files[3] + 'Results.csv', delimiter=',')
# Read in value at birth from the four files
vBirth_SOE_F = np.genfromtxt(results_dir + four_in_files[0] + 'BirthValue.csv', delimiter=',')
vBirth_SOE_S = np.genfromtxt(results_dir + four_in_files[1] + 'BirthValue.csv', delimiter=',')
vBirth_DSGE_F = np.genfromtxt(results_dir + four_in_files[2] + 'BirthValue.csv', delimiter=',')
vBirth_DSGE_S = np.genfromtxt(results_dir + four_in_files[3] + 'BirthValue.csv', delimiter=',')
# Calculate the cost of stickiness in the SOE and DSGE models
StickyCost_SOE = np.mean(1.0 - (vBirth_SOE_S / vBirth_SOE_F) ** (1.0 / (1.0 - CRRA)))
StickyCost_DSGE = np.mean(1.0 - (vBirth_DSGE_S / vBirth_DSGE_F) ** (1.0 / (1.0 - CRRA)))
paper_top = '\\begin{minipage}{\\textwidth}\n'
paper_top += ' \\begin{table} \n'
paper_top += '\\caption{Equilibrium Statistics} \n'
paper_top += '\\label{table:Eqbm} \n'
paper_top += '\\newsavebox{\\EqbmBox} \n'
paper_top += '\\sbox{\\EqbmBox}{ \n'
paper_top += '\\newcommand{\\EqDir}{\\TablesDir/Eqbm} \n'
slides_top = '\\begin{center} \n'
main_table = '\\begin{tabular}{lllcccc} \n'
main_table += '\\toprule \n'
main_table += '&&& \\multicolumn{2}{c}{SOE Model} & \\multicolumn{2}{c}{HA-DSGE Model} \n'
main_table += '\\\\ %\\cline{4-5} \n'
main_table += ' &&& \\multicolumn{1}{c}{Frictionless} & \\multicolumn{1}{c}{Sticky} & \\multicolumn{1}{c}{Frictionless} & \\multicolumn{1}{c}{Sticky} \n'
main_table += '\\\\ \\midrule \n'
main_table += ' \\multicolumn{3}{l}{Means} \n'
main_table += '%\\\\ & & $M$ \n'
main_table += '%\\\\ & & $K$ \n'
main_table += '\\\\ & & $A$ & {:.2f}'.format(SOEfrictionless[0]) + ' &{:.2f}'.format(SOEsticky[0]) + ' & {:.2f}'.format(DSGEfrictionless[0]) + ' & {:.2f}'.format(DSGEsticky[0]) + ' \n'
main_table += '\\\\ & & $C$ & {:.2f}'.format(SOEfrictionless[1]) + ' &{:.2f}'.format(SOEsticky[1]) + ' & {:.2f}'.format(DSGEfrictionless[1]) + ' & {:.2f}'.format(DSGEsticky[1]) + ' \n'
main_table += '\\\\ \\midrule \n'
main_table += ' \\multicolumn{3}{l}{Standard Deviations} \n'
main_table += "\\\\ & \\multicolumn{4}{l}{Aggregate Time Series (`Macro')} \n"
main_table += '%\\ & & $\\Delta \\log \\mathbf{M}$ \n'
main_table += '\\\\ & & $\\log A $ & {:.3f}'.format(SOEfrictionless[2]) + ' & {:.3f}'.format(SOEsticky[2]) + ' & {:.3f}'.format(DSGEfrictionless[2]) + ' & {:.3f}'.format(DSGEsticky[2]) + ' \n'
main_table += '\\\\ & & $\\Delta \\log \\CLevBF $ & {:.3f}'.format(SOEfrictionless[3]) + ' & {:.3f}'.format(SOEsticky[3]) + ' & {:.3f}'.format(DSGEfrictionless[3]) + ' & {:.3f}'.format(DSGEsticky[3]) + ' \n'
main_table += '\\\\ & & $\\Delta \\log \\YLevBF $ & {:.3f}'.format(SOEfrictionless[4]) + ' & {:.3f}'.format(SOEsticky[4]) + ' & {:.3f}'.format(DSGEfrictionless[4]) + ' & {:.3f}'.format(DSGEsticky[4]) + ' \n'
main_table += "\\\\ & \\multicolumn{3}{l}{Individual Cross Sectional (`Micro')} \n"
main_table += '\\\\ & & $\\log \\aLevBF $ & {:.3f}'.format(SOEfrictionless[6]) + ' & {:.3f}'.format(SOEsticky[6]) + ' & {:.3f}'.format(DSGEfrictionless[6]) + ' & {:.3f}'.format(DSGEsticky[6]) + ' \n'
main_table += '\\\\ & & $\\log \\cLevBF $ & {:.3f}'.format(SOEfrictionless[7]) + ' & {:.3f}'.format(SOEsticky[7]) + ' & {:.3f}'.format(DSGEfrictionless[7]) + ' & {:.3f}'.format(DSGEsticky[7]) + ' \n'
main_table += '\\\\ & & $\\log p $ & {:.3f}'.format(SOEfrictionless[8]) + ' & {:.3f}'.format(SOEsticky[8]) + ' & {:.3f}'.format(DSGEfrictionless[8]) + ' & {:.3f}'.format(DSGEsticky[8]) + ' \n'
main_table += '\\\\ & & $\\log \\yLevBF | \\yLevBF > 0 $ & {:.3f}'.format(SOEfrictionless[9]) + ' & {:.3f}'.format(SOEsticky[9]) + ' & {:.3f}'.format(DSGEfrictionless[9]) + ' & {:.3f}'.format(DSGEsticky[9]) + ' \n'
main_table += '\\\\ & & $\\Delta \\log \\cLevBF $ & {:.3f}'.format(SOEfrictionless[11]) + ' & {:.3f}'.format(SOEsticky[11]) + ' & {:.3f}'.format(DSGEfrictionless[11]) + ' & {:.3f}'.format(DSGEsticky[11]) + ' \n'
main_table += ' \n'
main_table += ' \n'
main_table += '\\\\ \\midrule \\multicolumn{3}{l}{Cost of Stickiness} \n'
main_table += ' & \\multicolumn{2}{c}{' + mystr2(StickyCost_SOE) + '} \n'
main_table += ' & \\multicolumn{2}{c}{' + mystr2(StickyCost_DSGE) + '} \n'
main_table += '\\\\ \\bottomrule \n'
main_table += ' \\end{tabular} \n'
paper_bot = ' } \n '
paper_bot += '\\usebox{\\EqbmBox} \n'
paper_bot += '\\ifthenelse{\\boolean{StandAlone}}{\\newlength\\TableWidth}{} \n'
paper_bot += '\\settowidth\\TableWidth{\\usebox{\\EqbmBox}} % Calculate width of table so notes will match \n'
paper_bot += '\\medskip\\medskip \\vspace{0.0cm} \\parbox{\\TableWidth}{\\footnotesize\n'
paper_bot += '\\textbf{Notes}: The cost of stickiness is calculated as the proportion by which the permanent income of a newborn frictionless consumer would need to be reduced in order to achieve the same reduction of expected value associated with forcing them to become a sticky expectations consumer.} \n'
paper_bot += '\\end{table}\n'
paper_bot += '\\end{minipage}\n'
paper_bot += '\\ifthenelse{\\boolean{StandAlone}}{\\end{document}}{} \n'
slides_bot = '\\end{center} \n'
paper_output = paper_top + main_table + paper_bot
with open(tables_dir + out_filename + '.tex', 'w') as f:
f.write(paper_output)
f.close() # depends on [control=['with'], data=['f']]
slides_output = slides_top + main_table + slides_bot
with open(tables_dir + out_filename + 'Slides.tex', 'w') as f:
f.write(slides_output)
f.close() # depends on [control=['with'], data=['f']] |
def encode_timestamp(timestamp: hints.Buffer) -> str:
    """
    Encode the given buffer to a :class:`~str` using Base32 encoding.
    The given :class:`~bytes` are expected to represent the first 6 bytes of a ULID, which
    are a timestamp in milliseconds.
    .. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
    bytes specifically and is not meant for arbitrary encoding.
    :param timestamp: Bytes to encode
    :type timestamp: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
    :return: Value encoded as a Base32 string
    :rtype: :class:`~str`
    :raises ValueError: when the timestamp is not 6 bytes
    """
    if len(timestamp) != 6:
        raise ValueError('Expects 6 bytes for timestamp; got {}'.format(len(timestamp)))
    alphabet = ENCODING
    b0, b1, b2, b3, b4, b5 = timestamp[0], timestamp[1], timestamp[2], timestamp[3], timestamp[4], timestamp[5]
    # Repack the 48 input bits into ten 5-bit groups; each group indexes one
    # character of the Base32 alphabet (6 bytes -> 10 encoded characters).
    indexes = (
        (b0 & 224) >> 5,
        b0 & 31,
        (b1 & 248) >> 3,
        ((b1 & 7) << 2) | ((b2 & 192) >> 6),
        (b2 & 62) >> 1,
        ((b2 & 1) << 4) | ((b3 & 240) >> 4),
        ((b3 & 15) << 1) | ((b4 & 128) >> 7),
        (b4 & 124) >> 2,
        ((b4 & 3) << 3) | ((b5 & 224) >> 5),
        b5 & 31,
    )
    return ''.join(alphabet[i] for i in indexes)
constant[
Encode the given buffer to a :class:`~str` using Base32 encoding.
The given :class:`~bytes` are expected to represent the first 6 bytes of a ULID, which
are a timestamp in milliseconds.
.. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
bytes specifically and is not meant for arbitrary encoding.
:param timestamp: Bytes to encode
:type timestamp: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: Value encoded as a Base32 string
:rtype: :class:`~str`
:raises ValueError: when the timestamp is not 6 bytes
]
variable[length] assign[=] call[name[len], parameter[name[timestamp]]]
if compare[name[length] not_equal[!=] constant[6]] begin[:]
<ast.Raise object at 0x7da1b1166200>
variable[encoding] assign[=] name[ENCODING]
return[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[binary_operation[call[name[encoding]][binary_operation[binary_operation[call[name[timestamp]][constant[0]] <ast.BitAnd object at 0x7da2590d6b60> constant[224]] <ast.RShift object at 0x7da2590d6a40> constant[5]]] + call[name[encoding]][binary_operation[call[name[timestamp]][constant[0]] <ast.BitAnd object at 0x7da2590d6b60> constant[31]]]] + call[name[encoding]][binary_operation[binary_operation[call[name[timestamp]][constant[1]] <ast.BitAnd object at 0x7da2590d6b60> constant[248]] <ast.RShift object at 0x7da2590d6a40> constant[3]]]] + call[name[encoding]][binary_operation[binary_operation[binary_operation[call[name[timestamp]][constant[1]] <ast.BitAnd object at 0x7da2590d6b60> constant[7]] <ast.LShift object at 0x7da2590d69e0> constant[2]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[binary_operation[call[name[timestamp]][constant[2]] <ast.BitAnd object at 0x7da2590d6b60> constant[192]] <ast.RShift object at 0x7da2590d6a40> constant[6]]]]] + call[name[encoding]][binary_operation[binary_operation[call[name[timestamp]][constant[2]] <ast.BitAnd object at 0x7da2590d6b60> constant[62]] <ast.RShift object at 0x7da2590d6a40> constant[1]]]] + call[name[encoding]][binary_operation[binary_operation[binary_operation[call[name[timestamp]][constant[2]] <ast.BitAnd object at 0x7da2590d6b60> constant[1]] <ast.LShift object at 0x7da2590d69e0> constant[4]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[binary_operation[call[name[timestamp]][constant[3]] <ast.BitAnd object at 0x7da2590d6b60> constant[240]] <ast.RShift object at 0x7da2590d6a40> constant[4]]]]] + call[name[encoding]][binary_operation[binary_operation[binary_operation[call[name[timestamp]][constant[3]] <ast.BitAnd object at 0x7da2590d6b60> constant[15]] <ast.LShift object at 0x7da2590d69e0> constant[1]] <ast.BitOr object at 0x7da2590d6aa0> 
binary_operation[binary_operation[call[name[timestamp]][constant[4]] <ast.BitAnd object at 0x7da2590d6b60> constant[128]] <ast.RShift object at 0x7da2590d6a40> constant[7]]]]] + call[name[encoding]][binary_operation[binary_operation[call[name[timestamp]][constant[4]] <ast.BitAnd object at 0x7da2590d6b60> constant[124]] <ast.RShift object at 0x7da2590d6a40> constant[2]]]] + call[name[encoding]][binary_operation[binary_operation[binary_operation[call[name[timestamp]][constant[4]] <ast.BitAnd object at 0x7da2590d6b60> constant[3]] <ast.LShift object at 0x7da2590d69e0> constant[3]] <ast.BitOr object at 0x7da2590d6aa0> binary_operation[binary_operation[call[name[timestamp]][constant[5]] <ast.BitAnd object at 0x7da2590d6b60> constant[224]] <ast.RShift object at 0x7da2590d6a40> constant[5]]]]] + call[name[encoding]][binary_operation[call[name[timestamp]][constant[5]] <ast.BitAnd object at 0x7da2590d6b60> constant[31]]]]] | keyword[def] identifier[encode_timestamp] ( identifier[timestamp] : identifier[hints] . identifier[Buffer] )-> identifier[str] :
literal[string]
identifier[length] = identifier[len] ( identifier[timestamp] )
keyword[if] identifier[length] != literal[int] :
keyword[raise] identifier[ValueError] ( literal[string] . identifier[format] ( identifier[length] ))
identifier[encoding] = identifier[ENCODING]
keyword[return] identifier[encoding] [( identifier[timestamp] [ literal[int] ]& literal[int] )>> literal[int] ]+ identifier[encoding] [ identifier[timestamp] [ literal[int] ]& literal[int] ]+ identifier[encoding] [( identifier[timestamp] [ literal[int] ]& literal[int] )>> literal[int] ]+ identifier[encoding] [(( identifier[timestamp] [ literal[int] ]& literal[int] )<< literal[int] )|(( identifier[timestamp] [ literal[int] ]& literal[int] )>> literal[int] )]+ identifier[encoding] [(( identifier[timestamp] [ literal[int] ]& literal[int] )>> literal[int] )]+ identifier[encoding] [(( identifier[timestamp] [ literal[int] ]& literal[int] )<< literal[int] )|(( identifier[timestamp] [ literal[int] ]& literal[int] )>> literal[int] )]+ identifier[encoding] [(( identifier[timestamp] [ literal[int] ]& literal[int] )<< literal[int] )|(( identifier[timestamp] [ literal[int] ]& literal[int] )>> literal[int] )]+ identifier[encoding] [( identifier[timestamp] [ literal[int] ]& literal[int] )>> literal[int] ]+ identifier[encoding] [(( identifier[timestamp] [ literal[int] ]& literal[int] )<< literal[int] )|(( identifier[timestamp] [ literal[int] ]& literal[int] )>> literal[int] )]+ identifier[encoding] [ identifier[timestamp] [ literal[int] ]& literal[int] ] | def encode_timestamp(timestamp: hints.Buffer) -> str:
"""
Encode the given buffer to a :class:`~str` using Base32 encoding.
The given :class:`~bytes` are expected to represent the first 6 bytes of a ULID, which
are a timestamp in milliseconds.
.. note:: This uses an optimized strategy from the `NUlid` project for encoding ULID
bytes specifically and is not meant for arbitrary encoding.
:param timestamp: Bytes to encode
:type timestamp: :class:`~bytes`, :class:`~bytearray`, or :class:`~memoryview`
:return: Value encoded as a Base32 string
:rtype: :class:`~str`
:raises ValueError: when the timestamp is not 6 bytes
"""
length = len(timestamp)
if length != 6:
raise ValueError('Expects 6 bytes for timestamp; got {}'.format(length)) # depends on [control=['if'], data=['length']]
encoding = ENCODING
return encoding[(timestamp[0] & 224) >> 5] + encoding[timestamp[0] & 31] + encoding[(timestamp[1] & 248) >> 3] + encoding[(timestamp[1] & 7) << 2 | (timestamp[2] & 192) >> 6] + encoding[(timestamp[2] & 62) >> 1] + encoding[(timestamp[2] & 1) << 4 | (timestamp[3] & 240) >> 4] + encoding[(timestamp[3] & 15) << 1 | (timestamp[4] & 128) >> 7] + encoding[(timestamp[4] & 124) >> 2] + encoding[(timestamp[4] & 3) << 3 | (timestamp[5] & 224) >> 5] + encoding[timestamp[5] & 31] |
def construct_result_generator_middleware(result_generators):
"""
Constructs a middleware which intercepts requests for any method found in
the provided mapping of endpoints to generator functions, returning
whatever response the generator function returns. Callbacks must be
functions with the signature `fn(method, params)`.
"""
def result_generator_middleware(make_request, web3):
def middleware(method, params):
if method in result_generators:
result = result_generators[method](method, params)
return {'result': result}
else:
return make_request(method, params)
return middleware
return result_generator_middleware | def function[construct_result_generator_middleware, parameter[result_generators]]:
constant[
Constructs a middleware which intercepts requests for any method found in
the provided mapping of endpoints to generator functions, returning
whatever response the generator function returns. Callbacks must be
functions with the signature `fn(method, params)`.
]
def function[result_generator_middleware, parameter[make_request, web3]]:
def function[middleware, parameter[method, params]]:
if compare[name[method] in name[result_generators]] begin[:]
variable[result] assign[=] call[call[name[result_generators]][name[method]], parameter[name[method], name[params]]]
return[dictionary[[<ast.Constant object at 0x7da18f810e50>], [<ast.Name object at 0x7da18f813ee0>]]]
return[name[middleware]]
return[name[result_generator_middleware]] | keyword[def] identifier[construct_result_generator_middleware] ( identifier[result_generators] ):
literal[string]
keyword[def] identifier[result_generator_middleware] ( identifier[make_request] , identifier[web3] ):
keyword[def] identifier[middleware] ( identifier[method] , identifier[params] ):
keyword[if] identifier[method] keyword[in] identifier[result_generators] :
identifier[result] = identifier[result_generators] [ identifier[method] ]( identifier[method] , identifier[params] )
keyword[return] { literal[string] : identifier[result] }
keyword[else] :
keyword[return] identifier[make_request] ( identifier[method] , identifier[params] )
keyword[return] identifier[middleware]
keyword[return] identifier[result_generator_middleware] | def construct_result_generator_middleware(result_generators):
"""
Constructs a middleware which intercepts requests for any method found in
the provided mapping of endpoints to generator functions, returning
whatever response the generator function returns. Callbacks must be
functions with the signature `fn(method, params)`.
"""
def result_generator_middleware(make_request, web3):
def middleware(method, params):
if method in result_generators:
result = result_generators[method](method, params)
return {'result': result} # depends on [control=['if'], data=['method', 'result_generators']]
else:
return make_request(method, params)
return middleware
return result_generator_middleware |
def pluralize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom={}):
""" Returns the plural of a given word.
The inflection is based on probability rather than gender and role.
"""
w = word.lower().capitalize()
if word in custom:
return custom[word]
if pos == NOUN:
for a, b in plural_inflections:
if w.endswith(a):
return w[:-len(a)] + b
# Default rules (baseline = 69%).
if w.startswith("ge"):
return w
if w.endswith("gie"):
return w
if w.endswith("e"):
return w + "n"
if w.endswith("ien"):
return w[:-2] + "um"
if w.endswith(("au", "ein", "eit", "er", "en", "el", "chen", "mus", u"tät", "tik", "tum", "u")):
return w
if w.endswith(("ant", "ei", "enz", "ion", "ist", "or", "schaft", "tur", "ung")):
return w + "en"
if w.endswith("in"):
return w + "nen"
if w.endswith("nis"):
return w + "se"
if w.endswith(("eld", "ild", "ind")):
return w + "er"
if w.endswith("o"):
return w + "s"
if w.endswith("a"):
return w[:-1] + "en"
# Inflect common umlaut vowels: Kopf => Köpfe.
if w.endswith(("all", "and", "ang", "ank", "atz", "auf", "ock", "opf", "uch", "uss")):
umlaut = w[-3]
umlaut = umlaut.replace("a", u"ä")
umlaut = umlaut.replace("o", u"ö")
umlaut = umlaut.replace("u", u"ü")
return w[:-3] + umlaut + w[-2:] + "e"
for a, b in (
("ag", u"äge"),
("ann", u"änner"),
("aum", u"äume"),
("aus", u"äuser"),
("zug", u"züge")):
if w.endswith(a):
return w[:-len(a)] + b
return w + "e"
return w | def function[pluralize, parameter[word, pos, gender, role, custom]]:
constant[ Returns the plural of a given word.
The inflection is based on probability rather than gender and role.
]
variable[w] assign[=] call[call[name[word].lower, parameter[]].capitalize, parameter[]]
if compare[name[word] in name[custom]] begin[:]
return[call[name[custom]][name[word]]]
if compare[name[pos] equal[==] name[NOUN]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da20c7ca6e0>, <ast.Name object at 0x7da20c7c82e0>]]] in starred[name[plural_inflections]] begin[:]
if call[name[w].endswith, parameter[name[a]]] begin[:]
return[binary_operation[call[name[w]][<ast.Slice object at 0x7da20c7c9e40>] + name[b]]]
if call[name[w].startswith, parameter[constant[ge]]] begin[:]
return[name[w]]
if call[name[w].endswith, parameter[constant[gie]]] begin[:]
return[name[w]]
if call[name[w].endswith, parameter[constant[e]]] begin[:]
return[binary_operation[name[w] + constant[n]]]
if call[name[w].endswith, parameter[constant[ien]]] begin[:]
return[binary_operation[call[name[w]][<ast.Slice object at 0x7da20c7c91e0>] + constant[um]]]
if call[name[w].endswith, parameter[tuple[[<ast.Constant object at 0x7da20c7cbaf0>, <ast.Constant object at 0x7da20c7c8a60>, <ast.Constant object at 0x7da20c7c9180>, <ast.Constant object at 0x7da20c7ca7d0>, <ast.Constant object at 0x7da20c7c8fa0>, <ast.Constant object at 0x7da20c7c9d50>, <ast.Constant object at 0x7da20c7cbdf0>, <ast.Constant object at 0x7da20c7c85b0>, <ast.Constant object at 0x7da20c7ca2c0>, <ast.Constant object at 0x7da20c7ca740>, <ast.Constant object at 0x7da20c7cb520>, <ast.Constant object at 0x7da20c7ca110>]]]] begin[:]
return[name[w]]
if call[name[w].endswith, parameter[tuple[[<ast.Constant object at 0x7da20c7ca770>, <ast.Constant object at 0x7da20c7cb4c0>, <ast.Constant object at 0x7da20c7c9510>, <ast.Constant object at 0x7da20c7c8970>, <ast.Constant object at 0x7da20c7c95d0>, <ast.Constant object at 0x7da20c7c8640>, <ast.Constant object at 0x7da20c7c9ab0>, <ast.Constant object at 0x7da20c7c9b70>, <ast.Constant object at 0x7da20c7cb400>]]]] begin[:]
return[binary_operation[name[w] + constant[en]]]
if call[name[w].endswith, parameter[constant[in]]] begin[:]
return[binary_operation[name[w] + constant[nen]]]
if call[name[w].endswith, parameter[constant[nis]]] begin[:]
return[binary_operation[name[w] + constant[se]]]
if call[name[w].endswith, parameter[tuple[[<ast.Constant object at 0x7da1b26af220>, <ast.Constant object at 0x7da1b26adff0>, <ast.Constant object at 0x7da1b26af640>]]]] begin[:]
return[binary_operation[name[w] + constant[er]]]
if call[name[w].endswith, parameter[constant[o]]] begin[:]
return[binary_operation[name[w] + constant[s]]]
if call[name[w].endswith, parameter[constant[a]]] begin[:]
return[binary_operation[call[name[w]][<ast.Slice object at 0x7da1b26af190>] + constant[en]]]
if call[name[w].endswith, parameter[tuple[[<ast.Constant object at 0x7da1b26ac8e0>, <ast.Constant object at 0x7da1b26ad300>, <ast.Constant object at 0x7da1b26aeda0>, <ast.Constant object at 0x7da1b26ae260>, <ast.Constant object at 0x7da1b26acaf0>, <ast.Constant object at 0x7da1b26adc00>, <ast.Constant object at 0x7da1b26af9a0>, <ast.Constant object at 0x7da1b26afdc0>, <ast.Constant object at 0x7da20c6ab8b0>, <ast.Constant object at 0x7da20c6ab3d0>]]]] begin[:]
variable[umlaut] assign[=] call[name[w]][<ast.UnaryOp object at 0x7da20c6ab5b0>]
variable[umlaut] assign[=] call[name[umlaut].replace, parameter[constant[a], constant[ä]]]
variable[umlaut] assign[=] call[name[umlaut].replace, parameter[constant[o], constant[ö]]]
variable[umlaut] assign[=] call[name[umlaut].replace, parameter[constant[u], constant[ü]]]
return[binary_operation[binary_operation[binary_operation[call[name[w]][<ast.Slice object at 0x7da20c6a9480>] + name[umlaut]] + call[name[w]][<ast.Slice object at 0x7da20c6aa530>]] + constant[e]]]
for taget[tuple[[<ast.Name object at 0x7da20c6aa080>, <ast.Name object at 0x7da20c6ab520>]]] in starred[tuple[[<ast.Tuple object at 0x7da20c6a9db0>, <ast.Tuple object at 0x7da20c6ab0d0>, <ast.Tuple object at 0x7da20c6aa560>, <ast.Tuple object at 0x7da20c6a92d0>, <ast.Tuple object at 0x7da20c6a8040>]]] begin[:]
if call[name[w].endswith, parameter[name[a]]] begin[:]
return[binary_operation[call[name[w]][<ast.Slice object at 0x7da20c6abac0>] + name[b]]]
return[binary_operation[name[w] + constant[e]]]
return[name[w]] | keyword[def] identifier[pluralize] ( identifier[word] , identifier[pos] = identifier[NOUN] , identifier[gender] = identifier[MALE] , identifier[role] = identifier[SUBJECT] , identifier[custom] ={}):
literal[string]
identifier[w] = identifier[word] . identifier[lower] (). identifier[capitalize] ()
keyword[if] identifier[word] keyword[in] identifier[custom] :
keyword[return] identifier[custom] [ identifier[word] ]
keyword[if] identifier[pos] == identifier[NOUN] :
keyword[for] identifier[a] , identifier[b] keyword[in] identifier[plural_inflections] :
keyword[if] identifier[w] . identifier[endswith] ( identifier[a] ):
keyword[return] identifier[w] [:- identifier[len] ( identifier[a] )]+ identifier[b]
keyword[if] identifier[w] . identifier[startswith] ( literal[string] ):
keyword[return] identifier[w]
keyword[if] identifier[w] . identifier[endswith] ( literal[string] ):
keyword[return] identifier[w]
keyword[if] identifier[w] . identifier[endswith] ( literal[string] ):
keyword[return] identifier[w] + literal[string]
keyword[if] identifier[w] . identifier[endswith] ( literal[string] ):
keyword[return] identifier[w] [:- literal[int] ]+ literal[string]
keyword[if] identifier[w] . identifier[endswith] (( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] )):
keyword[return] identifier[w]
keyword[if] identifier[w] . identifier[endswith] (( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] )):
keyword[return] identifier[w] + literal[string]
keyword[if] identifier[w] . identifier[endswith] ( literal[string] ):
keyword[return] identifier[w] + literal[string]
keyword[if] identifier[w] . identifier[endswith] ( literal[string] ):
keyword[return] identifier[w] + literal[string]
keyword[if] identifier[w] . identifier[endswith] (( literal[string] , literal[string] , literal[string] )):
keyword[return] identifier[w] + literal[string]
keyword[if] identifier[w] . identifier[endswith] ( literal[string] ):
keyword[return] identifier[w] + literal[string]
keyword[if] identifier[w] . identifier[endswith] ( literal[string] ):
keyword[return] identifier[w] [:- literal[int] ]+ literal[string]
keyword[if] identifier[w] . identifier[endswith] (( literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] , literal[string] )):
identifier[umlaut] = identifier[w] [- literal[int] ]
identifier[umlaut] = identifier[umlaut] . identifier[replace] ( literal[string] , literal[string] )
identifier[umlaut] = identifier[umlaut] . identifier[replace] ( literal[string] , literal[string] )
identifier[umlaut] = identifier[umlaut] . identifier[replace] ( literal[string] , literal[string] )
keyword[return] identifier[w] [:- literal[int] ]+ identifier[umlaut] + identifier[w] [- literal[int] :]+ literal[string]
keyword[for] identifier[a] , identifier[b] keyword[in] (
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] ),
( literal[string] , literal[string] )):
keyword[if] identifier[w] . identifier[endswith] ( identifier[a] ):
keyword[return] identifier[w] [:- identifier[len] ( identifier[a] )]+ identifier[b]
keyword[return] identifier[w] + literal[string]
keyword[return] identifier[w] | def pluralize(word, pos=NOUN, gender=MALE, role=SUBJECT, custom={}):
""" Returns the plural of a given word.
The inflection is based on probability rather than gender and role.
"""
w = word.lower().capitalize()
if word in custom:
return custom[word] # depends on [control=['if'], data=['word', 'custom']]
if pos == NOUN:
for (a, b) in plural_inflections:
if w.endswith(a):
return w[:-len(a)] + b # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
# Default rules (baseline = 69%).
if w.startswith('ge'):
return w # depends on [control=['if'], data=[]]
if w.endswith('gie'):
return w # depends on [control=['if'], data=[]]
if w.endswith('e'):
return w + 'n' # depends on [control=['if'], data=[]]
if w.endswith('ien'):
return w[:-2] + 'um' # depends on [control=['if'], data=[]]
if w.endswith(('au', 'ein', 'eit', 'er', 'en', 'el', 'chen', 'mus', u'tät', 'tik', 'tum', 'u')):
return w # depends on [control=['if'], data=[]]
if w.endswith(('ant', 'ei', 'enz', 'ion', 'ist', 'or', 'schaft', 'tur', 'ung')):
return w + 'en' # depends on [control=['if'], data=[]]
if w.endswith('in'):
return w + 'nen' # depends on [control=['if'], data=[]]
if w.endswith('nis'):
return w + 'se' # depends on [control=['if'], data=[]]
if w.endswith(('eld', 'ild', 'ind')):
return w + 'er' # depends on [control=['if'], data=[]]
if w.endswith('o'):
return w + 's' # depends on [control=['if'], data=[]]
if w.endswith('a'):
return w[:-1] + 'en' # depends on [control=['if'], data=[]]
# Inflect common umlaut vowels: Kopf => Köpfe.
if w.endswith(('all', 'and', 'ang', 'ank', 'atz', 'auf', 'ock', 'opf', 'uch', 'uss')):
umlaut = w[-3]
umlaut = umlaut.replace('a', u'ä')
umlaut = umlaut.replace('o', u'ö')
umlaut = umlaut.replace('u', u'ü')
return w[:-3] + umlaut + w[-2:] + 'e' # depends on [control=['if'], data=[]]
for (a, b) in (('ag', u'äge'), ('ann', u'änner'), ('aum', u'äume'), ('aus', u'äuser'), ('zug', u'züge')):
if w.endswith(a):
return w[:-len(a)] + b # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return w + 'e' # depends on [control=['if'], data=[]]
return w |
def _convert_from_thrift_endpoint(self, thrift_endpoint):
"""Accepts a thrift decoded endpoint and converts it to an Endpoint.
:param thrift_endpoint: thrift encoded endpoint
:type thrift_endpoint: thrift endpoint
:returns: decoded endpoint
:rtype: Encoding
"""
ipv4 = None
ipv6 = None
port = struct.unpack('H', struct.pack('h', thrift_endpoint.port))[0]
if thrift_endpoint.ipv4 != 0:
ipv4 = socket.inet_ntop(
socket.AF_INET,
struct.pack('!i', thrift_endpoint.ipv4),
)
if thrift_endpoint.ipv6:
ipv6 = socket.inet_ntop(socket.AF_INET6, thrift_endpoint.ipv6)
return Endpoint(
service_name=thrift_endpoint.service_name,
ipv4=ipv4,
ipv6=ipv6,
port=port,
) | def function[_convert_from_thrift_endpoint, parameter[self, thrift_endpoint]]:
constant[Accepts a thrift decoded endpoint and converts it to an Endpoint.
:param thrift_endpoint: thrift encoded endpoint
:type thrift_endpoint: thrift endpoint
:returns: decoded endpoint
:rtype: Encoding
]
variable[ipv4] assign[=] constant[None]
variable[ipv6] assign[=] constant[None]
variable[port] assign[=] call[call[name[struct].unpack, parameter[constant[H], call[name[struct].pack, parameter[constant[h], name[thrift_endpoint].port]]]]][constant[0]]
if compare[name[thrift_endpoint].ipv4 not_equal[!=] constant[0]] begin[:]
variable[ipv4] assign[=] call[name[socket].inet_ntop, parameter[name[socket].AF_INET, call[name[struct].pack, parameter[constant[!i], name[thrift_endpoint].ipv4]]]]
if name[thrift_endpoint].ipv6 begin[:]
variable[ipv6] assign[=] call[name[socket].inet_ntop, parameter[name[socket].AF_INET6, name[thrift_endpoint].ipv6]]
return[call[name[Endpoint], parameter[]]] | keyword[def] identifier[_convert_from_thrift_endpoint] ( identifier[self] , identifier[thrift_endpoint] ):
literal[string]
identifier[ipv4] = keyword[None]
identifier[ipv6] = keyword[None]
identifier[port] = identifier[struct] . identifier[unpack] ( literal[string] , identifier[struct] . identifier[pack] ( literal[string] , identifier[thrift_endpoint] . identifier[port] ))[ literal[int] ]
keyword[if] identifier[thrift_endpoint] . identifier[ipv4] != literal[int] :
identifier[ipv4] = identifier[socket] . identifier[inet_ntop] (
identifier[socket] . identifier[AF_INET] ,
identifier[struct] . identifier[pack] ( literal[string] , identifier[thrift_endpoint] . identifier[ipv4] ),
)
keyword[if] identifier[thrift_endpoint] . identifier[ipv6] :
identifier[ipv6] = identifier[socket] . identifier[inet_ntop] ( identifier[socket] . identifier[AF_INET6] , identifier[thrift_endpoint] . identifier[ipv6] )
keyword[return] identifier[Endpoint] (
identifier[service_name] = identifier[thrift_endpoint] . identifier[service_name] ,
identifier[ipv4] = identifier[ipv4] ,
identifier[ipv6] = identifier[ipv6] ,
identifier[port] = identifier[port] ,
) | def _convert_from_thrift_endpoint(self, thrift_endpoint):
"""Accepts a thrift decoded endpoint and converts it to an Endpoint.
:param thrift_endpoint: thrift encoded endpoint
:type thrift_endpoint: thrift endpoint
:returns: decoded endpoint
:rtype: Encoding
"""
ipv4 = None
ipv6 = None
port = struct.unpack('H', struct.pack('h', thrift_endpoint.port))[0]
if thrift_endpoint.ipv4 != 0:
ipv4 = socket.inet_ntop(socket.AF_INET, struct.pack('!i', thrift_endpoint.ipv4)) # depends on [control=['if'], data=[]]
if thrift_endpoint.ipv6:
ipv6 = socket.inet_ntop(socket.AF_INET6, thrift_endpoint.ipv6) # depends on [control=['if'], data=[]]
return Endpoint(service_name=thrift_endpoint.service_name, ipv4=ipv4, ipv6=ipv6, port=port) |
def failback(self, force_full_copy=None):
"""
Fails back a replication session.
This can be applied on a replication session that is failed over. Fail
back will synchronize the changes done to original destination back to
original source site and will restore the original direction of
session.
:param force_full_copy: indicates whether to sync back all data from
the destination SP to the source SP during the failback session.
True - Sync back all data.
False - Sync back changed data only.
"""
req_body = self._cli.make_body(forceFullCopy=force_full_copy)
resp = self.action('failback', **req_body)
resp.raise_if_err()
return resp | def function[failback, parameter[self, force_full_copy]]:
constant[
Fails back a replication session.
This can be applied on a replication session that is failed over. Fail
back will synchronize the changes done to original destination back to
original source site and will restore the original direction of
session.
:param force_full_copy: indicates whether to sync back all data from
the destination SP to the source SP during the failback session.
True - Sync back all data.
False - Sync back changed data only.
]
variable[req_body] assign[=] call[name[self]._cli.make_body, parameter[]]
variable[resp] assign[=] call[name[self].action, parameter[constant[failback]]]
call[name[resp].raise_if_err, parameter[]]
return[name[resp]] | keyword[def] identifier[failback] ( identifier[self] , identifier[force_full_copy] = keyword[None] ):
literal[string]
identifier[req_body] = identifier[self] . identifier[_cli] . identifier[make_body] ( identifier[forceFullCopy] = identifier[force_full_copy] )
identifier[resp] = identifier[self] . identifier[action] ( literal[string] ,** identifier[req_body] )
identifier[resp] . identifier[raise_if_err] ()
keyword[return] identifier[resp] | def failback(self, force_full_copy=None):
"""
Fails back a replication session.
This can be applied on a replication session that is failed over. Fail
back will synchronize the changes done to original destination back to
original source site and will restore the original direction of
session.
:param force_full_copy: indicates whether to sync back all data from
the destination SP to the source SP during the failback session.
True - Sync back all data.
False - Sync back changed data only.
"""
req_body = self._cli.make_body(forceFullCopy=force_full_copy)
resp = self.action('failback', **req_body)
resp.raise_if_err()
return resp |
def getitem(self, index, context=None):
"""Return the inference of a subscript.
This is basically looking up the method in the metaclass and calling it.
:returns: The inferred value of a subscript to this class.
:rtype: NodeNG
:raises AstroidTypeError: If this class does not define a
``__getitem__`` method.
"""
try:
methods = dunder_lookup.lookup(self, "__getitem__")
except exceptions.AttributeInferenceError as exc:
raise exceptions.AstroidTypeError(node=self, context=context) from exc
method = methods[0]
# Create a new callcontext for providing index as an argument.
new_context = contextmod.bind_context_to_node(context, self)
new_context.callcontext = contextmod.CallContext(args=[index])
try:
return next(method.infer_call_result(self, new_context))
except exceptions.InferenceError:
return util.Uninferable | def function[getitem, parameter[self, index, context]]:
constant[Return the inference of a subscript.
This is basically looking up the method in the metaclass and calling it.
:returns: The inferred value of a subscript to this class.
:rtype: NodeNG
:raises AstroidTypeError: If this class does not define a
``__getitem__`` method.
]
<ast.Try object at 0x7da1b1e7a770>
variable[method] assign[=] call[name[methods]][constant[0]]
variable[new_context] assign[=] call[name[contextmod].bind_context_to_node, parameter[name[context], name[self]]]
name[new_context].callcontext assign[=] call[name[contextmod].CallContext, parameter[]]
<ast.Try object at 0x7da1b1e79cf0> | keyword[def] identifier[getitem] ( identifier[self] , identifier[index] , identifier[context] = keyword[None] ):
literal[string]
keyword[try] :
identifier[methods] = identifier[dunder_lookup] . identifier[lookup] ( identifier[self] , literal[string] )
keyword[except] identifier[exceptions] . identifier[AttributeInferenceError] keyword[as] identifier[exc] :
keyword[raise] identifier[exceptions] . identifier[AstroidTypeError] ( identifier[node] = identifier[self] , identifier[context] = identifier[context] ) keyword[from] identifier[exc]
identifier[method] = identifier[methods] [ literal[int] ]
identifier[new_context] = identifier[contextmod] . identifier[bind_context_to_node] ( identifier[context] , identifier[self] )
identifier[new_context] . identifier[callcontext] = identifier[contextmod] . identifier[CallContext] ( identifier[args] =[ identifier[index] ])
keyword[try] :
keyword[return] identifier[next] ( identifier[method] . identifier[infer_call_result] ( identifier[self] , identifier[new_context] ))
keyword[except] identifier[exceptions] . identifier[InferenceError] :
keyword[return] identifier[util] . identifier[Uninferable] | def getitem(self, index, context=None):
"""Return the inference of a subscript.
This is basically looking up the method in the metaclass and calling it.
:returns: The inferred value of a subscript to this class.
:rtype: NodeNG
:raises AstroidTypeError: If this class does not define a
``__getitem__`` method.
"""
try:
methods = dunder_lookup.lookup(self, '__getitem__') # depends on [control=['try'], data=[]]
except exceptions.AttributeInferenceError as exc:
raise exceptions.AstroidTypeError(node=self, context=context) from exc # depends on [control=['except'], data=['exc']]
method = methods[0]
# Create a new callcontext for providing index as an argument.
new_context = contextmod.bind_context_to_node(context, self)
new_context.callcontext = contextmod.CallContext(args=[index])
try:
return next(method.infer_call_result(self, new_context)) # depends on [control=['try'], data=[]]
except exceptions.InferenceError:
return util.Uninferable # depends on [control=['except'], data=[]] |
def calcPF(pf):
""" Simple wrap to calc legacy PF value
Args:
pf: meter power factor reading
Returns:
int: legacy push pf
"""
pf_y = pf[:1]
pf_x = pf[1:]
result = 100
if pf_y == CosTheta.CapacitiveLead:
result = 200 - int(pf_x)
elif pf_y == CosTheta.InductiveLag:
result = int(pf_x)
return result | def function[calcPF, parameter[pf]]:
constant[ Simple wrap to calc legacy PF value
Args:
pf: meter power factor reading
Returns:
int: legacy push pf
]
variable[pf_y] assign[=] call[name[pf]][<ast.Slice object at 0x7da20c6c6a10>]
variable[pf_x] assign[=] call[name[pf]][<ast.Slice object at 0x7da2044c07c0>]
variable[result] assign[=] constant[100]
if compare[name[pf_y] equal[==] name[CosTheta].CapacitiveLead] begin[:]
variable[result] assign[=] binary_operation[constant[200] - call[name[int], parameter[name[pf_x]]]]
return[name[result]] | keyword[def] identifier[calcPF] ( identifier[pf] ):
literal[string]
identifier[pf_y] = identifier[pf] [: literal[int] ]
identifier[pf_x] = identifier[pf] [ literal[int] :]
identifier[result] = literal[int]
keyword[if] identifier[pf_y] == identifier[CosTheta] . identifier[CapacitiveLead] :
identifier[result] = literal[int] - identifier[int] ( identifier[pf_x] )
keyword[elif] identifier[pf_y] == identifier[CosTheta] . identifier[InductiveLag] :
identifier[result] = identifier[int] ( identifier[pf_x] )
keyword[return] identifier[result] | def calcPF(pf):
""" Simple wrap to calc legacy PF value
Args:
pf: meter power factor reading
Returns:
int: legacy push pf
"""
pf_y = pf[:1]
pf_x = pf[1:]
result = 100
if pf_y == CosTheta.CapacitiveLead:
result = 200 - int(pf_x) # depends on [control=['if'], data=[]]
elif pf_y == CosTheta.InductiveLag:
result = int(pf_x) # depends on [control=['if'], data=[]]
return result |
def layer_hazard_classification(layer):
"""Returned this particular hazard classification.
:param layer: hazard layer or exposure layer
:type layer: qgis.core.QgsVectorLayer
:return: Hazard classification.
:rtype: dict
.. versionadded:: 4.0
"""
if not layer.keywords.get('hazard'):
# return nothing if not hazard layer
return None
hazard_classification = None
# retrieve hazard classification from hazard layer
for classification in hazard_classes_all:
classification_name = layer.keywords['classification']
if classification_name == classification['key']:
hazard_classification = classification
break
return hazard_classification | def function[layer_hazard_classification, parameter[layer]]:
constant[Returned this particular hazard classification.
:param layer: hazard layer or exposure layer
:type layer: qgis.core.QgsVectorLayer
:return: Hazard classification.
:rtype: dict
.. versionadded:: 4.0
]
if <ast.UnaryOp object at 0x7da18fe90970> begin[:]
return[constant[None]]
variable[hazard_classification] assign[=] constant[None]
for taget[name[classification]] in starred[name[hazard_classes_all]] begin[:]
variable[classification_name] assign[=] call[name[layer].keywords][constant[classification]]
if compare[name[classification_name] equal[==] call[name[classification]][constant[key]]] begin[:]
variable[hazard_classification] assign[=] name[classification]
break
return[name[hazard_classification]] | keyword[def] identifier[layer_hazard_classification] ( identifier[layer] ):
literal[string]
keyword[if] keyword[not] identifier[layer] . identifier[keywords] . identifier[get] ( literal[string] ):
keyword[return] keyword[None]
identifier[hazard_classification] = keyword[None]
keyword[for] identifier[classification] keyword[in] identifier[hazard_classes_all] :
identifier[classification_name] = identifier[layer] . identifier[keywords] [ literal[string] ]
keyword[if] identifier[classification_name] == identifier[classification] [ literal[string] ]:
identifier[hazard_classification] = identifier[classification]
keyword[break]
keyword[return] identifier[hazard_classification] | def layer_hazard_classification(layer):
"""Returned this particular hazard classification.
:param layer: hazard layer or exposure layer
:type layer: qgis.core.QgsVectorLayer
:return: Hazard classification.
:rtype: dict
.. versionadded:: 4.0
"""
if not layer.keywords.get('hazard'):
# return nothing if not hazard layer
return None # depends on [control=['if'], data=[]]
hazard_classification = None
# retrieve hazard classification from hazard layer
for classification in hazard_classes_all:
classification_name = layer.keywords['classification']
if classification_name == classification['key']:
hazard_classification = classification
break # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['classification']]
return hazard_classification |
def start_tree(self, tree, filename):
"""Some fixers need to maintain tree-wide state.
This method is called once, at the start of tree fix-up.
tree - the root node of the tree to be processed.
filename - the name of the file the tree came from.
"""
self.used_names = tree.used_names
self.set_filename(filename)
self.numbers = itertools.count(1)
self.first_log = True | def function[start_tree, parameter[self, tree, filename]]:
constant[Some fixers need to maintain tree-wide state.
This method is called once, at the start of tree fix-up.
tree - the root node of the tree to be processed.
filename - the name of the file the tree came from.
]
name[self].used_names assign[=] name[tree].used_names
call[name[self].set_filename, parameter[name[filename]]]
name[self].numbers assign[=] call[name[itertools].count, parameter[constant[1]]]
name[self].first_log assign[=] constant[True] | keyword[def] identifier[start_tree] ( identifier[self] , identifier[tree] , identifier[filename] ):
literal[string]
identifier[self] . identifier[used_names] = identifier[tree] . identifier[used_names]
identifier[self] . identifier[set_filename] ( identifier[filename] )
identifier[self] . identifier[numbers] = identifier[itertools] . identifier[count] ( literal[int] )
identifier[self] . identifier[first_log] = keyword[True] | def start_tree(self, tree, filename):
"""Some fixers need to maintain tree-wide state.
This method is called once, at the start of tree fix-up.
tree - the root node of the tree to be processed.
filename - the name of the file the tree came from.
"""
self.used_names = tree.used_names
self.set_filename(filename)
self.numbers = itertools.count(1)
self.first_log = True |
def html_state(self):
"""Display state in HTML format for the admin form."""
ret = ""
state = json.loads(self.state)
for (app, appstate) in state.items():
for (model, modelstate) in appstate.items():
ret += "<p>%s.models.%s</p>" % (app, model,)
ret += "<ul>"
for field in modelstate["fields"] + ["uid"]:
ret += "<li>%s</li>" % field
for fk in modelstate["foreignkeys"]:
ret += "<li>%s (foreign key)</li>" % fk
ret += "</ul>"
return ret | def function[html_state, parameter[self]]:
constant[Display state in HTML format for the admin form.]
variable[ret] assign[=] constant[]
variable[state] assign[=] call[name[json].loads, parameter[name[self].state]]
for taget[tuple[[<ast.Name object at 0x7da2045664d0>, <ast.Name object at 0x7da204567ca0>]]] in starred[call[name[state].items, parameter[]]] begin[:]
for taget[tuple[[<ast.Name object at 0x7da204566dd0>, <ast.Name object at 0x7da204565ae0>]]] in starred[call[name[appstate].items, parameter[]]] begin[:]
<ast.AugAssign object at 0x7da2045664a0>
<ast.AugAssign object at 0x7da204565a80>
for taget[name[field]] in starred[binary_operation[call[name[modelstate]][constant[fields]] + list[[<ast.Constant object at 0x7da204565990>]]]] begin[:]
<ast.AugAssign object at 0x7da204565450>
for taget[name[fk]] in starred[call[name[modelstate]][constant[foreignkeys]]] begin[:]
<ast.AugAssign object at 0x7da2045649d0>
<ast.AugAssign object at 0x7da2044c09d0>
return[name[ret]] | keyword[def] identifier[html_state] ( identifier[self] ):
literal[string]
identifier[ret] = literal[string]
identifier[state] = identifier[json] . identifier[loads] ( identifier[self] . identifier[state] )
keyword[for] ( identifier[app] , identifier[appstate] ) keyword[in] identifier[state] . identifier[items] ():
keyword[for] ( identifier[model] , identifier[modelstate] ) keyword[in] identifier[appstate] . identifier[items] ():
identifier[ret] += literal[string] %( identifier[app] , identifier[model] ,)
identifier[ret] += literal[string]
keyword[for] identifier[field] keyword[in] identifier[modelstate] [ literal[string] ]+[ literal[string] ]:
identifier[ret] += literal[string] % identifier[field]
keyword[for] identifier[fk] keyword[in] identifier[modelstate] [ literal[string] ]:
identifier[ret] += literal[string] % identifier[fk]
identifier[ret] += literal[string]
keyword[return] identifier[ret] | def html_state(self):
"""Display state in HTML format for the admin form."""
ret = ''
state = json.loads(self.state)
for (app, appstate) in state.items():
for (model, modelstate) in appstate.items():
ret += '<p>%s.models.%s</p>' % (app, model)
ret += '<ul>'
for field in modelstate['fields'] + ['uid']:
ret += '<li>%s</li>' % field # depends on [control=['for'], data=['field']]
for fk in modelstate['foreignkeys']:
ret += '<li>%s (foreign key)</li>' % fk # depends on [control=['for'], data=['fk']]
ret += '</ul>' # depends on [control=['for'], data=[]] # depends on [control=['for'], data=[]]
return ret |
def humanise_seconds(seconds):
"""Utility function to humanise seconds value into e.g. 10 seconds ago.
The function will try to make a nice phrase of the seconds count
provided.
.. note:: Currently seconds that amount to days are not supported.
:param seconds: Mandatory seconds value e.g. 1100.
:type seconds: int
:returns: A humanised version of the seconds count.
:rtype: str
"""
days = seconds / (3600 * 24)
day_modulus = seconds % (3600 * 24)
hours = day_modulus / 3600
hour_modulus = day_modulus % 3600
minutes = hour_modulus / 60
if seconds < 60:
return tr('%i seconds' % seconds)
if seconds < 120:
return tr('a minute')
if seconds < 3600:
return tr('%s minutes' % minutes)
if seconds < 7200:
return tr('over an hour')
if seconds < 86400:
return tr('%i hours and %i minutes' % (hours, minutes))
else:
# If all else fails...
return tr('%i days, %i hours and %i minutes' % (
days, hours, minutes)) | def function[humanise_seconds, parameter[seconds]]:
constant[Utility function to humanise seconds value into e.g. 10 seconds ago.
The function will try to make a nice phrase of the seconds count
provided.
.. note:: Currently seconds that amount to days are not supported.
:param seconds: Mandatory seconds value e.g. 1100.
:type seconds: int
:returns: A humanised version of the seconds count.
:rtype: str
]
variable[days] assign[=] binary_operation[name[seconds] / binary_operation[constant[3600] * constant[24]]]
variable[day_modulus] assign[=] binary_operation[name[seconds] <ast.Mod object at 0x7da2590d6920> binary_operation[constant[3600] * constant[24]]]
variable[hours] assign[=] binary_operation[name[day_modulus] / constant[3600]]
variable[hour_modulus] assign[=] binary_operation[name[day_modulus] <ast.Mod object at 0x7da2590d6920> constant[3600]]
variable[minutes] assign[=] binary_operation[name[hour_modulus] / constant[60]]
if compare[name[seconds] less[<] constant[60]] begin[:]
return[call[name[tr], parameter[binary_operation[constant[%i seconds] <ast.Mod object at 0x7da2590d6920> name[seconds]]]]]
if compare[name[seconds] less[<] constant[120]] begin[:]
return[call[name[tr], parameter[constant[a minute]]]]
if compare[name[seconds] less[<] constant[3600]] begin[:]
return[call[name[tr], parameter[binary_operation[constant[%s minutes] <ast.Mod object at 0x7da2590d6920> name[minutes]]]]]
if compare[name[seconds] less[<] constant[7200]] begin[:]
return[call[name[tr], parameter[constant[over an hour]]]]
if compare[name[seconds] less[<] constant[86400]] begin[:]
return[call[name[tr], parameter[binary_operation[constant[%i hours and %i minutes] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da18f58d240>, <ast.Name object at 0x7da18f58f550>]]]]]] | keyword[def] identifier[humanise_seconds] ( identifier[seconds] ):
literal[string]
identifier[days] = identifier[seconds] /( literal[int] * literal[int] )
identifier[day_modulus] = identifier[seconds] %( literal[int] * literal[int] )
identifier[hours] = identifier[day_modulus] / literal[int]
identifier[hour_modulus] = identifier[day_modulus] % literal[int]
identifier[minutes] = identifier[hour_modulus] / literal[int]
keyword[if] identifier[seconds] < literal[int] :
keyword[return] identifier[tr] ( literal[string] % identifier[seconds] )
keyword[if] identifier[seconds] < literal[int] :
keyword[return] identifier[tr] ( literal[string] )
keyword[if] identifier[seconds] < literal[int] :
keyword[return] identifier[tr] ( literal[string] % identifier[minutes] )
keyword[if] identifier[seconds] < literal[int] :
keyword[return] identifier[tr] ( literal[string] )
keyword[if] identifier[seconds] < literal[int] :
keyword[return] identifier[tr] ( literal[string] %( identifier[hours] , identifier[minutes] ))
keyword[else] :
keyword[return] identifier[tr] ( literal[string] %(
identifier[days] , identifier[hours] , identifier[minutes] )) | def humanise_seconds(seconds):
"""Utility function to humanise seconds value into e.g. 10 seconds ago.
The function will try to make a nice phrase of the seconds count
provided.
.. note:: Currently seconds that amount to days are not supported.
:param seconds: Mandatory seconds value e.g. 1100.
:type seconds: int
:returns: A humanised version of the seconds count.
:rtype: str
"""
days = seconds / (3600 * 24)
day_modulus = seconds % (3600 * 24)
hours = day_modulus / 3600
hour_modulus = day_modulus % 3600
minutes = hour_modulus / 60
if seconds < 60:
return tr('%i seconds' % seconds) # depends on [control=['if'], data=['seconds']]
if seconds < 120:
return tr('a minute') # depends on [control=['if'], data=[]]
if seconds < 3600:
return tr('%s minutes' % minutes) # depends on [control=['if'], data=[]]
if seconds < 7200:
return tr('over an hour') # depends on [control=['if'], data=[]]
if seconds < 86400:
return tr('%i hours and %i minutes' % (hours, minutes)) # depends on [control=['if'], data=[]]
else:
# If all else fails...
return tr('%i days, %i hours and %i minutes' % (days, hours, minutes)) |
def create(cls, user_id, client_id, token, secret,
token_type='', extra_data=None):
"""Create a new access token.
.. note:: Creates RemoteAccount as well if it does not exists.
:param user_id: The user id.
:param client_id: The client id.
:param token: The token.
:param secret: The secret key.
:param token_type: The token type. (Default: ``''``)
:param extra_data: Extra data to set in the remote account if the
remote account doesn't exists. (Default: ``None``)
:returns: A :class:`invenio_oauthclient.models.RemoteToken` instance.
"""
account = RemoteAccount.get(user_id, client_id)
with db.session.begin_nested():
if account is None:
account = RemoteAccount(
user_id=user_id,
client_id=client_id,
extra_data=extra_data or dict(),
)
db.session.add(account)
token = cls(
token_type=token_type,
remote_account=account,
access_token=token,
secret=secret,
)
db.session.add(token)
return token | def function[create, parameter[cls, user_id, client_id, token, secret, token_type, extra_data]]:
constant[Create a new access token.
.. note:: Creates RemoteAccount as well if it does not exists.
:param user_id: The user id.
:param client_id: The client id.
:param token: The token.
:param secret: The secret key.
:param token_type: The token type. (Default: ``''``)
:param extra_data: Extra data to set in the remote account if the
remote account doesn't exists. (Default: ``None``)
:returns: A :class:`invenio_oauthclient.models.RemoteToken` instance.
]
variable[account] assign[=] call[name[RemoteAccount].get, parameter[name[user_id], name[client_id]]]
with call[name[db].session.begin_nested, parameter[]] begin[:]
if compare[name[account] is constant[None]] begin[:]
variable[account] assign[=] call[name[RemoteAccount], parameter[]]
call[name[db].session.add, parameter[name[account]]]
variable[token] assign[=] call[name[cls], parameter[]]
call[name[db].session.add, parameter[name[token]]]
return[name[token]] | keyword[def] identifier[create] ( identifier[cls] , identifier[user_id] , identifier[client_id] , identifier[token] , identifier[secret] ,
identifier[token_type] = literal[string] , identifier[extra_data] = keyword[None] ):
literal[string]
identifier[account] = identifier[RemoteAccount] . identifier[get] ( identifier[user_id] , identifier[client_id] )
keyword[with] identifier[db] . identifier[session] . identifier[begin_nested] ():
keyword[if] identifier[account] keyword[is] keyword[None] :
identifier[account] = identifier[RemoteAccount] (
identifier[user_id] = identifier[user_id] ,
identifier[client_id] = identifier[client_id] ,
identifier[extra_data] = identifier[extra_data] keyword[or] identifier[dict] (),
)
identifier[db] . identifier[session] . identifier[add] ( identifier[account] )
identifier[token] = identifier[cls] (
identifier[token_type] = identifier[token_type] ,
identifier[remote_account] = identifier[account] ,
identifier[access_token] = identifier[token] ,
identifier[secret] = identifier[secret] ,
)
identifier[db] . identifier[session] . identifier[add] ( identifier[token] )
keyword[return] identifier[token] | def create(cls, user_id, client_id, token, secret, token_type='', extra_data=None):
"""Create a new access token.
.. note:: Creates RemoteAccount as well if it does not exists.
:param user_id: The user id.
:param client_id: The client id.
:param token: The token.
:param secret: The secret key.
:param token_type: The token type. (Default: ``''``)
:param extra_data: Extra data to set in the remote account if the
remote account doesn't exists. (Default: ``None``)
:returns: A :class:`invenio_oauthclient.models.RemoteToken` instance.
"""
account = RemoteAccount.get(user_id, client_id)
with db.session.begin_nested():
if account is None:
account = RemoteAccount(user_id=user_id, client_id=client_id, extra_data=extra_data or dict())
db.session.add(account) # depends on [control=['if'], data=['account']]
token = cls(token_type=token_type, remote_account=account, access_token=token, secret=secret)
db.session.add(token) # depends on [control=['with'], data=[]]
return token |
def _parse_qsignature_output(in_file, out_file, warning_file, data):
""" Parse xml file produced by qsignature
:param in_file: (str) with the path to the xml file
:param out_file: (str) with the path to output file
:param warning_file: (str) with the path to warning file
:returns: (list) with samples that could be duplicated
"""
name = {}
error, warnings, similar = set(), set(), set()
same, replicate, related = 0, 0.1, 0.18
mixup_check = dd.get_mixup_check(data)
if mixup_check == "qsignature_full":
same, replicate, related = 0, 0.01, 0.061
with open(in_file, 'r') as in_handle:
with file_transaction(data, out_file) as out_tx_file:
with file_transaction(data, warning_file) as warn_tx_file:
with open(out_tx_file, 'w') as out_handle:
with open(warn_tx_file, 'w') as warn_handle:
et = ET.parse(in_handle)
for i in list(et.iter('file')):
name[i.attrib['id']] = os.path.basename(i.attrib['name']).replace(".qsig.vcf", "")
for i in list(et.iter('comparison')):
msg = None
pair = "-".join([name[i.attrib['file1']], name[i.attrib['file2']]])
out_handle.write("%s\t%s\t%s\n" %
(name[i.attrib['file1']], name[i.attrib['file2']], i.attrib['score']))
if float(i.attrib['score']) == same:
msg = 'qsignature ERROR: read same samples:%s\n'
error.add(pair)
elif float(i.attrib['score']) < replicate:
msg = 'qsignature WARNING: read similar/replicate samples:%s\n'
warnings.add(pair)
elif float(i.attrib['score']) < related:
msg = 'qsignature NOTE: read relative samples:%s\n'
similar.add(pair)
if msg:
logger.info(msg % pair)
warn_handle.write(msg % pair)
return error, warnings, similar | def function[_parse_qsignature_output, parameter[in_file, out_file, warning_file, data]]:
constant[ Parse xml file produced by qsignature
:param in_file: (str) with the path to the xml file
:param out_file: (str) with the path to output file
:param warning_file: (str) with the path to warning file
:returns: (list) with samples that could be duplicated
]
variable[name] assign[=] dictionary[[], []]
<ast.Tuple object at 0x7da1b19ba950> assign[=] tuple[[<ast.Call object at 0x7da1b19baef0>, <ast.Call object at 0x7da1b19ba8f0>, <ast.Call object at 0x7da1b19b9b70>]]
<ast.Tuple object at 0x7da1b19b9990> assign[=] tuple[[<ast.Constant object at 0x7da1b19bbb20>, <ast.Constant object at 0x7da1b19babc0>, <ast.Constant object at 0x7da1b19b80d0>]]
variable[mixup_check] assign[=] call[name[dd].get_mixup_check, parameter[name[data]]]
if compare[name[mixup_check] equal[==] constant[qsignature_full]] begin[:]
<ast.Tuple object at 0x7da1b19b9fc0> assign[=] tuple[[<ast.Constant object at 0x7da1b19b8610>, <ast.Constant object at 0x7da1b19b9210>, <ast.Constant object at 0x7da1b19b9cf0>]]
with call[name[open], parameter[name[in_file], constant[r]]] begin[:]
with call[name[file_transaction], parameter[name[data], name[out_file]]] begin[:]
with call[name[file_transaction], parameter[name[data], name[warning_file]]] begin[:]
with call[name[open], parameter[name[out_tx_file], constant[w]]] begin[:]
with call[name[open], parameter[name[warn_tx_file], constant[w]]] begin[:]
variable[et] assign[=] call[name[ET].parse, parameter[name[in_handle]]]
for taget[name[i]] in starred[call[name[list], parameter[call[name[et].iter, parameter[constant[file]]]]]] begin[:]
call[name[name]][call[name[i].attrib][constant[id]]] assign[=] call[call[name[os].path.basename, parameter[call[name[i].attrib][constant[name]]]].replace, parameter[constant[.qsig.vcf], constant[]]]
for taget[name[i]] in starred[call[name[list], parameter[call[name[et].iter, parameter[constant[comparison]]]]]] begin[:]
variable[msg] assign[=] constant[None]
variable[pair] assign[=] call[constant[-].join, parameter[list[[<ast.Subscript object at 0x7da1b18a9000>, <ast.Subscript object at 0x7da1b18a8e20>]]]]
call[name[out_handle].write, parameter[binary_operation[constant[%s %s %s
] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Subscript object at 0x7da1b18a8d00>, <ast.Subscript object at 0x7da1b18abdf0>, <ast.Subscript object at 0x7da1b18a9ed0>]]]]]
if compare[call[name[float], parameter[call[name[i].attrib][constant[score]]]] equal[==] name[same]] begin[:]
variable[msg] assign[=] constant[qsignature ERROR: read same samples:%s
]
call[name[error].add, parameter[name[pair]]]
if name[msg] begin[:]
call[name[logger].info, parameter[binary_operation[name[msg] <ast.Mod object at 0x7da2590d6920> name[pair]]]]
call[name[warn_handle].write, parameter[binary_operation[name[msg] <ast.Mod object at 0x7da2590d6920> name[pair]]]]
return[tuple[[<ast.Name object at 0x7da1b18aaa70>, <ast.Name object at 0x7da1b18a9900>, <ast.Name object at 0x7da1b18aada0>]]] | keyword[def] identifier[_parse_qsignature_output] ( identifier[in_file] , identifier[out_file] , identifier[warning_file] , identifier[data] ):
literal[string]
identifier[name] ={}
identifier[error] , identifier[warnings] , identifier[similar] = identifier[set] (), identifier[set] (), identifier[set] ()
identifier[same] , identifier[replicate] , identifier[related] = literal[int] , literal[int] , literal[int]
identifier[mixup_check] = identifier[dd] . identifier[get_mixup_check] ( identifier[data] )
keyword[if] identifier[mixup_check] == literal[string] :
identifier[same] , identifier[replicate] , identifier[related] = literal[int] , literal[int] , literal[int]
keyword[with] identifier[open] ( identifier[in_file] , literal[string] ) keyword[as] identifier[in_handle] :
keyword[with] identifier[file_transaction] ( identifier[data] , identifier[out_file] ) keyword[as] identifier[out_tx_file] :
keyword[with] identifier[file_transaction] ( identifier[data] , identifier[warning_file] ) keyword[as] identifier[warn_tx_file] :
keyword[with] identifier[open] ( identifier[out_tx_file] , literal[string] ) keyword[as] identifier[out_handle] :
keyword[with] identifier[open] ( identifier[warn_tx_file] , literal[string] ) keyword[as] identifier[warn_handle] :
identifier[et] = identifier[ET] . identifier[parse] ( identifier[in_handle] )
keyword[for] identifier[i] keyword[in] identifier[list] ( identifier[et] . identifier[iter] ( literal[string] )):
identifier[name] [ identifier[i] . identifier[attrib] [ literal[string] ]]= identifier[os] . identifier[path] . identifier[basename] ( identifier[i] . identifier[attrib] [ literal[string] ]). identifier[replace] ( literal[string] , literal[string] )
keyword[for] identifier[i] keyword[in] identifier[list] ( identifier[et] . identifier[iter] ( literal[string] )):
identifier[msg] = keyword[None]
identifier[pair] = literal[string] . identifier[join] ([ identifier[name] [ identifier[i] . identifier[attrib] [ literal[string] ]], identifier[name] [ identifier[i] . identifier[attrib] [ literal[string] ]]])
identifier[out_handle] . identifier[write] ( literal[string] %
( identifier[name] [ identifier[i] . identifier[attrib] [ literal[string] ]], identifier[name] [ identifier[i] . identifier[attrib] [ literal[string] ]], identifier[i] . identifier[attrib] [ literal[string] ]))
keyword[if] identifier[float] ( identifier[i] . identifier[attrib] [ literal[string] ])== identifier[same] :
identifier[msg] = literal[string]
identifier[error] . identifier[add] ( identifier[pair] )
keyword[elif] identifier[float] ( identifier[i] . identifier[attrib] [ literal[string] ])< identifier[replicate] :
identifier[msg] = literal[string]
identifier[warnings] . identifier[add] ( identifier[pair] )
keyword[elif] identifier[float] ( identifier[i] . identifier[attrib] [ literal[string] ])< identifier[related] :
identifier[msg] = literal[string]
identifier[similar] . identifier[add] ( identifier[pair] )
keyword[if] identifier[msg] :
identifier[logger] . identifier[info] ( identifier[msg] % identifier[pair] )
identifier[warn_handle] . identifier[write] ( identifier[msg] % identifier[pair] )
keyword[return] identifier[error] , identifier[warnings] , identifier[similar] | def _parse_qsignature_output(in_file, out_file, warning_file, data):
""" Parse xml file produced by qsignature
:param in_file: (str) with the path to the xml file
:param out_file: (str) with the path to output file
:param warning_file: (str) with the path to warning file
:returns: (list) with samples that could be duplicated
"""
name = {}
(error, warnings, similar) = (set(), set(), set())
(same, replicate, related) = (0, 0.1, 0.18)
mixup_check = dd.get_mixup_check(data)
if mixup_check == 'qsignature_full':
(same, replicate, related) = (0, 0.01, 0.061) # depends on [control=['if'], data=[]]
with open(in_file, 'r') as in_handle:
with file_transaction(data, out_file) as out_tx_file:
with file_transaction(data, warning_file) as warn_tx_file:
with open(out_tx_file, 'w') as out_handle:
with open(warn_tx_file, 'w') as warn_handle:
et = ET.parse(in_handle)
for i in list(et.iter('file')):
name[i.attrib['id']] = os.path.basename(i.attrib['name']).replace('.qsig.vcf', '') # depends on [control=['for'], data=['i']]
for i in list(et.iter('comparison')):
msg = None
pair = '-'.join([name[i.attrib['file1']], name[i.attrib['file2']]])
out_handle.write('%s\t%s\t%s\n' % (name[i.attrib['file1']], name[i.attrib['file2']], i.attrib['score']))
if float(i.attrib['score']) == same:
msg = 'qsignature ERROR: read same samples:%s\n'
error.add(pair) # depends on [control=['if'], data=[]]
elif float(i.attrib['score']) < replicate:
msg = 'qsignature WARNING: read similar/replicate samples:%s\n'
warnings.add(pair) # depends on [control=['if'], data=[]]
elif float(i.attrib['score']) < related:
msg = 'qsignature NOTE: read relative samples:%s\n'
similar.add(pair) # depends on [control=['if'], data=[]]
if msg:
logger.info(msg % pair)
warn_handle.write(msg % pair) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']] # depends on [control=['with'], data=['warn_handle']] # depends on [control=['with'], data=['open', 'out_handle']] # depends on [control=['with'], data=['warn_tx_file']] # depends on [control=['with'], data=['file_transaction', 'out_tx_file']] # depends on [control=['with'], data=['open', 'in_handle']]
return (error, warnings, similar) |
def objc_type_encoding(self):
"""Return the Objective-C type encoding as a str."""
if not hasattr(self, '_objc_type_encoding'):
self._objc_type_encoding = \
conf.lib.clang_getDeclObjCTypeEncoding(self)
return self._objc_type_encoding | def function[objc_type_encoding, parameter[self]]:
constant[Return the Objective-C type encoding as a str.]
if <ast.UnaryOp object at 0x7da1b06d17b0> begin[:]
name[self]._objc_type_encoding assign[=] call[name[conf].lib.clang_getDeclObjCTypeEncoding, parameter[name[self]]]
return[name[self]._objc_type_encoding] | keyword[def] identifier[objc_type_encoding] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_objc_type_encoding] = identifier[conf] . identifier[lib] . identifier[clang_getDeclObjCTypeEncoding] ( identifier[self] )
keyword[return] identifier[self] . identifier[_objc_type_encoding] | def objc_type_encoding(self):
"""Return the Objective-C type encoding as a str."""
if not hasattr(self, '_objc_type_encoding'):
self._objc_type_encoding = conf.lib.clang_getDeclObjCTypeEncoding(self) # depends on [control=['if'], data=[]]
return self._objc_type_encoding |
def merge_results(old_results, new_results):
"""Update results in baseline with latest information.
:type old_results: dict
:param old_results: results of status quo
:type new_results: dict
:param new_results: results to replace status quo
:rtype: dict
"""
for filename, old_secrets in old_results.items():
if filename not in new_results:
continue
old_secrets_mapping = dict()
for old_secret in old_secrets:
old_secrets_mapping[old_secret['hashed_secret']] = old_secret
for new_secret in new_results[filename]:
if new_secret['hashed_secret'] not in old_secrets_mapping:
# We don't join the two secret sets, because if the newer
# result set did not discover an old secret, it probably
# moved.
continue
old_secret = old_secrets_mapping[new_secret['hashed_secret']]
# Only propagate 'is_secret' if it's not already there
if 'is_secret' in old_secret and 'is_secret' not in new_secret:
new_secret['is_secret'] = old_secret['is_secret']
return new_results | def function[merge_results, parameter[old_results, new_results]]:
constant[Update results in baseline with latest information.
:type old_results: dict
:param old_results: results of status quo
:type new_results: dict
:param new_results: results to replace status quo
:rtype: dict
]
for taget[tuple[[<ast.Name object at 0x7da18f811180>, <ast.Name object at 0x7da18f813c10>]]] in starred[call[name[old_results].items, parameter[]]] begin[:]
if compare[name[filename] <ast.NotIn object at 0x7da2590d7190> name[new_results]] begin[:]
continue
variable[old_secrets_mapping] assign[=] call[name[dict], parameter[]]
for taget[name[old_secret]] in starred[name[old_secrets]] begin[:]
call[name[old_secrets_mapping]][call[name[old_secret]][constant[hashed_secret]]] assign[=] name[old_secret]
for taget[name[new_secret]] in starred[call[name[new_results]][name[filename]]] begin[:]
if compare[call[name[new_secret]][constant[hashed_secret]] <ast.NotIn object at 0x7da2590d7190> name[old_secrets_mapping]] begin[:]
continue
variable[old_secret] assign[=] call[name[old_secrets_mapping]][call[name[new_secret]][constant[hashed_secret]]]
if <ast.BoolOp object at 0x7da18f810340> begin[:]
call[name[new_secret]][constant[is_secret]] assign[=] call[name[old_secret]][constant[is_secret]]
return[name[new_results]] | keyword[def] identifier[merge_results] ( identifier[old_results] , identifier[new_results] ):
literal[string]
keyword[for] identifier[filename] , identifier[old_secrets] keyword[in] identifier[old_results] . identifier[items] ():
keyword[if] identifier[filename] keyword[not] keyword[in] identifier[new_results] :
keyword[continue]
identifier[old_secrets_mapping] = identifier[dict] ()
keyword[for] identifier[old_secret] keyword[in] identifier[old_secrets] :
identifier[old_secrets_mapping] [ identifier[old_secret] [ literal[string] ]]= identifier[old_secret]
keyword[for] identifier[new_secret] keyword[in] identifier[new_results] [ identifier[filename] ]:
keyword[if] identifier[new_secret] [ literal[string] ] keyword[not] keyword[in] identifier[old_secrets_mapping] :
keyword[continue]
identifier[old_secret] = identifier[old_secrets_mapping] [ identifier[new_secret] [ literal[string] ]]
keyword[if] literal[string] keyword[in] identifier[old_secret] keyword[and] literal[string] keyword[not] keyword[in] identifier[new_secret] :
identifier[new_secret] [ literal[string] ]= identifier[old_secret] [ literal[string] ]
keyword[return] identifier[new_results] | def merge_results(old_results, new_results):
"""Update results in baseline with latest information.
:type old_results: dict
:param old_results: results of status quo
:type new_results: dict
:param new_results: results to replace status quo
:rtype: dict
"""
for (filename, old_secrets) in old_results.items():
if filename not in new_results:
continue # depends on [control=['if'], data=[]]
old_secrets_mapping = dict()
for old_secret in old_secrets:
old_secrets_mapping[old_secret['hashed_secret']] = old_secret # depends on [control=['for'], data=['old_secret']]
for new_secret in new_results[filename]:
if new_secret['hashed_secret'] not in old_secrets_mapping:
# We don't join the two secret sets, because if the newer
# result set did not discover an old secret, it probably
# moved.
continue # depends on [control=['if'], data=[]]
old_secret = old_secrets_mapping[new_secret['hashed_secret']]
# Only propagate 'is_secret' if it's not already there
if 'is_secret' in old_secret and 'is_secret' not in new_secret:
new_secret['is_secret'] = old_secret['is_secret'] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['new_secret']] # depends on [control=['for'], data=[]]
return new_results |
def validate(instance):
""" Validates a given ``instance``.
:param object instance: The instance to validate
:raises jsonschema.exceptions.ValidationError: On failed validation
"""
jsonschema.validate(
to_dict(instance, dict_type=dict), build_schema(instance.__class__)
) | def function[validate, parameter[instance]]:
constant[ Validates a given ``instance``.
:param object instance: The instance to validate
:raises jsonschema.exceptions.ValidationError: On failed validation
]
call[name[jsonschema].validate, parameter[call[name[to_dict], parameter[name[instance]]], call[name[build_schema], parameter[name[instance].__class__]]]] | keyword[def] identifier[validate] ( identifier[instance] ):
literal[string]
identifier[jsonschema] . identifier[validate] (
identifier[to_dict] ( identifier[instance] , identifier[dict_type] = identifier[dict] ), identifier[build_schema] ( identifier[instance] . identifier[__class__] )
) | def validate(instance):
""" Validates a given ``instance``.
:param object instance: The instance to validate
:raises jsonschema.exceptions.ValidationError: On failed validation
"""
jsonschema.validate(to_dict(instance, dict_type=dict), build_schema(instance.__class__)) |
def post(self, url, entity):
"""
To make a POST request to Falkonry API server
:param url: string
:param entity: Instantiated class object
"""
try:
if entity is None or entity == "":
jsonData = ""
else:
jsonData = entity.to_json()
except Exception as e:
jsonData = jsonpickle.pickler.encode(entity)
response = requests.post(
self.host + url,
jsonData,
headers={
"Content-Type": "application/json",
'Authorization': 'Bearer ' + self.token,
'x-falkonry-source':self.sourceHeader
},
verify=False
)
if response.status_code == 201:
try:
return json.loads(response._content.decode('utf-8'))
except Exception as e:
return json.loads(response.content)
elif response.status_code == 409:
try:
return json.loads(response._content.decode('utf-8'))
except Exception as e:
return json.loads(response.content)
elif response.status_code == 401:
raise Exception(json.dumps({'message':'Unauthorized Access'}))
else:
raise Exception(response.content) | def function[post, parameter[self, url, entity]]:
constant[
To make a POST request to Falkonry API server
:param url: string
:param entity: Instantiated class object
]
<ast.Try object at 0x7da18dc07100>
variable[response] assign[=] call[name[requests].post, parameter[binary_operation[name[self].host + name[url]], name[jsonData]]]
if compare[name[response].status_code equal[==] constant[201]] begin[:]
<ast.Try object at 0x7da18bcc9b10> | keyword[def] identifier[post] ( identifier[self] , identifier[url] , identifier[entity] ):
literal[string]
keyword[try] :
keyword[if] identifier[entity] keyword[is] keyword[None] keyword[or] identifier[entity] == literal[string] :
identifier[jsonData] = literal[string]
keyword[else] :
identifier[jsonData] = identifier[entity] . identifier[to_json] ()
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[jsonData] = identifier[jsonpickle] . identifier[pickler] . identifier[encode] ( identifier[entity] )
identifier[response] = identifier[requests] . identifier[post] (
identifier[self] . identifier[host] + identifier[url] ,
identifier[jsonData] ,
identifier[headers] ={
literal[string] : literal[string] ,
literal[string] : literal[string] + identifier[self] . identifier[token] ,
literal[string] : identifier[self] . identifier[sourceHeader]
},
identifier[verify] = keyword[False]
)
keyword[if] identifier[response] . identifier[status_code] == literal[int] :
keyword[try] :
keyword[return] identifier[json] . identifier[loads] ( identifier[response] . identifier[_content] . identifier[decode] ( literal[string] ))
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[return] identifier[json] . identifier[loads] ( identifier[response] . identifier[content] )
keyword[elif] identifier[response] . identifier[status_code] == literal[int] :
keyword[try] :
keyword[return] identifier[json] . identifier[loads] ( identifier[response] . identifier[_content] . identifier[decode] ( literal[string] ))
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[return] identifier[json] . identifier[loads] ( identifier[response] . identifier[content] )
keyword[elif] identifier[response] . identifier[status_code] == literal[int] :
keyword[raise] identifier[Exception] ( identifier[json] . identifier[dumps] ({ literal[string] : literal[string] }))
keyword[else] :
keyword[raise] identifier[Exception] ( identifier[response] . identifier[content] ) | def post(self, url, entity):
"""
To make a POST request to Falkonry API server
:param url: string
:param entity: Instantiated class object
"""
try:
if entity is None or entity == '':
jsonData = '' # depends on [control=['if'], data=[]]
else:
jsonData = entity.to_json() # depends on [control=['try'], data=[]]
except Exception as e:
jsonData = jsonpickle.pickler.encode(entity) # depends on [control=['except'], data=[]]
response = requests.post(self.host + url, jsonData, headers={'Content-Type': 'application/json', 'Authorization': 'Bearer ' + self.token, 'x-falkonry-source': self.sourceHeader}, verify=False)
if response.status_code == 201:
try:
return json.loads(response._content.decode('utf-8')) # depends on [control=['try'], data=[]]
except Exception as e:
return json.loads(response.content) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif response.status_code == 409:
try:
return json.loads(response._content.decode('utf-8')) # depends on [control=['try'], data=[]]
except Exception as e:
return json.loads(response.content) # depends on [control=['except'], data=[]] # depends on [control=['if'], data=[]]
elif response.status_code == 401:
raise Exception(json.dumps({'message': 'Unauthorized Access'})) # depends on [control=['if'], data=[]]
else:
raise Exception(response.content) |
def _set_mpls_config(self, v, load=False):
"""
Setter method for mpls_config, mapped from YANG variable /mpls_config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mpls_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mpls_config() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=mpls_config.mpls_config, is_container='container', presence=False, yang_name="mpls-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'151'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mpls_config must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=mpls_config.mpls_config, is_container='container', presence=False, yang_name="mpls-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'151'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
})
self.__mpls_config = t
if hasattr(self, '_set'):
self._set() | def function[_set_mpls_config, parameter[self, v, load]]:
constant[
Setter method for mpls_config, mapped from YANG variable /mpls_config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mpls_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mpls_config() directly.
]
if call[name[hasattr], parameter[name[v], constant[_utype]]] begin[:]
variable[v] assign[=] call[name[v]._utype, parameter[name[v]]]
<ast.Try object at 0x7da204623d90>
name[self].__mpls_config assign[=] name[t]
if call[name[hasattr], parameter[name[self], constant[_set]]] begin[:]
call[name[self]._set, parameter[]] | keyword[def] identifier[_set_mpls_config] ( identifier[self] , identifier[v] , identifier[load] = keyword[False] ):
literal[string]
keyword[if] identifier[hasattr] ( identifier[v] , literal[string] ):
identifier[v] = identifier[v] . identifier[_utype] ( identifier[v] )
keyword[try] :
identifier[t] = identifier[YANGDynClass] ( identifier[v] , identifier[base] = identifier[mpls_config] . identifier[mpls_config] , identifier[is_container] = literal[string] , identifier[presence] = keyword[False] , identifier[yang_name] = literal[string] , identifier[rest_name] = literal[string] , identifier[parent] = identifier[self] , identifier[path_helper] = identifier[self] . identifier[_path_helper] , identifier[extmethods] = identifier[self] . identifier[_extmethods] , identifier[register_paths] = keyword[True] , identifier[extensions] ={ literal[string] :{ literal[string] : keyword[None] , literal[string] : literal[string] }}, identifier[namespace] = literal[string] , identifier[defining_module] = literal[string] , identifier[yang_type] = literal[string] , identifier[is_config] = keyword[True] )
keyword[except] ( identifier[TypeError] , identifier[ValueError] ):
keyword[raise] identifier[ValueError] ({
literal[string] : literal[string] ,
literal[string] : literal[string] ,
literal[string] : literal[string] ,
})
identifier[self] . identifier[__mpls_config] = identifier[t]
keyword[if] identifier[hasattr] ( identifier[self] , literal[string] ):
identifier[self] . identifier[_set] () | def _set_mpls_config(self, v, load=False):
"""
Setter method for mpls_config, mapped from YANG variable /mpls_config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_mpls_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mpls_config() directly.
"""
if hasattr(v, '_utype'):
v = v._utype(v) # depends on [control=['if'], data=[]]
try:
t = YANGDynClass(v, base=mpls_config.mpls_config, is_container='container', presence=False, yang_name='mpls-config', rest_name='', parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'151'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True) # depends on [control=['try'], data=[]]
except (TypeError, ValueError):
raise ValueError({'error-string': 'mpls_config must be of a type compatible with container', 'defined-type': 'container', 'generated-type': 'YANGDynClass(base=mpls_config.mpls_config, is_container=\'container\', presence=False, yang_name="mpls-config", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u\'tailf-common\': {u\'cli-drop-node-name\': None, u\'sort-priority\': u\'151\'}}, namespace=\'urn:brocade.com:mgmt:brocade-mpls\', defining_module=\'brocade-mpls\', yang_type=\'container\', is_config=True)'}) # depends on [control=['except'], data=[]]
self.__mpls_config = t
if hasattr(self, '_set'):
self._set() # depends on [control=['if'], data=[]] |
def activate(self, event):
"""Change the value."""
self._index += 1
if self._index >= len(self._values):
self._index = 0
self._selection = self._values[self._index]
self.ao2.speak(self._selection) | def function[activate, parameter[self, event]]:
constant[Change the value.]
<ast.AugAssign object at 0x7da1b1451f60>
if compare[name[self]._index greater_or_equal[>=] call[name[len], parameter[name[self]._values]]] begin[:]
name[self]._index assign[=] constant[0]
name[self]._selection assign[=] call[name[self]._values][name[self]._index]
call[name[self].ao2.speak, parameter[name[self]._selection]] | keyword[def] identifier[activate] ( identifier[self] , identifier[event] ):
literal[string]
identifier[self] . identifier[_index] += literal[int]
keyword[if] identifier[self] . identifier[_index] >= identifier[len] ( identifier[self] . identifier[_values] ):
identifier[self] . identifier[_index] = literal[int]
identifier[self] . identifier[_selection] = identifier[self] . identifier[_values] [ identifier[self] . identifier[_index] ]
identifier[self] . identifier[ao2] . identifier[speak] ( identifier[self] . identifier[_selection] ) | def activate(self, event):
"""Change the value."""
self._index += 1
if self._index >= len(self._values):
self._index = 0 # depends on [control=['if'], data=[]]
self._selection = self._values[self._index]
self.ao2.speak(self._selection) |
def create_new_board(self, query_params=None):
'''
Create a new board. name is required in query_params. Returns a Board
object.
Returns:
Board: Returns the created board
'''
board_json = self.fetch_json(
uri_path='/boards',
http_method='POST',
query_params=query_params or {}
)
return self.create_board(board_json) | def function[create_new_board, parameter[self, query_params]]:
constant[
Create a new board. name is required in query_params. Returns a Board
object.
Returns:
Board: Returns the created board
]
variable[board_json] assign[=] call[name[self].fetch_json, parameter[]]
return[call[name[self].create_board, parameter[name[board_json]]]] | keyword[def] identifier[create_new_board] ( identifier[self] , identifier[query_params] = keyword[None] ):
literal[string]
identifier[board_json] = identifier[self] . identifier[fetch_json] (
identifier[uri_path] = literal[string] ,
identifier[http_method] = literal[string] ,
identifier[query_params] = identifier[query_params] keyword[or] {}
)
keyword[return] identifier[self] . identifier[create_board] ( identifier[board_json] ) | def create_new_board(self, query_params=None):
"""
Create a new board. name is required in query_params. Returns a Board
object.
Returns:
Board: Returns the created board
"""
board_json = self.fetch_json(uri_path='/boards', http_method='POST', query_params=query_params or {})
return self.create_board(board_json) |
def _make_job_id(job_id, prefix=None):
"""Construct an ID for a new job.
:type job_id: str or ``NoneType``
:param job_id: the user-provided job ID
:type prefix: str or ``NoneType``
:param prefix: (Optional) the user-provided prefix for a job ID
:rtype: str
:returns: A job ID
"""
if job_id is not None:
return job_id
elif prefix is not None:
return str(prefix) + str(uuid.uuid4())
else:
return str(uuid.uuid4()) | def function[_make_job_id, parameter[job_id, prefix]]:
constant[Construct an ID for a new job.
:type job_id: str or ``NoneType``
:param job_id: the user-provided job ID
:type prefix: str or ``NoneType``
:param prefix: (Optional) the user-provided prefix for a job ID
:rtype: str
:returns: A job ID
]
if compare[name[job_id] is_not constant[None]] begin[:]
return[name[job_id]] | keyword[def] identifier[_make_job_id] ( identifier[job_id] , identifier[prefix] = keyword[None] ):
literal[string]
keyword[if] identifier[job_id] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[job_id]
keyword[elif] identifier[prefix] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[str] ( identifier[prefix] )+ identifier[str] ( identifier[uuid] . identifier[uuid4] ())
keyword[else] :
keyword[return] identifier[str] ( identifier[uuid] . identifier[uuid4] ()) | def _make_job_id(job_id, prefix=None):
"""Construct an ID for a new job.
:type job_id: str or ``NoneType``
:param job_id: the user-provided job ID
:type prefix: str or ``NoneType``
:param prefix: (Optional) the user-provided prefix for a job ID
:rtype: str
:returns: A job ID
"""
if job_id is not None:
return job_id # depends on [control=['if'], data=['job_id']]
elif prefix is not None:
return str(prefix) + str(uuid.uuid4()) # depends on [control=['if'], data=['prefix']]
else:
return str(uuid.uuid4()) |
def _update_service_profile(self, handle, service_profile,
vlan_id, ucsm_ip):
"""Updates Service Profile on the UCS Manager.
Each of the ethernet ports on the Service Profile representing
the UCS Server, is updated with the VLAN profile corresponding
to the vlan_id passed in.
"""
virtio_port_list = (
CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports)
eth_port_paths = ["%s%s" % (service_profile, ep)
for ep in virtio_port_list]
vlan_name = self.make_vlan_name(vlan_id)
try:
obj = handle.query_dn(service_profile)
if not obj:
LOG.debug('UCS Manager network driver could not find '
'Service Profile %s in UCSM %s',
service_profile, ucsm_ip)
return False
for eth_port_path in eth_port_paths:
eth = handle.query_dn(eth_port_path)
if eth:
eth_if = self.ucsmsdk.vnicEtherIf(
parent_mo_or_dn=eth,
name=vlan_name,
default_net="no")
handle.add_mo(eth_if)
if not eth_if:
LOG.debug('UCS Manager network driver could not '
'update Service Profile %s with vlan %d',
service_profile, vlan_id)
return False
else:
LOG.debug('UCS Manager network driver did not find '
'ethernet port at %s', eth_port_path)
handle.commit()
return True
except Exception as e:
return self._handle_ucsm_exception(e, 'Service Profile',
vlan_name, ucsm_ip) | def function[_update_service_profile, parameter[self, handle, service_profile, vlan_id, ucsm_ip]]:
constant[Updates Service Profile on the UCS Manager.
Each of the ethernet ports on the Service Profile representing
the UCS Server, is updated with the VLAN profile corresponding
to the vlan_id passed in.
]
variable[virtio_port_list] assign[=] call[name[CONF].ml2_cisco_ucsm.ucsms][name[ucsm_ip]].ucsm_virtio_eth_ports
variable[eth_port_paths] assign[=] <ast.ListComp object at 0x7da1b1be77f0>
variable[vlan_name] assign[=] call[name[self].make_vlan_name, parameter[name[vlan_id]]]
<ast.Try object at 0x7da1b1be45e0> | keyword[def] identifier[_update_service_profile] ( identifier[self] , identifier[handle] , identifier[service_profile] ,
identifier[vlan_id] , identifier[ucsm_ip] ):
literal[string]
identifier[virtio_port_list] =(
identifier[CONF] . identifier[ml2_cisco_ucsm] . identifier[ucsms] [ identifier[ucsm_ip] ]. identifier[ucsm_virtio_eth_ports] )
identifier[eth_port_paths] =[ literal[string] %( identifier[service_profile] , identifier[ep] )
keyword[for] identifier[ep] keyword[in] identifier[virtio_port_list] ]
identifier[vlan_name] = identifier[self] . identifier[make_vlan_name] ( identifier[vlan_id] )
keyword[try] :
identifier[obj] = identifier[handle] . identifier[query_dn] ( identifier[service_profile] )
keyword[if] keyword[not] identifier[obj] :
identifier[LOG] . identifier[debug] ( literal[string]
literal[string] ,
identifier[service_profile] , identifier[ucsm_ip] )
keyword[return] keyword[False]
keyword[for] identifier[eth_port_path] keyword[in] identifier[eth_port_paths] :
identifier[eth] = identifier[handle] . identifier[query_dn] ( identifier[eth_port_path] )
keyword[if] identifier[eth] :
identifier[eth_if] = identifier[self] . identifier[ucsmsdk] . identifier[vnicEtherIf] (
identifier[parent_mo_or_dn] = identifier[eth] ,
identifier[name] = identifier[vlan_name] ,
identifier[default_net] = literal[string] )
identifier[handle] . identifier[add_mo] ( identifier[eth_if] )
keyword[if] keyword[not] identifier[eth_if] :
identifier[LOG] . identifier[debug] ( literal[string]
literal[string] ,
identifier[service_profile] , identifier[vlan_id] )
keyword[return] keyword[False]
keyword[else] :
identifier[LOG] . identifier[debug] ( literal[string]
literal[string] , identifier[eth_port_path] )
identifier[handle] . identifier[commit] ()
keyword[return] keyword[True]
keyword[except] identifier[Exception] keyword[as] identifier[e] :
keyword[return] identifier[self] . identifier[_handle_ucsm_exception] ( identifier[e] , literal[string] ,
identifier[vlan_name] , identifier[ucsm_ip] ) | def _update_service_profile(self, handle, service_profile, vlan_id, ucsm_ip):
"""Updates Service Profile on the UCS Manager.
Each of the ethernet ports on the Service Profile representing
the UCS Server, is updated with the VLAN profile corresponding
to the vlan_id passed in.
"""
virtio_port_list = CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports
eth_port_paths = ['%s%s' % (service_profile, ep) for ep in virtio_port_list]
vlan_name = self.make_vlan_name(vlan_id)
try:
obj = handle.query_dn(service_profile)
if not obj:
LOG.debug('UCS Manager network driver could not find Service Profile %s in UCSM %s', service_profile, ucsm_ip)
return False # depends on [control=['if'], data=[]]
for eth_port_path in eth_port_paths:
eth = handle.query_dn(eth_port_path)
if eth:
eth_if = self.ucsmsdk.vnicEtherIf(parent_mo_or_dn=eth, name=vlan_name, default_net='no')
handle.add_mo(eth_if)
if not eth_if:
LOG.debug('UCS Manager network driver could not update Service Profile %s with vlan %d', service_profile, vlan_id)
return False # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]]
else:
LOG.debug('UCS Manager network driver did not find ethernet port at %s', eth_port_path) # depends on [control=['for'], data=['eth_port_path']]
handle.commit()
return True # depends on [control=['try'], data=[]]
except Exception as e:
return self._handle_ucsm_exception(e, 'Service Profile', vlan_name, ucsm_ip) # depends on [control=['except'], data=['e']] |
def status(self, *ids):
"""Return the statuses of jobs with associated ids at the
time of call: either 'SUBMITED', 'CANCELLED', 'RUNNING',
'COMPLETED' or 'FAILED'.
"""
with self.lock:
if len(ids) > 1:
return [self._status[i] for i in ids]
else:
return self._status[ids[0]] | def function[status, parameter[self]]:
constant[Return the statuses of jobs with associated ids at the
time of call: either 'SUBMITED', 'CANCELLED', 'RUNNING',
'COMPLETED' or 'FAILED'.
]
with name[self].lock begin[:]
if compare[call[name[len], parameter[name[ids]]] greater[>] constant[1]] begin[:]
return[<ast.ListComp object at 0x7da204344640>] | keyword[def] identifier[status] ( identifier[self] ,* identifier[ids] ):
literal[string]
keyword[with] identifier[self] . identifier[lock] :
keyword[if] identifier[len] ( identifier[ids] )> literal[int] :
keyword[return] [ identifier[self] . identifier[_status] [ identifier[i] ] keyword[for] identifier[i] keyword[in] identifier[ids] ]
keyword[else] :
keyword[return] identifier[self] . identifier[_status] [ identifier[ids] [ literal[int] ]] | def status(self, *ids):
"""Return the statuses of jobs with associated ids at the
time of call: either 'SUBMITED', 'CANCELLED', 'RUNNING',
'COMPLETED' or 'FAILED'.
"""
with self.lock:
if len(ids) > 1:
return [self._status[i] for i in ids] # depends on [control=['if'], data=[]]
else:
return self._status[ids[0]] # depends on [control=['with'], data=[]] |
def as_dict(self):
"""
Returns dictionary with changeset's attributes and their values.
"""
data = get_dict_for_attrs(self, ['id', 'raw_id', 'short_id',
'revision', 'date', 'message'])
data['author'] = {'name': self.author_name, 'email': self.author_email}
data['added'] = [node.path for node in self.added]
data['changed'] = [node.path for node in self.changed]
data['removed'] = [node.path for node in self.removed]
return data | def function[as_dict, parameter[self]]:
constant[
Returns dictionary with changeset's attributes and their values.
]
variable[data] assign[=] call[name[get_dict_for_attrs], parameter[name[self], list[[<ast.Constant object at 0x7da1b25471c0>, <ast.Constant object at 0x7da1b2545ae0>, <ast.Constant object at 0x7da1b2547cd0>, <ast.Constant object at 0x7da1b2544f10>, <ast.Constant object at 0x7da1b25440d0>, <ast.Constant object at 0x7da1b25476d0>]]]]
call[name[data]][constant[author]] assign[=] dictionary[[<ast.Constant object at 0x7da1b2547430>, <ast.Constant object at 0x7da1b2547880>], [<ast.Attribute object at 0x7da1b25475b0>, <ast.Attribute object at 0x7da1b25464d0>]]
call[name[data]][constant[added]] assign[=] <ast.ListComp object at 0x7da1b2544e20>
call[name[data]][constant[changed]] assign[=] <ast.ListComp object at 0x7da1b2545240>
call[name[data]][constant[removed]] assign[=] <ast.ListComp object at 0x7da204344cd0>
return[name[data]] | keyword[def] identifier[as_dict] ( identifier[self] ):
literal[string]
identifier[data] = identifier[get_dict_for_attrs] ( identifier[self] ,[ literal[string] , literal[string] , literal[string] ,
literal[string] , literal[string] , literal[string] ])
identifier[data] [ literal[string] ]={ literal[string] : identifier[self] . identifier[author_name] , literal[string] : identifier[self] . identifier[author_email] }
identifier[data] [ literal[string] ]=[ identifier[node] . identifier[path] keyword[for] identifier[node] keyword[in] identifier[self] . identifier[added] ]
identifier[data] [ literal[string] ]=[ identifier[node] . identifier[path] keyword[for] identifier[node] keyword[in] identifier[self] . identifier[changed] ]
identifier[data] [ literal[string] ]=[ identifier[node] . identifier[path] keyword[for] identifier[node] keyword[in] identifier[self] . identifier[removed] ]
keyword[return] identifier[data] | def as_dict(self):
"""
Returns dictionary with changeset's attributes and their values.
"""
data = get_dict_for_attrs(self, ['id', 'raw_id', 'short_id', 'revision', 'date', 'message'])
data['author'] = {'name': self.author_name, 'email': self.author_email}
data['added'] = [node.path for node in self.added]
data['changed'] = [node.path for node in self.changed]
data['removed'] = [node.path for node in self.removed]
return data |
def update_user(self):
"""
Save the state of the current user
"""
# First create a copy of the current user
user_dict = self.serialize()
# Then delete the entities in the description field
del user_dict['description']['entities']
# Then upload user_dict
user, meta = self._api.update_user('me', data=user_dict) | def function[update_user, parameter[self]]:
constant[
Save the state of the current user
]
variable[user_dict] assign[=] call[name[self].serialize, parameter[]]
<ast.Delete object at 0x7da2044c1f30>
<ast.Tuple object at 0x7da2044c2e90> assign[=] call[name[self]._api.update_user, parameter[constant[me]]] | keyword[def] identifier[update_user] ( identifier[self] ):
literal[string]
identifier[user_dict] = identifier[self] . identifier[serialize] ()
keyword[del] identifier[user_dict] [ literal[string] ][ literal[string] ]
identifier[user] , identifier[meta] = identifier[self] . identifier[_api] . identifier[update_user] ( literal[string] , identifier[data] = identifier[user_dict] ) | def update_user(self):
"""
Save the state of the current user
"""
# First create a copy of the current user
user_dict = self.serialize()
# Then delete the entities in the description field
del user_dict['description']['entities']
# Then upload user_dict
(user, meta) = self._api.update_user('me', data=user_dict) |
def _calc_specpp_loE(self, Egamma):
"""
Delta-functional approximation for low energies Egamma < 0.1 TeV
"""
from scipy.integrate import quad
Egamma = Egamma.to("TeV").value
Epimin = Egamma + self._m_pi ** 2 / (4 * Egamma)
result = (
2
* quad(
self._delta_integrand, Epimin, np.inf, epsrel=1e-3, epsabs=0
)[0]
)
return result * u.Unit("1/(s TeV)") | def function[_calc_specpp_loE, parameter[self, Egamma]]:
constant[
Delta-functional approximation for low energies Egamma < 0.1 TeV
]
from relative_module[scipy.integrate] import module[quad]
variable[Egamma] assign[=] call[name[Egamma].to, parameter[constant[TeV]]].value
variable[Epimin] assign[=] binary_operation[name[Egamma] + binary_operation[binary_operation[name[self]._m_pi ** constant[2]] / binary_operation[constant[4] * name[Egamma]]]]
variable[result] assign[=] binary_operation[constant[2] * call[call[name[quad], parameter[name[self]._delta_integrand, name[Epimin], name[np].inf]]][constant[0]]]
return[binary_operation[name[result] * call[name[u].Unit, parameter[constant[1/(s TeV)]]]]] | keyword[def] identifier[_calc_specpp_loE] ( identifier[self] , identifier[Egamma] ):
literal[string]
keyword[from] identifier[scipy] . identifier[integrate] keyword[import] identifier[quad]
identifier[Egamma] = identifier[Egamma] . identifier[to] ( literal[string] ). identifier[value]
identifier[Epimin] = identifier[Egamma] + identifier[self] . identifier[_m_pi] ** literal[int] /( literal[int] * identifier[Egamma] )
identifier[result] =(
literal[int]
* identifier[quad] (
identifier[self] . identifier[_delta_integrand] , identifier[Epimin] , identifier[np] . identifier[inf] , identifier[epsrel] = literal[int] , identifier[epsabs] = literal[int]
)[ literal[int] ]
)
keyword[return] identifier[result] * identifier[u] . identifier[Unit] ( literal[string] ) | def _calc_specpp_loE(self, Egamma):
"""
Delta-functional approximation for low energies Egamma < 0.1 TeV
"""
from scipy.integrate import quad
Egamma = Egamma.to('TeV').value
Epimin = Egamma + self._m_pi ** 2 / (4 * Egamma)
result = 2 * quad(self._delta_integrand, Epimin, np.inf, epsrel=0.001, epsabs=0)[0]
return result * u.Unit('1/(s TeV)') |
def _UpdateChildIndex(self, urn, mutation_pool):
"""Update the child indexes.
This function maintains the index for direct child relations. When we set
an AFF4 path, we always add an attribute like
index:dir/%(childname)s to its parent. This is written
asynchronously to its parent.
In order to query for all direct children of an AFF4 object, we then simple
get the attributes which match the regex index:dir/.+ which are the
direct children.
Args:
urn: The AFF4 object for which we update the index.
mutation_pool: A MutationPool object to write to.
"""
try:
# Create navigation aids by touching intermediate subject names.
while urn.Path() != "/":
basename = urn.Basename()
dirname = rdfvalue.RDFURN(urn.Dirname())
try:
self.intermediate_cache.Get(urn)
return
except KeyError:
extra_attributes = None
# This is a performance optimization. On the root there is no point
# setting the last access time since it gets accessed all the time.
# TODO(amoser): Can we get rid of the index in the root node entirely?
# It's too big to query anyways...
if dirname != u"/":
extra_attributes = {
AFF4Object.SchemaCls.LAST: [
rdfvalue.RDFDatetime.Now().SerializeToDataStore()
]
}
mutation_pool.AFF4AddChild(
dirname, basename, extra_attributes=extra_attributes)
self.intermediate_cache.Put(urn, 1)
urn = dirname
except access_control.UnauthorizedAccess:
pass | def function[_UpdateChildIndex, parameter[self, urn, mutation_pool]]:
constant[Update the child indexes.
This function maintains the index for direct child relations. When we set
an AFF4 path, we always add an attribute like
index:dir/%(childname)s to its parent. This is written
asynchronously to its parent.
In order to query for all direct children of an AFF4 object, we then simple
get the attributes which match the regex index:dir/.+ which are the
direct children.
Args:
urn: The AFF4 object for which we update the index.
mutation_pool: A MutationPool object to write to.
]
<ast.Try object at 0x7da1b2344f10> | keyword[def] identifier[_UpdateChildIndex] ( identifier[self] , identifier[urn] , identifier[mutation_pool] ):
literal[string]
keyword[try] :
keyword[while] identifier[urn] . identifier[Path] ()!= literal[string] :
identifier[basename] = identifier[urn] . identifier[Basename] ()
identifier[dirname] = identifier[rdfvalue] . identifier[RDFURN] ( identifier[urn] . identifier[Dirname] ())
keyword[try] :
identifier[self] . identifier[intermediate_cache] . identifier[Get] ( identifier[urn] )
keyword[return]
keyword[except] identifier[KeyError] :
identifier[extra_attributes] = keyword[None]
keyword[if] identifier[dirname] != literal[string] :
identifier[extra_attributes] ={
identifier[AFF4Object] . identifier[SchemaCls] . identifier[LAST] :[
identifier[rdfvalue] . identifier[RDFDatetime] . identifier[Now] (). identifier[SerializeToDataStore] ()
]
}
identifier[mutation_pool] . identifier[AFF4AddChild] (
identifier[dirname] , identifier[basename] , identifier[extra_attributes] = identifier[extra_attributes] )
identifier[self] . identifier[intermediate_cache] . identifier[Put] ( identifier[urn] , literal[int] )
identifier[urn] = identifier[dirname]
keyword[except] identifier[access_control] . identifier[UnauthorizedAccess] :
keyword[pass] | def _UpdateChildIndex(self, urn, mutation_pool):
"""Update the child indexes.
This function maintains the index for direct child relations. When we set
an AFF4 path, we always add an attribute like
index:dir/%(childname)s to its parent. This is written
asynchronously to its parent.
In order to query for all direct children of an AFF4 object, we then simple
get the attributes which match the regex index:dir/.+ which are the
direct children.
Args:
urn: The AFF4 object for which we update the index.
mutation_pool: A MutationPool object to write to.
"""
try:
# Create navigation aids by touching intermediate subject names.
while urn.Path() != '/':
basename = urn.Basename()
dirname = rdfvalue.RDFURN(urn.Dirname())
try:
self.intermediate_cache.Get(urn)
return # depends on [control=['try'], data=[]]
except KeyError:
extra_attributes = None
# This is a performance optimization. On the root there is no point
# setting the last access time since it gets accessed all the time.
# TODO(amoser): Can we get rid of the index in the root node entirely?
# It's too big to query anyways...
if dirname != u'/':
extra_attributes = {AFF4Object.SchemaCls.LAST: [rdfvalue.RDFDatetime.Now().SerializeToDataStore()]} # depends on [control=['if'], data=[]]
mutation_pool.AFF4AddChild(dirname, basename, extra_attributes=extra_attributes)
self.intermediate_cache.Put(urn, 1)
urn = dirname # depends on [control=['except'], data=[]] # depends on [control=['while'], data=[]] # depends on [control=['try'], data=[]]
except access_control.UnauthorizedAccess:
pass # depends on [control=['except'], data=[]] |
def add_process_work_item_type_rule(self, process_rule_create, process_id, wit_ref_name):
"""AddProcessWorkItemTypeRule.
[Preview API] Adds a rule to work item type in the process.
:param :class:`<CreateProcessRuleRequest> <azure.devops.v5_0.work_item_tracking_process.models.CreateProcessRuleRequest>` process_rule_create:
:param str process_id: The ID of the process
:param str wit_ref_name: The reference name of the work item type
:rtype: :class:`<ProcessRule> <azure.devops.v5_0.work_item_tracking_process.models.ProcessRule>`
"""
route_values = {}
if process_id is not None:
route_values['processId'] = self._serialize.url('process_id', process_id, 'str')
if wit_ref_name is not None:
route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str')
content = self._serialize.body(process_rule_create, 'CreateProcessRuleRequest')
response = self._send(http_method='POST',
location_id='76fe3432-d825-479d-a5f6-983bbb78b4f3',
version='5.0-preview.2',
route_values=route_values,
content=content)
return self._deserialize('ProcessRule', response) | def function[add_process_work_item_type_rule, parameter[self, process_rule_create, process_id, wit_ref_name]]:
constant[AddProcessWorkItemTypeRule.
[Preview API] Adds a rule to work item type in the process.
:param :class:`<CreateProcessRuleRequest> <azure.devops.v5_0.work_item_tracking_process.models.CreateProcessRuleRequest>` process_rule_create:
:param str process_id: The ID of the process
:param str wit_ref_name: The reference name of the work item type
:rtype: :class:`<ProcessRule> <azure.devops.v5_0.work_item_tracking_process.models.ProcessRule>`
]
variable[route_values] assign[=] dictionary[[], []]
if compare[name[process_id] is_not constant[None]] begin[:]
call[name[route_values]][constant[processId]] assign[=] call[name[self]._serialize.url, parameter[constant[process_id], name[process_id], constant[str]]]
if compare[name[wit_ref_name] is_not constant[None]] begin[:]
call[name[route_values]][constant[witRefName]] assign[=] call[name[self]._serialize.url, parameter[constant[wit_ref_name], name[wit_ref_name], constant[str]]]
variable[content] assign[=] call[name[self]._serialize.body, parameter[name[process_rule_create], constant[CreateProcessRuleRequest]]]
variable[response] assign[=] call[name[self]._send, parameter[]]
return[call[name[self]._deserialize, parameter[constant[ProcessRule], name[response]]]] | keyword[def] identifier[add_process_work_item_type_rule] ( identifier[self] , identifier[process_rule_create] , identifier[process_id] , identifier[wit_ref_name] ):
literal[string]
identifier[route_values] ={}
keyword[if] identifier[process_id] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[process_id] , literal[string] )
keyword[if] identifier[wit_ref_name] keyword[is] keyword[not] keyword[None] :
identifier[route_values] [ literal[string] ]= identifier[self] . identifier[_serialize] . identifier[url] ( literal[string] , identifier[wit_ref_name] , literal[string] )
identifier[content] = identifier[self] . identifier[_serialize] . identifier[body] ( identifier[process_rule_create] , literal[string] )
identifier[response] = identifier[self] . identifier[_send] ( identifier[http_method] = literal[string] ,
identifier[location_id] = literal[string] ,
identifier[version] = literal[string] ,
identifier[route_values] = identifier[route_values] ,
identifier[content] = identifier[content] )
keyword[return] identifier[self] . identifier[_deserialize] ( literal[string] , identifier[response] ) | def add_process_work_item_type_rule(self, process_rule_create, process_id, wit_ref_name):
"""AddProcessWorkItemTypeRule.
[Preview API] Adds a rule to work item type in the process.
:param :class:`<CreateProcessRuleRequest> <azure.devops.v5_0.work_item_tracking_process.models.CreateProcessRuleRequest>` process_rule_create:
:param str process_id: The ID of the process
:param str wit_ref_name: The reference name of the work item type
:rtype: :class:`<ProcessRule> <azure.devops.v5_0.work_item_tracking_process.models.ProcessRule>`
"""
route_values = {}
if process_id is not None:
route_values['processId'] = self._serialize.url('process_id', process_id, 'str') # depends on [control=['if'], data=['process_id']]
if wit_ref_name is not None:
route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str') # depends on [control=['if'], data=['wit_ref_name']]
content = self._serialize.body(process_rule_create, 'CreateProcessRuleRequest')
response = self._send(http_method='POST', location_id='76fe3432-d825-479d-a5f6-983bbb78b4f3', version='5.0-preview.2', route_values=route_values, content=content)
return self._deserialize('ProcessRule', response) |
def download_file(url, file_name):
    """
    Helper for downloading a remote file to disk.

    If ``file_name`` already exists on disk the download is skipped
    entirely and 0 is returned.

    :param url: remote URL to download.
    :param file_name: local path the downloaded content is written to.
    :returns: number of bytes written to disk (0 if the file existed).
    :raises Exception: when the HTTP response has an error status.
    """
    logger.info("Downloading URL: %s", url)
    file_size = 0
    if not os.path.isfile(file_name):
        response = requests.get(url, stream=True)
        # Check the status *before* creating the local file. Previously a
        # failed request still created an empty file, which made all later
        # calls skip the download because the file appeared to exist.
        if not response.ok:
            raise Exception("Download exception. Will fail.")
        with open(file_name, "wb") as fp:
            for block in response.iter_content(1024):
                if not block:
                    break
                fp.write(block)
                file_size += len(block)
    logger.info("Download finished, size is %d bytes.", file_size)
    return file_size
constant[
Helper for downloading a remote file to disk.
]
call[name[logger].info, parameter[constant[Downloading URL: %s], name[url]]]
variable[file_size] assign[=] constant[0]
if <ast.UnaryOp object at 0x7da204962ef0> begin[:]
variable[response] assign[=] call[name[requests].get, parameter[name[url]]]
with call[name[open], parameter[name[file_name], constant[wb]]] begin[:]
if <ast.UnaryOp object at 0x7da204960940> begin[:]
<ast.Raise object at 0x7da204960c70>
for taget[name[block]] in starred[call[name[response].iter_content, parameter[constant[1024]]]] begin[:]
if <ast.UnaryOp object at 0x7da20c991c60> begin[:]
break
call[name[fp].write, parameter[name[block]]]
<ast.AugAssign object at 0x7da20c9905e0>
call[name[logger].info, parameter[constant[Download finished, size is %d bytes.], name[file_size]]]
return[name[file_size]] | keyword[def] identifier[download_file] ( identifier[url] , identifier[file_name] ):
literal[string]
identifier[logger] . identifier[info] ( literal[string] , identifier[url] )
identifier[file_size] = literal[int]
keyword[if] keyword[not] identifier[os] . identifier[path] . identifier[isfile] ( identifier[file_name] ):
identifier[response] = identifier[requests] . identifier[get] ( identifier[url] , identifier[stream] = keyword[True] )
keyword[with] identifier[open] ( identifier[file_name] , literal[string] ) keyword[as] identifier[fp] :
keyword[if] keyword[not] identifier[response] . identifier[ok] :
keyword[raise] identifier[Exception] ( literal[string] )
keyword[for] identifier[block] keyword[in] identifier[response] . identifier[iter_content] ( literal[int] ):
keyword[if] keyword[not] identifier[block] :
keyword[break]
identifier[fp] . identifier[write] ( identifier[block] )
identifier[file_size] += identifier[len] ( identifier[block] )
identifier[logger] . identifier[info] ( literal[string] , identifier[file_size] )
keyword[return] identifier[file_size] | def download_file(url, file_name):
"""
Helper for downloading a remote file to disk.
"""
logger.info('Downloading URL: %s', url)
file_size = 0
if not os.path.isfile(file_name):
response = requests.get(url, stream=True)
with open(file_name, 'wb') as fp:
if not response.ok:
raise Exception('Download exception. Will fail.') # depends on [control=['if'], data=[]]
for block in response.iter_content(1024):
if not block:
break # depends on [control=['if'], data=[]]
fp.write(block)
file_size += len(block) # depends on [control=['for'], data=['block']] # depends on [control=['with'], data=['fp']]
logger.info('Download finished, size is %d bytes.', file_size) # depends on [control=['if'], data=[]]
return file_size |
def read(config_values):
    """Read an ordered list of configuration values, deep-merging each
    parsed mapping into the accumulated configuration (later values
    take precedence over earlier ones)."""
    if not config_values:
        raise RheaError('Cannot read config_value: `{}`'.format(config_values))

    merged = {}
    for value in to_list(config_values):
        spec = ConfigSpec.get_from(value=value)
        spec.check_type()
        loaded = spec.read()
        if loaded and isinstance(loaded, Mapping):
            merged = deep_update(merged, loaded)
            continue
        # A non-mapping (or empty) result is only an error when the spec
        # insists that the value must exist.
        if spec.check_if_exists:
            raise RheaError('Cannot read config_value: `{}`'.format(spec))
    return merged
constant[Reads an ordered list of configuration values and deep merge the values in reverse order.]
if <ast.UnaryOp object at 0x7da1b193a1d0> begin[:]
<ast.Raise object at 0x7da1b193ae00>
variable[config_values] assign[=] call[name[to_list], parameter[name[config_values]]]
variable[config] assign[=] dictionary[[], []]
for taget[name[config_value]] in starred[name[config_values]] begin[:]
variable[config_value] assign[=] call[name[ConfigSpec].get_from, parameter[]]
call[name[config_value].check_type, parameter[]]
variable[config_results] assign[=] call[name[config_value].read, parameter[]]
if <ast.BoolOp object at 0x7da1b19b4ca0> begin[:]
variable[config] assign[=] call[name[deep_update], parameter[name[config], name[config_results]]]
return[name[config]] | keyword[def] identifier[read] ( identifier[config_values] ):
literal[string]
keyword[if] keyword[not] identifier[config_values] :
keyword[raise] identifier[RheaError] ( literal[string] . identifier[format] ( identifier[config_values] ))
identifier[config_values] = identifier[to_list] ( identifier[config_values] )
identifier[config] ={}
keyword[for] identifier[config_value] keyword[in] identifier[config_values] :
identifier[config_value] = identifier[ConfigSpec] . identifier[get_from] ( identifier[value] = identifier[config_value] )
identifier[config_value] . identifier[check_type] ()
identifier[config_results] = identifier[config_value] . identifier[read] ()
keyword[if] identifier[config_results] keyword[and] identifier[isinstance] ( identifier[config_results] , identifier[Mapping] ):
identifier[config] = identifier[deep_update] ( identifier[config] , identifier[config_results] )
keyword[elif] identifier[config_value] . identifier[check_if_exists] :
keyword[raise] identifier[RheaError] ( literal[string] . identifier[format] ( identifier[config_value] ))
keyword[return] identifier[config] | def read(config_values):
"""Reads an ordered list of configuration values and deep merge the values in reverse order."""
if not config_values:
raise RheaError('Cannot read config_value: `{}`'.format(config_values)) # depends on [control=['if'], data=[]]
config_values = to_list(config_values)
config = {}
for config_value in config_values:
config_value = ConfigSpec.get_from(value=config_value)
config_value.check_type()
config_results = config_value.read()
if config_results and isinstance(config_results, Mapping):
config = deep_update(config, config_results) # depends on [control=['if'], data=[]]
elif config_value.check_if_exists:
raise RheaError('Cannot read config_value: `{}`'.format(config_value)) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['config_value']]
return config |
def show(self):
        """
        Create the Toplevel widget and its child widgets to show in the spot of the cursor.
        This is the callback for the delayed :obj:`<Enter>` event (see :meth:`~Balloon._on_enter`).
        """
        # The balloon lives in its own Toplevel; a Canvas holds the header
        # (optional image + header text) and the wrapped body text.
        self._toplevel = tk.Toplevel(self.master)
        self._canvas = tk.Canvas(self._toplevel, background=self.__background)
        self.header_label = ttk.Label(self._canvas, text=self.__headertext, background=self.__background,
                                      image=self._photo_image, compound=tk.LEFT)
        # Body text wraps at the configured balloon width.
        self.text_label = ttk.Label(self._canvas, text=self.__text, wraplength=self.__width,
                                   background=self.__background)
        # Keep the balloon above all other windows and strip the window
        # manager decorations (no title bar/borders), as a tooltip should.
        self._toplevel.attributes("-topmost", True)
        self._toplevel.overrideredirect(True)
        self._grid_widgets()
        # Place the balloon 2 pixels right of and below the mouse pointer.
        x, y = self.master.winfo_pointerxy()
        self._canvas.update()
        # Update the Geometry of the Toplevel to update its position and size
        self._toplevel.geometry("{0}x{1}+{2}+{3}".format(self._canvas.winfo_width(), self._canvas.winfo_height(),
                                                         x + 2, y + 2))
constant[
Create the Toplevel widget and its child widgets to show in the spot of the cursor.
This is the callback for the delayed :obj:`<Enter>` event (see :meth:`~Balloon._on_enter`).
]
name[self]._toplevel assign[=] call[name[tk].Toplevel, parameter[name[self].master]]
name[self]._canvas assign[=] call[name[tk].Canvas, parameter[name[self]._toplevel]]
name[self].header_label assign[=] call[name[ttk].Label, parameter[name[self]._canvas]]
name[self].text_label assign[=] call[name[ttk].Label, parameter[name[self]._canvas]]
call[name[self]._toplevel.attributes, parameter[constant[-topmost], constant[True]]]
call[name[self]._toplevel.overrideredirect, parameter[constant[True]]]
call[name[self]._grid_widgets, parameter[]]
<ast.Tuple object at 0x7da1b2363700> assign[=] call[name[self].master.winfo_pointerxy, parameter[]]
call[name[self]._canvas.update, parameter[]]
call[name[self]._toplevel.geometry, parameter[call[constant[{0}x{1}+{2}+{3}].format, parameter[call[name[self]._canvas.winfo_width, parameter[]], call[name[self]._canvas.winfo_height, parameter[]], binary_operation[name[x] + constant[2]], binary_operation[name[y] + constant[2]]]]]] | keyword[def] identifier[show] ( identifier[self] ):
literal[string]
identifier[self] . identifier[_toplevel] = identifier[tk] . identifier[Toplevel] ( identifier[self] . identifier[master] )
identifier[self] . identifier[_canvas] = identifier[tk] . identifier[Canvas] ( identifier[self] . identifier[_toplevel] , identifier[background] = identifier[self] . identifier[__background] )
identifier[self] . identifier[header_label] = identifier[ttk] . identifier[Label] ( identifier[self] . identifier[_canvas] , identifier[text] = identifier[self] . identifier[__headertext] , identifier[background] = identifier[self] . identifier[__background] ,
identifier[image] = identifier[self] . identifier[_photo_image] , identifier[compound] = identifier[tk] . identifier[LEFT] )
identifier[self] . identifier[text_label] = identifier[ttk] . identifier[Label] ( identifier[self] . identifier[_canvas] , identifier[text] = identifier[self] . identifier[__text] , identifier[wraplength] = identifier[self] . identifier[__width] ,
identifier[background] = identifier[self] . identifier[__background] )
identifier[self] . identifier[_toplevel] . identifier[attributes] ( literal[string] , keyword[True] )
identifier[self] . identifier[_toplevel] . identifier[overrideredirect] ( keyword[True] )
identifier[self] . identifier[_grid_widgets] ()
identifier[x] , identifier[y] = identifier[self] . identifier[master] . identifier[winfo_pointerxy] ()
identifier[self] . identifier[_canvas] . identifier[update] ()
identifier[self] . identifier[_toplevel] . identifier[geometry] ( literal[string] . identifier[format] ( identifier[self] . identifier[_canvas] . identifier[winfo_width] (), identifier[self] . identifier[_canvas] . identifier[winfo_height] (),
identifier[x] + literal[int] , identifier[y] + literal[int] )) | def show(self):
"""
Create the Toplevel widget and its child widgets to show in the spot of the cursor.
This is the callback for the delayed :obj:`<Enter>` event (see :meth:`~Balloon._on_enter`).
"""
self._toplevel = tk.Toplevel(self.master)
self._canvas = tk.Canvas(self._toplevel, background=self.__background)
self.header_label = ttk.Label(self._canvas, text=self.__headertext, background=self.__background, image=self._photo_image, compound=tk.LEFT)
self.text_label = ttk.Label(self._canvas, text=self.__text, wraplength=self.__width, background=self.__background)
self._toplevel.attributes('-topmost', True)
self._toplevel.overrideredirect(True)
self._grid_widgets()
(x, y) = self.master.winfo_pointerxy()
self._canvas.update()
# Update the Geometry of the Toplevel to update its position and size
self._toplevel.geometry('{0}x{1}+{2}+{3}'.format(self._canvas.winfo_width(), self._canvas.winfo_height(), x + 2, y + 2)) |
def render_workflow_html_template(filename, subtemplate, filelists, **kwargs):
    """ Writes a template given inputs from the workflow generator. Takes
    a list of tuples. Each tuple is a pycbc File object. Also the name of the
    subtemplate to render and the filename of the output.
    """
    # Ensure the output directory exists before writing anything into it.
    out_dir = os.path.dirname(filename)
    makedir(out_dir)

    # Flatten the list-of-lists into plain file names, skipping Nones.
    # A TypeError means the input was not iterable as expected; fall back
    # to an empty list of names in that case.
    try:
        filenames = []
        for filelist in filelists:
            for item in filelist:
                if item is not None:
                    filenames.append(item.name)
    except TypeError:
        filenames = []

    # render subtemplate
    subtemplate_dir = pycbc.results.__path__[0] + '/templates/wells'
    env = Environment(loader=FileSystemLoader(subtemplate_dir))
    env.globals.update(get_embedded_config=get_embedded_config,
                       path_exists=os.path.exists,
                       len=len)
    template = env.get_template(subtemplate)

    # Build the template context; kwargs may override the default entries.
    context = {'filelists': filelists,
               'dir': out_dir}
    context.update(kwargs)
    rendered = template.render(context)

    # save as html page
    kwds = {'render-function': 'render_tmplt',
            'filenames': ','.join(filenames)}
    save_html_with_metadata(str(rendered), filename, None, kwds)
constant[ Writes a template given inputs from the workflow generator. Takes
a list of tuples. Each tuple is a pycbc File object. Also the name of the
subtemplate to render and the filename of the output.
]
variable[dirnam] assign[=] call[name[os].path.dirname, parameter[name[filename]]]
call[name[makedir], parameter[name[dirnam]]]
<ast.Try object at 0x7da20c76ee00>
variable[subtemplate_dir] assign[=] binary_operation[call[name[pycbc].results.__path__][constant[0]] + constant[/templates/wells]]
variable[env] assign[=] call[name[Environment], parameter[]]
call[name[env].globals.update, parameter[]]
call[name[env].globals.update, parameter[]]
call[name[env].globals.update, parameter[]]
variable[subtemplate] assign[=] call[name[env].get_template, parameter[name[subtemplate]]]
variable[context] assign[=] dictionary[[<ast.Constant object at 0x7da20c76d090>, <ast.Constant object at 0x7da20c76edd0>], [<ast.Name object at 0x7da20c76ec50>, <ast.Name object at 0x7da20c76db40>]]
call[name[context].update, parameter[name[kwargs]]]
variable[output] assign[=] call[name[subtemplate].render, parameter[name[context]]]
variable[kwds] assign[=] dictionary[[<ast.Constant object at 0x7da20c76ee90>, <ast.Constant object at 0x7da20c76cf10>], [<ast.Constant object at 0x7da20c76e620>, <ast.Call object at 0x7da20c76ece0>]]
call[name[save_html_with_metadata], parameter[call[name[str], parameter[name[output]]], name[filename], constant[None], name[kwds]]] | keyword[def] identifier[render_workflow_html_template] ( identifier[filename] , identifier[subtemplate] , identifier[filelists] ,** identifier[kwargs] ):
literal[string]
identifier[dirnam] = identifier[os] . identifier[path] . identifier[dirname] ( identifier[filename] )
identifier[makedir] ( identifier[dirnam] )
keyword[try] :
identifier[filenames] =[ identifier[f] . identifier[name] keyword[for] identifier[filelist] keyword[in] identifier[filelists] keyword[for] identifier[f] keyword[in] identifier[filelist] keyword[if] identifier[f] keyword[is] keyword[not] keyword[None] ]
keyword[except] identifier[TypeError] :
identifier[filenames] =[]
identifier[subtemplate_dir] = identifier[pycbc] . identifier[results] . identifier[__path__] [ literal[int] ]+ literal[string]
identifier[env] = identifier[Environment] ( identifier[loader] = identifier[FileSystemLoader] ( identifier[subtemplate_dir] ))
identifier[env] . identifier[globals] . identifier[update] ( identifier[get_embedded_config] = identifier[get_embedded_config] )
identifier[env] . identifier[globals] . identifier[update] ( identifier[path_exists] = identifier[os] . identifier[path] . identifier[exists] )
identifier[env] . identifier[globals] . identifier[update] ( identifier[len] = identifier[len] )
identifier[subtemplate] = identifier[env] . identifier[get_template] ( identifier[subtemplate] )
identifier[context] ={ literal[string] : identifier[filelists] ,
literal[string] : identifier[dirnam] }
identifier[context] . identifier[update] ( identifier[kwargs] )
identifier[output] = identifier[subtemplate] . identifier[render] ( identifier[context] )
identifier[kwds] ={ literal[string] : literal[string] ,
literal[string] : literal[string] . identifier[join] ( identifier[filenames] )}
identifier[save_html_with_metadata] ( identifier[str] ( identifier[output] ), identifier[filename] , keyword[None] , identifier[kwds] ) | def render_workflow_html_template(filename, subtemplate, filelists, **kwargs):
""" Writes a template given inputs from the workflow generator. Takes
a list of tuples. Each tuple is a pycbc File object. Also the name of the
subtemplate to render and the filename of the output.
"""
dirnam = os.path.dirname(filename)
makedir(dirnam)
try:
filenames = [f.name for filelist in filelists for f in filelist if f is not None] # depends on [control=['try'], data=[]]
except TypeError:
filenames = [] # depends on [control=['except'], data=[]]
# render subtemplate
subtemplate_dir = pycbc.results.__path__[0] + '/templates/wells'
env = Environment(loader=FileSystemLoader(subtemplate_dir))
env.globals.update(get_embedded_config=get_embedded_config)
env.globals.update(path_exists=os.path.exists)
env.globals.update(len=len)
subtemplate = env.get_template(subtemplate)
context = {'filelists': filelists, 'dir': dirnam}
context.update(kwargs)
output = subtemplate.render(context)
# save as html page
kwds = {'render-function': 'render_tmplt', 'filenames': ','.join(filenames)}
save_html_with_metadata(str(output), filename, None, kwds) |
def user_delete(self, uid, channel=None):
    """Delete user (helper)

    IPMI has no real concept of user deletion, so this is a best effort
    to produce the expected result: the password is disabled, channel
    access is revoked and the user name is blanked.

    :param uid: user number [1:16]
    :param channel: number [1:7]
    """
    # TODO(jjohnson2): Provide OEM extensibility to cover user deletion
    target_channel = self.get_network_channel() if channel is None else channel
    self.set_user_password(uid, mode='disable', password=None)
    # TODO(steveweber) perhaps should set user access on all channels
    # so new users dont get extra access
    self.set_user_access(uid, channel=target_channel, callback=False,
                         link_auth=False, ipmi_msg=False,
                         privilege_level='no_access')
    try:
        # Preferred convention: blank the name with all '\x00'.
        self.set_user_name(uid, '')
    except exc.IpmiException as ipmi_err:
        # 0xcc ("invalid data field") is frequently reported for the empty
        # name; fall back to the alternate all-'\xff' convention. Any other
        # error is propagated so callers know the deletion did not succeed.
        if ipmi_err.ipmicode != 0xcc:
            raise
        self.set_user_name(uid, b'\xff' * 16)
    return True
constant[Delete user (helper)
Note that in IPMI, user 'deletion' isn't a concept. This function
will make a best effort to provide the expected result (e.g.
web interfaces skipping names and ipmitool skipping as well.
:param uid: user number [1:16]
:param channel: number [1:7]
]
if compare[name[channel] is constant[None]] begin[:]
variable[channel] assign[=] call[name[self].get_network_channel, parameter[]]
call[name[self].set_user_password, parameter[name[uid]]]
call[name[self].set_user_access, parameter[name[uid]]]
<ast.Try object at 0x7da20e954c40>
return[constant[True]] | keyword[def] identifier[user_delete] ( identifier[self] , identifier[uid] , identifier[channel] = keyword[None] ):
literal[string]
keyword[if] identifier[channel] keyword[is] keyword[None] :
identifier[channel] = identifier[self] . identifier[get_network_channel] ()
identifier[self] . identifier[set_user_password] ( identifier[uid] , identifier[mode] = literal[string] , identifier[password] = keyword[None] )
identifier[self] . identifier[set_user_access] ( identifier[uid] , identifier[channel] = identifier[channel] , identifier[callback] = keyword[False] ,
identifier[link_auth] = keyword[False] , identifier[ipmi_msg] = keyword[False] ,
identifier[privilege_level] = literal[string] )
keyword[try] :
identifier[self] . identifier[set_user_name] ( identifier[uid] , literal[string] )
keyword[except] identifier[exc] . identifier[IpmiException] keyword[as] identifier[ie] :
keyword[if] identifier[ie] . identifier[ipmicode] != literal[int] :
keyword[raise]
identifier[self] . identifier[set_user_name] ( identifier[uid] , literal[string] * literal[int] )
keyword[return] keyword[True] | def user_delete(self, uid, channel=None):
"""Delete user (helper)
Note that in IPMI, user 'deletion' isn't a concept. This function
will make a best effort to provide the expected result (e.g.
web interfaces skipping names and ipmitool skipping as well.
:param uid: user number [1:16]
:param channel: number [1:7]
"""
# TODO(jjohnson2): Provide OEM extensibility to cover user deletion
if channel is None:
channel = self.get_network_channel() # depends on [control=['if'], data=['channel']]
self.set_user_password(uid, mode='disable', password=None)
# TODO(steveweber) perhaps should set user access on all channels
# so new users dont get extra access
self.set_user_access(uid, channel=channel, callback=False, link_auth=False, ipmi_msg=False, privilege_level='no_access')
try:
# First try to set name to all \x00 explicitly
self.set_user_name(uid, '') # depends on [control=['try'], data=[]]
except exc.IpmiException as ie:
if ie.ipmicode != 204:
raise # depends on [control=['if'], data=[]]
# An invalid data field in request is frequently reported.
# however another convention that exists is all '\xff'
# if this fails, pass up the error so that calling code knows
# that the deletion did not go as planned for now
self.set_user_name(uid, b'\xff' * 16) # depends on [control=['except'], data=['ie']]
return True |
def to_json(self):
    """Convert the Humidity Condition to a dictionary."""
    attributes = ('hum_type', 'hum_value', 'barometric_pressure',
                  'schedule', 'wet_bulb_range')
    return {name: getattr(self, name) for name in attributes}
} | def function[to_json, parameter[self]]:
constant[Convert the Humidity Condition to a dictionary.]
return[dictionary[[<ast.Constant object at 0x7da1b12652a0>, <ast.Constant object at 0x7da1b1265f30>, <ast.Constant object at 0x7da1b1264ee0>, <ast.Constant object at 0x7da1b12666b0>, <ast.Constant object at 0x7da1b1267430>], [<ast.Attribute object at 0x7da1b1266aa0>, <ast.Attribute object at 0x7da1b1265240>, <ast.Attribute object at 0x7da1b1267b20>, <ast.Attribute object at 0x7da1b1266f20>, <ast.Attribute object at 0x7da1b12678e0>]]] | keyword[def] identifier[to_json] ( identifier[self] ):
literal[string]
keyword[return] {
literal[string] : identifier[self] . identifier[hum_type] ,
literal[string] : identifier[self] . identifier[hum_value] ,
literal[string] : identifier[self] . identifier[barometric_pressure] ,
literal[string] : identifier[self] . identifier[schedule] ,
literal[string] : identifier[self] . identifier[wet_bulb_range] ,
} | def to_json(self):
"""Convert the Humidity Condition to a dictionary."""
return {'hum_type': self.hum_type, 'hum_value': self.hum_value, 'barometric_pressure': self.barometric_pressure, 'schedule': self.schedule, 'wet_bulb_range': self.wet_bulb_range} |
def tokenize_sentences(self):
        """Apply sentence tokenization to this Text instance. Creates ``sentences`` layer.
        Automatically tokenizes paragraphs, if they are not already tokenized.
        Also, if word tokenization has already been performed, tries to fit
        the sentence tokenization into the existing word tokenization;

        Returns
        -------
        self, with the ``sentences`` layer populated as a list of
        {'start': ..., 'end': ...} character-span dicts.
        """
        if not self.is_tagged(PARAGRAPHS):
            self.tokenize_paragraphs()
        # The sentence tokenizer configured for this Text instance
        # (name-mangled private attribute).
        tok = self.__sentence_tokenizer
        text = self.text
        dicts = []
        # Tokenize paragraph by paragraph, so sentence spans never cross
        # a paragraph boundary.
        for paragraph in self[PARAGRAPHS]:
            para_start, para_end = paragraph[START], paragraph[END]
            para_text = text[para_start:para_end]
            if not self.is_tagged(WORDS):
                # Non-hack variant: word tokenization has not been applied yet,
                # so we proceed in natural order (first sentences, then words)
                spans = tok.span_tokenize(para_text)
                # span offsets are paragraph-relative; shift to text-global.
                for start, end in spans:
                    dicts.append({'start': start+para_start, 'end': end+para_start})
            else:
                # A hack variant: word tokenization has already been made, so
                # we try to use existing word tokenization (first words, then sentences)
                para_words = \
                    [ w for w in self[WORDS] if w[START]>=para_start and w[END]<=para_end ]
                para_word_texts = \
                    [ w[TEXT] for w in para_words ]
                try:
                    # Apply sentences_from_tokens method (if available)
                    sents = tok.sentences_from_tokens( para_word_texts )
                except AttributeError as e:
                    # Tokenizer does not support sentences_from_tokens:
                    # propagate the error to the caller.
                    raise
                # Align result of the sentence tokenization with the initial word tokenization
                # in order to determine the sentence boundaries
                # i walks the paragraph's words; j walks the current
                # sentence's tokens. Every sentence token must match the
                # corresponding word exactly, otherwise alignment fails.
                i = 0
                for sentence in sents:
                    j = 0
                    firstToken = None
                    lastToken = None
                    while i < len(para_words):
                        if para_words[i][TEXT] != sentence[j]:
                            raise Exception('Error on aligning: ', para_word_texts,' and ',sentence,' at positions ',i,j)
                        if j == 0:
                            firstToken = para_words[i]
                        if j == len(sentence) - 1:
                            # Reached the last token of the sentence: record
                            # it, advance past it and move to the next sentence.
                            lastToken = para_words[i]
                            i+=1
                            break
                        j+=1
                        i+=1
                    # Sentence span = from first word's start to last word's end.
                    sentenceDict = \
                        {'start': firstToken[START], 'end': lastToken[END]}
                    dicts.append( sentenceDict )
        # Note: We also need to invalidate the cached properties providing the
        # sentence information, as otherwise, if the properties have been
        # called already, new calls would return the old state of sentence
        # tokenization;
        for sentence_attrib in ['sentences', 'sentence_texts', 'sentence_spans', \
                                'sentence_starts', 'sentence_ends']:
            try:
                # invalidate the cache
                delattr(self, sentence_attrib)
            except AttributeError:
                # it's ok, if the cached property has not been called yet
                pass
        self[SENTENCES] = dicts
        return self
constant[Apply sentence tokenization to this Text instance. Creates ``sentences`` layer.
Automatically tokenizes paragraphs, if they are not already tokenized.
Also, if word tokenization has already been performed, tries to fit
the sentence tokenization into the existing word tokenization;
]
if <ast.UnaryOp object at 0x7da20c6c7af0> begin[:]
call[name[self].tokenize_paragraphs, parameter[]]
variable[tok] assign[=] name[self].__sentence_tokenizer
variable[text] assign[=] name[self].text
variable[dicts] assign[=] list[[]]
for taget[name[paragraph]] in starred[call[name[self]][name[PARAGRAPHS]]] begin[:]
<ast.Tuple object at 0x7da20c6c4880> assign[=] tuple[[<ast.Subscript object at 0x7da20c6c5870>, <ast.Subscript object at 0x7da20c6c7400>]]
variable[para_text] assign[=] call[name[text]][<ast.Slice object at 0x7da20c6c6ce0>]
if <ast.UnaryOp object at 0x7da20c6c66e0> begin[:]
variable[spans] assign[=] call[name[tok].span_tokenize, parameter[name[para_text]]]
for taget[tuple[[<ast.Name object at 0x7da18fe93b20>, <ast.Name object at 0x7da18fe92fb0>]]] in starred[name[spans]] begin[:]
call[name[dicts].append, parameter[dictionary[[<ast.Constant object at 0x7da18fe90ca0>, <ast.Constant object at 0x7da18fe91000>], [<ast.BinOp object at 0x7da18fe92aa0>, <ast.BinOp object at 0x7da18fe90910>]]]]
call[name[self]][name[SENTENCES]] assign[=] name[dicts]
return[name[self]] | keyword[def] identifier[tokenize_sentences] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[is_tagged] ( identifier[PARAGRAPHS] ):
identifier[self] . identifier[tokenize_paragraphs] ()
identifier[tok] = identifier[self] . identifier[__sentence_tokenizer]
identifier[text] = identifier[self] . identifier[text]
identifier[dicts] =[]
keyword[for] identifier[paragraph] keyword[in] identifier[self] [ identifier[PARAGRAPHS] ]:
identifier[para_start] , identifier[para_end] = identifier[paragraph] [ identifier[START] ], identifier[paragraph] [ identifier[END] ]
identifier[para_text] = identifier[text] [ identifier[para_start] : identifier[para_end] ]
keyword[if] keyword[not] identifier[self] . identifier[is_tagged] ( identifier[WORDS] ):
identifier[spans] = identifier[tok] . identifier[span_tokenize] ( identifier[para_text] )
keyword[for] identifier[start] , identifier[end] keyword[in] identifier[spans] :
identifier[dicts] . identifier[append] ({ literal[string] : identifier[start] + identifier[para_start] , literal[string] : identifier[end] + identifier[para_start] })
keyword[else] :
identifier[para_words] =[ identifier[w] keyword[for] identifier[w] keyword[in] identifier[self] [ identifier[WORDS] ] keyword[if] identifier[w] [ identifier[START] ]>= identifier[para_start] keyword[and] identifier[w] [ identifier[END] ]<= identifier[para_end] ]
identifier[para_word_texts] =[ identifier[w] [ identifier[TEXT] ] keyword[for] identifier[w] keyword[in] identifier[para_words] ]
keyword[try] :
identifier[sents] = identifier[tok] . identifier[sentences_from_tokens] ( identifier[para_word_texts] )
keyword[except] identifier[AttributeError] keyword[as] identifier[e] :
keyword[raise]
identifier[i] = literal[int]
keyword[for] identifier[sentence] keyword[in] identifier[sents] :
identifier[j] = literal[int]
identifier[firstToken] = keyword[None]
identifier[lastToken] = keyword[None]
keyword[while] identifier[i] < identifier[len] ( identifier[para_words] ):
keyword[if] identifier[para_words] [ identifier[i] ][ identifier[TEXT] ]!= identifier[sentence] [ identifier[j] ]:
keyword[raise] identifier[Exception] ( literal[string] , identifier[para_word_texts] , literal[string] , identifier[sentence] , literal[string] , identifier[i] , identifier[j] )
keyword[if] identifier[j] == literal[int] :
identifier[firstToken] = identifier[para_words] [ identifier[i] ]
keyword[if] identifier[j] == identifier[len] ( identifier[sentence] )- literal[int] :
identifier[lastToken] = identifier[para_words] [ identifier[i] ]
identifier[i] += literal[int]
keyword[break]
identifier[j] += literal[int]
identifier[i] += literal[int]
identifier[sentenceDict] ={ literal[string] : identifier[firstToken] [ identifier[START] ], literal[string] : identifier[lastToken] [ identifier[END] ]}
identifier[dicts] . identifier[append] ( identifier[sentenceDict] )
keyword[for] identifier[sentence_attrib] keyword[in] [ literal[string] , literal[string] , literal[string] , literal[string] , literal[string] ]:
keyword[try] :
identifier[delattr] ( identifier[self] , identifier[sentence_attrib] )
keyword[except] identifier[AttributeError] :
keyword[pass]
identifier[self] [ identifier[SENTENCES] ]= identifier[dicts]
keyword[return] identifier[self] | def tokenize_sentences(self):
"""Apply sentence tokenization to this Text instance. Creates ``sentences`` layer.
Automatically tokenizes paragraphs, if they are not already tokenized.
Also, if word tokenization has already been performed, tries to fit
the sentence tokenization into the existing word tokenization;
"""
if not self.is_tagged(PARAGRAPHS):
self.tokenize_paragraphs() # depends on [control=['if'], data=[]]
tok = self.__sentence_tokenizer
text = self.text
dicts = []
for paragraph in self[PARAGRAPHS]:
(para_start, para_end) = (paragraph[START], paragraph[END])
para_text = text[para_start:para_end]
if not self.is_tagged(WORDS):
# Non-hack variant: word tokenization has not been applied yet,
# so we proceed in natural order (first sentences, then words)
spans = tok.span_tokenize(para_text)
for (start, end) in spans:
dicts.append({'start': start + para_start, 'end': end + para_start}) # depends on [control=['for'], data=[]] # depends on [control=['if'], data=[]]
else:
# A hack variant: word tokenization has already been made, so
# we try to use existing word tokenization (first words, then sentences)
para_words = [w for w in self[WORDS] if w[START] >= para_start and w[END] <= para_end]
para_word_texts = [w[TEXT] for w in para_words]
try:
# Apply sentences_from_tokens method (if available)
sents = tok.sentences_from_tokens(para_word_texts) # depends on [control=['try'], data=[]]
except AttributeError as e:
raise # depends on [control=['except'], data=[]]
# Align result of the sentence tokenization with the initial word tokenization
# in order to determine the sentence boundaries
i = 0
for sentence in sents:
j = 0
firstToken = None
lastToken = None
while i < len(para_words):
if para_words[i][TEXT] != sentence[j]:
raise Exception('Error on aligning: ', para_word_texts, ' and ', sentence, ' at positions ', i, j) # depends on [control=['if'], data=[]]
if j == 0:
firstToken = para_words[i] # depends on [control=['if'], data=[]]
if j == len(sentence) - 1:
lastToken = para_words[i]
i += 1
break # depends on [control=['if'], data=[]]
j += 1
i += 1 # depends on [control=['while'], data=['i']]
sentenceDict = {'start': firstToken[START], 'end': lastToken[END]}
dicts.append(sentenceDict) # depends on [control=['for'], data=['sentence']]
# Note: We also need to invalidate the cached properties providing the
# sentence information, as otherwise, if the properties have been
# called already, new calls would return the old state of sentence
# tokenization;
for sentence_attrib in ['sentences', 'sentence_texts', 'sentence_spans', 'sentence_starts', 'sentence_ends']:
try:
# invalidate the cache
delattr(self, sentence_attrib) # depends on [control=['try'], data=[]]
except AttributeError:
# it's ok, if the cached property has not been called yet
pass # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['sentence_attrib']] # depends on [control=['for'], data=['paragraph']]
self[SENTENCES] = dicts
return self |
def correlation_plot(self, data):
    """Render a heatmap of pairwise Pearson correlation coefficients.

    Parameters
    ----------
    data : pd.DataFrame()
        Data to display.

    Returns
    -------
    matplotlib.figure
        Heatmap.

    """
    # CHECK: Add saved filename in result.json
    # Number figures with the class-level counter so successive plots
    # don't overwrite each other.
    figure_handle = plt.figure(Plot_Data.count)
    Plot_Data.count += 1
    sns.heatmap(data.corr())
    return figure_handle
constant[ Create heatmap of Pearson's correlation coefficient.
Parameters
----------
data : pd.DataFrame()
Data to display.
Returns
-------
matplotlib.figure
Heatmap.
]
variable[fig] assign[=] call[name[plt].figure, parameter[name[Plot_Data].count]]
variable[corr] assign[=] call[name[data].corr, parameter[]]
variable[ax] assign[=] call[name[sns].heatmap, parameter[name[corr]]]
<ast.AugAssign object at 0x7da2043465c0>
return[name[fig]] | keyword[def] identifier[correlation_plot] ( identifier[self] , identifier[data] ):
literal[string]
identifier[fig] = identifier[plt] . identifier[figure] ( identifier[Plot_Data] . identifier[count] )
identifier[corr] = identifier[data] . identifier[corr] ()
identifier[ax] = identifier[sns] . identifier[heatmap] ( identifier[corr] )
identifier[Plot_Data] . identifier[count] += literal[int]
keyword[return] identifier[fig] | def correlation_plot(self, data):
""" Create heatmap of Pearson's correlation coefficient.
Parameters
----------
data : pd.DataFrame()
Data to display.
Returns
-------
matplotlib.figure
Heatmap.
"""
# CHECK: Add saved filename in result.json
fig = plt.figure(Plot_Data.count)
corr = data.corr()
ax = sns.heatmap(corr)
Plot_Data.count += 1
return fig |
def project_process(index, start, end):
    """Compute the project-process metrics for the enriched GitHub issues index.

    Returns a dictionary with "bmi_metrics", "time_to_close_metrics",
    "time_to_close_review_metrics" and "patchsets_metrics" as keys and
    lists of the related Metrics instances as values; the time-to-close
    and patchsets lists are intentionally empty for this data source.

    :param index: index object
    :param start: start date to get the data from
    :param end: end date to get the data upto
    :return: dictionary with the value of the metrics
    """
    bmi = [BMIPR(index, start, end)]
    close_review = [
        DaysToClosePRAverage(index, start, end),
        DaysToClosePRMedian(index, start, end),
    ]
    return {
        "bmi_metrics": bmi,
        "time_to_close_metrics": [],
        "time_to_close_review_metrics": close_review,
        "patchsets_metrics": [],
    }
constant[Compute the metrics for the project process section of the enriched
github issues index.
Returns a dictionary containing "bmi_metrics", "time_to_close_metrics",
"time_to_close_review_metrics" and patchsets_metrics as the keys and
the related Metrics as the values.
time_to_close_title and time_to_close_review_title contain the file names
to be used for time_to_close_metrics and time_to_close_review_metrics
metrics data.
:param index: index object
:param start: start date to get the data from
:param end: end date to get the data upto
:return: dictionary with the value of the metrics
]
variable[results] assign[=] dictionary[[<ast.Constant object at 0x7da1b2607280>, <ast.Constant object at 0x7da1b26048e0>, <ast.Constant object at 0x7da1b2605f60>, <ast.Constant object at 0x7da1b2607ca0>], [<ast.List object at 0x7da1b2604730>, <ast.List object at 0x7da1b2606ad0>, <ast.List object at 0x7da1b2607d60>, <ast.List object at 0x7da1b2607250>]]
return[name[results]] | keyword[def] identifier[project_process] ( identifier[index] , identifier[start] , identifier[end] ):
literal[string]
identifier[results] ={
literal[string] :[ identifier[BMIPR] ( identifier[index] , identifier[start] , identifier[end] )],
literal[string] :[],
literal[string] :[ identifier[DaysToClosePRAverage] ( identifier[index] , identifier[start] , identifier[end] ),
identifier[DaysToClosePRMedian] ( identifier[index] , identifier[start] , identifier[end] )],
literal[string] :[]
}
keyword[return] identifier[results] | def project_process(index, start, end):
"""Compute the metrics for the project process section of the enriched
github issues index.
Returns a dictionary containing "bmi_metrics", "time_to_close_metrics",
"time_to_close_review_metrics" and patchsets_metrics as the keys and
the related Metrics as the values.
time_to_close_title and time_to_close_review_title contain the file names
to be used for time_to_close_metrics and time_to_close_review_metrics
metrics data.
:param index: index object
:param start: start date to get the data from
:param end: end date to get the data upto
:return: dictionary with the value of the metrics
"""
results = {'bmi_metrics': [BMIPR(index, start, end)], 'time_to_close_metrics': [], 'time_to_close_review_metrics': [DaysToClosePRAverage(index, start, end), DaysToClosePRMedian(index, start, end)], 'patchsets_metrics': []}
return results |
def clear_all_assignments(semester=None, pool=None, shifts=None):
    """Remove every assignee from the auto-assigned regular workshifts.

    Parameters
    ----------
    semester : workshift.models.Semester, optional
        Defaults to the semester marked current; if none (or several)
        are marked current, nothing is cleared and [] is returned.
    pool : workshift.models.WorkshiftPool, optional
        If set, grab workshifts from a specific pool. Otherwise, the primary
        workshift pool will be used.
    shifts : list of workshift.models.RegularWorkshift, optional
        Defaults to the pool's auto-assignable, non-manager shifts.
    """
    if semester is None:
        try:
            semester = Semester.objects.get(current=True)
        except (Semester.DoesNotExist, Semester.MultipleObjectsReturned):
            # No unambiguous current semester -> nothing to clear.
            return []

    if pool is None:
        pool = WorkshiftPool.objects.get(semester=semester, is_primary=True)

    if shifts is None:
        shifts = RegularWorkshift.objects.filter(
            pool=pool,
            is_manager_shift=False,
            workshift_type__assignment=WorkshiftType.AUTO_ASSIGN,
        )

    for workshift in shifts:
        workshift.current_assignees.clear()
constant[
Clears all regular workshift assignments.
Parameters
----------
semester : workshift.models.Semester, optional
pool : workshift.models.WorkshiftPool, optional
If set, grab workshifts from a specific pool. Otherwise, the primary
workshift pool will be used.
shifts : list of workshift.models.RegularWorkshift, optional
]
if compare[name[semester] is constant[None]] begin[:]
<ast.Try object at 0x7da207f03df0>
if compare[name[pool] is constant[None]] begin[:]
variable[pool] assign[=] call[name[WorkshiftPool].objects.get, parameter[]]
if compare[name[shifts] is constant[None]] begin[:]
variable[shifts] assign[=] call[name[RegularWorkshift].objects.filter, parameter[]]
for taget[name[shift]] in starred[name[shifts]] begin[:]
call[name[shift].current_assignees.clear, parameter[]] | keyword[def] identifier[clear_all_assignments] ( identifier[semester] = keyword[None] , identifier[pool] = keyword[None] , identifier[shifts] = keyword[None] ):
literal[string]
keyword[if] identifier[semester] keyword[is] keyword[None] :
keyword[try] :
identifier[semester] = identifier[Semester] . identifier[objects] . identifier[get] ( identifier[current] = keyword[True] )
keyword[except] ( identifier[Semester] . identifier[DoesNotExist] , identifier[Semester] . identifier[MultipleObjectsReturned] ):
keyword[return] []
keyword[if] identifier[pool] keyword[is] keyword[None] :
identifier[pool] = identifier[WorkshiftPool] . identifier[objects] . identifier[get] (
identifier[semester] = identifier[semester] ,
identifier[is_primary] = keyword[True] ,
)
keyword[if] identifier[shifts] keyword[is] keyword[None] :
identifier[shifts] = identifier[RegularWorkshift] . identifier[objects] . identifier[filter] (
identifier[pool] = identifier[pool] ,
identifier[is_manager_shift] = keyword[False] ,
identifier[workshift_type__assignment] = identifier[WorkshiftType] . identifier[AUTO_ASSIGN] ,
)
keyword[for] identifier[shift] keyword[in] identifier[shifts] :
identifier[shift] . identifier[current_assignees] . identifier[clear] () | def clear_all_assignments(semester=None, pool=None, shifts=None):
"""
Clears all regular workshift assignments.
Parameters
----------
semester : workshift.models.Semester, optional
pool : workshift.models.WorkshiftPool, optional
If set, grab workshifts from a specific pool. Otherwise, the primary
workshift pool will be used.
shifts : list of workshift.models.RegularWorkshift, optional
"""
if semester is None:
try:
semester = Semester.objects.get(current=True) # depends on [control=['try'], data=[]]
except (Semester.DoesNotExist, Semester.MultipleObjectsReturned):
return [] # depends on [control=['except'], data=[]] # depends on [control=['if'], data=['semester']]
if pool is None:
pool = WorkshiftPool.objects.get(semester=semester, is_primary=True) # depends on [control=['if'], data=['pool']]
if shifts is None:
shifts = RegularWorkshift.objects.filter(pool=pool, is_manager_shift=False, workshift_type__assignment=WorkshiftType.AUTO_ASSIGN) # depends on [control=['if'], data=['shifts']]
for shift in shifts:
shift.current_assignees.clear() # depends on [control=['for'], data=['shift']] |
def resolve(self, function):
    """
    Resolves a function exported by this module.

    @type  function: str or int
    @param function:
        str: Name of the function.
        int: Ordinal of the function.

    @rtype:  int
    @return: Memory address of the exported function in the process.
        Returns None on error.
    """

    # Unknown DLL filename, there's nothing we can do.
    filename = self.get_filename()
    if not filename:
        return None

    # If the DLL is already mapped locally, resolve the function.
    try:
        hlib = win32.GetModuleHandle(filename)
        address = win32.GetProcAddress(hlib, function)
    except WindowsError:

        # Load the DLL locally, resolve the function and unload it.
        # DONT_RESOLVE_DLL_REFERENCES skips running DllMain and loading
        # the DLL's dependencies - we only need its export table here,
        # not a fully initialized copy of the module.
        try:
            hlib = win32.LoadLibraryEx(filename,
                win32.DONT_RESOLVE_DLL_REFERENCES)
            try:
                address = win32.GetProcAddress(hlib, function)
            finally:
                # Always release the local reference, even if the
                # GetProcAddress lookup failed.
                win32.FreeLibrary(hlib)
        except WindowsError:
            return None

    # A NULL pointer means the function was not found.
    if address in (None, 0):
        return None

    # Compensate for DLL base relocations locally and remotely.
    # GetProcAddress returned an address inside OUR copy of the DLL
    # (based at hlib); rebase it onto the remote process' copy, which
    # is mapped at self.lpBaseOfDll.
    return address - hlib + self.lpBaseOfDll
constant[
Resolves a function exported by this module.
@type function: str or int
@param function:
str: Name of the function.
int: Ordinal of the function.
@rtype: int
@return: Memory address of the exported function in the process.
Returns None on error.
]
variable[filename] assign[=] call[name[self].get_filename, parameter[]]
if <ast.UnaryOp object at 0x7da1b06f97b0> begin[:]
return[constant[None]]
<ast.Try object at 0x7da1b06fada0>
if compare[name[address] in tuple[[<ast.Constant object at 0x7da1b06fa500>, <ast.Constant object at 0x7da1b06fab60>]]] begin[:]
return[constant[None]]
return[binary_operation[binary_operation[name[address] - name[hlib]] + name[self].lpBaseOfDll]] | keyword[def] identifier[resolve] ( identifier[self] , identifier[function] ):
literal[string]
identifier[filename] = identifier[self] . identifier[get_filename] ()
keyword[if] keyword[not] identifier[filename] :
keyword[return] keyword[None]
keyword[try] :
identifier[hlib] = identifier[win32] . identifier[GetModuleHandle] ( identifier[filename] )
identifier[address] = identifier[win32] . identifier[GetProcAddress] ( identifier[hlib] , identifier[function] )
keyword[except] identifier[WindowsError] :
keyword[try] :
identifier[hlib] = identifier[win32] . identifier[LoadLibraryEx] ( identifier[filename] ,
identifier[win32] . identifier[DONT_RESOLVE_DLL_REFERENCES] )
keyword[try] :
identifier[address] = identifier[win32] . identifier[GetProcAddress] ( identifier[hlib] , identifier[function] )
keyword[finally] :
identifier[win32] . identifier[FreeLibrary] ( identifier[hlib] )
keyword[except] identifier[WindowsError] :
keyword[return] keyword[None]
keyword[if] identifier[address] keyword[in] ( keyword[None] , literal[int] ):
keyword[return] keyword[None]
keyword[return] identifier[address] - identifier[hlib] + identifier[self] . identifier[lpBaseOfDll] | def resolve(self, function):
"""
Resolves a function exported by this module.
@type function: str or int
@param function:
str: Name of the function.
int: Ordinal of the function.
@rtype: int
@return: Memory address of the exported function in the process.
Returns None on error.
"""
# Unknown DLL filename, there's nothing we can do.
filename = self.get_filename()
if not filename:
return None # depends on [control=['if'], data=[]]
# If the DLL is already mapped locally, resolve the function.
try:
hlib = win32.GetModuleHandle(filename)
address = win32.GetProcAddress(hlib, function) # depends on [control=['try'], data=[]]
except WindowsError:
# Load the DLL locally, resolve the function and unload it.
try:
hlib = win32.LoadLibraryEx(filename, win32.DONT_RESOLVE_DLL_REFERENCES)
try:
address = win32.GetProcAddress(hlib, function) # depends on [control=['try'], data=[]]
finally:
win32.FreeLibrary(hlib) # depends on [control=['try'], data=[]]
except WindowsError:
return None # depends on [control=['except'], data=[]] # depends on [control=['except'], data=[]]
# A NULL pointer means the function was not found.
if address in (None, 0):
return None # depends on [control=['if'], data=[]]
# Compensate for DLL base relocations locally and remotely.
return address - hlib + self.lpBaseOfDll |
def parse_netmhc3_stdout(
        stdout,
        prediction_method_name="netmhc3",
        sequence_key_mapping=None):
    """
    Parse the output format for NetMHC 3.x, which looks like:

    ----------------------------------------------------------------------------------------------------
    pos    peptide      logscore affinity(nM) Bind Level    Protein Name     Allele
    ----------------------------------------------------------------------------------------------------
    0  SIINKFELL    0.437           441  WB              A1 HLA-A02:01
    --------------------------------------------------------------------------------------------------
    0  SIINKFFFQ    0.206          5411                  A2 HLA-A02:01
    1  IINKFFFQQ    0.128         12544                  A2 HLA-A02:01
    2  INKFFFQQQ    0.046         30406                  A2 HLA-A02:01
    3  NKFFFQQQQ    0.050         29197                  A2 HLA-A02:01
    --------------------------------------------------------------------------------------------------
    """
    # Column positions in the NetMHC 3.x table above; the optional
    # "Bind Level" strings (WB/SB) shift nothing but must be skipped
    # when they appear in column 4.
    column_layout = dict(
        key_index=4,
        offset_index=0,
        peptide_index=1,
        allele_index=5,
        ic50_index=3,
        rank_index=None,
        log_ic50_index=2,
    )
    return parse_stdout(
        stdout=stdout,
        prediction_method_name=prediction_method_name,
        sequence_key_mapping=sequence_key_mapping,
        ignored_value_indices={"WB": 4, "SB": 4},
        **column_layout)
constant[
Parse the output format for NetMHC 3.x, which looks like:
----------------------------------------------------------------------------------------------------
pos peptide logscore affinity(nM) Bind Level Protein Name Allele
----------------------------------------------------------------------------------------------------
0 SIINKFELL 0.437 441 WB A1 HLA-A02:01
--------------------------------------------------------------------------------------------------
0 SIINKFFFQ 0.206 5411 A2 HLA-A02:01
1 IINKFFFQQ 0.128 12544 A2 HLA-A02:01
2 INKFFFQQQ 0.046 30406 A2 HLA-A02:01
3 NKFFFQQQQ 0.050 29197 A2 HLA-A02:01
--------------------------------------------------------------------------------------------------
]
return[call[name[parse_stdout], parameter[]]] | keyword[def] identifier[parse_netmhc3_stdout] (
identifier[stdout] ,
identifier[prediction_method_name] = literal[string] ,
identifier[sequence_key_mapping] = keyword[None] ):
literal[string]
keyword[return] identifier[parse_stdout] (
identifier[stdout] = identifier[stdout] ,
identifier[prediction_method_name] = identifier[prediction_method_name] ,
identifier[sequence_key_mapping] = identifier[sequence_key_mapping] ,
identifier[key_index] = literal[int] ,
identifier[offset_index] = literal[int] ,
identifier[peptide_index] = literal[int] ,
identifier[allele_index] = literal[int] ,
identifier[ic50_index] = literal[int] ,
identifier[rank_index] = keyword[None] ,
identifier[log_ic50_index] = literal[int] ,
identifier[ignored_value_indices] ={ literal[string] : literal[int] , literal[string] : literal[int] }) | def parse_netmhc3_stdout(stdout, prediction_method_name='netmhc3', sequence_key_mapping=None):
"""
Parse the output format for NetMHC 3.x, which looks like:
----------------------------------------------------------------------------------------------------
pos peptide logscore affinity(nM) Bind Level Protein Name Allele
----------------------------------------------------------------------------------------------------
0 SIINKFELL 0.437 441 WB A1 HLA-A02:01
--------------------------------------------------------------------------------------------------
0 SIINKFFFQ 0.206 5411 A2 HLA-A02:01
1 IINKFFFQQ 0.128 12544 A2 HLA-A02:01
2 INKFFFQQQ 0.046 30406 A2 HLA-A02:01
3 NKFFFQQQQ 0.050 29197 A2 HLA-A02:01
--------------------------------------------------------------------------------------------------
"""
return parse_stdout(stdout=stdout, prediction_method_name=prediction_method_name, sequence_key_mapping=sequence_key_mapping, key_index=4, offset_index=0, peptide_index=1, allele_index=5, ic50_index=3, rank_index=None, log_ic50_index=2, ignored_value_indices={'WB': 4, 'SB': 4}) |
def updateData(self, axeskey, x, y):
    """Replaces the currently displayed data

    :param axeskey: name of data plot to update. Valid options are 'stim' or 'response'
    :type axeskey: str
    :param x: index values associated with y to plot
    :type x: numpy.ndarray
    :param y: values to plot at x
    :type y: numpy.ndarray
    """
    if axeskey == 'stim':
        self.stimPlot.setData(x, y)
        # Manually trigger a range change so the signal gets re-placed.
        self.rangeChange(self, self.viewRange())
    elif axeskey == 'response':
        self.clearTraces()
        trace = y
        if self._traceUnit == 'A':
            # Convert raw values to amperes.
            trace = trace * self._ampScalar
        if self.zeroAction.isChecked():
            # Subtract the early-sample baseline so traces start at zero.
            trace = trace - np.mean(trace[5:25])
        self.tracePlot.setData(x, trace * self._polarity)
constant[Replaces the currently displayed data
:param axeskey: name of data plot to update. Valid options are 'stim' or 'response'
:type axeskey: str
:param x: index values associated with y to plot
:type x: numpy.ndarray
:param y: values to plot at x
:type y: numpy.ndarray
]
if compare[name[axeskey] equal[==] constant[stim]] begin[:]
call[name[self].stimPlot.setData, parameter[name[x], name[y]]]
variable[ranges] assign[=] call[name[self].viewRange, parameter[]]
call[name[self].rangeChange, parameter[name[self], name[ranges]]]
if compare[name[axeskey] equal[==] constant[response]] begin[:]
call[name[self].clearTraces, parameter[]]
if compare[name[self]._traceUnit equal[==] constant[A]] begin[:]
variable[y] assign[=] binary_operation[name[y] * name[self]._ampScalar]
if call[name[self].zeroAction.isChecked, parameter[]] begin[:]
variable[start_avg] assign[=] call[name[np].mean, parameter[call[name[y]][<ast.Slice object at 0x7da18dc055a0>]]]
variable[y] assign[=] binary_operation[name[y] - name[start_avg]]
call[name[self].tracePlot.setData, parameter[name[x], binary_operation[name[y] * name[self]._polarity]]] | keyword[def] identifier[updateData] ( identifier[self] , identifier[axeskey] , identifier[x] , identifier[y] ):
literal[string]
keyword[if] identifier[axeskey] == literal[string] :
identifier[self] . identifier[stimPlot] . identifier[setData] ( identifier[x] , identifier[y] )
identifier[ranges] = identifier[self] . identifier[viewRange] ()
identifier[self] . identifier[rangeChange] ( identifier[self] , identifier[ranges] )
keyword[if] identifier[axeskey] == literal[string] :
identifier[self] . identifier[clearTraces] ()
keyword[if] identifier[self] . identifier[_traceUnit] == literal[string] :
identifier[y] = identifier[y] * identifier[self] . identifier[_ampScalar]
keyword[if] identifier[self] . identifier[zeroAction] . identifier[isChecked] ():
identifier[start_avg] = identifier[np] . identifier[mean] ( identifier[y] [ literal[int] : literal[int] ])
identifier[y] = identifier[y] - identifier[start_avg]
identifier[self] . identifier[tracePlot] . identifier[setData] ( identifier[x] , identifier[y] * identifier[self] . identifier[_polarity] ) | def updateData(self, axeskey, x, y):
"""Replaces the currently displayed data
:param axeskey: name of data plot to update. Valid options are 'stim' or 'response'
:type axeskey: str
:param x: index values associated with y to plot
:type x: numpy.ndarray
:param y: values to plot at x
:type y: numpy.ndarray
"""
if axeskey == 'stim':
self.stimPlot.setData(x, y)
# call manually to ajust placement of signal
ranges = self.viewRange()
self.rangeChange(self, ranges) # depends on [control=['if'], data=[]]
if axeskey == 'response':
self.clearTraces()
if self._traceUnit == 'A':
y = y * self._ampScalar # depends on [control=['if'], data=[]]
if self.zeroAction.isChecked():
start_avg = np.mean(y[5:25])
y = y - start_avg # depends on [control=['if'], data=[]]
self.tracePlot.setData(x, y * self._polarity) # depends on [control=['if'], data=[]] |
def writeSentence(self, cmd, *words):
    """
    Encode a sentence and write it to the transport.

    :param cmd: Command word.
    :param words: Aditional words.
    """
    # Encode first so an encoding failure is raised before anything is logged.
    payload = self.encodeSentence(cmd, *words)
    self.log('<---', cmd, *words)
    self.transport.write(payload)
constant[
Write encoded sentence.
:param cmd: Command word.
:param words: Aditional words.
]
variable[encoded] assign[=] call[name[self].encodeSentence, parameter[name[cmd], <ast.Starred object at 0x7da1b1080ee0>]]
call[name[self].log, parameter[constant[<---], name[cmd], <ast.Starred object at 0x7da1b10801f0>]]
call[name[self].transport.write, parameter[name[encoded]]] | keyword[def] identifier[writeSentence] ( identifier[self] , identifier[cmd] ,* identifier[words] ):
literal[string]
identifier[encoded] = identifier[self] . identifier[encodeSentence] ( identifier[cmd] ,* identifier[words] )
identifier[self] . identifier[log] ( literal[string] , identifier[cmd] ,* identifier[words] )
identifier[self] . identifier[transport] . identifier[write] ( identifier[encoded] ) | def writeSentence(self, cmd, *words):
"""
Write encoded sentence.
:param cmd: Command word.
:param words: Aditional words.
"""
encoded = self.encodeSentence(cmd, *words)
self.log('<---', cmd, *words)
self.transport.write(encoded) |
def Release(self):
    """Remove the lock.

    Note that this only resets the lock if we actually hold it since
    the UPDATE matches on lock_expiration == self.expires and
    lock_owner == self.lock_token; a lock taken over by someone else
    is left untouched.
    """
    if not self.locked:
        return
    query = ("UPDATE locks SET lock_expiration=0, lock_owner=0 "
             "WHERE lock_expiration=%s "
             "AND lock_owner=%s "
             "AND subject_hash=unhex(md5(%s))")
    self.store.ExecuteQuery(
        query, [self.expires, self.lock_token, self.subject])
    self.locked = False
constant[Remove the lock.
Note that this only resets the lock if we actually hold it since
lock_expiration == self.expires and lock_owner = self.lock_token.
]
if name[self].locked begin[:]
variable[query] assign[=] constant[UPDATE locks SET lock_expiration=0, lock_owner=0 WHERE lock_expiration=%s AND lock_owner=%s AND subject_hash=unhex(md5(%s))]
variable[args] assign[=] list[[<ast.Attribute object at 0x7da1b1c3f9d0>, <ast.Attribute object at 0x7da1b1c1b5b0>, <ast.Attribute object at 0x7da1b1c1a3b0>]]
call[name[self].store.ExecuteQuery, parameter[name[query], name[args]]]
name[self].locked assign[=] constant[False] | keyword[def] identifier[Release] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[locked] :
identifier[query] =( literal[string]
literal[string]
literal[string]
literal[string] )
identifier[args] =[ identifier[self] . identifier[expires] , identifier[self] . identifier[lock_token] , identifier[self] . identifier[subject] ]
identifier[self] . identifier[store] . identifier[ExecuteQuery] ( identifier[query] , identifier[args] )
identifier[self] . identifier[locked] = keyword[False] | def Release(self):
"""Remove the lock.
Note that this only resets the lock if we actually hold it since
lock_expiration == self.expires and lock_owner = self.lock_token.
"""
if self.locked:
query = 'UPDATE locks SET lock_expiration=0, lock_owner=0 WHERE lock_expiration=%s AND lock_owner=%s AND subject_hash=unhex(md5(%s))'
args = [self.expires, self.lock_token, self.subject]
self.store.ExecuteQuery(query, args)
self.locked = False # depends on [control=['if'], data=[]] |
def getCompletingSwarms(self):
    """Return the list of all completing swarms.

    Parameters:
    ---------------------------------------------------------------------
    retval:     list of swarm Ids whose status is 'completing'
    """
    # dict.items() works on both Python 2 and Python 3; iteritems() was
    # removed in Python 3.
    return [swarmId
            for swarmId, info in self._state['swarms'].items()
            if info['status'] == 'completing']
constant[Return the list of all completing swarms.
Parameters:
---------------------------------------------------------------------
retval: list of active swarm Ids
]
variable[swarmIds] assign[=] list[[]]
for taget[tuple[[<ast.Name object at 0x7da20c6c54e0>, <ast.Name object at 0x7da20c6c6740>]]] in starred[call[call[name[self]._state][constant[swarms]].iteritems, parameter[]]] begin[:]
if compare[call[name[info]][constant[status]] equal[==] constant[completing]] begin[:]
call[name[swarmIds].append, parameter[name[swarmId]]]
return[name[swarmIds]] | keyword[def] identifier[getCompletingSwarms] ( identifier[self] ):
literal[string]
identifier[swarmIds] =[]
keyword[for] identifier[swarmId] , identifier[info] keyword[in] identifier[self] . identifier[_state] [ literal[string] ]. identifier[iteritems] ():
keyword[if] identifier[info] [ literal[string] ]== literal[string] :
identifier[swarmIds] . identifier[append] ( identifier[swarmId] )
keyword[return] identifier[swarmIds] | def getCompletingSwarms(self):
"""Return the list of all completing swarms.
Parameters:
---------------------------------------------------------------------
retval: list of active swarm Ids
"""
swarmIds = []
for (swarmId, info) in self._state['swarms'].iteritems():
if info['status'] == 'completing':
swarmIds.append(swarmId) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return swarmIds |
def fromstring(cls, dis_string):
    """Create a DisRSTTree instance from a string containing a *.dis parse.

    The parser backend only accepts file paths, so the string is written
    to a named temporary file which is removed again afterwards.
    """
    # delete=False so the file can be re-opened by name while it exists
    # (an open NamedTemporaryFile cannot be opened a second time on
    # Windows); mode='w' so writing a text string works on Python 3,
    # where the default 'w+b' mode would reject str input.
    temp = tempfile.NamedTemporaryFile(mode='w', delete=False)
    try:
        temp.write(dis_string)
        temp.close()
        return cls(dis_filepath=temp.name)
    finally:
        # Remove the temp file even if parsing raised.
        temp.close()
        os.unlink(temp.name)
constant[Create a DisRSTTree instance from a string containing a *.dis parse.]
variable[temp] assign[=] call[name[tempfile].NamedTemporaryFile, parameter[]]
call[name[temp].write, parameter[name[dis_string]]]
call[name[temp].close, parameter[]]
variable[dis_tree] assign[=] call[name[cls], parameter[]]
call[name[os].unlink, parameter[name[temp].name]]
return[name[dis_tree]] | keyword[def] identifier[fromstring] ( identifier[cls] , identifier[dis_string] ):
literal[string]
identifier[temp] = identifier[tempfile] . identifier[NamedTemporaryFile] ( identifier[delete] = keyword[False] )
identifier[temp] . identifier[write] ( identifier[dis_string] )
identifier[temp] . identifier[close] ()
identifier[dis_tree] = identifier[cls] ( identifier[dis_filepath] = identifier[temp] . identifier[name] )
identifier[os] . identifier[unlink] ( identifier[temp] . identifier[name] )
keyword[return] identifier[dis_tree] | def fromstring(cls, dis_string):
"""Create a DisRSTTree instance from a string containing a *.dis parse."""
temp = tempfile.NamedTemporaryFile(delete=False)
temp.write(dis_string)
temp.close()
dis_tree = cls(dis_filepath=temp.name)
os.unlink(temp.name)
return dis_tree |
def parse_uinput_mapping(name, mapping):
    """Parses a dict of mapping options.

    Keys are routed by prefix: BTN_/KEY_ -> buttons, ABS_ -> axes,
    REL_ -> mouse movement, MOUSE_ -> mouse options. Keys with any
    other prefix are ignored.
    """
    axes = {}
    buttons = {}
    mouse = {}
    mouse_options = {}
    routes = (
        (("BTN_", "KEY_"), buttons),
        (("ABS_",), axes),
        (("REL_",), mouse),
        (("MOUSE_",), mouse_options),
    )

    for key, attr in mapping.items():
        key = key.upper()
        for prefixes, target in routes:
            if key.startswith(prefixes):
                target[key] = attr
                break

    create_mapping(name, "ds4drv custom mapping ({0})".format(name),
                   axes=axes, buttons=buttons,
                   mouse=mouse, mouse_options=mouse_options)
constant[Parses a dict of mapping options.]
<ast.Tuple object at 0x7da207f9a410> assign[=] tuple[[<ast.Dict object at 0x7da207f98940>, <ast.Dict object at 0x7da207f98df0>, <ast.Dict object at 0x7da207f9a980>, <ast.Dict object at 0x7da207f9b340>]]
variable[description] assign[=] call[constant[ds4drv custom mapping ({0})].format, parameter[name[name]]]
for taget[tuple[[<ast.Name object at 0x7da207f98a00>, <ast.Name object at 0x7da207f028c0>]]] in starred[call[name[mapping].items, parameter[]]] begin[:]
variable[key] assign[=] call[name[key].upper, parameter[]]
if <ast.BoolOp object at 0x7da207f02a70> begin[:]
call[name[buttons]][name[key]] assign[=] name[attr]
call[name[create_mapping], parameter[name[name], name[description]]] | keyword[def] identifier[parse_uinput_mapping] ( identifier[name] , identifier[mapping] ):
literal[string]
identifier[axes] , identifier[buttons] , identifier[mouse] , identifier[mouse_options] ={},{},{},{}
identifier[description] = literal[string] . identifier[format] ( identifier[name] )
keyword[for] identifier[key] , identifier[attr] keyword[in] identifier[mapping] . identifier[items] ():
identifier[key] = identifier[key] . identifier[upper] ()
keyword[if] identifier[key] . identifier[startswith] ( literal[string] ) keyword[or] identifier[key] . identifier[startswith] ( literal[string] ):
identifier[buttons] [ identifier[key] ]= identifier[attr]
keyword[elif] identifier[key] . identifier[startswith] ( literal[string] ):
identifier[axes] [ identifier[key] ]= identifier[attr]
keyword[elif] identifier[key] . identifier[startswith] ( literal[string] ):
identifier[mouse] [ identifier[key] ]= identifier[attr]
keyword[elif] identifier[key] . identifier[startswith] ( literal[string] ):
identifier[mouse_options] [ identifier[key] ]= identifier[attr]
identifier[create_mapping] ( identifier[name] , identifier[description] , identifier[axes] = identifier[axes] , identifier[buttons] = identifier[buttons] ,
identifier[mouse] = identifier[mouse] , identifier[mouse_options] = identifier[mouse_options] ) | def parse_uinput_mapping(name, mapping):
"""Parses a dict of mapping options."""
(axes, buttons, mouse, mouse_options) = ({}, {}, {}, {})
description = 'ds4drv custom mapping ({0})'.format(name)
for (key, attr) in mapping.items():
key = key.upper()
if key.startswith('BTN_') or key.startswith('KEY_'):
buttons[key] = attr # depends on [control=['if'], data=[]]
elif key.startswith('ABS_'):
axes[key] = attr # depends on [control=['if'], data=[]]
elif key.startswith('REL_'):
mouse[key] = attr # depends on [control=['if'], data=[]]
elif key.startswith('MOUSE_'):
mouse_options[key] = attr # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
create_mapping(name, description, axes=axes, buttons=buttons, mouse=mouse, mouse_options=mouse_options) |
def process_end(self, marker):
    """ Reads and processes DONE/DONEINPROC/DONEPROC streams
    Stream format urls:
    - DONE: http://msdn.microsoft.com/en-us/library/dd340421.aspx
    - DONEINPROC: http://msdn.microsoft.com/en-us/library/dd340553.aspx
    - DONEPROC: http://msdn.microsoft.com/en-us/library/dd340753.aspx
    :param marker: Can be TDS_DONE_TOKEN or TDS_DONEINPROC_TOKEN or TDS_DONEPROC_TOKEN
    """
    # Human-readable token names used only in the log message below.
    code_to_str = {
        tds_base.TDS_DONE_TOKEN: 'DONE',
        tds_base.TDS_DONEINPROC_TOKEN: 'DONEINPROC',
        tds_base.TDS_DONEPROC_TOKEN: 'DONEPROC',
    }
    # Remember which DONE variant ended the result set; no further rows follow.
    self.end_marker = marker
    self.more_rows = False
    r = self._reader
    # DONE stream layout: status (2 bytes), cur_cmd (2 bytes), row count.
    status = r.get_usmallint()
    r.get_usmallint()  # cur_cmd
    # Decode the individual status bits.
    more_results = status & tds_base.TDS_DONE_MORE_RESULTS != 0
    was_cancelled = status & tds_base.TDS_DONE_CANCELLED != 0
    done_count_valid = status & tds_base.TDS_DONE_COUNT != 0
    if self.res_info:
        self.res_info.more_results = more_results
    # TDS 7.2+ sends the affected-row count as 8 bytes, older versions as 4.
    rows_affected = r.get_int8() if tds_base.IS_TDS72_PLUS(self) else r.get_int()
    self.log_response_message("got {} message, more_res={}, cancelled={}, rows_affected={}".format(
        code_to_str[marker], more_results, was_cancelled, rows_affected))
    # The connection goes back to idle once the response is fully consumed:
    # either the server acknowledged a cancel, or this was the final DONE of
    # a response we did not try to cancel.
    if was_cancelled or (not more_results and not self.in_cancel):
        self.in_cancel = False
        self.set_state(tds_base.TDS_IDLE)
    # Only trust the row count when the server flagged it as valid.
    if done_count_valid:
        self.rows_affected = rows_affected
    else:
        self.rows_affected = -1
    self.done_flags = status
    # Surface a server-side error unless it is just the echo of our cancel.
    if self.done_flags & tds_base.TDS_DONE_ERROR and not was_cancelled and not self.in_cancel:
        self.raise_db_exception()
constant[ Reads and processes DONE/DONEINPROC/DONEPROC streams
Stream format urls:
- DONE: http://msdn.microsoft.com/en-us/library/dd340421.aspx
- DONEINPROC: http://msdn.microsoft.com/en-us/library/dd340553.aspx
- DONEPROC: http://msdn.microsoft.com/en-us/library/dd340753.aspx
:param marker: Can be TDS_DONE_TOKEN or TDS_DONEINPROC_TOKEN or TDS_DONEPROC_TOKEN
]
variable[code_to_str] assign[=] dictionary[[<ast.Attribute object at 0x7da1b0555ab0>, <ast.Attribute object at 0x7da1b0554610>, <ast.Attribute object at 0x7da1b0555060>], [<ast.Constant object at 0x7da1b0555ea0>, <ast.Constant object at 0x7da1b0554700>, <ast.Constant object at 0x7da1b0556e90>]]
name[self].end_marker assign[=] name[marker]
name[self].more_rows assign[=] constant[False]
variable[r] assign[=] name[self]._reader
variable[status] assign[=] call[name[r].get_usmallint, parameter[]]
call[name[r].get_usmallint, parameter[]]
variable[more_results] assign[=] compare[binary_operation[name[status] <ast.BitAnd object at 0x7da2590d6b60> name[tds_base].TDS_DONE_MORE_RESULTS] not_equal[!=] constant[0]]
variable[was_cancelled] assign[=] compare[binary_operation[name[status] <ast.BitAnd object at 0x7da2590d6b60> name[tds_base].TDS_DONE_CANCELLED] not_equal[!=] constant[0]]
variable[done_count_valid] assign[=] compare[binary_operation[name[status] <ast.BitAnd object at 0x7da2590d6b60> name[tds_base].TDS_DONE_COUNT] not_equal[!=] constant[0]]
if name[self].res_info begin[:]
name[self].res_info.more_results assign[=] name[more_results]
variable[rows_affected] assign[=] <ast.IfExp object at 0x7da1b0555d50>
call[name[self].log_response_message, parameter[call[constant[got {} message, more_res={}, cancelled={}, rows_affected={}].format, parameter[call[name[code_to_str]][name[marker]], name[more_results], name[was_cancelled], name[rows_affected]]]]]
if <ast.BoolOp object at 0x7da1b0536110> begin[:]
name[self].in_cancel assign[=] constant[False]
call[name[self].set_state, parameter[name[tds_base].TDS_IDLE]]
if name[done_count_valid] begin[:]
name[self].rows_affected assign[=] name[rows_affected]
name[self].done_flags assign[=] name[status]
if <ast.BoolOp object at 0x7da1b05391b0> begin[:]
call[name[self].raise_db_exception, parameter[]] | keyword[def] identifier[process_end] ( identifier[self] , identifier[marker] ):
literal[string]
identifier[code_to_str] ={
identifier[tds_base] . identifier[TDS_DONE_TOKEN] : literal[string] ,
identifier[tds_base] . identifier[TDS_DONEINPROC_TOKEN] : literal[string] ,
identifier[tds_base] . identifier[TDS_DONEPROC_TOKEN] : literal[string] ,
}
identifier[self] . identifier[end_marker] = identifier[marker]
identifier[self] . identifier[more_rows] = keyword[False]
identifier[r] = identifier[self] . identifier[_reader]
identifier[status] = identifier[r] . identifier[get_usmallint] ()
identifier[r] . identifier[get_usmallint] ()
identifier[more_results] = identifier[status] & identifier[tds_base] . identifier[TDS_DONE_MORE_RESULTS] != literal[int]
identifier[was_cancelled] = identifier[status] & identifier[tds_base] . identifier[TDS_DONE_CANCELLED] != literal[int]
identifier[done_count_valid] = identifier[status] & identifier[tds_base] . identifier[TDS_DONE_COUNT] != literal[int]
keyword[if] identifier[self] . identifier[res_info] :
identifier[self] . identifier[res_info] . identifier[more_results] = identifier[more_results]
identifier[rows_affected] = identifier[r] . identifier[get_int8] () keyword[if] identifier[tds_base] . identifier[IS_TDS72_PLUS] ( identifier[self] ) keyword[else] identifier[r] . identifier[get_int] ()
identifier[self] . identifier[log_response_message] ( literal[string] . identifier[format] (
identifier[code_to_str] [ identifier[marker] ], identifier[more_results] , identifier[was_cancelled] , identifier[rows_affected] ))
keyword[if] identifier[was_cancelled] keyword[or] ( keyword[not] identifier[more_results] keyword[and] keyword[not] identifier[self] . identifier[in_cancel] ):
identifier[self] . identifier[in_cancel] = keyword[False]
identifier[self] . identifier[set_state] ( identifier[tds_base] . identifier[TDS_IDLE] )
keyword[if] identifier[done_count_valid] :
identifier[self] . identifier[rows_affected] = identifier[rows_affected]
keyword[else] :
identifier[self] . identifier[rows_affected] =- literal[int]
identifier[self] . identifier[done_flags] = identifier[status]
keyword[if] identifier[self] . identifier[done_flags] & identifier[tds_base] . identifier[TDS_DONE_ERROR] keyword[and] keyword[not] identifier[was_cancelled] keyword[and] keyword[not] identifier[self] . identifier[in_cancel] :
identifier[self] . identifier[raise_db_exception] () | def process_end(self, marker):
""" Reads and processes DONE/DONEINPROC/DONEPROC streams
Stream format urls:
- DONE: http://msdn.microsoft.com/en-us/library/dd340421.aspx
- DONEINPROC: http://msdn.microsoft.com/en-us/library/dd340553.aspx
- DONEPROC: http://msdn.microsoft.com/en-us/library/dd340753.aspx
:param marker: Can be TDS_DONE_TOKEN or TDS_DONEINPROC_TOKEN or TDS_DONEPROC_TOKEN
"""
code_to_str = {tds_base.TDS_DONE_TOKEN: 'DONE', tds_base.TDS_DONEINPROC_TOKEN: 'DONEINPROC', tds_base.TDS_DONEPROC_TOKEN: 'DONEPROC'}
self.end_marker = marker
self.more_rows = False
r = self._reader
status = r.get_usmallint()
r.get_usmallint() # cur_cmd
more_results = status & tds_base.TDS_DONE_MORE_RESULTS != 0
was_cancelled = status & tds_base.TDS_DONE_CANCELLED != 0
done_count_valid = status & tds_base.TDS_DONE_COUNT != 0
if self.res_info:
self.res_info.more_results = more_results # depends on [control=['if'], data=[]]
rows_affected = r.get_int8() if tds_base.IS_TDS72_PLUS(self) else r.get_int()
self.log_response_message('got {} message, more_res={}, cancelled={}, rows_affected={}'.format(code_to_str[marker], more_results, was_cancelled, rows_affected))
if was_cancelled or (not more_results and (not self.in_cancel)):
self.in_cancel = False
self.set_state(tds_base.TDS_IDLE) # depends on [control=['if'], data=[]]
if done_count_valid:
self.rows_affected = rows_affected # depends on [control=['if'], data=[]]
else:
self.rows_affected = -1
self.done_flags = status
if self.done_flags & tds_base.TDS_DONE_ERROR and (not was_cancelled) and (not self.in_cancel):
self.raise_db_exception() # depends on [control=['if'], data=[]] |
def upgrade(self, only):
    """Remove all package lists with changelog and checksums files
    and create lists again"""
    # An explicit repository selection overrides the configured list.
    repositories = only if only else self.meta.repositories
    for repo in repositories:
        # Drop the cached changelog for this repository, if present.
        changelog = "{0}{1}{2}".format(self.log_path, repo, "/ChangeLog.txt")
        if os.path.isfile(changelog):
            os.remove(changelog)
        # Wipe every cached file and sub-directory of the repository store.
        repo_dir = self.lib_path + "{0}_repo/".format(repo)
        if os.path.isdir(repo_dir):
            for entry in os.listdir(repo_dir):
                entry_path = "{0}{1}_repo/{2}".format(self.lib_path, repo, entry)
                if os.path.isfile(entry_path):
                    os.remove(entry_path)
                elif os.path.isdir(entry_path):
                    shutil.rmtree(entry_path)
    # Re-download and rebuild the package lists.
    Update().repository(only)
constant[Remove all package lists with changelog and checksums files
and create lists again]
variable[repositories] assign[=] name[self].meta.repositories
if name[only] begin[:]
variable[repositories] assign[=] name[only]
for taget[name[repo]] in starred[name[repositories]] begin[:]
variable[changelogs] assign[=] call[constant[{0}{1}{2}].format, parameter[name[self].log_path, name[repo], constant[/ChangeLog.txt]]]
if call[name[os].path.isfile, parameter[name[changelogs]]] begin[:]
call[name[os].remove, parameter[name[changelogs]]]
if call[name[os].path.isdir, parameter[binary_operation[name[self].lib_path + call[constant[{0}_repo/].format, parameter[name[repo]]]]]] begin[:]
for taget[name[f]] in starred[call[name[os].listdir, parameter[binary_operation[name[self].lib_path + call[constant[{0}_repo/].format, parameter[name[repo]]]]]]] begin[:]
variable[files] assign[=] call[constant[{0}{1}_repo/{2}].format, parameter[name[self].lib_path, name[repo], name[f]]]
if call[name[os].path.isfile, parameter[name[files]]] begin[:]
call[name[os].remove, parameter[name[files]]]
call[call[name[Update], parameter[]].repository, parameter[name[only]]] | keyword[def] identifier[upgrade] ( identifier[self] , identifier[only] ):
literal[string]
identifier[repositories] = identifier[self] . identifier[meta] . identifier[repositories]
keyword[if] identifier[only] :
identifier[repositories] = identifier[only]
keyword[for] identifier[repo] keyword[in] identifier[repositories] :
identifier[changelogs] = literal[string] . identifier[format] ( identifier[self] . identifier[log_path] , identifier[repo] ,
literal[string] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[changelogs] ):
identifier[os] . identifier[remove] ( identifier[changelogs] )
keyword[if] identifier[os] . identifier[path] . identifier[isdir] ( identifier[self] . identifier[lib_path] + literal[string] . identifier[format] ( identifier[repo] )):
keyword[for] identifier[f] keyword[in] ( identifier[os] . identifier[listdir] ( identifier[self] . identifier[lib_path] + literal[string] . identifier[format] (
identifier[repo] ))):
identifier[files] = literal[string] . identifier[format] ( identifier[self] . identifier[lib_path] , identifier[repo] , identifier[f] )
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[files] ):
identifier[os] . identifier[remove] ( identifier[files] )
keyword[elif] identifier[os] . identifier[path] . identifier[isdir] ( identifier[files] ):
identifier[shutil] . identifier[rmtree] ( identifier[files] )
identifier[Update] (). identifier[repository] ( identifier[only] ) | def upgrade(self, only):
"""Remove all package lists with changelog and checksums files
and create lists again"""
repositories = self.meta.repositories
if only:
repositories = only # depends on [control=['if'], data=[]]
for repo in repositories:
changelogs = '{0}{1}{2}'.format(self.log_path, repo, '/ChangeLog.txt')
if os.path.isfile(changelogs):
os.remove(changelogs) # depends on [control=['if'], data=[]]
if os.path.isdir(self.lib_path + '{0}_repo/'.format(repo)):
for f in os.listdir(self.lib_path + '{0}_repo/'.format(repo)):
files = '{0}{1}_repo/{2}'.format(self.lib_path, repo, f)
if os.path.isfile(files):
os.remove(files) # depends on [control=['if'], data=[]]
elif os.path.isdir(files):
shutil.rmtree(files) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['f']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['repo']]
Update().repository(only) |
def redraw_label(self):
    """
    Recompute the internal label's layout.

    The label inherits this widget's font settings and size, and its text
    anchor is placed at the centre of the widget before a refresh is
    triggered.
    """
    width, height = self.size
    pos_x, pos_y = self.pos
    lbl = self._label
    # Propagate the widget's font settings.
    lbl.font_name = self.font_name
    lbl.font_size = self.font_size
    lbl.font_color = self.font_color
    # Centre the label on the widget's position.
    lbl.x = int(pos_x + width / 2.)
    lbl.y = int(pos_y + height / 2.)
    lbl.width = width
    lbl.height = height
    lbl._update()
constant[
Re-draws the text by calculating its position.
Currently, the text will always be centered on the position of the label.
]
<ast.Tuple object at 0x7da1b01d8d30> assign[=] name[self].size
<ast.Tuple object at 0x7da1b01d8460> assign[=] name[self].pos
name[self]._label.font_name assign[=] name[self].font_name
name[self]._label.font_size assign[=] name[self].font_size
name[self]._label.font_color assign[=] name[self].font_color
name[self]._label.x assign[=] call[name[int], parameter[binary_operation[name[x] + binary_operation[name[sx] / constant[2.0]]]]]
name[self]._label.y assign[=] call[name[int], parameter[binary_operation[name[y] + binary_operation[name[sy] / constant[2.0]]]]]
name[self]._label.width assign[=] call[name[self].size][constant[0]]
name[self]._label.height assign[=] call[name[self].size][constant[1]]
call[name[self]._label._update, parameter[]] | keyword[def] identifier[redraw_label] ( identifier[self] ):
literal[string]
identifier[sx] , identifier[sy] = identifier[self] . identifier[size]
identifier[x] , identifier[y] = identifier[self] . identifier[pos]
identifier[self] . identifier[_label] . identifier[font_name] = identifier[self] . identifier[font_name]
identifier[self] . identifier[_label] . identifier[font_size] = identifier[self] . identifier[font_size]
identifier[self] . identifier[_label] . identifier[font_color] = identifier[self] . identifier[font_color]
identifier[self] . identifier[_label] . identifier[x] = identifier[int] ( identifier[x] + identifier[sx] / literal[int] )
identifier[self] . identifier[_label] . identifier[y] = identifier[int] ( identifier[y] + identifier[sy] / literal[int] )
identifier[self] . identifier[_label] . identifier[width] = identifier[self] . identifier[size] [ literal[int] ]
identifier[self] . identifier[_label] . identifier[height] = identifier[self] . identifier[size] [ literal[int] ]
identifier[self] . identifier[_label] . identifier[_update] () | def redraw_label(self):
"""
Re-draws the text by calculating its position.
Currently, the text will always be centered on the position of the label.
"""
# Convenience variables
(sx, sy) = self.size
(x, y) = self.pos
# Label position
self._label.font_name = self.font_name
self._label.font_size = self.font_size
self._label.font_color = self.font_color
self._label.x = int(x + sx / 2.0)
self._label.y = int(y + sy / 2.0)
self._label.width = self.size[0]
self._label.height = self.size[1]
self._label._update() |
def bokeh(model, scale: float = 0.7, responsive: bool = True):
    """
    Adds a Bokeh plot object to the notebook display.
    :param model:
        The plot object to be added to the notebook display.
    :param scale:
        How tall the plot should be in the notebook as a fraction of screen
        height. A number between 0.1 and 1.0. The default value is 0.7.
    :param responsive:
        Whether or not the plot should responsively scale to fill the width
        of the notebook. The default is True.
    """
    report = _get_report()
    # Make sure the bokeh support library is loaded exactly once.
    if 'bokeh' not in report.library_includes:
        report.library_includes.append('bokeh')
    rendered = render_plots.bokeh_plot(
        model=model,
        scale=scale,
        responsive=responsive
    )
    report.append_body(rendered)
    report.stdout_interceptor.write_source('[ADDED] Bokeh plot\n')
constant[
Adds a Bokeh plot object to the notebook display.
:param model:
The plot object to be added to the notebook display.
:param scale:
How tall the plot should be in the notebook as a fraction of screen
height. A number between 0.1 and 1.0. The default value is 0.7.
:param responsive:
Whether or not the plot should responsively scale to fill the width
of the notebook. The default is True.
]
variable[r] assign[=] call[name[_get_report], parameter[]]
if compare[constant[bokeh] <ast.NotIn object at 0x7da2590d7190> name[r].library_includes] begin[:]
call[name[r].library_includes.append, parameter[constant[bokeh]]]
call[name[r].append_body, parameter[call[name[render_plots].bokeh_plot, parameter[]]]]
call[name[r].stdout_interceptor.write_source, parameter[constant[[ADDED] Bokeh plot
]]] | keyword[def] identifier[bokeh] ( identifier[model] , identifier[scale] : identifier[float] = literal[int] , identifier[responsive] : identifier[bool] = keyword[True] ):
literal[string]
identifier[r] = identifier[_get_report] ()
keyword[if] literal[string] keyword[not] keyword[in] identifier[r] . identifier[library_includes] :
identifier[r] . identifier[library_includes] . identifier[append] ( literal[string] )
identifier[r] . identifier[append_body] ( identifier[render_plots] . identifier[bokeh_plot] (
identifier[model] = identifier[model] ,
identifier[scale] = identifier[scale] ,
identifier[responsive] = identifier[responsive]
))
identifier[r] . identifier[stdout_interceptor] . identifier[write_source] ( literal[string] ) | def bokeh(model, scale: float=0.7, responsive: bool=True):
"""
Adds a Bokeh plot object to the notebook display.
:param model:
The plot object to be added to the notebook display.
:param scale:
How tall the plot should be in the notebook as a fraction of screen
height. A number between 0.1 and 1.0. The default value is 0.7.
:param responsive:
Whether or not the plot should responsively scale to fill the width
of the notebook. The default is True.
"""
r = _get_report()
if 'bokeh' not in r.library_includes:
r.library_includes.append('bokeh') # depends on [control=['if'], data=[]]
r.append_body(render_plots.bokeh_plot(model=model, scale=scale, responsive=responsive))
r.stdout_interceptor.write_source('[ADDED] Bokeh plot\n') |
def fromfd(cls, fd, signals):
    """
    Create a new signalfd object from a given file descriptor
    :param fd:
        A pre-made file descriptor obtained from ``signalfd_create(2)`
    :param signals:
        A pre-made frozenset that describes the monitored signals
    :raises ValueError:
        If fd is not a valid file descriptor
    :returns:
        A new signalfd object
    .. note::
        If the passed descriptor is incorrect then various methods will
        fail and raise OSError with an appropriate message.
    """
    if fd < 0:
        _err_closed()
    # BUG FIX: __new__ must be passed the class it is constructing;
    # ``cls.__new__()`` raises TypeError on every call. Bypassing
    # __init__ is intentional here -- the descriptor already exists.
    self = cls.__new__(cls)
    object.__init__(self)
    self._sfd = fd
    self._signals = signals
    return self
constant[
Create a new signalfd object from a given file descriptor
:param fd:
A pre-made file descriptor obtained from ``signalfd_create(2)`
:param signals:
A pre-made frozenset that describes the monitored signals
:raises ValueError:
If fd is not a valid file descriptor
:returns:
A new signalfd object
.. note::
If the passed descriptor is incorrect then various methods will
fail and raise OSError with an appropriate message.
]
if compare[name[fd] less[<] constant[0]] begin[:]
call[name[_err_closed], parameter[]]
variable[self] assign[=] call[name[cls].__new__, parameter[]]
call[name[object].__init__, parameter[name[self]]]
name[self]._sfd assign[=] name[fd]
name[self]._signals assign[=] name[signals]
return[name[self]] | keyword[def] identifier[fromfd] ( identifier[cls] , identifier[fd] , identifier[signals] ):
literal[string]
keyword[if] identifier[fd] < literal[int] :
identifier[_err_closed] ()
identifier[self] = identifier[cls] . identifier[__new__] ()
identifier[object] . identifier[__init__] ( identifier[self] )
identifier[self] . identifier[_sfd] = identifier[fd]
identifier[self] . identifier[_signals] = identifier[signals]
keyword[return] identifier[self] | def fromfd(cls, fd, signals):
"""
Create a new signalfd object from a given file descriptor
:param fd:
A pre-made file descriptor obtained from ``signalfd_create(2)`
:param signals:
A pre-made frozenset that describes the monitored signals
:raises ValueError:
If fd is not a valid file descriptor
:returns:
A new signalfd object
.. note::
If the passed descriptor is incorrect then various methods will
fail and raise OSError with an appropriate message.
"""
if fd < 0:
_err_closed() # depends on [control=['if'], data=[]]
self = cls.__new__()
object.__init__(self)
self._sfd = fd
self._signals = signals
return self |
def one_hot2string(arr, vocab):
    """Convert a one-hot encoded array back to string
    """
    # Collapse the one-hot axis to token indices, then map each index
    # back to its letter and join every row into a string.
    token_rows = one_hot2token(arr)
    index_to_letter = _get_index_dict(vocab)
    strings = []
    for row in token_rows:
        strings.append(''.join(index_to_letter[idx] for idx in row))
    return strings
constant[Convert a one-hot encoded array back to string
]
variable[tokens] assign[=] call[name[one_hot2token], parameter[name[arr]]]
variable[indexToLetter] assign[=] call[name[_get_index_dict], parameter[name[vocab]]]
return[<ast.ListComp object at 0x7da204623b80>] | keyword[def] identifier[one_hot2string] ( identifier[arr] , identifier[vocab] ):
literal[string]
identifier[tokens] = identifier[one_hot2token] ( identifier[arr] )
identifier[indexToLetter] = identifier[_get_index_dict] ( identifier[vocab] )
keyword[return] [ literal[string] . identifier[join] ([ identifier[indexToLetter] [ identifier[x] ] keyword[for] identifier[x] keyword[in] identifier[row] ]) keyword[for] identifier[row] keyword[in] identifier[tokens] ] | def one_hot2string(arr, vocab):
"""Convert a one-hot encoded array back to string
"""
tokens = one_hot2token(arr)
indexToLetter = _get_index_dict(vocab)
return [''.join([indexToLetter[x] for x in row]) for row in tokens] |
def _active_mounts_aix(ret):
    '''
    List active mounts on AIX systems
    '''
    for line in __salt__['cmd.run_stdout']('mount -p').split('\n'):
        # Collapse runs of whitespace so the columns split cleanly.
        fields = re.sub(r"\s+", " ", line).split()
        if not fields:
            continue
        # Skip the header row and its separator line.
        if fields[0] in ('node', '--------'):
            continue
        field_count = len(fields)
        if line.startswith((' ', '\t')):
            # Indented rows are local mounts: no leading "node" column.
            entry = {'device': fields[0],
                     'fstype': fields[2]}
            opts = _resolve_user_group_names(fields[6].split(',')) if field_count == 7 else []
            if opts:
                entry['opts'] = opts
            ret[fields[1]] = entry
        else:
            # Remote mounts carry the node name in the first column.
            entry = {'node': fields[0],
                     'device': fields[1],
                     'fstype': fields[3]}
            opts = _resolve_user_group_names(fields[7].split(',')) if field_count == 8 else []
            if opts:
                entry['opts'] = opts
            ret[fields[2]] = entry
    return ret
constant[
List active mounts on AIX systems
]
for taget[name[line]] in starred[call[call[call[name[__salt__]][constant[cmd.run_stdout]], parameter[constant[mount -p]]].split, parameter[constant[
]]]] begin[:]
variable[comps] assign[=] call[call[name[re].sub, parameter[constant[\s+], constant[ ], name[line]]].split, parameter[]]
if name[comps] begin[:]
if <ast.BoolOp object at 0x7da1b2120f40> begin[:]
continue
variable[comps_len] assign[=] call[name[len], parameter[name[comps]]]
if call[name[line].startswith, parameter[tuple[[<ast.Constant object at 0x7da1b2121ea0>, <ast.Constant object at 0x7da1b2123c10>]]]] begin[:]
variable[curr_opts] assign[=] <ast.IfExp object at 0x7da1b2121420>
if name[curr_opts] begin[:]
call[name[ret]][call[name[comps]][constant[1]]] assign[=] dictionary[[<ast.Constant object at 0x7da1b2122c80>, <ast.Constant object at 0x7da1b2122cb0>, <ast.Constant object at 0x7da1b2122c50>], [<ast.Subscript object at 0x7da1b2122bf0>, <ast.Subscript object at 0x7da1b21226b0>, <ast.Name object at 0x7da1b21226e0>]]
return[name[ret]] | keyword[def] identifier[_active_mounts_aix] ( identifier[ret] ):
literal[string]
keyword[for] identifier[line] keyword[in] identifier[__salt__] [ literal[string] ]( literal[string] ). identifier[split] ( literal[string] ):
identifier[comps] = identifier[re] . identifier[sub] ( literal[string] , literal[string] , identifier[line] ). identifier[split] ()
keyword[if] identifier[comps] :
keyword[if] identifier[comps] [ literal[int] ]== literal[string] keyword[or] identifier[comps] [ literal[int] ]== literal[string] :
keyword[continue]
identifier[comps_len] = identifier[len] ( identifier[comps] )
keyword[if] identifier[line] . identifier[startswith] (( literal[string] , literal[string] )):
identifier[curr_opts] = identifier[_resolve_user_group_names] ( identifier[comps] [ literal[int] ]. identifier[split] ( literal[string] )) keyword[if] literal[int] == identifier[comps_len] keyword[else] []
keyword[if] identifier[curr_opts] :
identifier[ret] [ identifier[comps] [ literal[int] ]]={ literal[string] : identifier[comps] [ literal[int] ],
literal[string] : identifier[comps] [ literal[int] ],
literal[string] : identifier[curr_opts] }
keyword[else] :
identifier[ret] [ identifier[comps] [ literal[int] ]]={ literal[string] : identifier[comps] [ literal[int] ],
literal[string] : identifier[comps] [ literal[int] ]}
keyword[else] :
identifier[curr_opts] = identifier[_resolve_user_group_names] ( identifier[comps] [ literal[int] ]. identifier[split] ( literal[string] )) keyword[if] literal[int] == identifier[comps_len] keyword[else] []
keyword[if] identifier[curr_opts] :
identifier[ret] [ identifier[comps] [ literal[int] ]]={ literal[string] : identifier[comps] [ literal[int] ],
literal[string] : identifier[comps] [ literal[int] ],
literal[string] : identifier[comps] [ literal[int] ],
literal[string] : identifier[curr_opts] }
keyword[else] :
identifier[ret] [ identifier[comps] [ literal[int] ]]={ literal[string] : identifier[comps] [ literal[int] ],
literal[string] : identifier[comps] [ literal[int] ],
literal[string] : identifier[comps] [ literal[int] ]}
keyword[return] identifier[ret] | def _active_mounts_aix(ret):
"""
List active mounts on AIX systems
"""
for line in __salt__['cmd.run_stdout']('mount -p').split('\n'):
comps = re.sub('\\s+', ' ', line).split()
if comps:
if comps[0] == 'node' or comps[0] == '--------':
continue # depends on [control=['if'], data=[]]
comps_len = len(comps)
if line.startswith((' ', '\t')):
curr_opts = _resolve_user_group_names(comps[6].split(',')) if 7 == comps_len else []
if curr_opts:
ret[comps[1]] = {'device': comps[0], 'fstype': comps[2], 'opts': curr_opts} # depends on [control=['if'], data=[]]
else:
ret[comps[1]] = {'device': comps[0], 'fstype': comps[2]} # depends on [control=['if'], data=[]]
else:
curr_opts = _resolve_user_group_names(comps[7].split(',')) if 8 == comps_len else []
if curr_opts:
ret[comps[2]] = {'node': comps[0], 'device': comps[1], 'fstype': comps[3], 'opts': curr_opts} # depends on [control=['if'], data=[]]
else:
ret[comps[2]] = {'node': comps[0], 'device': comps[1], 'fstype': comps[3]} # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['line']]
return ret |
def step(self):
    """Do a single iteration over all cbpdn and ccmod steps. Those that
    are not coupled on the K axis are performed in parallel."""
    num_groups = self.xstep.cri.K
    # With nproc == 0 the K consensus instances are iterated serially
    # instead of via multiprocessing -- useful for debugging and for
    # timing comparisons against the parallel path.
    if self.nproc == 0:
        for grp in range(num_groups):
            md_step_group(grp)
    else:
        self.pool.map(md_step_group, range(num_groups))
    # Steps coupled on the K axis run after the per-group work.
    ccmodmd_ystep()
    ccmodmd_ustep()
    cbpdnmd_setdict()
constant[Do a single iteration over all cbpdn and ccmod steps. Those that
are not coupled on the K axis are performed in parallel.]
if compare[name[self].nproc equal[==] constant[0]] begin[:]
for taget[name[k]] in starred[call[name[range], parameter[name[self].xstep.cri.K]]] begin[:]
call[name[md_step_group], parameter[name[k]]]
call[name[ccmodmd_ystep], parameter[]]
call[name[ccmodmd_ustep], parameter[]]
call[name[cbpdnmd_setdict], parameter[]] | keyword[def] identifier[step] ( identifier[self] ):
literal[string]
keyword[if] identifier[self] . identifier[nproc] == literal[int] :
keyword[for] identifier[k] keyword[in] identifier[range] ( identifier[self] . identifier[xstep] . identifier[cri] . identifier[K] ):
identifier[md_step_group] ( identifier[k] )
keyword[else] :
identifier[self] . identifier[pool] . identifier[map] ( identifier[md_step_group] , identifier[range] ( identifier[self] . identifier[xstep] . identifier[cri] . identifier[K] ))
identifier[ccmodmd_ystep] ()
identifier[ccmodmd_ustep] ()
identifier[cbpdnmd_setdict] () | def step(self):
"""Do a single iteration over all cbpdn and ccmod steps. Those that
are not coupled on the K axis are performed in parallel."""
# If the nproc parameter of __init__ is zero, just iterate
# over the K consensus instances instead of using
# multiprocessing to do the computations in parallel. This is
# useful for debugging and timing comparisons.
if self.nproc == 0:
for k in range(self.xstep.cri.K):
md_step_group(k) # depends on [control=['for'], data=['k']] # depends on [control=['if'], data=[]]
else:
self.pool.map(md_step_group, range(self.xstep.cri.K))
ccmodmd_ystep()
ccmodmd_ustep()
cbpdnmd_setdict() |
def start_instance(self, build):
"""
I start a new instance of a VM.
If a base_image is specified, I will make a clone of that otherwise i will
use image directly.
If i'm not given libvirt domain definition XML, I will look for my name
in the list of defined virtual machines and start that.
"""
if self.domain is not None:
log.msg("Cannot start_instance '%s' as already active" %
self.workername)
return False
yield self._prepare_base_image()
try:
if self.xml:
self.domain = yield self.connection.create(self.xml)
else:
self.domain = yield self.connection.lookupByName(self.workername)
yield self.domain.create()
except Exception:
log.err(failure.Failure(),
"Cannot start a VM (%s), failing gracefully and triggering"
"a new build check" % self.workername)
self.domain = None
return False
return True | def function[start_instance, parameter[self, build]]:
constant[
I start a new instance of a VM.
If a base_image is specified, I will make a clone of that otherwise i will
use image directly.
If i'm not given libvirt domain definition XML, I will look for my name
in the list of defined virtual machines and start that.
]
if compare[name[self].domain is_not constant[None]] begin[:]
call[name[log].msg, parameter[binary_operation[constant[Cannot start_instance '%s' as already active] <ast.Mod object at 0x7da2590d6920> name[self].workername]]]
return[constant[False]]
<ast.Yield object at 0x7da18c4cc700>
<ast.Try object at 0x7da18c4cca30>
return[constant[True]] | keyword[def] identifier[start_instance] ( identifier[self] , identifier[build] ):
literal[string]
keyword[if] identifier[self] . identifier[domain] keyword[is] keyword[not] keyword[None] :
identifier[log] . identifier[msg] ( literal[string] %
identifier[self] . identifier[workername] )
keyword[return] keyword[False]
keyword[yield] identifier[self] . identifier[_prepare_base_image] ()
keyword[try] :
keyword[if] identifier[self] . identifier[xml] :
identifier[self] . identifier[domain] = keyword[yield] identifier[self] . identifier[connection] . identifier[create] ( identifier[self] . identifier[xml] )
keyword[else] :
identifier[self] . identifier[domain] = keyword[yield] identifier[self] . identifier[connection] . identifier[lookupByName] ( identifier[self] . identifier[workername] )
keyword[yield] identifier[self] . identifier[domain] . identifier[create] ()
keyword[except] identifier[Exception] :
identifier[log] . identifier[err] ( identifier[failure] . identifier[Failure] (),
literal[string]
literal[string] % identifier[self] . identifier[workername] )
identifier[self] . identifier[domain] = keyword[None]
keyword[return] keyword[False]
keyword[return] keyword[True] | def start_instance(self, build):
"""
I start a new instance of a VM.
If a base_image is specified, I will make a clone of that otherwise i will
use image directly.
If i'm not given libvirt domain definition XML, I will look for my name
in the list of defined virtual machines and start that.
"""
if self.domain is not None:
log.msg("Cannot start_instance '%s' as already active" % self.workername)
return False # depends on [control=['if'], data=[]]
yield self._prepare_base_image()
try:
if self.xml:
self.domain = (yield self.connection.create(self.xml)) # depends on [control=['if'], data=[]]
else:
self.domain = (yield self.connection.lookupByName(self.workername))
yield self.domain.create() # depends on [control=['try'], data=[]]
except Exception:
log.err(failure.Failure(), 'Cannot start a VM (%s), failing gracefully and triggeringa new build check' % self.workername)
self.domain = None
return False # depends on [control=['except'], data=[]]
return True |
def gammatone_erb_constants(n):
    """
    Gammatone filter ERB constants for a given filter order ``n``.

    Returns the pair :math:`(x, y) = (1/a_n, c_n)`, where ``x`` is the
    bandwidth compensation factor for direct use in the gammatone formula
    (``bandwidth = x * erb_value``) and ``x * y`` converts an ERB value into
    the ``3 dB`` bandwidth.

    Based on equations from:

    ``Holdsworth, J.; Patterson, R.; Nimmo-Smith, I.; Rice, P. Implementing a
    GammaTone Filter Bank. In: SVOS Final Report, Annex C, Part A: The
    Auditory Filter Bank. 1988.``

    Examples
    --------
    >>> x, y = gammatone_erb_constants(4)
    >>> round(x, 3)
    1.019
    """
    two_n_minus_two = 2 * n - 2  # shared by the factorial and the 2-power below
    # 1/a_n: (n-1)!^2 / (pi * (2n-2)! * 2^-(2n-2))
    bandwidth_factor = factorial(n - 1) ** 2 / (pi * factorial(two_n_minus_two) *
                                                2 ** -two_n_minus_two)
    # c_n: 2 * sqrt(2^(1/n) - 1)
    half_power_factor = 2 * (2 ** (1. / n) - 1) ** .5
    return (bandwidth_factor, half_power_factor)
constant[
Constants for using the real bandwidth in the gammatone filter, given its
order. Returns a pair :math:`(x, y) = (1/a_n, c_n)`.
Based on equations from:
``Holdsworth, J.; Patterson, R.; Nimmo-Smith, I.; Rice, P. Implementing a
GammaTone Filter Bank. In: SVOS Final Report, Annex C, Part A: The
Auditory Filter Bank. 1988.``
First returned value is a bandwidth compensation for direct use in the
gammatone formula:
>>> x, y = gammatone_erb_constants(4)
>>> central_frequency = 1000
>>> round(x, 3)
1.019
>>> bandwidth = x * erb["moore_glasberg_83"](central_frequency)
>>> round(bandwidth, 2)
130.52
Second returned value helps us find the ``3 dB`` bandwidth as:
>>> x, y = gammatone_erb_constants(4)
>>> central_frequency = 1000
>>> bandwidth3dB = x * y * erb["moore_glasberg_83"](central_frequency)
>>> round(bandwidth3dB, 2)
113.55
]
variable[tnt] assign[=] binary_operation[binary_operation[constant[2] * name[n]] - constant[2]]
return[tuple[[<ast.BinOp object at 0x7da1b06c6920>, <ast.BinOp object at 0x7da1b06c4bb0>]]] | keyword[def] identifier[gammatone_erb_constants] ( identifier[n] ):
literal[string]
identifier[tnt] = literal[int] * identifier[n] - literal[int]
keyword[return] ( identifier[factorial] ( identifier[n] - literal[int] )** literal[int] /( identifier[pi] * identifier[factorial] ( identifier[tnt] )* literal[int] **- identifier[tnt] ),
literal[int] *( literal[int] **( literal[int] / identifier[n] )- literal[int] )** literal[int]
) | def gammatone_erb_constants(n):
"""
Constants for using the real bandwidth in the gammatone filter, given its
order. Returns a pair :math:`(x, y) = (1/a_n, c_n)`.
Based on equations from:
``Holdsworth, J.; Patterson, R.; Nimmo-Smith, I.; Rice, P. Implementing a
GammaTone Filter Bank. In: SVOS Final Report, Annex C, Part A: The
Auditory Filter Bank. 1988.``
First returned value is a bandwidth compensation for direct use in the
gammatone formula:
>>> x, y = gammatone_erb_constants(4)
>>> central_frequency = 1000
>>> round(x, 3)
1.019
>>> bandwidth = x * erb["moore_glasberg_83"](central_frequency)
>>> round(bandwidth, 2)
130.52
Second returned value helps us find the ``3 dB`` bandwidth as:
>>> x, y = gammatone_erb_constants(4)
>>> central_frequency = 1000
>>> bandwidth3dB = x * y * erb["moore_glasberg_83"](central_frequency)
>>> round(bandwidth3dB, 2)
113.55
"""
tnt = 2 * n - 2
return (factorial(n - 1) ** 2 / (pi * factorial(tnt) * 2 ** (-tnt)), 2 * (2 ** (1.0 / n) - 1) ** 0.5) |
def list_documents(self,
                   parent,
                   page_size=None,
                   retry=google.api_core.gapic_v1.method.DEFAULT,
                   timeout=google.api_core.gapic_v1.method.DEFAULT,
                   metadata=None):
    """
    Returns the list of all documents of the knowledge base.

    Example:
        >>> import dialogflow_v2beta1
        >>>
        >>> client = dialogflow_v2beta1.DocumentsClient()
        >>>
        >>> parent = client.knowledge_base_path('[PROJECT]', '[KNOWLEDGE_BASE]')
        >>>
        >>> # Iterate over all results
        >>> for element in client.list_documents(parent):
        ...     pass

    Args:
        parent (str): Required. The knowledge base to list all documents for.
            Format: ``projects/<Project ID>/knowledgeBases/<Knowledge Base ID>``.
        page_size (int): The maximum number of resources contained in the
            underlying API response. If page streaming is performed per-page,
            this determines the maximum number of resources in a page.
        retry (Optional[google.api_core.retry.Retry]): A retry object used
            to retry requests. If ``None`` is specified, requests will not
            be retried.
        timeout (Optional[float]): The amount of time, in seconds, to wait
            for the request to complete. Note that if ``retry`` is
            specified, the timeout applies to each individual attempt.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            that is provided to the method.

    Returns:
        A :class:`~google.gax.PageIterator` instance. By default, this
        is an iterable of :class:`~google.cloud.dialogflow_v2beta1.types.Document`
        instances. This object can also be configured to iterate over the
        pages of the response through the `options` parameter.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request
            failed for any reason.
        google.api_core.exceptions.RetryError: If the request failed due
            to a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Lazily wrap the raw transport method with retry/timeout logic and
    # cache the wrapper so subsequent calls reuse it.
    if 'list_documents' not in self._inner_api_calls:
        method_config = self._method_configs['ListDocuments']
        self._inner_api_calls['list_documents'] = \
            google.api_core.gapic_v1.method.wrap_method(
                self.transport.list_documents,
                default_retry=method_config.retry,
                default_timeout=method_config.timeout,
                client_info=self._client_info,
            )

    request = document_pb2.ListDocumentsRequest(
        parent=parent,
        page_size=page_size,
    )

    # Bind the per-call options now; the iterator re-invokes this callable
    # for every page, threading page_token/next_page_token between calls.
    paged_call = functools.partial(
        self._inner_api_calls['list_documents'],
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
    return google.api_core.page_iterator.GRPCIterator(
        client=None,
        method=paged_call,
        request=request,
        items_field='documents',
        request_token_field='page_token',
        response_token_field='next_page_token',
    )
constant[
Returns the list of all documents of the knowledge base.
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.DocumentsClient()
>>>
>>> parent = client.knowledge_base_path('[PROJECT]', '[KNOWLEDGE_BASE]')
>>>
>>> # Iterate over all results
>>> for element in client.list_documents(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_documents(parent, options=CallOptions(page_token=INITIAL_PAGE)):
... for element in page:
... # process element
... pass
Args:
parent (str): Required. The knowledge base to list all documents for.
Format: ``projects/<Project ID>/knowledgeBases/<Knowledge Base ID>``.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.dialogflow_v2beta1.types.Document` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
]
if compare[constant[list_documents] <ast.NotIn object at 0x7da2590d7190> name[self]._inner_api_calls] begin[:]
call[name[self]._inner_api_calls][constant[list_documents]] assign[=] call[name[google].api_core.gapic_v1.method.wrap_method, parameter[name[self].transport.list_documents]]
variable[request] assign[=] call[name[document_pb2].ListDocumentsRequest, parameter[]]
variable[iterator] assign[=] call[name[google].api_core.page_iterator.GRPCIterator, parameter[]]
return[name[iterator]] | keyword[def] identifier[list_documents] ( identifier[self] ,
identifier[parent] ,
identifier[page_size] = keyword[None] ,
identifier[retry] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[timeout] = identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[DEFAULT] ,
identifier[metadata] = keyword[None] ):
literal[string]
keyword[if] literal[string] keyword[not] keyword[in] identifier[self] . identifier[_inner_api_calls] :
identifier[self] . identifier[_inner_api_calls] [
literal[string] ]= identifier[google] . identifier[api_core] . identifier[gapic_v1] . identifier[method] . identifier[wrap_method] (
identifier[self] . identifier[transport] . identifier[list_documents] ,
identifier[default_retry] = identifier[self] . identifier[_method_configs] [ literal[string] ]. identifier[retry] ,
identifier[default_timeout] = identifier[self] . identifier[_method_configs] [ literal[string] ]
. identifier[timeout] ,
identifier[client_info] = identifier[self] . identifier[_client_info] ,
)
identifier[request] = identifier[document_pb2] . identifier[ListDocumentsRequest] (
identifier[parent] = identifier[parent] ,
identifier[page_size] = identifier[page_size] ,
)
identifier[iterator] = identifier[google] . identifier[api_core] . identifier[page_iterator] . identifier[GRPCIterator] (
identifier[client] = keyword[None] ,
identifier[method] = identifier[functools] . identifier[partial] (
identifier[self] . identifier[_inner_api_calls] [ literal[string] ],
identifier[retry] = identifier[retry] ,
identifier[timeout] = identifier[timeout] ,
identifier[metadata] = identifier[metadata] ),
identifier[request] = identifier[request] ,
identifier[items_field] = literal[string] ,
identifier[request_token_field] = literal[string] ,
identifier[response_token_field] = literal[string] ,
)
keyword[return] identifier[iterator] | def list_documents(self, parent, page_size=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None):
"""
Returns the list of all documents of the knowledge base.
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.DocumentsClient()
>>>
>>> parent = client.knowledge_base_path('[PROJECT]', '[KNOWLEDGE_BASE]')
>>>
>>> # Iterate over all results
>>> for element in client.list_documents(parent):
... # process element
... pass
>>>
>>>
>>> # Alternatively:
>>>
>>> # Iterate over results one page at a time
>>> for page in client.list_documents(parent, options=CallOptions(page_token=INITIAL_PAGE)):
... for element in page:
... # process element
... pass
Args:
parent (str): Required. The knowledge base to list all documents for.
Format: ``projects/<Project ID>/knowledgeBases/<Knowledge Base ID>``.
page_size (int): The maximum number of resources contained in the
underlying API response. If page streaming is performed per-
resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number
of resources in a page.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.gax.PageIterator` instance. By default, this
is an iterable of :class:`~google.cloud.dialogflow_v2beta1.types.Document` instances.
This object can also be configured to iterate over the pages
of the response through the `options` parameter.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'list_documents' not in self._inner_api_calls:
self._inner_api_calls['list_documents'] = google.api_core.gapic_v1.method.wrap_method(self.transport.list_documents, default_retry=self._method_configs['ListDocuments'].retry, default_timeout=self._method_configs['ListDocuments'].timeout, client_info=self._client_info) # depends on [control=['if'], data=[]]
request = document_pb2.ListDocumentsRequest(parent=parent, page_size=page_size)
iterator = google.api_core.page_iterator.GRPCIterator(client=None, method=functools.partial(self._inner_api_calls['list_documents'], retry=retry, timeout=timeout, metadata=metadata), request=request, items_field='documents', request_token_field='page_token', response_token_field='next_page_token')
return iterator |
def required_opts_multi_ifo(opt, parser, ifo, opt_list, required_by=None):
    """Check that every option in ``opt_list`` is defined for ``ifo``.

    For each option name, the corresponding parsed attribute is expected to
    be a per-ifo mapping; ``parser.error`` is invoked when the entry for
    ``ifo`` is absent or ``None``.

    Parameters
    ----------
    opt : object
        Result of option parsing
    parser : object
        OptionParser instance.
    ifo : string
    opt_list : list of strings
    required_by : string, optional
        the option that requires these options (if applicable)
    """
    for option_name in opt_list:
        attr_name = option_name[2:].replace('-', '_')
        try:
            # Missing key raises KeyError; an explicit None is treated the same.
            missing = getattr(opt, attr_name)[ifo] is None
        except KeyError:
            missing = True
        if missing:
            err_str = "%s is missing " % option_name
            if required_by is not None:
                err_str += ", required by %s" % required_by
            parser.error(err_str)
constant[Check that all the opts are defined
Parameters
----------
opt : object
Result of option parsing
parser : object
OptionParser instance.
ifo : string
opt_list : list of strings
required_by : string, optional
the option that requires these options (if applicable)
]
for taget[name[name]] in starred[name[opt_list]] begin[:]
variable[attr] assign[=] call[call[name[name]][<ast.Slice object at 0x7da20c7c8520>].replace, parameter[constant[-], constant[_]]]
<ast.Try object at 0x7da20c7ca830> | keyword[def] identifier[required_opts_multi_ifo] ( identifier[opt] , identifier[parser] , identifier[ifo] , identifier[opt_list] , identifier[required_by] = keyword[None] ):
literal[string]
keyword[for] identifier[name] keyword[in] identifier[opt_list] :
identifier[attr] = identifier[name] [ literal[int] :]. identifier[replace] ( literal[string] , literal[string] )
keyword[try] :
keyword[if] identifier[getattr] ( identifier[opt] , identifier[attr] )[ identifier[ifo] ] keyword[is] keyword[None] :
keyword[raise] identifier[KeyError]
keyword[except] identifier[KeyError] :
identifier[err_str] = literal[string] % identifier[name]
keyword[if] identifier[required_by] keyword[is] keyword[not] keyword[None] :
identifier[err_str] += literal[string] % identifier[required_by]
identifier[parser] . identifier[error] ( identifier[err_str] ) | def required_opts_multi_ifo(opt, parser, ifo, opt_list, required_by=None):
"""Check that all the opts are defined
Parameters
----------
opt : object
Result of option parsing
parser : object
OptionParser instance.
ifo : string
opt_list : list of strings
required_by : string, optional
the option that requires these options (if applicable)
"""
for name in opt_list:
attr = name[2:].replace('-', '_')
try:
if getattr(opt, attr)[ifo] is None:
raise KeyError # depends on [control=['if'], data=[]] # depends on [control=['try'], data=[]]
except KeyError:
err_str = '%s is missing ' % name
if required_by is not None:
err_str += ', required by %s' % required_by # depends on [control=['if'], data=['required_by']]
parser.error(err_str) # depends on [control=['except'], data=[]] # depends on [control=['for'], data=['name']] |
def setupSerialPort(loopback, port):
    """Sets up serial port by connecting to physical or software port.

    Depending on command line options, this function will either connect to
    a SerialTestClass() port for loopback testing or to the specified port
    from the command line option. If loopback is True it overrides the
    physical port specification.

    Args:
        loopback: argparse option
        port: argparse option

    Returns:
        serialPort: Pyserial serial port instance
    """
    if loopback:
        # Software loopback serial port -- no hardware required.
        return SerialTestClass().serialPort
    # TODO enable serial port command line options (keep simple for user!)
    # Physical port at 115200 baud, non-blocking reads (timeout=0).
    return serial.Serial(port, 115200, timeout=0)
constant[Sets up serial port by connecting to phsyical or software port.
Depending on command line options, this function will either connect to a
SerialTestClass() port for loopback testing or to the specified port from
the command line option. If loopback is True it overrides the physical port
specification.
Args:
loopback: argparse option
port: argparse option
Returns:
serialPort: Pyserial serial port instance
]
if name[loopback] begin[:]
variable[testSerial] assign[=] call[name[SerialTestClass], parameter[]]
variable[serialPort] assign[=] name[testSerial].serialPort
return[name[serialPort]] | keyword[def] identifier[setupSerialPort] ( identifier[loopback] , identifier[port] ):
literal[string]
keyword[if] identifier[loopback] :
identifier[testSerial] = identifier[SerialTestClass] ()
identifier[serialPort] = identifier[testSerial] . identifier[serialPort]
keyword[else] :
identifier[serialPort] = identifier[serial] . identifier[Serial] ( identifier[port] , literal[int] , identifier[timeout] = literal[int] )
keyword[return] identifier[serialPort] | def setupSerialPort(loopback, port):
"""Sets up serial port by connecting to phsyical or software port.
Depending on command line options, this function will either connect to a
SerialTestClass() port for loopback testing or to the specified port from
the command line option. If loopback is True it overrides the physical port
specification.
Args:
loopback: argparse option
port: argparse option
Returns:
serialPort: Pyserial serial port instance
"""
if loopback:
# Implement loopback software serial port
testSerial = SerialTestClass()
serialPort = testSerial.serialPort # depends on [control=['if'], data=[]]
else:
# TODO enable serial port command line options (keep simple for user!)
serialPort = serial.Serial(port, 115200, timeout=0)
return serialPort |
def get_users(self, usernames):
    """Fetch user info for given usernames.

    :param usernames: The usernames you want metadata for (max. 50)
    :return: list of ``User`` objects built from the whois results
    :raises DeviantartError: if the client was not authenticated through
        the Authorization Code grant type
    """
    # Bug fix: compare strings with `!=`, not `is not`. Identity comparison
    # against a string literal only works by accident of CPython interning
    # and emits a SyntaxWarning on modern interpreters.
    if self.standard_grant_type != "authorization_code":
        raise DeviantartError("Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.")

    response = self._req('/user/whois', post_data={
        "usernames":usernames
    })

    users = []
    for item in response['results']:
        u = User()
        u.from_dict(item)
        users.append(u)
    return users
constant[Fetch user info for given usernames
:param username: The usernames you want metadata for (max. 50)
]
if compare[name[self].standard_grant_type is_not constant[authorization_code]] begin[:]
<ast.Raise object at 0x7da20c990670>
variable[response] assign[=] call[name[self]._req, parameter[constant[/user/whois]]]
variable[users] assign[=] list[[]]
for taget[name[item]] in starred[call[name[response]][constant[results]]] begin[:]
variable[u] assign[=] call[name[User], parameter[]]
call[name[u].from_dict, parameter[name[item]]]
call[name[users].append, parameter[name[u]]]
return[name[users]] | keyword[def] identifier[get_users] ( identifier[self] , identifier[usernames] ):
literal[string]
keyword[if] identifier[self] . identifier[standard_grant_type] keyword[is] keyword[not] literal[string] :
keyword[raise] identifier[DeviantartError] ( literal[string] )
identifier[response] = identifier[self] . identifier[_req] ( literal[string] , identifier[post_data] ={
literal[string] : identifier[usernames]
})
identifier[users] =[]
keyword[for] identifier[item] keyword[in] identifier[response] [ literal[string] ]:
identifier[u] = identifier[User] ()
identifier[u] . identifier[from_dict] ( identifier[item] )
identifier[users] . identifier[append] ( identifier[u] )
keyword[return] identifier[users] | def get_users(self, usernames):
"""Fetch user info for given usernames
:param username: The usernames you want metadata for (max. 50)
"""
if self.standard_grant_type is not 'authorization_code':
raise DeviantartError('Authentication through Authorization Code (Grant Type) is required in order to connect to this endpoint.') # depends on [control=['if'], data=[]]
response = self._req('/user/whois', post_data={'usernames': usernames})
users = []
for item in response['results']:
u = User()
u.from_dict(item)
users.append(u) # depends on [control=['for'], data=['item']]
return users |
def begin(self):
    """Write the opening of the generated header: include guard + banner."""
    guard_macro = 'BOOST_METAPARSE_V1_CPP11_IMPL_STRING_HPP'
    self.out_f.write('#ifndef {0}\n#define {0}\n'.format(guard_macro))
    write_autogen_info(self.out_f)
constant[Generate the beginning part]
variable[name] assign[=] constant[BOOST_METAPARSE_V1_CPP11_IMPL_STRING_HPP]
call[name[self].out_f.write, parameter[call[constant[#ifndef {0}
#define {0}
].format, parameter[name[name]]]]]
call[name[write_autogen_info], parameter[name[self].out_f]] | keyword[def] identifier[begin] ( identifier[self] ):
literal[string]
identifier[name] = literal[string]
identifier[self] . identifier[out_f] . identifier[write] ( literal[string] . identifier[format] ( identifier[name] ))
identifier[write_autogen_info] ( identifier[self] . identifier[out_f] ) | def begin(self):
"""Generate the beginning part"""
name = 'BOOST_METAPARSE_V1_CPP11_IMPL_STRING_HPP'
self.out_f.write('#ifndef {0}\n#define {0}\n'.format(name))
write_autogen_info(self.out_f) |
def append_text(self, text):
    """
    Append static text to the Format.

    Merges ``text`` into a trailing StringConversion when one already ends
    the conversion list; otherwise starts a new StringConversion.

    :param text: The text to append.
    """
    tail = self.conversions[-1] if self.conversions else None
    if isinstance(tail, conversions.StringConversion):
        tail.append(text)
    else:
        self.conversions.append(conversions.StringConversion(text))
constant[
Append static text to the Format.
:param text: The text to append.
]
if <ast.BoolOp object at 0x7da2054a5d20> begin[:]
call[call[name[self].conversions][<ast.UnaryOp object at 0x7da1b13841f0>].append, parameter[name[text]]] | keyword[def] identifier[append_text] ( identifier[self] , identifier[text] ):
literal[string]
keyword[if] ( identifier[self] . identifier[conversions] keyword[and]
identifier[isinstance] ( identifier[self] . identifier[conversions] [- literal[int] ],
identifier[conversions] . identifier[StringConversion] )):
identifier[self] . identifier[conversions] [- literal[int] ]. identifier[append] ( identifier[text] )
keyword[else] :
identifier[self] . identifier[conversions] . identifier[append] ( identifier[conversions] . identifier[StringConversion] ( identifier[text] )) | def append_text(self, text):
"""
Append static text to the Format.
:param text: The text to append.
"""
if self.conversions and isinstance(self.conversions[-1], conversions.StringConversion):
self.conversions[-1].append(text) # depends on [control=['if'], data=[]]
else:
self.conversions.append(conversions.StringConversion(text)) |
def _check_nodes_have_same_collection(self):
"""Return True if all nodes have collected the same items.
If collections differ, this method returns False while logging
the collection differences and posting collection errors to
pytest_collectreport hook.
"""
node_collection_items = list(self.node2collection.items())
first_node, col = node_collection_items[0]
same_collection = True
for node, collection in node_collection_items[1:]:
msg = report_collection_diff(
col, collection, first_node.gateway.id, node.gateway.id
)
if msg:
same_collection = False
self.log(msg)
if self.config is not None:
rep = CollectReport(
node.gateway.id, "failed", longrepr=msg, result=[]
)
self.config.hook.pytest_collectreport(report=rep)
return same_collection | def function[_check_nodes_have_same_collection, parameter[self]]:
constant[Return True if all nodes have collected the same items.
If collections differ, this method returns False while logging
the collection differences and posting collection errors to
pytest_collectreport hook.
]
variable[node_collection_items] assign[=] call[name[list], parameter[call[name[self].node2collection.items, parameter[]]]]
<ast.Tuple object at 0x7da1b18fe410> assign[=] call[name[node_collection_items]][constant[0]]
variable[same_collection] assign[=] constant[True]
for taget[tuple[[<ast.Name object at 0x7da1b18fceb0>, <ast.Name object at 0x7da1b18fe7d0>]]] in starred[call[name[node_collection_items]][<ast.Slice object at 0x7da1b18fec20>]] begin[:]
variable[msg] assign[=] call[name[report_collection_diff], parameter[name[col], name[collection], name[first_node].gateway.id, name[node].gateway.id]]
if name[msg] begin[:]
variable[same_collection] assign[=] constant[False]
call[name[self].log, parameter[name[msg]]]
if compare[name[self].config is_not constant[None]] begin[:]
variable[rep] assign[=] call[name[CollectReport], parameter[name[node].gateway.id, constant[failed]]]
call[name[self].config.hook.pytest_collectreport, parameter[]]
return[name[same_collection]] | keyword[def] identifier[_check_nodes_have_same_collection] ( identifier[self] ):
literal[string]
identifier[node_collection_items] = identifier[list] ( identifier[self] . identifier[node2collection] . identifier[items] ())
identifier[first_node] , identifier[col] = identifier[node_collection_items] [ literal[int] ]
identifier[same_collection] = keyword[True]
keyword[for] identifier[node] , identifier[collection] keyword[in] identifier[node_collection_items] [ literal[int] :]:
identifier[msg] = identifier[report_collection_diff] (
identifier[col] , identifier[collection] , identifier[first_node] . identifier[gateway] . identifier[id] , identifier[node] . identifier[gateway] . identifier[id]
)
keyword[if] identifier[msg] :
identifier[same_collection] = keyword[False]
identifier[self] . identifier[log] ( identifier[msg] )
keyword[if] identifier[self] . identifier[config] keyword[is] keyword[not] keyword[None] :
identifier[rep] = identifier[CollectReport] (
identifier[node] . identifier[gateway] . identifier[id] , literal[string] , identifier[longrepr] = identifier[msg] , identifier[result] =[]
)
identifier[self] . identifier[config] . identifier[hook] . identifier[pytest_collectreport] ( identifier[report] = identifier[rep] )
keyword[return] identifier[same_collection] | def _check_nodes_have_same_collection(self):
"""Return True if all nodes have collected the same items.
If collections differ, this method returns False while logging
the collection differences and posting collection errors to
pytest_collectreport hook.
"""
node_collection_items = list(self.node2collection.items())
(first_node, col) = node_collection_items[0]
same_collection = True
for (node, collection) in node_collection_items[1:]:
msg = report_collection_diff(col, collection, first_node.gateway.id, node.gateway.id)
if msg:
same_collection = False
self.log(msg)
if self.config is not None:
rep = CollectReport(node.gateway.id, 'failed', longrepr=msg, result=[])
self.config.hook.pytest_collectreport(report=rep) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
return same_collection |
def cone_search(lcc_server,
center_ra,
center_decl,
radiusarcmin=5.0,
result_visibility='unlisted',
email_when_done=False,
collections=None,
columns=None,
filters=None,
sortspec=None,
samplespec=None,
limitspec=None,
download_data=True,
outdir=None,
maxtimeout=300.0,
refresh=15.0):
'''This runs a cone-search query.
Parameters
----------
lcc_server : str
This is the base URL of the LCC-Server to talk to. (e.g. for HAT, use:
https://data.hatsurveys.org)
center_ra,center_decl : float
These are the central coordinates of the search to conduct. These can be
either decimal degrees of type float, or sexagesimal coordinates of type
str:
- OK: 290.0, 45.0
- OK: 15:00:00 +45:00:00
- OK: 15 00 00.0 -45 00 00.0
- NOT OK: 290.0 +45:00:00
- NOT OK: 15:00:00 45.0
radiusarcmin : float
This is the search radius to use for the cone-search. This is in
arcminutes. The maximum radius you can use is 60 arcminutes = 1 degree.
result_visibility : {'private', 'unlisted', 'public'}
This sets the visibility of the dataset produced from the search
result::
'private' -> the dataset and its products are not visible or
accessible by any user other than the one that
created the dataset.
'unlisted' -> the dataset and its products are not visible in the
list of public datasets, but can be accessed if the
dataset URL is known
'public' -> the dataset and its products are visible in the list
of public datasets and can be accessed by anyone.
email_when_done : bool
If True, the LCC-Server will email you when the search is complete. This
will also set `download_data` to False. Using this requires an
LCC-Server account and an API key tied to that account.
collections : list of str or None
This is a list of LC collections to search in. If this is None, all
collections will be searched.
columns : list of str or None
This is a list of columns to return in the results. Matching objects'
object IDs, RAs, DECs, and links to light curve files will always be
returned so there is no need to specify these columns. If None, only
these columns will be returned: 'objectid', 'ra', 'decl', 'lcfname'
filters : str or None
This is an SQL-like string to use to filter on database columns in the
LCC-Server's collections. To see the columns available for a search,
visit the Collections tab in the LCC-Server's browser UI. The filter
operators allowed are::
lt -> less than
gt -> greater than
ge -> greater than or equal to
le -> less than or equal to
eq -> equal to
ne -> not equal to
ct -> contains text
isnull -> column value is null
notnull -> column value is not null
You may use the `and` and `or` operators between filter specifications
to chain them together logically.
Example filter strings::
"(propermotion gt 200.0) and (sdssr lt 11.0)"
"(dered_jmag_kmag gt 2.0) and (aep_000_stetsonj gt 10.0)"
"(gaia_status ct 'ok') and (propermotion gt 300.0)"
"(simbad_best_objtype ct 'RR') and (dered_sdssu_sdssg lt 0.5)"
sortspec : tuple of two strs or None
If not None, this should be a tuple of two items::
('column to sort by', 'asc|desc')
This sets the column to sort the results by. For cone_search, the
default column and sort order are 'dist_arcsec' and 'asc', meaning the
distance from the search center in ascending order.
samplespec : int or None
If this is an int, will indicate how many rows from the initial search
result will be uniformly random sampled and returned.
limitspec : int or None
If this is an int, will indicate how many rows from the initial search
result to return in total.
`sortspec`, `samplespec`, and `limitspec` are applied in this order:
sample -> sort -> limit
download_data : bool
This sets if the accompanying data from the search results will be
downloaded automatically. This includes the data table CSV, the dataset
pickle file, and a light curve ZIP file. Note that if the search service
indicates that your query is still in progress, this function will block
until the light curve ZIP file becomes available. The maximum wait time
in seconds is set by maxtimeout and the refresh interval is set by
refresh.
To avoid the wait block, set download_data to False and the function
will write a pickle file to `~/.astrobase/lccs/query-[setid].pkl`
containing all the information necessary to retrieve these data files
later when the query is done. To do so, call the
`retrieve_dataset_files` with the path to this pickle file (it will be
returned).
outdir : str or None
If this is provided, sets the output directory of the downloaded dataset
files. If None, they will be downloaded to the current directory.
maxtimeout : float
The maximum time in seconds to wait for the LCC-Server to respond with a
result before timing out. You can use the `retrieve_dataset_files`
function to get results later as needed.
refresh : float
The time to wait in seconds before pinging the LCC-Server to see if a
search query has completed and dataset result files can be downloaded.
Returns
-------
tuple
Returns a tuple with the following elements::
(search result status dict,
search result CSV file path,
search result LC ZIP path)
'''
# turn the input into a param dict
coords = '%.5f %.5f %.1f' % (center_ra, center_decl, radiusarcmin)
params = {
'coords':coords
}
if collections:
params['collections'] = collections
if columns:
params['columns'] = columns
if filters:
params['filters'] = filters
if sortspec:
params['sortspec'] = json.dumps([sortspec])
if samplespec:
params['samplespec'] = int(samplespec)
if limitspec:
params['limitspec'] = int(limitspec)
params['visibility'] = result_visibility
params['emailwhendone'] = email_when_done
# we won't wait for the LC ZIP to complete if email_when_done = True
if email_when_done:
download_data = False
# check if we have an API key already
have_apikey, apikey, expires = check_existing_apikey(lcc_server)
# if not, get a new one
if not have_apikey:
apikey, expires = get_new_apikey(lcc_server)
# hit the server
api_url = '%s/api/conesearch' % lcc_server
searchresult = submit_post_searchquery(api_url, params, apikey)
# check the status of the search
status = searchresult[0]
# now we'll check if we want to download the data
if download_data:
if status == 'ok':
LOGINFO('query complete, downloading associated data...')
csv, lczip, pkl = retrieve_dataset_files(searchresult,
outdir=outdir,
apikey=apikey)
if pkl:
return searchresult[1], csv, lczip, pkl
else:
return searchresult[1], csv, lczip
elif status == 'background':
LOGINFO('query is not yet complete, '
'waiting up to %.1f minutes, '
'updates every %s seconds (hit Ctrl+C to cancel)...' %
(maxtimeout/60.0, refresh))
timewaited = 0.0
while timewaited < maxtimeout:
try:
time.sleep(refresh)
csv, lczip, pkl = retrieve_dataset_files(searchresult,
outdir=outdir,
apikey=apikey)
if (csv and os.path.exists(csv) and
lczip and os.path.exists(lczip)):
LOGINFO('all dataset products collected')
return searchresult[1], csv, lczip
timewaited = timewaited + refresh
except KeyboardInterrupt:
LOGWARNING('abandoned wait for downloading data')
return searchresult[1], None, None
LOGERROR('wait timed out.')
return searchresult[1], None, None
else:
LOGERROR('could not download the data for this query result')
return searchresult[1], None, None
else:
return searchresult[1], None, None | def function[cone_search, parameter[lcc_server, center_ra, center_decl, radiusarcmin, result_visibility, email_when_done, collections, columns, filters, sortspec, samplespec, limitspec, download_data, outdir, maxtimeout, refresh]]:
constant[This runs a cone-search query.
Parameters
----------
lcc_server : str
This is the base URL of the LCC-Server to talk to. (e.g. for HAT, use:
https://data.hatsurveys.org)
center_ra,center_decl : float
These are the central coordinates of the search to conduct. These can be
either decimal degrees of type float, or sexagesimal coordinates of type
str:
- OK: 290.0, 45.0
- OK: 15:00:00 +45:00:00
- OK: 15 00 00.0 -45 00 00.0
- NOT OK: 290.0 +45:00:00
- NOT OK: 15:00:00 45.0
radiusarcmin : float
This is the search radius to use for the cone-search. This is in
arcminutes. The maximum radius you can use is 60 arcminutes = 1 degree.
result_visibility : {'private', 'unlisted', 'public'}
This sets the visibility of the dataset produced from the search
result::
'private' -> the dataset and its products are not visible or
accessible by any user other than the one that
created the dataset.
'unlisted' -> the dataset and its products are not visible in the
list of public datasets, but can be accessed if the
dataset URL is known
'public' -> the dataset and its products are visible in the list
of public datasets and can be accessed by anyone.
email_when_done : bool
If True, the LCC-Server will email you when the search is complete. This
will also set `download_data` to False. Using this requires an
LCC-Server account and an API key tied to that account.
collections : list of str or None
This is a list of LC collections to search in. If this is None, all
collections will be searched.
columns : list of str or None
This is a list of columns to return in the results. Matching objects'
object IDs, RAs, DECs, and links to light curve files will always be
returned so there is no need to specify these columns. If None, only
these columns will be returned: 'objectid', 'ra', 'decl', 'lcfname'
filters : str or None
This is an SQL-like string to use to filter on database columns in the
LCC-Server's collections. To see the columns available for a search,
visit the Collections tab in the LCC-Server's browser UI. The filter
operators allowed are::
lt -> less than
gt -> greater than
ge -> greater than or equal to
le -> less than or equal to
eq -> equal to
ne -> not equal to
ct -> contains text
isnull -> column value is null
notnull -> column value is not null
You may use the `and` and `or` operators between filter specifications
to chain them together logically.
Example filter strings::
"(propermotion gt 200.0) and (sdssr lt 11.0)"
"(dered_jmag_kmag gt 2.0) and (aep_000_stetsonj gt 10.0)"
"(gaia_status ct 'ok') and (propermotion gt 300.0)"
"(simbad_best_objtype ct 'RR') and (dered_sdssu_sdssg lt 0.5)"
sortspec : tuple of two strs or None
If not None, this should be a tuple of two items::
('column to sort by', 'asc|desc')
This sets the column to sort the results by. For cone_search, the
default column and sort order are 'dist_arcsec' and 'asc', meaning the
distance from the search center in ascending order.
samplespec : int or None
If this is an int, will indicate how many rows from the initial search
result will be uniformly random sampled and returned.
limitspec : int or None
If this is an int, will indicate how many rows from the initial search
result to return in total.
`sortspec`, `samplespec`, and `limitspec` are applied in this order:
sample -> sort -> limit
download_data : bool
This sets if the accompanying data from the search results will be
downloaded automatically. This includes the data table CSV, the dataset
pickle file, and a light curve ZIP file. Note that if the search service
indicates that your query is still in progress, this function will block
until the light curve ZIP file becomes available. The maximum wait time
in seconds is set by maxtimeout and the refresh interval is set by
refresh.
To avoid the wait block, set download_data to False and the function
will write a pickle file to `~/.astrobase/lccs/query-[setid].pkl`
containing all the information necessary to retrieve these data files
later when the query is done. To do so, call the
`retrieve_dataset_files` with the path to this pickle file (it will be
returned).
outdir : str or None
If this is provided, sets the output directory of the downloaded dataset
files. If None, they will be downloaded to the current directory.
maxtimeout : float
The maximum time in seconds to wait for the LCC-Server to respond with a
result before timing out. You can use the `retrieve_dataset_files`
function to get results later as needed.
refresh : float
The time to wait in seconds before pinging the LCC-Server to see if a
search query has completed and dataset result files can be downloaded.
Returns
-------
tuple
Returns a tuple with the following elements::
(search result status dict,
search result CSV file path,
search result LC ZIP path)
]
variable[coords] assign[=] binary_operation[constant[%.5f %.5f %.1f] <ast.Mod object at 0x7da2590d6920> tuple[[<ast.Name object at 0x7da1b0141c90>, <ast.Name object at 0x7da1b01408e0>, <ast.Name object at 0x7da1b01421a0>]]]
variable[params] assign[=] dictionary[[<ast.Constant object at 0x7da1b0143a00>], [<ast.Name object at 0x7da1b01403a0>]]
if name[collections] begin[:]
call[name[params]][constant[collections]] assign[=] name[collections]
if name[columns] begin[:]
call[name[params]][constant[columns]] assign[=] name[columns]
if name[filters] begin[:]
call[name[params]][constant[filters]] assign[=] name[filters]
if name[sortspec] begin[:]
call[name[params]][constant[sortspec]] assign[=] call[name[json].dumps, parameter[list[[<ast.Name object at 0x7da1b0143430>]]]]
if name[samplespec] begin[:]
call[name[params]][constant[samplespec]] assign[=] call[name[int], parameter[name[samplespec]]]
if name[limitspec] begin[:]
call[name[params]][constant[limitspec]] assign[=] call[name[int], parameter[name[limitspec]]]
call[name[params]][constant[visibility]] assign[=] name[result_visibility]
call[name[params]][constant[emailwhendone]] assign[=] name[email_when_done]
if name[email_when_done] begin[:]
variable[download_data] assign[=] constant[False]
<ast.Tuple object at 0x7da20c6aa830> assign[=] call[name[check_existing_apikey], parameter[name[lcc_server]]]
if <ast.UnaryOp object at 0x7da20c6a9660> begin[:]
<ast.Tuple object at 0x7da20c6a8160> assign[=] call[name[get_new_apikey], parameter[name[lcc_server]]]
variable[api_url] assign[=] binary_operation[constant[%s/api/conesearch] <ast.Mod object at 0x7da2590d6920> name[lcc_server]]
variable[searchresult] assign[=] call[name[submit_post_searchquery], parameter[name[api_url], name[params], name[apikey]]]
variable[status] assign[=] call[name[searchresult]][constant[0]]
if name[download_data] begin[:]
if compare[name[status] equal[==] constant[ok]] begin[:]
call[name[LOGINFO], parameter[constant[query complete, downloading associated data...]]]
<ast.Tuple object at 0x7da20c6a83d0> assign[=] call[name[retrieve_dataset_files], parameter[name[searchresult]]]
if name[pkl] begin[:]
return[tuple[[<ast.Subscript object at 0x7da1b01c6860>, <ast.Name object at 0x7da1b01c5360>, <ast.Name object at 0x7da1b01c5930>, <ast.Name object at 0x7da1b01c47f0>]]] | keyword[def] identifier[cone_search] ( identifier[lcc_server] ,
identifier[center_ra] ,
identifier[center_decl] ,
identifier[radiusarcmin] = literal[int] ,
identifier[result_visibility] = literal[string] ,
identifier[email_when_done] = keyword[False] ,
identifier[collections] = keyword[None] ,
identifier[columns] = keyword[None] ,
identifier[filters] = keyword[None] ,
identifier[sortspec] = keyword[None] ,
identifier[samplespec] = keyword[None] ,
identifier[limitspec] = keyword[None] ,
identifier[download_data] = keyword[True] ,
identifier[outdir] = keyword[None] ,
identifier[maxtimeout] = literal[int] ,
identifier[refresh] = literal[int] ):
literal[string]
identifier[coords] = literal[string] %( identifier[center_ra] , identifier[center_decl] , identifier[radiusarcmin] )
identifier[params] ={
literal[string] : identifier[coords]
}
keyword[if] identifier[collections] :
identifier[params] [ literal[string] ]= identifier[collections]
keyword[if] identifier[columns] :
identifier[params] [ literal[string] ]= identifier[columns]
keyword[if] identifier[filters] :
identifier[params] [ literal[string] ]= identifier[filters]
keyword[if] identifier[sortspec] :
identifier[params] [ literal[string] ]= identifier[json] . identifier[dumps] ([ identifier[sortspec] ])
keyword[if] identifier[samplespec] :
identifier[params] [ literal[string] ]= identifier[int] ( identifier[samplespec] )
keyword[if] identifier[limitspec] :
identifier[params] [ literal[string] ]= identifier[int] ( identifier[limitspec] )
identifier[params] [ literal[string] ]= identifier[result_visibility]
identifier[params] [ literal[string] ]= identifier[email_when_done]
keyword[if] identifier[email_when_done] :
identifier[download_data] = keyword[False]
identifier[have_apikey] , identifier[apikey] , identifier[expires] = identifier[check_existing_apikey] ( identifier[lcc_server] )
keyword[if] keyword[not] identifier[have_apikey] :
identifier[apikey] , identifier[expires] = identifier[get_new_apikey] ( identifier[lcc_server] )
identifier[api_url] = literal[string] % identifier[lcc_server]
identifier[searchresult] = identifier[submit_post_searchquery] ( identifier[api_url] , identifier[params] , identifier[apikey] )
identifier[status] = identifier[searchresult] [ literal[int] ]
keyword[if] identifier[download_data] :
keyword[if] identifier[status] == literal[string] :
identifier[LOGINFO] ( literal[string] )
identifier[csv] , identifier[lczip] , identifier[pkl] = identifier[retrieve_dataset_files] ( identifier[searchresult] ,
identifier[outdir] = identifier[outdir] ,
identifier[apikey] = identifier[apikey] )
keyword[if] identifier[pkl] :
keyword[return] identifier[searchresult] [ literal[int] ], identifier[csv] , identifier[lczip] , identifier[pkl]
keyword[else] :
keyword[return] identifier[searchresult] [ literal[int] ], identifier[csv] , identifier[lczip]
keyword[elif] identifier[status] == literal[string] :
identifier[LOGINFO] ( literal[string]
literal[string]
literal[string] %
( identifier[maxtimeout] / literal[int] , identifier[refresh] ))
identifier[timewaited] = literal[int]
keyword[while] identifier[timewaited] < identifier[maxtimeout] :
keyword[try] :
identifier[time] . identifier[sleep] ( identifier[refresh] )
identifier[csv] , identifier[lczip] , identifier[pkl] = identifier[retrieve_dataset_files] ( identifier[searchresult] ,
identifier[outdir] = identifier[outdir] ,
identifier[apikey] = identifier[apikey] )
keyword[if] ( identifier[csv] keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[csv] ) keyword[and]
identifier[lczip] keyword[and] identifier[os] . identifier[path] . identifier[exists] ( identifier[lczip] )):
identifier[LOGINFO] ( literal[string] )
keyword[return] identifier[searchresult] [ literal[int] ], identifier[csv] , identifier[lczip]
identifier[timewaited] = identifier[timewaited] + identifier[refresh]
keyword[except] identifier[KeyboardInterrupt] :
identifier[LOGWARNING] ( literal[string] )
keyword[return] identifier[searchresult] [ literal[int] ], keyword[None] , keyword[None]
identifier[LOGERROR] ( literal[string] )
keyword[return] identifier[searchresult] [ literal[int] ], keyword[None] , keyword[None]
keyword[else] :
identifier[LOGERROR] ( literal[string] )
keyword[return] identifier[searchresult] [ literal[int] ], keyword[None] , keyword[None]
keyword[else] :
keyword[return] identifier[searchresult] [ literal[int] ], keyword[None] , keyword[None] | def cone_search(lcc_server, center_ra, center_decl, radiusarcmin=5.0, result_visibility='unlisted', email_when_done=False, collections=None, columns=None, filters=None, sortspec=None, samplespec=None, limitspec=None, download_data=True, outdir=None, maxtimeout=300.0, refresh=15.0):
"""This runs a cone-search query.
Parameters
----------
lcc_server : str
This is the base URL of the LCC-Server to talk to. (e.g. for HAT, use:
https://data.hatsurveys.org)
center_ra,center_decl : float
These are the central coordinates of the search to conduct. These can be
either decimal degrees of type float, or sexagesimal coordinates of type
str:
- OK: 290.0, 45.0
- OK: 15:00:00 +45:00:00
- OK: 15 00 00.0 -45 00 00.0
- NOT OK: 290.0 +45:00:00
- NOT OK: 15:00:00 45.0
radiusarcmin : float
This is the search radius to use for the cone-search. This is in
arcminutes. The maximum radius you can use is 60 arcminutes = 1 degree.
result_visibility : {'private', 'unlisted', 'public'}
This sets the visibility of the dataset produced from the search
result::
'private' -> the dataset and its products are not visible or
accessible by any user other than the one that
created the dataset.
'unlisted' -> the dataset and its products are not visible in the
list of public datasets, but can be accessed if the
dataset URL is known
'public' -> the dataset and its products are visible in the list
of public datasets and can be accessed by anyone.
email_when_done : bool
If True, the LCC-Server will email you when the search is complete. This
will also set `download_data` to False. Using this requires an
LCC-Server account and an API key tied to that account.
collections : list of str or None
This is a list of LC collections to search in. If this is None, all
collections will be searched.
columns : list of str or None
This is a list of columns to return in the results. Matching objects'
object IDs, RAs, DECs, and links to light curve files will always be
returned so there is no need to specify these columns. If None, only
these columns will be returned: 'objectid', 'ra', 'decl', 'lcfname'
filters : str or None
This is an SQL-like string to use to filter on database columns in the
LCC-Server's collections. To see the columns available for a search,
visit the Collections tab in the LCC-Server's browser UI. The filter
operators allowed are::
lt -> less than
gt -> greater than
ge -> greater than or equal to
le -> less than or equal to
eq -> equal to
ne -> not equal to
ct -> contains text
isnull -> column value is null
notnull -> column value is not null
You may use the `and` and `or` operators between filter specifications
to chain them together logically.
Example filter strings::
"(propermotion gt 200.0) and (sdssr lt 11.0)"
"(dered_jmag_kmag gt 2.0) and (aep_000_stetsonj gt 10.0)"
"(gaia_status ct 'ok') and (propermotion gt 300.0)"
"(simbad_best_objtype ct 'RR') and (dered_sdssu_sdssg lt 0.5)"
sortspec : tuple of two strs or None
If not None, this should be a tuple of two items::
('column to sort by', 'asc|desc')
This sets the column to sort the results by. For cone_search, the
default column and sort order are 'dist_arcsec' and 'asc', meaning the
distance from the search center in ascending order.
samplespec : int or None
If this is an int, will indicate how many rows from the initial search
result will be uniformly random sampled and returned.
limitspec : int or None
If this is an int, will indicate how many rows from the initial search
result to return in total.
`sortspec`, `samplespec`, and `limitspec` are applied in this order:
sample -> sort -> limit
download_data : bool
This sets if the accompanying data from the search results will be
downloaded automatically. This includes the data table CSV, the dataset
pickle file, and a light curve ZIP file. Note that if the search service
indicates that your query is still in progress, this function will block
until the light curve ZIP file becomes available. The maximum wait time
in seconds is set by maxtimeout and the refresh interval is set by
refresh.
To avoid the wait block, set download_data to False and the function
will write a pickle file to `~/.astrobase/lccs/query-[setid].pkl`
containing all the information necessary to retrieve these data files
later when the query is done. To do so, call the
`retrieve_dataset_files` with the path to this pickle file (it will be
returned).
outdir : str or None
If this is provided, sets the output directory of the downloaded dataset
files. If None, they will be downloaded to the current directory.
maxtimeout : float
The maximum time in seconds to wait for the LCC-Server to respond with a
result before timing out. You can use the `retrieve_dataset_files`
function to get results later as needed.
refresh : float
The time to wait in seconds before pinging the LCC-Server to see if a
search query has completed and dataset result files can be downloaded.
Returns
-------
tuple
Returns a tuple with the following elements::
(search result status dict,
search result CSV file path,
search result LC ZIP path)
"""
# turn the input into a param dict
coords = '%.5f %.5f %.1f' % (center_ra, center_decl, radiusarcmin)
params = {'coords': coords}
if collections:
params['collections'] = collections # depends on [control=['if'], data=[]]
if columns:
params['columns'] = columns # depends on [control=['if'], data=[]]
if filters:
params['filters'] = filters # depends on [control=['if'], data=[]]
if sortspec:
params['sortspec'] = json.dumps([sortspec]) # depends on [control=['if'], data=[]]
if samplespec:
params['samplespec'] = int(samplespec) # depends on [control=['if'], data=[]]
if limitspec:
params['limitspec'] = int(limitspec) # depends on [control=['if'], data=[]]
params['visibility'] = result_visibility
params['emailwhendone'] = email_when_done
# we won't wait for the LC ZIP to complete if email_when_done = True
if email_when_done:
download_data = False # depends on [control=['if'], data=[]]
# check if we have an API key already
(have_apikey, apikey, expires) = check_existing_apikey(lcc_server)
# if not, get a new one
if not have_apikey:
(apikey, expires) = get_new_apikey(lcc_server) # depends on [control=['if'], data=[]]
# hit the server
api_url = '%s/api/conesearch' % lcc_server
searchresult = submit_post_searchquery(api_url, params, apikey)
# check the status of the search
status = searchresult[0]
# now we'll check if we want to download the data
if download_data:
if status == 'ok':
LOGINFO('query complete, downloading associated data...')
(csv, lczip, pkl) = retrieve_dataset_files(searchresult, outdir=outdir, apikey=apikey)
if pkl:
return (searchresult[1], csv, lczip, pkl) # depends on [control=['if'], data=[]]
else:
return (searchresult[1], csv, lczip) # depends on [control=['if'], data=[]]
elif status == 'background':
LOGINFO('query is not yet complete, waiting up to %.1f minutes, updates every %s seconds (hit Ctrl+C to cancel)...' % (maxtimeout / 60.0, refresh))
timewaited = 0.0
while timewaited < maxtimeout:
try:
time.sleep(refresh)
(csv, lczip, pkl) = retrieve_dataset_files(searchresult, outdir=outdir, apikey=apikey)
if csv and os.path.exists(csv) and lczip and os.path.exists(lczip):
LOGINFO('all dataset products collected')
return (searchresult[1], csv, lczip) # depends on [control=['if'], data=[]]
timewaited = timewaited + refresh # depends on [control=['try'], data=[]]
except KeyboardInterrupt:
LOGWARNING('abandoned wait for downloading data')
return (searchresult[1], None, None) # depends on [control=['except'], data=[]] # depends on [control=['while'], data=['timewaited']]
LOGERROR('wait timed out.')
return (searchresult[1], None, None) # depends on [control=['if'], data=[]]
else:
LOGERROR('could not download the data for this query result')
return (searchresult[1], None, None) # depends on [control=['if'], data=[]]
else:
return (searchresult[1], None, None) |
def get_full_angles(self):
"""Get the interpolated lons/lats.
"""
if (self.sun_azi is not None and self.sun_zen is not None and
self.sat_azi is not None and self.sat_zen is not None):
return self.sun_azi, self.sun_zen, self.sat_azi, self.sat_zen
self.sun_azi, self.sun_zen, self.sat_azi, self.sat_zen = self._get_full_angles()
self.sun_azi = da.from_delayed(self.sun_azi, dtype=self["ANGULAR_RELATIONS"].dtype,
shape=(self.scanlines, self.pixels))
self.sun_zen = da.from_delayed(self.sun_zen, dtype=self["ANGULAR_RELATIONS"].dtype,
shape=(self.scanlines, self.pixels))
self.sat_azi = da.from_delayed(self.sat_azi, dtype=self["ANGULAR_RELATIONS"].dtype,
shape=(self.scanlines, self.pixels))
self.sat_zen = da.from_delayed(self.sat_zen, dtype=self["ANGULAR_RELATIONS"].dtype,
shape=(self.scanlines, self.pixels))
return self.sun_azi, self.sun_zen, self.sat_azi, self.sat_zen | def function[get_full_angles, parameter[self]]:
constant[Get the interpolated lons/lats.
]
if <ast.BoolOp object at 0x7da18fe92c80> begin[:]
return[tuple[[<ast.Attribute object at 0x7da18fe93e20>, <ast.Attribute object at 0x7da18fe92050>, <ast.Attribute object at 0x7da18fe926b0>, <ast.Attribute object at 0x7da18fe93fa0>]]]
<ast.Tuple object at 0x7da18fe91000> assign[=] call[name[self]._get_full_angles, parameter[]]
name[self].sun_azi assign[=] call[name[da].from_delayed, parameter[name[self].sun_azi]]
name[self].sun_zen assign[=] call[name[da].from_delayed, parameter[name[self].sun_zen]]
name[self].sat_azi assign[=] call[name[da].from_delayed, parameter[name[self].sat_azi]]
name[self].sat_zen assign[=] call[name[da].from_delayed, parameter[name[self].sat_zen]]
return[tuple[[<ast.Attribute object at 0x7da1b1d8b3a0>, <ast.Attribute object at 0x7da1b1ddfc70>, <ast.Attribute object at 0x7da1b1ddc280>, <ast.Attribute object at 0x7da1b1dde8f0>]]] | keyword[def] identifier[get_full_angles] ( identifier[self] ):
literal[string]
keyword[if] ( identifier[self] . identifier[sun_azi] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[sun_zen] keyword[is] keyword[not] keyword[None] keyword[and]
identifier[self] . identifier[sat_azi] keyword[is] keyword[not] keyword[None] keyword[and] identifier[self] . identifier[sat_zen] keyword[is] keyword[not] keyword[None] ):
keyword[return] identifier[self] . identifier[sun_azi] , identifier[self] . identifier[sun_zen] , identifier[self] . identifier[sat_azi] , identifier[self] . identifier[sat_zen]
identifier[self] . identifier[sun_azi] , identifier[self] . identifier[sun_zen] , identifier[self] . identifier[sat_azi] , identifier[self] . identifier[sat_zen] = identifier[self] . identifier[_get_full_angles] ()
identifier[self] . identifier[sun_azi] = identifier[da] . identifier[from_delayed] ( identifier[self] . identifier[sun_azi] , identifier[dtype] = identifier[self] [ literal[string] ]. identifier[dtype] ,
identifier[shape] =( identifier[self] . identifier[scanlines] , identifier[self] . identifier[pixels] ))
identifier[self] . identifier[sun_zen] = identifier[da] . identifier[from_delayed] ( identifier[self] . identifier[sun_zen] , identifier[dtype] = identifier[self] [ literal[string] ]. identifier[dtype] ,
identifier[shape] =( identifier[self] . identifier[scanlines] , identifier[self] . identifier[pixels] ))
identifier[self] . identifier[sat_azi] = identifier[da] . identifier[from_delayed] ( identifier[self] . identifier[sat_azi] , identifier[dtype] = identifier[self] [ literal[string] ]. identifier[dtype] ,
identifier[shape] =( identifier[self] . identifier[scanlines] , identifier[self] . identifier[pixels] ))
identifier[self] . identifier[sat_zen] = identifier[da] . identifier[from_delayed] ( identifier[self] . identifier[sat_zen] , identifier[dtype] = identifier[self] [ literal[string] ]. identifier[dtype] ,
identifier[shape] =( identifier[self] . identifier[scanlines] , identifier[self] . identifier[pixels] ))
keyword[return] identifier[self] . identifier[sun_azi] , identifier[self] . identifier[sun_zen] , identifier[self] . identifier[sat_azi] , identifier[self] . identifier[sat_zen] | def get_full_angles(self):
"""Get the interpolated lons/lats.
"""
if self.sun_azi is not None and self.sun_zen is not None and (self.sat_azi is not None) and (self.sat_zen is not None):
return (self.sun_azi, self.sun_zen, self.sat_azi, self.sat_zen) # depends on [control=['if'], data=[]]
(self.sun_azi, self.sun_zen, self.sat_azi, self.sat_zen) = self._get_full_angles()
self.sun_azi = da.from_delayed(self.sun_azi, dtype=self['ANGULAR_RELATIONS'].dtype, shape=(self.scanlines, self.pixels))
self.sun_zen = da.from_delayed(self.sun_zen, dtype=self['ANGULAR_RELATIONS'].dtype, shape=(self.scanlines, self.pixels))
self.sat_azi = da.from_delayed(self.sat_azi, dtype=self['ANGULAR_RELATIONS'].dtype, shape=(self.scanlines, self.pixels))
self.sat_zen = da.from_delayed(self.sat_zen, dtype=self['ANGULAR_RELATIONS'].dtype, shape=(self.scanlines, self.pixels))
return (self.sun_azi, self.sun_zen, self.sat_azi, self.sat_zen) |
def do(self, changes, task_handle=taskhandle.NullTaskHandle()):
"""Perform the change and add it to the `self.undo_list`
Note that uninteresting changes (changes to ignored files)
will not be appended to `self.undo_list`.
"""
try:
self.current_change = changes
changes.do(change.create_job_set(task_handle, changes))
finally:
self.current_change = None
if self._is_change_interesting(changes):
self.undo_list.append(changes)
self._remove_extra_items()
del self.redo_list[:] | def function[do, parameter[self, changes, task_handle]]:
constant[Perform the change and add it to the `self.undo_list`
Note that uninteresting changes (changes to ignored files)
will not be appended to `self.undo_list`.
]
<ast.Try object at 0x7da1b065d7b0>
if call[name[self]._is_change_interesting, parameter[name[changes]]] begin[:]
call[name[self].undo_list.append, parameter[name[changes]]]
call[name[self]._remove_extra_items, parameter[]]
<ast.Delete object at 0x7da207f023e0> | keyword[def] identifier[do] ( identifier[self] , identifier[changes] , identifier[task_handle] = identifier[taskhandle] . identifier[NullTaskHandle] ()):
literal[string]
keyword[try] :
identifier[self] . identifier[current_change] = identifier[changes]
identifier[changes] . identifier[do] ( identifier[change] . identifier[create_job_set] ( identifier[task_handle] , identifier[changes] ))
keyword[finally] :
identifier[self] . identifier[current_change] = keyword[None]
keyword[if] identifier[self] . identifier[_is_change_interesting] ( identifier[changes] ):
identifier[self] . identifier[undo_list] . identifier[append] ( identifier[changes] )
identifier[self] . identifier[_remove_extra_items] ()
keyword[del] identifier[self] . identifier[redo_list] [:] | def do(self, changes, task_handle=taskhandle.NullTaskHandle()):
"""Perform the change and add it to the `self.undo_list`
Note that uninteresting changes (changes to ignored files)
will not be appended to `self.undo_list`.
"""
try:
self.current_change = changes
changes.do(change.create_job_set(task_handle, changes)) # depends on [control=['try'], data=[]]
finally:
self.current_change = None
if self._is_change_interesting(changes):
self.undo_list.append(changes)
self._remove_extra_items() # depends on [control=['if'], data=[]]
del self.redo_list[:] |
def meta(r):
    """Convert 60 character string `r`, the metadata from an image file.

    Returns a 5-element list ``[chan, minx, miny, limx, limy]`` where
    *chan* is the channel descriptor string and the remaining four
    values are ints.

    As per http://plan9.bell-labs.com/magic/man2html/6/image the metadata
    comprises 5 words separated by blanks.  As it happens each word starts
    at an index that is a multiple of 12, but this routine does not care
    about that.
    """
    fields = r.split()
    # :todo: raise FormatError
    assert len(fields) == 5
    # NOTE: the previous implementation did ``[fields[0]] + map(int, ...)``,
    # which raises TypeError on Python 3 because map() returns an iterator.
    return [fields[0]] + [int(word) for word in fields[1:]]
constant[Convert 60 character string `r`, the metadata from an image file.
Returns a 5-tuple (*chan*,*minx*,*miny*,*limx*,*limy*). 5-tuples may
settle into lists in transit.
As per http://plan9.bell-labs.com/magic/man2html/6/image the metadata
comprises 5 words separated by blanks. As it happens each word starts
at an index that is a multiple of 12, but this routine does not care
about that.]
variable[r] assign[=] call[name[r].split, parameter[]]
assert[compare[call[name[len], parameter[name[r]]] equal[==] constant[5]]]
variable[r] assign[=] binary_operation[list[[<ast.Subscript object at 0x7da1b072e3b0>]] + call[name[map], parameter[name[int], call[name[r]][<ast.Slice object at 0x7da1b072e7a0>]]]]
return[name[r]] | keyword[def] identifier[meta] ( identifier[r] ):
literal[string]
identifier[r] = identifier[r] . identifier[split] ()
keyword[assert] identifier[len] ( identifier[r] )== literal[int]
identifier[r] =[ identifier[r] [ literal[int] ]]+ identifier[map] ( identifier[int] , identifier[r] [ literal[int] :])
keyword[return] identifier[r] | def meta(r):
"""Convert 60 character string `r`, the metadata from an image file.
Returns a 5-tuple (*chan*,*minx*,*miny*,*limx*,*limy*). 5-tuples may
settle into lists in transit.
As per http://plan9.bell-labs.com/magic/man2html/6/image the metadata
comprises 5 words separated by blanks. As it happens each word starts
at an index that is a multiple of 12, but this routine does not care
about that."""
r = r.split()
# :todo: raise FormatError
assert len(r) == 5
r = [r[0]] + map(int, r[1:])
return r |
def update_disk(self, disk_name, has_operating_system=None, label=None, media_link=None,
                name=None, os=None):
    '''
    Updates an existing disk in your image repository.
    disk_name:
        The name of the disk to update.
    label:
        Specifies the description of the disk.
    has_operating_system, media_link, name, os:
        Deprecated; accepted for backward compatibility but never used.
    '''
    _validate_not_none('disk_name', disk_name)
    _validate_not_none('label', label)
    request_path = self._get_disk_path(disk_name)
    # Only the label is serialized; the deprecated arguments are not sent.
    request_body = _XmlSerializer.disk_to_xml(label, None, None, None)
    return self._perform_put(request_path, request_body)
constant[
Updates an existing disk in your image repository.
disk_name:
The name of the disk to update.
has_operating_system:
Deprecated.
label:
Specifies the description of the disk.
media_link:
Deprecated.
name:
Deprecated.
os:
Deprecated.
]
call[name[_validate_not_none], parameter[constant[disk_name], name[disk_name]]]
call[name[_validate_not_none], parameter[constant[label], name[label]]]
return[call[name[self]._perform_put, parameter[call[name[self]._get_disk_path, parameter[name[disk_name]]], call[name[_XmlSerializer].disk_to_xml, parameter[name[label], constant[None], constant[None], constant[None]]]]]] | keyword[def] identifier[update_disk] ( identifier[self] , identifier[disk_name] , identifier[has_operating_system] = keyword[None] , identifier[label] = keyword[None] , identifier[media_link] = keyword[None] ,
identifier[name] = keyword[None] , identifier[os] = keyword[None] ):
literal[string]
identifier[_validate_not_none] ( literal[string] , identifier[disk_name] )
identifier[_validate_not_none] ( literal[string] , identifier[label] )
keyword[return] identifier[self] . identifier[_perform_put] ( identifier[self] . identifier[_get_disk_path] ( identifier[disk_name] ),
identifier[_XmlSerializer] . identifier[disk_to_xml] (
identifier[label] ,
keyword[None] ,
keyword[None] ,
keyword[None] )) | def update_disk(self, disk_name, has_operating_system=None, label=None, media_link=None, name=None, os=None):
"""
Updates an existing disk in your image repository.
disk_name:
The name of the disk to update.
has_operating_system:
Deprecated.
label:
Specifies the description of the disk.
media_link:
Deprecated.
name:
Deprecated.
os:
Deprecated.
"""
_validate_not_none('disk_name', disk_name)
_validate_not_none('label', label)
return self._perform_put(self._get_disk_path(disk_name), _XmlSerializer.disk_to_xml(label, None, None, None)) |
def addText(self, text, width=None):
    """
    Adds a simple text item to this group.
    :param text | <str>
        html markup for the new item
    :param width | <float> || None
        optional fixed text width for the new item
    :return <QtGui.QGraphicsTextItem>
    """
    text_item = QtGui.QGraphicsTextItem()
    # Force a consistent 12pt Arial face regardless of application defaults.
    text_font = text_item.font()
    text_font.setFamily('Arial')
    text_font.setPointSize(12)
    text_item.setFont(text_font)
    text_item.setHtml(text)
    text_item.setDefaultTextColor(QtGui.QColor('white'))
    self.addToGroup(text_item)
    # NOTE(review): assumes addToGroup installed a graphics effect on the
    # item; graphicsEffect() would return None otherwise — confirm.
    text_item.graphicsEffect().setBlurRadius(8)
    if width:
        text_item.setTextWidth(width)
    return text_item
constant[
Adds a simple text item to this group.
:param text | <str>
maximumWidth | <float> || None
maximumHeight | <float> || None
]
variable[item] assign[=] call[name[QtGui].QGraphicsTextItem, parameter[]]
variable[font] assign[=] call[name[item].font, parameter[]]
call[name[font].setFamily, parameter[constant[Arial]]]
call[name[font].setPointSize, parameter[constant[12]]]
call[name[item].setFont, parameter[name[font]]]
call[name[item].setHtml, parameter[name[text]]]
call[name[item].setDefaultTextColor, parameter[call[name[QtGui].QColor, parameter[constant[white]]]]]
call[name[self].addToGroup, parameter[name[item]]]
call[call[name[item].graphicsEffect, parameter[]].setBlurRadius, parameter[constant[8]]]
if name[width] begin[:]
call[name[item].setTextWidth, parameter[name[width]]]
return[name[item]] | keyword[def] identifier[addText] ( identifier[self] , identifier[text] , identifier[width] = keyword[None] ):
literal[string]
identifier[item] = identifier[QtGui] . identifier[QGraphicsTextItem] ()
identifier[font] = identifier[item] . identifier[font] ()
identifier[font] . identifier[setFamily] ( literal[string] )
identifier[font] . identifier[setPointSize] ( literal[int] )
identifier[item] . identifier[setFont] ( identifier[font] )
identifier[item] . identifier[setHtml] ( identifier[text] )
identifier[item] . identifier[setDefaultTextColor] ( identifier[QtGui] . identifier[QColor] ( literal[string] ))
identifier[self] . identifier[addToGroup] ( identifier[item] )
identifier[item] . identifier[graphicsEffect] (). identifier[setBlurRadius] ( literal[int] )
keyword[if] identifier[width] :
identifier[item] . identifier[setTextWidth] ( identifier[width] )
keyword[return] identifier[item] | def addText(self, text, width=None):
"""
Adds a simple text item to this group.
:param text | <str>
maximumWidth | <float> || None
maximumHeight | <float> || None
"""
item = QtGui.QGraphicsTextItem()
font = item.font()
font.setFamily('Arial')
font.setPointSize(12)
item.setFont(font)
item.setHtml(text)
item.setDefaultTextColor(QtGui.QColor('white'))
self.addToGroup(item)
item.graphicsEffect().setBlurRadius(8)
if width:
item.setTextWidth(width) # depends on [control=['if'], data=[]]
return item |
def set_row_height(self, n=0, height=18):
    """
    Sets the n'th row height in pixels.
    Returns self so calls can be chained.
    """
    widget = self._widget
    widget.setRowHeight(n, height)
    return self
constant[
Sets the n'th row height in pixels.
]
call[name[self]._widget.setRowHeight, parameter[name[n], name[height]]]
return[name[self]] | keyword[def] identifier[set_row_height] ( identifier[self] , identifier[n] = literal[int] , identifier[height] = literal[int] ):
literal[string]
identifier[self] . identifier[_widget] . identifier[setRowHeight] ( identifier[n] , identifier[height] )
keyword[return] identifier[self] | def set_row_height(self, n=0, height=18):
"""
Sets the n'th row height in pixels.
"""
self._widget.setRowHeight(n, height)
return self |
def _generate(self):
    """Parses a file or directory of files into a set of ``Document`` objects."""
    # Lazily flatten the docs of every file into one stream.
    stream = (doc for path in self.all_files
              for doc in self._get_docs_for_path(path))
    produced = 0
    for document in stream:
        # Yield first, then check the cap: the original contract emits the
        # document before comparing the count against max_docs.
        yield document
        produced += 1
        if produced >= self.max_docs:
            return
return | def function[_generate, parameter[self]]:
constant[Parses a file or directory of files into a set of ``Document`` objects.]
variable[doc_count] assign[=] constant[0]
for taget[name[fp]] in starred[name[self].all_files] begin[:]
for taget[name[doc]] in starred[call[name[self]._get_docs_for_path, parameter[name[fp]]]] begin[:]
<ast.Yield object at 0x7da18f720280>
<ast.AugAssign object at 0x7da18f721240>
if compare[name[doc_count] greater_or_equal[>=] name[self].max_docs] begin[:]
return[None] | keyword[def] identifier[_generate] ( identifier[self] ):
literal[string]
identifier[doc_count] = literal[int]
keyword[for] identifier[fp] keyword[in] identifier[self] . identifier[all_files] :
keyword[for] identifier[doc] keyword[in] identifier[self] . identifier[_get_docs_for_path] ( identifier[fp] ):
keyword[yield] identifier[doc]
identifier[doc_count] += literal[int]
keyword[if] identifier[doc_count] >= identifier[self] . identifier[max_docs] :
keyword[return] | def _generate(self):
"""Parses a file or directory of files into a set of ``Document`` objects."""
doc_count = 0
for fp in self.all_files:
for doc in self._get_docs_for_path(fp):
yield doc
doc_count += 1
if doc_count >= self.max_docs:
return # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['doc']] # depends on [control=['for'], data=['fp']] |
def data_tree_map(func, data_tree):
    """
    Map func to every ABITypedData element in the tree. func will
    receive two args: abi_type, and data
    """
    def apply_if_typed(node):
        # Untyped nodes (or typed nodes without an abi_type) pass through.
        is_typed = isinstance(node, ABITypedData) and node.abi_type is not None
        return ABITypedData(func(*node)) if is_typed else node
    return recursive_map(apply_if_typed, data_tree)
constant[
Map func to every ABITypedData element in the tree. func will
receive two args: abi_type, and data
]
def function[map_to_typed_data, parameter[elements]]:
if <ast.BoolOp object at 0x7da18c4cf460> begin[:]
return[call[name[ABITypedData], parameter[call[name[func], parameter[<ast.Starred object at 0x7da18c4cc100>]]]]]
return[call[name[recursive_map], parameter[name[map_to_typed_data], name[data_tree]]]] | keyword[def] identifier[data_tree_map] ( identifier[func] , identifier[data_tree] ):
literal[string]
keyword[def] identifier[map_to_typed_data] ( identifier[elements] ):
keyword[if] identifier[isinstance] ( identifier[elements] , identifier[ABITypedData] ) keyword[and] identifier[elements] . identifier[abi_type] keyword[is] keyword[not] keyword[None] :
keyword[return] identifier[ABITypedData] ( identifier[func] (* identifier[elements] ))
keyword[else] :
keyword[return] identifier[elements]
keyword[return] identifier[recursive_map] ( identifier[map_to_typed_data] , identifier[data_tree] ) | def data_tree_map(func, data_tree):
"""
Map func to every ABITypedData element in the tree. func will
receive two args: abi_type, and data
"""
def map_to_typed_data(elements):
if isinstance(elements, ABITypedData) and elements.abi_type is not None:
return ABITypedData(func(*elements)) # depends on [control=['if'], data=[]]
else:
return elements
return recursive_map(map_to_typed_data, data_tree) |
def convert(csv, json, **kwargs):
    '''Convert csv to json.
    csv: filename or file-like object
    json: filename or file-like object
    if csv is '-' or None:
        stdin is used for input
    if json is '-' or None:
        stdout is used for output
    '''
    # NOTE(review): the parameter names shadow the stdlib csv/json modules;
    # kept for interface compatibility.
    # Only files opened here (by name) must be closed on exit.
    opened_source = None
    opened_sink = None
    try:
        if csv == '-' or csv is None:
            csv = sys.stdin
        elif isinstance(csv, str):
            opened_source = open(csv, 'r')
            csv = opened_source
        if json == '-' or json is None:
            json = sys.stdout
        elif isinstance(json, str):
            opened_sink = open(json, 'w')
            json = opened_sink
        save_json(load_csv(csv, **kwargs), json, **kwargs)
    finally:
        if opened_source is not None:
            opened_source.close()
        if opened_sink is not None:
            opened_sink.close()
constant[Convert csv to json.
csv: filename or file-like object
json: filename or file-like object
if csv is '-' or None:
stdin is used for input
if json is '-' or None:
stdout is used for output
]
<ast.Tuple object at 0x7da1b0f9d600> assign[=] tuple[[<ast.Constant object at 0x7da1b0f9dde0>, <ast.Constant object at 0x7da1b0f9d690>]]
<ast.Try object at 0x7da1b0f9dbd0> | keyword[def] identifier[convert] ( identifier[csv] , identifier[json] ,** identifier[kwargs] ):
literal[string]
identifier[csv_local] , identifier[json_local] = keyword[None] , keyword[None]
keyword[try] :
keyword[if] identifier[csv] == literal[string] keyword[or] identifier[csv] keyword[is] keyword[None] :
identifier[csv] = identifier[sys] . identifier[stdin]
keyword[elif] identifier[isinstance] ( identifier[csv] , identifier[str] ):
identifier[csv] = identifier[csv_local] = identifier[open] ( identifier[csv] , literal[string] )
keyword[if] identifier[json] == literal[string] keyword[or] identifier[json] keyword[is] keyword[None] :
identifier[json] = identifier[sys] . identifier[stdout]
keyword[elif] identifier[isinstance] ( identifier[json] , identifier[str] ):
identifier[json] = identifier[json_local] = identifier[open] ( identifier[json] , literal[string] )
identifier[data] = identifier[load_csv] ( identifier[csv] ,** identifier[kwargs] )
identifier[save_json] ( identifier[data] , identifier[json] ,** identifier[kwargs] )
keyword[finally] :
keyword[if] identifier[csv_local] keyword[is] keyword[not] keyword[None] :
identifier[csv_local] . identifier[close] ()
keyword[if] identifier[json_local] keyword[is] keyword[not] keyword[None] :
identifier[json_local] . identifier[close] () | def convert(csv, json, **kwargs):
"""Convert csv to json.
csv: filename or file-like object
json: filename or file-like object
if csv is '-' or None:
stdin is used for input
if json is '-' or None:
stdout is used for output
"""
(csv_local, json_local) = (None, None)
try:
if csv == '-' or csv is None:
csv = sys.stdin # depends on [control=['if'], data=[]]
elif isinstance(csv, str):
csv = csv_local = open(csv, 'r') # depends on [control=['if'], data=[]]
if json == '-' or json is None:
json = sys.stdout # depends on [control=['if'], data=[]]
elif isinstance(json, str):
json = json_local = open(json, 'w') # depends on [control=['if'], data=[]]
data = load_csv(csv, **kwargs)
save_json(data, json, **kwargs) # depends on [control=['try'], data=[]]
finally:
if csv_local is not None:
csv_local.close() # depends on [control=['if'], data=['csv_local']]
if json_local is not None:
json_local.close() # depends on [control=['if'], data=['json_local']] |
def findAllExceptions(pathToCheck):
    """
    Find patterns of exceptions in a file or folder.
    @param pathToCheck: path to a Python file, or a directory to walk for
        ``.py`` files
    @return: patterns of special functions and classes
    """
    finder = PatternFinder()
    if os.path.isfile(pathToCheck):
        with open(pathToCheck) as source:
            findPatternsInFile(source.read(), finder)
    else:
        for dirpath, _dirnames, filenames in os.walk(pathToCheck):
            for filename in filenames:
                # Only Python sources are scanned.
                if os.path.splitext(filename)[1] == ".py":
                    with open(os.path.join(dirpath, filename)) as source:
                        findPatternsInFile(source.read(), finder)
    return finder.patternsFunc, finder.patternsClass
constant[
Find patterns of exceptions in a file or folder.
@param patternFinder: a visitor for pattern checking and save results
@return: patterns of special functions and classes
]
variable[finder] assign[=] call[name[PatternFinder], parameter[]]
if call[name[os].path.isfile, parameter[name[pathToCheck]]] begin[:]
with call[name[open], parameter[name[pathToCheck]]] begin[:]
call[name[findPatternsInFile], parameter[call[name[f].read, parameter[]], name[finder]]]
return[tuple[[<ast.Attribute object at 0x7da18bccaf20>, <ast.Attribute object at 0x7da18bcc8490>]]] | keyword[def] identifier[findAllExceptions] ( identifier[pathToCheck] ):
literal[string]
identifier[finder] = identifier[PatternFinder] ()
keyword[if] identifier[os] . identifier[path] . identifier[isfile] ( identifier[pathToCheck] ):
keyword[with] identifier[open] ( identifier[pathToCheck] ) keyword[as] identifier[f] :
identifier[findPatternsInFile] ( identifier[f] . identifier[read] (), identifier[finder] )
keyword[else] :
keyword[for] identifier[path] , identifier[dirs] , identifier[files] keyword[in] identifier[os] . identifier[walk] ( identifier[pathToCheck] ):
keyword[for] identifier[file] keyword[in] identifier[files] :
identifier[_] , identifier[extname] = identifier[os] . identifier[path] . identifier[splitext] ( identifier[file] )
keyword[if] identifier[extname] == literal[string] :
identifier[pathFile] = identifier[os] . identifier[path] . identifier[join] ( identifier[path] , identifier[file] )
keyword[with] identifier[open] ( identifier[pathFile] ) keyword[as] identifier[f] :
identifier[findPatternsInFile] ( identifier[f] . identifier[read] (), identifier[finder] )
keyword[return] identifier[finder] . identifier[patternsFunc] , identifier[finder] . identifier[patternsClass] | def findAllExceptions(pathToCheck):
"""
Find patterns of exceptions in a file or folder.
@param patternFinder: a visitor for pattern checking and save results
@return: patterns of special functions and classes
"""
finder = PatternFinder()
if os.path.isfile(pathToCheck):
with open(pathToCheck) as f:
findPatternsInFile(f.read(), finder) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]]
else:
for (path, dirs, files) in os.walk(pathToCheck):
for file in files:
(_, extname) = os.path.splitext(file)
if extname == '.py':
pathFile = os.path.join(path, file)
with open(pathFile) as f:
findPatternsInFile(f.read(), finder) # depends on [control=['with'], data=['f']] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['file']] # depends on [control=['for'], data=[]]
return (finder.patternsFunc, finder.patternsClass) |
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
                           keyConverter=None, valueConverter=None, conf=None):
    """
    Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
    system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
    will be inferred if not specified. Keys and values are converted for output using either
    user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
    C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
    of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
    :param path: path to Hadoop file
    :param outputFormatClass: fully qualified classname of Hadoop OutputFormat
        (e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
    :param keyClass: fully qualified classname of key Writable class
        (e.g. "org.apache.hadoop.io.IntWritable", None by default)
    :param valueClass: fully qualified classname of value Writable class
        (e.g. "org.apache.hadoop.io.Text", None by default)
    :param keyConverter: (None by default)
    :param valueConverter: (None by default)
    :param conf: Hadoop job configuration, passed in as a dict (None by default)
    """
    # Translate the Python dict into a Java map, then hand the pickled RDD
    # to the JVM-side writer.
    merged_conf = self.ctx._dictToJavaMap(conf)
    pickled = self._pickled()
    self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(
        pickled._jrdd, True, path, outputFormatClass,
        keyClass, valueClass, keyConverter, valueConverter, merged_conf)
constant[
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop job configuration, passed in as a dict (None by default)
]
variable[jconf] assign[=] call[name[self].ctx._dictToJavaMap, parameter[name[conf]]]
variable[pickledRDD] assign[=] call[name[self]._pickled, parameter[]]
call[name[self].ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile, parameter[name[pickledRDD]._jrdd, constant[True], name[path], name[outputFormatClass], name[keyClass], name[valueClass], name[keyConverter], name[valueConverter], name[jconf]]] | keyword[def] identifier[saveAsNewAPIHadoopFile] ( identifier[self] , identifier[path] , identifier[outputFormatClass] , identifier[keyClass] = keyword[None] , identifier[valueClass] = keyword[None] ,
identifier[keyConverter] = keyword[None] , identifier[valueConverter] = keyword[None] , identifier[conf] = keyword[None] ):
literal[string]
identifier[jconf] = identifier[self] . identifier[ctx] . identifier[_dictToJavaMap] ( identifier[conf] )
identifier[pickledRDD] = identifier[self] . identifier[_pickled] ()
identifier[self] . identifier[ctx] . identifier[_jvm] . identifier[PythonRDD] . identifier[saveAsNewAPIHadoopFile] ( identifier[pickledRDD] . identifier[_jrdd] , keyword[True] , identifier[path] ,
identifier[outputFormatClass] ,
identifier[keyClass] , identifier[valueClass] ,
identifier[keyConverter] , identifier[valueConverter] , identifier[jconf] ) | def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None, keyConverter=None, valueConverter=None, conf=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop job configuration, passed in as a dict (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path, outputFormatClass, keyClass, valueClass, keyConverter, valueConverter, jconf) |
def getTypeName(data_type_oid, type_modifier):
    """Returns the base type name according to data_type_oid and type_modifier"""
    # Intervals are the only types whose display name depends on the
    # type modifier.
    if data_type_oid in (VerticaType.INTERVAL, VerticaType.INTERVALYM):
        return "Interval " + getIntervalRange(data_type_oid, type_modifier)
    # Every other OID maps straight to a fixed display name.
    names = {
        VerticaType.BOOL: "Boolean",
        VerticaType.INT8: "Integer",
        VerticaType.FLOAT8: "Float",
        VerticaType.CHAR: "Char",
        VerticaType.VARCHAR: "Varchar",
        VerticaType.UNKNOWN: "Varchar",
        VerticaType.LONGVARCHAR: "Long Varchar",
        VerticaType.DATE: "Date",
        VerticaType.TIME: "Time",
        VerticaType.TIMETZ: "TimeTz",
        VerticaType.TIMESTAMP: "Timestamp",
        VerticaType.TIMESTAMPTZ: "TimestampTz",
        VerticaType.BINARY: "Binary",
        VerticaType.VARBINARY: "Varbinary",
        VerticaType.LONGVARBINARY: "Long Varbinary",
        VerticaType.NUMERIC: "Numeric",
        VerticaType.UUID: "Uuid",
    }
    return names.get(data_type_oid, "Unknown")
constant[Returns the base type name according to data_type_oid and type_modifier]
if compare[name[data_type_oid] equal[==] name[VerticaType].BOOL] begin[:]
return[constant[Boolean]] | keyword[def] identifier[getTypeName] ( identifier[data_type_oid] , identifier[type_modifier] ):
literal[string]
keyword[if] identifier[data_type_oid] == identifier[VerticaType] . identifier[BOOL] :
keyword[return] literal[string]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[INT8] :
keyword[return] literal[string]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[FLOAT8] :
keyword[return] literal[string]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[CHAR] :
keyword[return] literal[string]
keyword[elif] identifier[data_type_oid] keyword[in] ( identifier[VerticaType] . identifier[VARCHAR] , identifier[VerticaType] . identifier[UNKNOWN] ):
keyword[return] literal[string]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[LONGVARCHAR] :
keyword[return] literal[string]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[DATE] :
keyword[return] literal[string]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[TIME] :
keyword[return] literal[string]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[TIMETZ] :
keyword[return] literal[string]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[TIMESTAMP] :
keyword[return] literal[string]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[TIMESTAMPTZ] :
keyword[return] literal[string]
keyword[elif] identifier[data_type_oid] keyword[in] ( identifier[VerticaType] . identifier[INTERVAL] , identifier[VerticaType] . identifier[INTERVALYM] ):
keyword[return] literal[string] + identifier[getIntervalRange] ( identifier[data_type_oid] , identifier[type_modifier] )
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[BINARY] :
keyword[return] literal[string]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[VARBINARY] :
keyword[return] literal[string]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[LONGVARBINARY] :
keyword[return] literal[string]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[NUMERIC] :
keyword[return] literal[string]
keyword[elif] identifier[data_type_oid] == identifier[VerticaType] . identifier[UUID] :
keyword[return] literal[string]
keyword[else] :
keyword[return] literal[string] | def getTypeName(data_type_oid, type_modifier):
"""Returns the base type name according to data_type_oid and type_modifier"""
if data_type_oid == VerticaType.BOOL:
return 'Boolean' # depends on [control=['if'], data=[]]
elif data_type_oid == VerticaType.INT8:
return 'Integer' # depends on [control=['if'], data=[]]
elif data_type_oid == VerticaType.FLOAT8:
return 'Float' # depends on [control=['if'], data=[]]
elif data_type_oid == VerticaType.CHAR:
return 'Char' # depends on [control=['if'], data=[]]
elif data_type_oid in (VerticaType.VARCHAR, VerticaType.UNKNOWN):
return 'Varchar' # depends on [control=['if'], data=[]]
elif data_type_oid == VerticaType.LONGVARCHAR:
return 'Long Varchar' # depends on [control=['if'], data=[]]
elif data_type_oid == VerticaType.DATE:
return 'Date' # depends on [control=['if'], data=[]]
elif data_type_oid == VerticaType.TIME:
return 'Time' # depends on [control=['if'], data=[]]
elif data_type_oid == VerticaType.TIMETZ:
return 'TimeTz' # depends on [control=['if'], data=[]]
elif data_type_oid == VerticaType.TIMESTAMP:
return 'Timestamp' # depends on [control=['if'], data=[]]
elif data_type_oid == VerticaType.TIMESTAMPTZ:
return 'TimestampTz' # depends on [control=['if'], data=[]]
elif data_type_oid in (VerticaType.INTERVAL, VerticaType.INTERVALYM):
return 'Interval ' + getIntervalRange(data_type_oid, type_modifier) # depends on [control=['if'], data=['data_type_oid']]
elif data_type_oid == VerticaType.BINARY:
return 'Binary' # depends on [control=['if'], data=[]]
elif data_type_oid == VerticaType.VARBINARY:
return 'Varbinary' # depends on [control=['if'], data=[]]
elif data_type_oid == VerticaType.LONGVARBINARY:
return 'Long Varbinary' # depends on [control=['if'], data=[]]
elif data_type_oid == VerticaType.NUMERIC:
return 'Numeric' # depends on [control=['if'], data=[]]
elif data_type_oid == VerticaType.UUID:
return 'Uuid' # depends on [control=['if'], data=[]]
else:
return 'Unknown' |
def scale_to_fit_a_in_b(a_shape, b_shape):
    '''
    Return scale factor (scalar float) to fit `a_shape` into `b_shape` while
    maintaining aspect ratio.
    Arguments
    ---------
    a_shape, b_shape : pandas.Series
        Input shapes containing numeric `width` and `height` values.
    Returns
    -------
    float
        Scale factor to fit :data:`a_shape` into :data:`b_shape` while
        maintaining aspect ratio.
    '''
    # Work on unit-normalized copies so the two shapes are comparable.
    fitted = a_shape / a_shape.max()
    target = b_shape / b_shape.max()
    # Shrink along any axis that overflows the target: width first, then
    # height, matching the original adjustment order.
    for axis in ('width', 'height'):
        if getattr(fitted, axis) > getattr(target, axis):
            fitted = fitted * (getattr(target, axis) / getattr(fitted, axis))
    return fitted.max() * b_shape.max() / a_shape.max()
constant[
Return scale factor (scalar float) to fit `a_shape` into `b_shape` while
maintaining aspect ratio.
Arguments
---------
a_shape, b_shape : pandas.Series
Input shapes containing numeric `width` and `height` values.
Returns
-------
float
Scale factor to fit :data:`a_shape` into :data:`b_shape` while
maintaining aspect ratio.
]
variable[a_shape_normal] assign[=] binary_operation[name[a_shape] / call[name[a_shape].max, parameter[]]]
variable[b_shape_normal] assign[=] binary_operation[name[b_shape] / call[name[b_shape].max, parameter[]]]
if compare[name[a_shape_normal].width greater[>] name[b_shape_normal].width] begin[:]
<ast.AugAssign object at 0x7da2046239d0>
if compare[name[a_shape_normal].height greater[>] name[b_shape_normal].height] begin[:]
<ast.AugAssign object at 0x7da20e9b21d0>
return[binary_operation[binary_operation[call[name[a_shape_normal].max, parameter[]] * call[name[b_shape].max, parameter[]]] / call[name[a_shape].max, parameter[]]]] | keyword[def] identifier[scale_to_fit_a_in_b] ( identifier[a_shape] , identifier[b_shape] ):
literal[string]
identifier[a_shape_normal] = identifier[a_shape] / identifier[a_shape] . identifier[max] ()
identifier[b_shape_normal] = identifier[b_shape] / identifier[b_shape] . identifier[max] ()
keyword[if] identifier[a_shape_normal] . identifier[width] > identifier[b_shape_normal] . identifier[width] :
identifier[a_shape_normal] *= identifier[b_shape_normal] . identifier[width] / identifier[a_shape_normal] . identifier[width]
keyword[if] identifier[a_shape_normal] . identifier[height] > identifier[b_shape_normal] . identifier[height] :
identifier[a_shape_normal] *= identifier[b_shape_normal] . identifier[height] / identifier[a_shape_normal] . identifier[height]
keyword[return] identifier[a_shape_normal] . identifier[max] ()* identifier[b_shape] . identifier[max] ()/ identifier[a_shape] . identifier[max] () | def scale_to_fit_a_in_b(a_shape, b_shape):
"""
Return scale factor (scalar float) to fit `a_shape` into `b_shape` while
maintaining aspect ratio.
Arguments
---------
a_shape, b_shape : pandas.Series
Input shapes containing numeric `width` and `height` values.
Returns
-------
float
Scale factor to fit :data:`a_shape` into :data:`b_shape` while
maintaining aspect ratio.
"""
# Normalize the shapes to allow comparison.
a_shape_normal = a_shape / a_shape.max()
b_shape_normal = b_shape / b_shape.max()
if a_shape_normal.width > b_shape_normal.width:
a_shape_normal *= b_shape_normal.width / a_shape_normal.width # depends on [control=['if'], data=[]]
if a_shape_normal.height > b_shape_normal.height:
a_shape_normal *= b_shape_normal.height / a_shape_normal.height # depends on [control=['if'], data=[]]
return a_shape_normal.max() * b_shape.max() / a_shape.max() |
def remember(self, request, username, **kw):
    """ Returns 'WWW-Authenticate' header with a value that should be used
    in 'Authorization' header.
    Returns None when no credentials callback is configured.
    """
    if not self.credentials_callback:
        return None
    token = self.credentials_callback(username, request)
    header_value = 'ApiKey {}:{}'.format(username, token)
    return [('WWW-Authenticate', header_value)]
constant[ Returns 'WWW-Authenticate' header with a value that should be used
in 'Authorization' header.
]
if name[self].credentials_callback begin[:]
variable[token] assign[=] call[name[self].credentials_callback, parameter[name[username], name[request]]]
variable[api_key] assign[=] call[constant[ApiKey {}:{}].format, parameter[name[username], name[token]]]
return[list[[<ast.Tuple object at 0x7da18f58e9b0>]]] | keyword[def] identifier[remember] ( identifier[self] , identifier[request] , identifier[username] ,** identifier[kw] ):
literal[string]
keyword[if] identifier[self] . identifier[credentials_callback] :
identifier[token] = identifier[self] . identifier[credentials_callback] ( identifier[username] , identifier[request] )
identifier[api_key] = literal[string] . identifier[format] ( identifier[username] , identifier[token] )
keyword[return] [( literal[string] , identifier[api_key] )] | def remember(self, request, username, **kw):
""" Returns 'WWW-Authenticate' header with a value that should be used
in 'Authorization' header.
"""
if self.credentials_callback:
token = self.credentials_callback(username, request)
api_key = 'ApiKey {}:{}'.format(username, token)
return [('WWW-Authenticate', api_key)] # depends on [control=['if'], data=[]] |
def _url_chunk_join(self, *args):
"""Join the arguments together to form a predictable URL chunk."""
# Strip slashes from either side of each path piece.
pathlets = map(lambda s: s.strip('/'), args)
# Remove empty pieces.
pathlets = filter(None, pathlets)
url = '/'.join(pathlets)
# If this is a directory, add a trailing slash.
if args[-1].endswith('/'):
url = '%s/' % url
return url | def function[_url_chunk_join, parameter[self]]:
constant[Join the arguments together to form a predictable URL chunk.]
variable[pathlets] assign[=] call[name[map], parameter[<ast.Lambda object at 0x7da1b1838790>, name[args]]]
variable[pathlets] assign[=] call[name[filter], parameter[constant[None], name[pathlets]]]
variable[url] assign[=] call[constant[/].join, parameter[name[pathlets]]]
if call[call[name[args]][<ast.UnaryOp object at 0x7da1b183a530>].endswith, parameter[constant[/]]] begin[:]
variable[url] assign[=] binary_operation[constant[%s/] <ast.Mod object at 0x7da2590d6920> name[url]]
return[name[url]] | keyword[def] identifier[_url_chunk_join] ( identifier[self] ,* identifier[args] ):
literal[string]
identifier[pathlets] = identifier[map] ( keyword[lambda] identifier[s] : identifier[s] . identifier[strip] ( literal[string] ), identifier[args] )
identifier[pathlets] = identifier[filter] ( keyword[None] , identifier[pathlets] )
identifier[url] = literal[string] . identifier[join] ( identifier[pathlets] )
keyword[if] identifier[args] [- literal[int] ]. identifier[endswith] ( literal[string] ):
identifier[url] = literal[string] % identifier[url]
keyword[return] identifier[url] | def _url_chunk_join(self, *args):
"""Join the arguments together to form a predictable URL chunk."""
# Strip slashes from either side of each path piece.
pathlets = map(lambda s: s.strip('/'), args)
# Remove empty pieces.
pathlets = filter(None, pathlets)
url = '/'.join(pathlets)
# If this is a directory, add a trailing slash.
if args[-1].endswith('/'):
url = '%s/' % url # depends on [control=['if'], data=[]]
return url |
def ensure_dir_path(self, path, relative=False):
    """
    Ensure the path is a dir path.
    Should end with '/' except for schemes and locators.
    Args:
        path (str): Path or URL.
        relative (bool): Path is relative to current root.
    Returns:
        path: dir path
    """
    # Work on the root-relative form when an absolute path/URL was given.
    rel_path = path if relative else self.relpath(path)
    # Locators (e.g. buckets/containers) never carry a trailing slash.
    if self.is_locator(rel_path, relative=True):
        return path.rstrip('/')
    # Non-empty relative paths are directories: normalize to one '/'.
    if rel_path:
        return path.rstrip('/') + '/'
    # Empty relative path means the root itself; leave it untouched.
    return path
constant[
Ensure the path is a dir path.
Should end with '/' except for schemes and locators.
Args:
path (str): Path or URL.
relative (bool): Path is relative to current root.
Returns:
path: dir path
]
if <ast.UnaryOp object at 0x7da1b191fb50> begin[:]
variable[rel_path] assign[=] call[name[self].relpath, parameter[name[path]]]
if call[name[self].is_locator, parameter[name[rel_path]]] begin[:]
variable[path] assign[=] call[name[path].rstrip, parameter[constant[/]]]
return[name[path]] | keyword[def] identifier[ensure_dir_path] ( identifier[self] , identifier[path] , identifier[relative] = keyword[False] ):
literal[string]
keyword[if] keyword[not] identifier[relative] :
identifier[rel_path] = identifier[self] . identifier[relpath] ( identifier[path] )
keyword[else] :
identifier[rel_path] = identifier[path]
keyword[if] identifier[self] . identifier[is_locator] ( identifier[rel_path] , identifier[relative] = keyword[True] ):
identifier[path] = identifier[path] . identifier[rstrip] ( literal[string] )
keyword[elif] identifier[rel_path] :
identifier[path] = identifier[path] . identifier[rstrip] ( literal[string] )+ literal[string]
keyword[return] identifier[path] | def ensure_dir_path(self, path, relative=False):
"""
Ensure the path is a dir path.
Should end with '/' except for schemes and locators.
Args:
path (str): Path or URL.
relative (bool): Path is relative to current root.
Returns:
path: dir path
"""
if not relative:
rel_path = self.relpath(path) # depends on [control=['if'], data=[]]
else:
rel_path = path
# Locator
if self.is_locator(rel_path, relative=True):
path = path.rstrip('/') # depends on [control=['if'], data=[]]
# Directory
elif rel_path:
path = path.rstrip('/') + '/' # depends on [control=['if'], data=[]]
# else: root
return path |
def SVD_2_stream(uvectors, stachans, k, sampling_rate):
    """
    Deprecated. Use svd_to_stream.

    Thin backward-compatibility wrapper: forwards all arguments to
    :func:`svd_to_stream` unchanged and returns its result.
    """
    # Fix: message previously misspelled "Depreciated"; also emit the
    # proper DeprecationWarning category so filters/tools recognize it.
    warnings.warn('Deprecated, use svd_to_stream instead.',
                  DeprecationWarning)
    return svd_to_stream(uvectors=uvectors, stachans=stachans, k=k,
                         sampling_rate=sampling_rate)
constant[
Depreciated. Use svd_to_stream
]
call[name[warnings].warn, parameter[constant[Depreciated, use svd_to_stream instead.]]]
return[call[name[svd_to_stream], parameter[]]] | keyword[def] identifier[SVD_2_stream] ( identifier[uvectors] , identifier[stachans] , identifier[k] , identifier[sampling_rate] ):
literal[string]
identifier[warnings] . identifier[warn] ( literal[string] )
keyword[return] identifier[svd_to_stream] ( identifier[uvectors] = identifier[uvectors] , identifier[stachans] = identifier[stachans] , identifier[k] = identifier[k] ,
identifier[sampling_rate] = identifier[sampling_rate] ) | def SVD_2_stream(uvectors, stachans, k, sampling_rate):
"""
Depreciated. Use svd_to_stream
"""
warnings.warn('Depreciated, use svd_to_stream instead.')
return svd_to_stream(uvectors=uvectors, stachans=stachans, k=k, sampling_rate=sampling_rate) |
def simxReadProximitySensor(clientID, sensorHandle, operationMode):
    '''
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Output parameters filled in by the remote-API C call.
    detectionState = ct.c_ubyte()
    detectedObjectHandle = ct.c_int()
    detectedPoint = (ct.c_float * 3)()
    detectedSurfaceNormalVector = (ct.c_float * 3)()
    ret = c_ReadProximitySensor(clientID, sensorHandle,
                                ct.byref(detectionState), detectedPoint,
                                ct.byref(detectedObjectHandle),
                                detectedSurfaceNormalVector, operationMode)
    # Copy the ctypes float arrays into plain Python lists.
    point = [detectedPoint[idx] for idx in range(3)]
    normal = [detectedSurfaceNormalVector[idx] for idx in range(3)]
    return (ret, detectionState.value != 0, point,
            detectedObjectHandle.value, normal)
constant[
Please have a look at the function description/documentation in the V-REP user manual
]
variable[detectionState] assign[=] call[name[ct].c_ubyte, parameter[]]
variable[detectedObjectHandle] assign[=] call[name[ct].c_int, parameter[]]
variable[detectedPoint] assign[=] call[binary_operation[name[ct].c_float * constant[3]], parameter[]]
variable[detectedSurfaceNormalVector] assign[=] call[binary_operation[name[ct].c_float * constant[3]], parameter[]]
variable[ret] assign[=] call[name[c_ReadProximitySensor], parameter[name[clientID], name[sensorHandle], call[name[ct].byref, parameter[name[detectionState]]], name[detectedPoint], call[name[ct].byref, parameter[name[detectedObjectHandle]]], name[detectedSurfaceNormalVector], name[operationMode]]]
variable[arr1] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[3]]]] begin[:]
call[name[arr1].append, parameter[call[name[detectedPoint]][name[i]]]]
variable[arr2] assign[=] list[[]]
for taget[name[i]] in starred[call[name[range], parameter[constant[3]]]] begin[:]
call[name[arr2].append, parameter[call[name[detectedSurfaceNormalVector]][name[i]]]]
return[tuple[[<ast.Name object at 0x7da18ede5f00>, <ast.Call object at 0x7da18ede5300>, <ast.Name object at 0x7da18ede77c0>, <ast.Attribute object at 0x7da18ede5cf0>, <ast.Name object at 0x7da18ede7c70>]]] | keyword[def] identifier[simxReadProximitySensor] ( identifier[clientID] , identifier[sensorHandle] , identifier[operationMode] ):
literal[string]
identifier[detectionState] = identifier[ct] . identifier[c_ubyte] ()
identifier[detectedObjectHandle] = identifier[ct] . identifier[c_int] ()
identifier[detectedPoint] =( identifier[ct] . identifier[c_float] * literal[int] )()
identifier[detectedSurfaceNormalVector] =( identifier[ct] . identifier[c_float] * literal[int] )()
identifier[ret] = identifier[c_ReadProximitySensor] ( identifier[clientID] , identifier[sensorHandle] , identifier[ct] . identifier[byref] ( identifier[detectionState] ), identifier[detectedPoint] , identifier[ct] . identifier[byref] ( identifier[detectedObjectHandle] ), identifier[detectedSurfaceNormalVector] , identifier[operationMode] )
identifier[arr1] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ):
identifier[arr1] . identifier[append] ( identifier[detectedPoint] [ identifier[i] ])
identifier[arr2] =[]
keyword[for] identifier[i] keyword[in] identifier[range] ( literal[int] ):
identifier[arr2] . identifier[append] ( identifier[detectedSurfaceNormalVector] [ identifier[i] ])
keyword[return] identifier[ret] , identifier[bool] ( identifier[detectionState] . identifier[value] != literal[int] ), identifier[arr1] , identifier[detectedObjectHandle] . identifier[value] , identifier[arr2] | def simxReadProximitySensor(clientID, sensorHandle, operationMode):
"""
Please have a look at the function description/documentation in the V-REP user manual
"""
detectionState = ct.c_ubyte()
detectedObjectHandle = ct.c_int()
detectedPoint = (ct.c_float * 3)()
detectedSurfaceNormalVector = (ct.c_float * 3)()
ret = c_ReadProximitySensor(clientID, sensorHandle, ct.byref(detectionState), detectedPoint, ct.byref(detectedObjectHandle), detectedSurfaceNormalVector, operationMode)
arr1 = []
for i in range(3):
arr1.append(detectedPoint[i]) # depends on [control=['for'], data=['i']]
arr2 = []
for i in range(3):
arr2.append(detectedSurfaceNormalVector[i]) # depends on [control=['for'], data=['i']]
return (ret, bool(detectionState.value != 0), arr1, detectedObjectHandle.value, arr2) |
def migrate_codec(config_old, config_new):
    '''Migrate data from mongodict <= 0.2.1 to 0.3.0
    `config_old` and `config_new` should be dictionaries with the keys
    regarding to MongoDB server:
    - `host`
    - `port`
    - `database`
    - `collection`
    '''
    assert mongodict.__version__ in [(0, 3, 0), (0, 3, 1)]
    connection = pymongo.Connection(host=config_old['host'],
                                    port=config_old['port'])
    database = connection[config_old['database']]
    collection = database[config_old['collection']]
    new_dict = mongodict.MongoDict(**config_new)  # uses pickle codec by default
    total_pairs = collection.count()
    start_time = time.time()
    # Fix: `counter` must be defined before the loop; the final
    # print_report() below raised NameError on an empty collection.
    counter = 0
    for counter, pair in enumerate(collection.find(), start=1):
        key, value = pair['_id'], pair['value']
        new_dict[key] = value
        # Emit a progress line every REPORT_INTERVAL migrated pairs.
        if counter % REPORT_INTERVAL == 0:
            print_report(counter, total_pairs, start_time)
    # Final report so the last partial interval is accounted for.
    print_report(counter, total_pairs, start_time)
    print('')
constant[Migrate data from mongodict <= 0.2.1 to 0.3.0
`config_old` and `config_new` should be dictionaries with the keys
regarding to MongoDB server:
- `host`
- `port`
- `database`
- `collection`
]
assert[compare[name[mongodict].__version__ in list[[<ast.Tuple object at 0x7da18f00c970>, <ast.Tuple object at 0x7da18f00ff70>]]]]
variable[connection] assign[=] call[name[pymongo].Connection, parameter[]]
variable[database] assign[=] call[name[connection]][call[name[config_old]][constant[database]]]
variable[collection] assign[=] call[name[database]][call[name[config_old]][constant[collection]]]
variable[new_dict] assign[=] call[name[mongodict].MongoDict, parameter[]]
variable[total_pairs] assign[=] call[name[collection].count, parameter[]]
variable[start_time] assign[=] call[name[time].time, parameter[]]
for taget[tuple[[<ast.Name object at 0x7da20e960fa0>, <ast.Name object at 0x7da20e960c40>]]] in starred[call[name[enumerate], parameter[call[name[collection].find, parameter[]]]]] begin[:]
<ast.Tuple object at 0x7da18f722f80> assign[=] tuple[[<ast.Subscript object at 0x7da18f720cd0>, <ast.Subscript object at 0x7da18f723f10>]]
call[name[new_dict]][name[key]] assign[=] name[value]
if compare[binary_operation[name[counter] <ast.Mod object at 0x7da2590d6920> name[REPORT_INTERVAL]] equal[==] constant[0]] begin[:]
call[name[print_report], parameter[name[counter], name[total_pairs], name[start_time]]]
call[name[print_report], parameter[name[counter], name[total_pairs], name[start_time]]]
call[name[print], parameter[constant[]]] | keyword[def] identifier[migrate_codec] ( identifier[config_old] , identifier[config_new] ):
literal[string]
keyword[assert] identifier[mongodict] . identifier[__version__] keyword[in] [( literal[int] , literal[int] , literal[int] ),( literal[int] , literal[int] , literal[int] )]
identifier[connection] = identifier[pymongo] . identifier[Connection] ( identifier[host] = identifier[config_old] [ literal[string] ],
identifier[port] = identifier[config_old] [ literal[string] ])
identifier[database] = identifier[connection] [ identifier[config_old] [ literal[string] ]]
identifier[collection] = identifier[database] [ identifier[config_old] [ literal[string] ]]
identifier[new_dict] = identifier[mongodict] . identifier[MongoDict] (** identifier[config_new] )
identifier[total_pairs] = identifier[collection] . identifier[count] ()
identifier[start_time] = identifier[time] . identifier[time] ()
keyword[for] identifier[counter] , identifier[pair] keyword[in] identifier[enumerate] ( identifier[collection] . identifier[find] (), identifier[start] = literal[int] ):
identifier[key] , identifier[value] = identifier[pair] [ literal[string] ], identifier[pair] [ literal[string] ]
identifier[new_dict] [ identifier[key] ]= identifier[value]
keyword[if] identifier[counter] % identifier[REPORT_INTERVAL] == literal[int] :
identifier[print_report] ( identifier[counter] , identifier[total_pairs] , identifier[start_time] )
identifier[print_report] ( identifier[counter] , identifier[total_pairs] , identifier[start_time] )
identifier[print] ( literal[string] ) | def migrate_codec(config_old, config_new):
"""Migrate data from mongodict <= 0.2.1 to 0.3.0
`config_old` and `config_new` should be dictionaries with the keys
regarding to MongoDB server:
- `host`
- `port`
- `database`
- `collection`
"""
assert mongodict.__version__ in [(0, 3, 0), (0, 3, 1)]
connection = pymongo.Connection(host=config_old['host'], port=config_old['port'])
database = connection[config_old['database']]
collection = database[config_old['collection']]
new_dict = mongodict.MongoDict(**config_new) # uses pickle codec by default
total_pairs = collection.count()
start_time = time.time()
for (counter, pair) in enumerate(collection.find(), start=1):
(key, value) = (pair['_id'], pair['value'])
new_dict[key] = value
if counter % REPORT_INTERVAL == 0:
print_report(counter, total_pairs, start_time) # depends on [control=['if'], data=[]] # depends on [control=['for'], data=[]]
print_report(counter, total_pairs, start_time)
print('') |
def columnOptions(self, tableType):
    """
    Returns the column options for the inputed table type.
    :param tableType | <subclass of orb.Table> or None
    :return [<str>, ..]
    """
    if not tableType:
        return []
    schema = tableType.schema()
    # Fix: return a real list as documented; on Python 3 the previous
    # map(...) returned a one-shot iterator, not a list.
    return [column.name() for column in schema.columns()]
constant[
Returns the column options for the inputed table type.
:param tableType | <subclass of orb.Table>
:return [<str>, ..]
]
if <ast.UnaryOp object at 0x7da2043448e0> begin[:]
return[list[[]]]
variable[schema] assign[=] call[name[tableType].schema, parameter[]]
return[call[name[map], parameter[<ast.Lambda object at 0x7da204347130>, call[name[schema].columns, parameter[]]]]] | keyword[def] identifier[columnOptions] ( identifier[self] , identifier[tableType] ):
literal[string]
keyword[if] ( keyword[not] identifier[tableType] ):
keyword[return] []
identifier[schema] = identifier[tableType] . identifier[schema] ()
keyword[return] identifier[map] ( keyword[lambda] identifier[x] : identifier[x] . identifier[name] (), identifier[schema] . identifier[columns] ()) | def columnOptions(self, tableType):
"""
Returns the column options for the inputed table type.
:param tableType | <subclass of orb.Table>
:return [<str>, ..]
"""
if not tableType:
return [] # depends on [control=['if'], data=[]]
schema = tableType.schema()
return map(lambda x: x.name(), schema.columns()) |
def html2text(__html: str, *, width: int = 80,
              ascii_replacements: bool = False) -> str:
    """HTML to plain text renderer.
    See also: :pypi:`html2text`
    Args:
        __html: Text to process
        width: Paragraph width
        ascii_replacements: Use pseudo-ASCII replacements for Unicode
    Returns:
        Rendered text
    """
    # Configure the module-level html2text options before rendering.
    html2.UNICODE_SNOB = ascii_replacements
    html2.BODY_WIDTH = width
    rendered = html2.html2text(__html)
    return rendered.strip()
constant[HTML to plain text renderer.
See also: :pypi:`html2text`
Args:
__html: Text to process
width: Paragraph width
ascii_replacements: Use pseudo-ASCII replacements for Unicode
Returns:
Rendered text
]
name[html2].BODY_WIDTH assign[=] name[width]
name[html2].UNICODE_SNOB assign[=] name[ascii_replacements]
return[call[call[name[html2].html2text, parameter[name[__html]]].strip, parameter[]]] | keyword[def] identifier[html2text] ( identifier[__html] : identifier[str] ,*, identifier[width] : identifier[int] = literal[int] ,
identifier[ascii_replacements] : identifier[bool] = keyword[False] )-> identifier[str] :
literal[string]
identifier[html2] . identifier[BODY_WIDTH] = identifier[width]
identifier[html2] . identifier[UNICODE_SNOB] = identifier[ascii_replacements]
keyword[return] identifier[html2] . identifier[html2text] ( identifier[__html] ). identifier[strip] () | def html2text(__html: str, *, width: int=80, ascii_replacements: bool=False) -> str:
"""HTML to plain text renderer.
See also: :pypi:`html2text`
Args:
__html: Text to process
width: Paragraph width
ascii_replacements: Use pseudo-ASCII replacements for Unicode
Returns:
Rendered text
"""
html2.BODY_WIDTH = width
html2.UNICODE_SNOB = ascii_replacements
return html2.html2text(__html).strip() |
def publish_pages(name, paths, git_repo, published_repo, extra_message=''):
    """Publish helm chart index to github pages.

    Packages chart `name`, merges it into the repo's ``index.yaml`` on the
    ``gh-pages`` branch, and commits/pushes the result.

    Args:
        name: chart name; also used to derive the local checkout directory.
        paths: paths whose most recent commit identifies the chart version
            (passed to ``last_modified_commit``).
        git_repo: repository identifier handed to ``git_remote()``
            (presumably an org/repo slug — confirm against git_remote).
        published_repo: public URL recorded in the generated index.yaml.
        extra_message: optional text appended to the commit message.
    """
    version = last_modified_commit(*paths)
    # One checkout dir per chart+version, e.g. "mychart-abc123".
    checkout_dir = '{}-{}'.format(name, version)
    # --no-checkout: we only need the gh-pages branch, checked out next.
    check_call([
        'git', 'clone', '--no-checkout',
        git_remote(git_repo), checkout_dir],
        echo=False,
    )
    check_call(['git', 'checkout', 'gh-pages'], cwd=checkout_dir)
    # package the latest version into a temporary directory
    # and run helm repo index with --merge to update index.yaml
    # without refreshing all of the timestamps
    with TemporaryDirectory() as td:
        check_call([
            'helm', 'package', name,
            '--destination', td + '/',
        ])
        check_call([
            'helm', 'repo', 'index', td,
            '--url', published_repo,
            '--merge', os.path.join(checkout_dir, 'index.yaml'),
        ])
        # equivalent to `cp td/* checkout/`
        # copies new helm chart and updated index.yaml
        for f in os.listdir(td):
            shutil.copy2(
                os.path.join(td, f),
                os.path.join(checkout_dir, f)
            )
    check_call(['git', 'add', '.'], cwd=checkout_dir)
    # Separate any extra text from the subject line with a blank line.
    if extra_message:
        extra_message = '\n\n%s' % extra_message
    else:
        extra_message = ''
    check_call([
        'git',
        'commit',
        '-m', '[{}] Automatic update for commit {}{}'.format(name, version, extra_message)
    ], cwd=checkout_dir)
    check_call(
        ['git', 'push', 'origin', 'gh-pages'],
        cwd=checkout_dir,
    )
constant[Publish helm chart index to github pages]
variable[version] assign[=] call[name[last_modified_commit], parameter[<ast.Starred object at 0x7da1b1a79090>]]
variable[checkout_dir] assign[=] call[constant[{}-{}].format, parameter[name[name], name[version]]]
call[name[check_call], parameter[list[[<ast.Constant object at 0x7da1b1a79ba0>, <ast.Constant object at 0x7da1b1a78fd0>, <ast.Constant object at 0x7da1b1a7a1d0>, <ast.Call object at 0x7da1b1a78370>, <ast.Name object at 0x7da1b1a78250>]]]]
call[name[check_call], parameter[list[[<ast.Constant object at 0x7da2044c3e20>, <ast.Constant object at 0x7da2044c1480>, <ast.Constant object at 0x7da2044c3910>]]]]
with call[name[TemporaryDirectory], parameter[]] begin[:]
call[name[check_call], parameter[list[[<ast.Constant object at 0x7da2044c02b0>, <ast.Constant object at 0x7da2044c2f80>, <ast.Name object at 0x7da2044c26b0>, <ast.Constant object at 0x7da2044c0460>, <ast.BinOp object at 0x7da2044c0a30>]]]]
call[name[check_call], parameter[list[[<ast.Constant object at 0x7da2044c20e0>, <ast.Constant object at 0x7da2044c0940>, <ast.Constant object at 0x7da2044c06a0>, <ast.Name object at 0x7da2044c21a0>, <ast.Constant object at 0x7da2044c1060>, <ast.Name object at 0x7da2044c3850>, <ast.Constant object at 0x7da2044c2770>, <ast.Call object at 0x7da2044c2d70>]]]]
for taget[name[f]] in starred[call[name[os].listdir, parameter[name[td]]]] begin[:]
call[name[shutil].copy2, parameter[call[name[os].path.join, parameter[name[td], name[f]]], call[name[os].path.join, parameter[name[checkout_dir], name[f]]]]]
call[name[check_call], parameter[list[[<ast.Constant object at 0x7da2044c12d0>, <ast.Constant object at 0x7da2044c0820>, <ast.Constant object at 0x7da2044c1570>]]]]
if name[extra_message] begin[:]
variable[extra_message] assign[=] binary_operation[constant[
%s] <ast.Mod object at 0x7da2590d6920> name[extra_message]]
call[name[check_call], parameter[list[[<ast.Constant object at 0x7da2044c1930>, <ast.Constant object at 0x7da2044c27d0>, <ast.Constant object at 0x7da2044c01c0>, <ast.Call object at 0x7da2044c0d60>]]]]
call[name[check_call], parameter[list[[<ast.Constant object at 0x7da2044c3370>, <ast.Constant object at 0x7da2044c39d0>, <ast.Constant object at 0x7da2044c3550>, <ast.Constant object at 0x7da2044c2500>]]]] | keyword[def] identifier[publish_pages] ( identifier[name] , identifier[paths] , identifier[git_repo] , identifier[published_repo] , identifier[extra_message] = literal[string] ):
literal[string]
identifier[version] = identifier[last_modified_commit] (* identifier[paths] )
identifier[checkout_dir] = literal[string] . identifier[format] ( identifier[name] , identifier[version] )
identifier[check_call] ([
literal[string] , literal[string] , literal[string] ,
identifier[git_remote] ( identifier[git_repo] ), identifier[checkout_dir] ],
identifier[echo] = keyword[False] ,
)
identifier[check_call] ([ literal[string] , literal[string] , literal[string] ], identifier[cwd] = identifier[checkout_dir] )
keyword[with] identifier[TemporaryDirectory] () keyword[as] identifier[td] :
identifier[check_call] ([
literal[string] , literal[string] , identifier[name] ,
literal[string] , identifier[td] + literal[string] ,
])
identifier[check_call] ([
literal[string] , literal[string] , literal[string] , identifier[td] ,
literal[string] , identifier[published_repo] ,
literal[string] , identifier[os] . identifier[path] . identifier[join] ( identifier[checkout_dir] , literal[string] ),
])
keyword[for] identifier[f] keyword[in] identifier[os] . identifier[listdir] ( identifier[td] ):
identifier[shutil] . identifier[copy2] (
identifier[os] . identifier[path] . identifier[join] ( identifier[td] , identifier[f] ),
identifier[os] . identifier[path] . identifier[join] ( identifier[checkout_dir] , identifier[f] )
)
identifier[check_call] ([ literal[string] , literal[string] , literal[string] ], identifier[cwd] = identifier[checkout_dir] )
keyword[if] identifier[extra_message] :
identifier[extra_message] = literal[string] % identifier[extra_message]
keyword[else] :
identifier[extra_message] = literal[string]
identifier[check_call] ([
literal[string] ,
literal[string] ,
literal[string] , literal[string] . identifier[format] ( identifier[name] , identifier[version] , identifier[extra_message] )
], identifier[cwd] = identifier[checkout_dir] )
identifier[check_call] (
[ literal[string] , literal[string] , literal[string] , literal[string] ],
identifier[cwd] = identifier[checkout_dir] ,
) | def publish_pages(name, paths, git_repo, published_repo, extra_message=''):
"""Publish helm chart index to github pages"""
version = last_modified_commit(*paths)
checkout_dir = '{}-{}'.format(name, version)
check_call(['git', 'clone', '--no-checkout', git_remote(git_repo), checkout_dir], echo=False)
check_call(['git', 'checkout', 'gh-pages'], cwd=checkout_dir)
# package the latest version into a temporary directory
# and run helm repo index with --merge to update index.yaml
# without refreshing all of the timestamps
with TemporaryDirectory() as td:
check_call(['helm', 'package', name, '--destination', td + '/'])
check_call(['helm', 'repo', 'index', td, '--url', published_repo, '--merge', os.path.join(checkout_dir, 'index.yaml')])
# equivalent to `cp td/* checkout/`
# copies new helm chart and updated index.yaml
for f in os.listdir(td):
shutil.copy2(os.path.join(td, f), os.path.join(checkout_dir, f)) # depends on [control=['for'], data=['f']] # depends on [control=['with'], data=['td']]
check_call(['git', 'add', '.'], cwd=checkout_dir)
if extra_message:
extra_message = '\n\n%s' % extra_message # depends on [control=['if'], data=[]]
else:
extra_message = ''
check_call(['git', 'commit', '-m', '[{}] Automatic update for commit {}{}'.format(name, version, extra_message)], cwd=checkout_dir)
check_call(['git', 'push', 'origin', 'gh-pages'], cwd=checkout_dir) |
def get_vlan_assignment_uuid(self):
    """Returns the UUID for the region's vlan assignment on CVX
    :returns: string containing the region's vlan assignment UUID,
        or None when no 'resource-pool' CLI command is configured.
    """
    command = self.cli_commands['resource-pool']
    if not command:
        return None
    # First element of the EOS command output carries the UUID.
    return self._run_eos_cmds(commands=command)[0]
constant[Returns the UUID for the region's vlan assignment on CVX
:returns: string containing the region's vlan assignment UUID
]
variable[vlan_uuid_cmd] assign[=] call[name[self].cli_commands][constant[resource-pool]]
if name[vlan_uuid_cmd] begin[:]
return[call[call[name[self]._run_eos_cmds, parameter[]]][constant[0]]]
return[constant[None]] | keyword[def] identifier[get_vlan_assignment_uuid] ( identifier[self] ):
literal[string]
identifier[vlan_uuid_cmd] = identifier[self] . identifier[cli_commands] [ literal[string] ]
keyword[if] identifier[vlan_uuid_cmd] :
keyword[return] identifier[self] . identifier[_run_eos_cmds] ( identifier[commands] = identifier[vlan_uuid_cmd] )[ literal[int] ]
keyword[return] keyword[None] | def get_vlan_assignment_uuid(self):
"""Returns the UUID for the region's vlan assignment on CVX
:returns: string containing the region's vlan assignment UUID
"""
vlan_uuid_cmd = self.cli_commands['resource-pool']
if vlan_uuid_cmd:
return self._run_eos_cmds(commands=vlan_uuid_cmd)[0] # depends on [control=['if'], data=[]]
return None |
def run(self): # pylint: disable=too-many-locals, too-many-statements, too-many-branches
    """Main scheduler function::
    * Load retention
    * Call 'pre_scheduler_mod_start' hook point
    * Start modules
    * Schedule first checks
    * Init connection with pollers/reactionners
    * Run main loop
    * Do recurrent works
    * Push/Get actions to passive satellites
    * Update stats
    * Call 'scheduler_tick' hook point
    * Save retention (on quit)

    Called once per daemon loop iteration; does nothing when the
    scheduler is not active (self.must_schedule is False).

    :return: None
    """
    # Inactive scheduler: log and bail out without touching any state.
    if not self.must_schedule:
        logger.warning("#%d - scheduler is not active...",
                       self.my_daemon.loop_count)
        return
    # Increment ticks count
    self.ticks += 1
    loop_start_ts = time.time()
    # Do recurrent works like schedule, consume, delete_zombie_checks
    # Each entry of recurrent_works maps an index to (name, function, tick period).
    for i in self.recurrent_works:
        (name, fun, nb_ticks) = self.recurrent_works[i]
        # A 0 in the tick will just disable it
        if nb_ticks:
            if self.ticks % nb_ticks == 0:
                # Call it and save the time spend in it
                _t0 = time.time()
                fun()
                # Per-work timer metric, keyed by the work's name.
                statsmgr.timer('loop.recurrent.%s' % name, time.time() - _t0)
    # Total time spent in all recurrent works for this tick.
    statsmgr.timer('loop.recurrent', time.time() - loop_start_ts)
    # Exchange actions/results with the passive satellites, timing each phase.
    _ts = time.time()
    self.push_actions_to_passive_satellites()
    statsmgr.timer('loop.push_actions_to_passive_satellites', time.time() - _ts)
    _ts = time.time()
    self.get_results_from_passive_satellites()
    statsmgr.timer('loop.get_results_from_passive_satellites', time.time() - _ts)
    # Scheduler statistics
    # - broks / notifications counters
    if self.my_daemon.log_loop:
        logger.debug("Items (loop): broks: %d, notifications: %d, checks: %d, internal checks: "
                     "%d, event handlers: %d, external commands: %d",
                     self.nb_broks, self.nb_notifications, self.nb_checks,
                     self.nb_internal_checks, self.nb_event_handlers, self.nb_external_commands)
    # Publish the activity counters as gauges (counters themselves are
    # maintained elsewhere; this only reports their current values).
    statsmgr.gauge('activity.checks', self.nb_checks)
    statsmgr.gauge('activity.internal_checks', self.nb_internal_checks)
    statsmgr.gauge('activity.launched_checks', self.nb_checks_launched)
    statsmgr.gauge('activity.checks_results', self.nb_checks_results)
    statsmgr.gauge('activity.checks_results_timeout', self.nb_checks_results_timeout)
    statsmgr.gauge('activity.checks_results_active', self.nb_checks_results_active)
    statsmgr.gauge('activity.checks_results_passive', self.nb_checks_results_passive)
    statsmgr.gauge('activity.launched_actions', self.nb_actions_launched)
    statsmgr.gauge('activity.actions_results', self.nb_actions_results)
    statsmgr.gauge('activity.actions_results_timeout', self.nb_actions_results_timeout)
    statsmgr.gauge('activity.broks', self.nb_broks)
    statsmgr.gauge('activity.external_commands', self.nb_external_commands)
    statsmgr.gauge('activity.notifications', self.nb_notifications)
    statsmgr.gauge('activity.event_handlers', self.nb_event_handlers)
    # On-demand memory dump requested by the daemon (one-shot flag).
    if self.my_daemon.need_dump_environment:
        _ts = time.time()
        logger.debug('I must dump my memory...')
        self.my_daemon.dump_environment()
        self.my_daemon.need_dump_environment = False
        statsmgr.timer('loop.memory_dump', time.time() - _ts)
    # On-demand objects/configuration dump requested by the daemon (one-shot flag).
    if self.my_daemon.need_objects_dump:
        _ts = time.time()
        logger.debug('I must dump my objects...')
        self.dump_objects()
        self.dump_config()
        self.my_daemon.need_objects_dump = False
        statsmgr.timer('loop.objects_dump', time.time() - _ts)
    # Give the modules a chance to act on every loop turn.
    _ts = time.time()
    self.hook_point('scheduler_tick')
    statsmgr.timer('loop.hook-tick', time.time() - _ts)
    if self.my_daemon.log_loop:
        # Average check throughput since daemon start.
        elapsed_time = time.time() - self.my_daemon.start_time
        logger.debug("Check average (total) = %d checks results, %.2f checks/s",
                     self.nb_checks, self.nb_checks / elapsed_time)
    # Report and reset the dropped-items counters when anything was dropped
    # during this loop turn.
    if self.nb_checks_dropped > 0 \
            or self.nb_broks_dropped > 0 or self.nb_actions_dropped > 0:
        logger.warning("We dropped %d checks, %d broks and %d actions",
                       self.nb_checks_dropped, self.nb_broks_dropped, self.nb_actions_dropped)
        statsmgr.gauge('activity.broks_dropped', self.nb_broks_dropped)
        statsmgr.gauge('activity.checks_dropped', self.nb_checks_dropped)
        statsmgr.gauge('activity.actions_dropped', self.nb_actions_dropped)
        self.nb_checks_dropped = self.nb_broks_dropped = self.nb_actions_dropped = 0
constant[Main scheduler function::
* Load retention
* Call 'pre_scheduler_mod_start' hook point
* Start modules
* Schedule first checks
* Init connection with pollers/reactionners
* Run main loop
* Do recurrent works
* Push/Get actions to passive satellites
* Update stats
* Call 'scheduler_tick' hook point
* Save retention (on quit)
:return: None
]
if <ast.UnaryOp object at 0x7da20c7c9450> begin[:]
call[name[logger].warning, parameter[constant[#%d - scheduler is not active...], name[self].my_daemon.loop_count]]
return[None]
<ast.AugAssign object at 0x7da20c7c9e40>
variable[loop_start_ts] assign[=] call[name[time].time, parameter[]]
for taget[name[i]] in starred[name[self].recurrent_works] begin[:]
<ast.Tuple object at 0x7da20c7c8a90> assign[=] call[name[self].recurrent_works][name[i]]
if name[nb_ticks] begin[:]
if compare[binary_operation[name[self].ticks <ast.Mod object at 0x7da2590d6920> name[nb_ticks]] equal[==] constant[0]] begin[:]
variable[_t0] assign[=] call[name[time].time, parameter[]]
call[name[fun], parameter[]]
call[name[statsmgr].timer, parameter[binary_operation[constant[loop.recurrent.%s] <ast.Mod object at 0x7da2590d6920> name[name]], binary_operation[call[name[time].time, parameter[]] - name[_t0]]]]
call[name[statsmgr].timer, parameter[constant[loop.recurrent], binary_operation[call[name[time].time, parameter[]] - name[loop_start_ts]]]]
variable[_ts] assign[=] call[name[time].time, parameter[]]
call[name[self].push_actions_to_passive_satellites, parameter[]]
call[name[statsmgr].timer, parameter[constant[loop.push_actions_to_passive_satellites], binary_operation[call[name[time].time, parameter[]] - name[_ts]]]]
variable[_ts] assign[=] call[name[time].time, parameter[]]
call[name[self].get_results_from_passive_satellites, parameter[]]
call[name[statsmgr].timer, parameter[constant[loop.get_results_from_passive_satellites], binary_operation[call[name[time].time, parameter[]] - name[_ts]]]]
if name[self].my_daemon.log_loop begin[:]
call[name[logger].debug, parameter[constant[Items (loop): broks: %d, notifications: %d, checks: %d, internal checks: %d, event handlers: %d, external commands: %d], name[self].nb_broks, name[self].nb_notifications, name[self].nb_checks, name[self].nb_internal_checks, name[self].nb_event_handlers, name[self].nb_external_commands]]
call[name[statsmgr].gauge, parameter[constant[activity.checks], name[self].nb_checks]]
call[name[statsmgr].gauge, parameter[constant[activity.internal_checks], name[self].nb_internal_checks]]
call[name[statsmgr].gauge, parameter[constant[activity.launched_checks], name[self].nb_checks_launched]]
call[name[statsmgr].gauge, parameter[constant[activity.checks_results], name[self].nb_checks_results]]
call[name[statsmgr].gauge, parameter[constant[activity.checks_results_timeout], name[self].nb_checks_results_timeout]]
call[name[statsmgr].gauge, parameter[constant[activity.checks_results_active], name[self].nb_checks_results_active]]
call[name[statsmgr].gauge, parameter[constant[activity.checks_results_passive], name[self].nb_checks_results_passive]]
call[name[statsmgr].gauge, parameter[constant[activity.launched_actions], name[self].nb_actions_launched]]
call[name[statsmgr].gauge, parameter[constant[activity.actions_results], name[self].nb_actions_results]]
call[name[statsmgr].gauge, parameter[constant[activity.actions_results_timeout], name[self].nb_actions_results_timeout]]
call[name[statsmgr].gauge, parameter[constant[activity.broks], name[self].nb_broks]]
call[name[statsmgr].gauge, parameter[constant[activity.external_commands], name[self].nb_external_commands]]
call[name[statsmgr].gauge, parameter[constant[activity.notifications], name[self].nb_notifications]]
call[name[statsmgr].gauge, parameter[constant[activity.event_handlers], name[self].nb_event_handlers]]
if name[self].my_daemon.need_dump_environment begin[:]
variable[_ts] assign[=] call[name[time].time, parameter[]]
call[name[logger].debug, parameter[constant[I must dump my memory...]]]
call[name[self].my_daemon.dump_environment, parameter[]]
name[self].my_daemon.need_dump_environment assign[=] constant[False]
call[name[statsmgr].timer, parameter[constant[loop.memory_dump], binary_operation[call[name[time].time, parameter[]] - name[_ts]]]]
if name[self].my_daemon.need_objects_dump begin[:]
variable[_ts] assign[=] call[name[time].time, parameter[]]
call[name[logger].debug, parameter[constant[I must dump my objects...]]]
call[name[self].dump_objects, parameter[]]
call[name[self].dump_config, parameter[]]
name[self].my_daemon.need_objects_dump assign[=] constant[False]
call[name[statsmgr].timer, parameter[constant[loop.objects_dump], binary_operation[call[name[time].time, parameter[]] - name[_ts]]]]
variable[_ts] assign[=] call[name[time].time, parameter[]]
call[name[self].hook_point, parameter[constant[scheduler_tick]]]
call[name[statsmgr].timer, parameter[constant[loop.hook-tick], binary_operation[call[name[time].time, parameter[]] - name[_ts]]]]
if name[self].my_daemon.log_loop begin[:]
variable[elapsed_time] assign[=] binary_operation[call[name[time].time, parameter[]] - name[self].my_daemon.start_time]
call[name[logger].debug, parameter[constant[Check average (total) = %d checks results, %.2f checks/s], name[self].nb_checks, binary_operation[name[self].nb_checks / name[elapsed_time]]]]
if <ast.BoolOp object at 0x7da18bc721d0> begin[:]
call[name[logger].warning, parameter[constant[We dropped %d checks, %d broks and %d actions], name[self].nb_checks_dropped, name[self].nb_broks_dropped, name[self].nb_actions_dropped]]
call[name[statsmgr].gauge, parameter[constant[activity.broks_dropped], name[self].nb_broks_dropped]]
call[name[statsmgr].gauge, parameter[constant[activity.checks_dropped], name[self].nb_checks_dropped]]
call[name[statsmgr].gauge, parameter[constant[activity.actions_dropped], name[self].nb_actions_dropped]]
name[self].nb_checks_dropped assign[=] constant[0] | keyword[def] identifier[run] ( identifier[self] ):
literal[string]
keyword[if] keyword[not] identifier[self] . identifier[must_schedule] :
identifier[logger] . identifier[warning] ( literal[string] ,
identifier[self] . identifier[my_daemon] . identifier[loop_count] )
keyword[return]
identifier[self] . identifier[ticks] += literal[int]
identifier[loop_start_ts] = identifier[time] . identifier[time] ()
keyword[for] identifier[i] keyword[in] identifier[self] . identifier[recurrent_works] :
( identifier[name] , identifier[fun] , identifier[nb_ticks] )= identifier[self] . identifier[recurrent_works] [ identifier[i] ]
keyword[if] identifier[nb_ticks] :
keyword[if] identifier[self] . identifier[ticks] % identifier[nb_ticks] == literal[int] :
identifier[_t0] = identifier[time] . identifier[time] ()
identifier[fun] ()
identifier[statsmgr] . identifier[timer] ( literal[string] % identifier[name] , identifier[time] . identifier[time] ()- identifier[_t0] )
identifier[statsmgr] . identifier[timer] ( literal[string] , identifier[time] . identifier[time] ()- identifier[loop_start_ts] )
identifier[_ts] = identifier[time] . identifier[time] ()
identifier[self] . identifier[push_actions_to_passive_satellites] ()
identifier[statsmgr] . identifier[timer] ( literal[string] , identifier[time] . identifier[time] ()- identifier[_ts] )
identifier[_ts] = identifier[time] . identifier[time] ()
identifier[self] . identifier[get_results_from_passive_satellites] ()
identifier[statsmgr] . identifier[timer] ( literal[string] , identifier[time] . identifier[time] ()- identifier[_ts] )
keyword[if] identifier[self] . identifier[my_daemon] . identifier[log_loop] :
identifier[logger] . identifier[debug] ( literal[string]
literal[string] ,
identifier[self] . identifier[nb_broks] , identifier[self] . identifier[nb_notifications] , identifier[self] . identifier[nb_checks] ,
identifier[self] . identifier[nb_internal_checks] , identifier[self] . identifier[nb_event_handlers] , identifier[self] . identifier[nb_external_commands] )
identifier[statsmgr] . identifier[gauge] ( literal[string] , identifier[self] . identifier[nb_checks] )
identifier[statsmgr] . identifier[gauge] ( literal[string] , identifier[self] . identifier[nb_internal_checks] )
identifier[statsmgr] . identifier[gauge] ( literal[string] , identifier[self] . identifier[nb_checks_launched] )
identifier[statsmgr] . identifier[gauge] ( literal[string] , identifier[self] . identifier[nb_checks_results] )
identifier[statsmgr] . identifier[gauge] ( literal[string] , identifier[self] . identifier[nb_checks_results_timeout] )
identifier[statsmgr] . identifier[gauge] ( literal[string] , identifier[self] . identifier[nb_checks_results_active] )
identifier[statsmgr] . identifier[gauge] ( literal[string] , identifier[self] . identifier[nb_checks_results_passive] )
identifier[statsmgr] . identifier[gauge] ( literal[string] , identifier[self] . identifier[nb_actions_launched] )
identifier[statsmgr] . identifier[gauge] ( literal[string] , identifier[self] . identifier[nb_actions_results] )
identifier[statsmgr] . identifier[gauge] ( literal[string] , identifier[self] . identifier[nb_actions_results_timeout] )
identifier[statsmgr] . identifier[gauge] ( literal[string] , identifier[self] . identifier[nb_broks] )
identifier[statsmgr] . identifier[gauge] ( literal[string] , identifier[self] . identifier[nb_external_commands] )
identifier[statsmgr] . identifier[gauge] ( literal[string] , identifier[self] . identifier[nb_notifications] )
identifier[statsmgr] . identifier[gauge] ( literal[string] , identifier[self] . identifier[nb_event_handlers] )
keyword[if] identifier[self] . identifier[my_daemon] . identifier[need_dump_environment] :
identifier[_ts] = identifier[time] . identifier[time] ()
identifier[logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[my_daemon] . identifier[dump_environment] ()
identifier[self] . identifier[my_daemon] . identifier[need_dump_environment] = keyword[False]
identifier[statsmgr] . identifier[timer] ( literal[string] , identifier[time] . identifier[time] ()- identifier[_ts] )
keyword[if] identifier[self] . identifier[my_daemon] . identifier[need_objects_dump] :
identifier[_ts] = identifier[time] . identifier[time] ()
identifier[logger] . identifier[debug] ( literal[string] )
identifier[self] . identifier[dump_objects] ()
identifier[self] . identifier[dump_config] ()
identifier[self] . identifier[my_daemon] . identifier[need_objects_dump] = keyword[False]
identifier[statsmgr] . identifier[timer] ( literal[string] , identifier[time] . identifier[time] ()- identifier[_ts] )
identifier[_ts] = identifier[time] . identifier[time] ()
identifier[self] . identifier[hook_point] ( literal[string] )
identifier[statsmgr] . identifier[timer] ( literal[string] , identifier[time] . identifier[time] ()- identifier[_ts] )
keyword[if] identifier[self] . identifier[my_daemon] . identifier[log_loop] :
identifier[elapsed_time] = identifier[time] . identifier[time] ()- identifier[self] . identifier[my_daemon] . identifier[start_time]
identifier[logger] . identifier[debug] ( literal[string] ,
identifier[self] . identifier[nb_checks] , identifier[self] . identifier[nb_checks] / identifier[elapsed_time] )
keyword[if] identifier[self] . identifier[nb_checks_dropped] > literal[int] keyword[or] identifier[self] . identifier[nb_broks_dropped] > literal[int] keyword[or] identifier[self] . identifier[nb_actions_dropped] > literal[int] :
identifier[logger] . identifier[warning] ( literal[string] ,
identifier[self] . identifier[nb_checks_dropped] , identifier[self] . identifier[nb_broks_dropped] , identifier[self] . identifier[nb_actions_dropped] )
identifier[statsmgr] . identifier[gauge] ( literal[string] , identifier[self] . identifier[nb_broks_dropped] )
identifier[statsmgr] . identifier[gauge] ( literal[string] , identifier[self] . identifier[nb_checks_dropped] )
identifier[statsmgr] . identifier[gauge] ( literal[string] , identifier[self] . identifier[nb_actions_dropped] )
identifier[self] . identifier[nb_checks_dropped] = identifier[self] . identifier[nb_broks_dropped] = identifier[self] . identifier[nb_actions_dropped] = literal[int] | def run(self): # pylint: disable=too-many-locals, too-many-statements, too-many-branches
"Main scheduler function::\n\n * Load retention\n * Call 'pre_scheduler_mod_start' hook point\n * Start modules\n * Schedule first checks\n * Init connection with pollers/reactionners\n * Run main loop\n\n * Do recurrent works\n * Push/Get actions to passive satellites\n * Update stats\n * Call 'scheduler_tick' hook point\n\n * Save retention (on quit)\n\n :return: None\n "
if not self.must_schedule:
logger.warning('#%d - scheduler is not active...', self.my_daemon.loop_count)
return # depends on [control=['if'], data=[]]
# Increment ticks count
self.ticks += 1
loop_start_ts = time.time()
# Do recurrent works like schedule, consume, delete_zombie_checks
for i in self.recurrent_works:
(name, fun, nb_ticks) = self.recurrent_works[i]
# A 0 in the tick will just disable it
if nb_ticks:
if self.ticks % nb_ticks == 0:
# Call it and save the time spend in it
_t0 = time.time()
fun()
statsmgr.timer('loop.recurrent.%s' % name, time.time() - _t0) # depends on [control=['if'], data=[]] # depends on [control=['if'], data=[]] # depends on [control=['for'], data=['i']]
statsmgr.timer('loop.recurrent', time.time() - loop_start_ts)
_ts = time.time()
self.push_actions_to_passive_satellites()
statsmgr.timer('loop.push_actions_to_passive_satellites', time.time() - _ts)
_ts = time.time()
self.get_results_from_passive_satellites()
statsmgr.timer('loop.get_results_from_passive_satellites', time.time() - _ts)
# Scheduler statistics
# - broks / notifications counters
if self.my_daemon.log_loop:
logger.debug('Items (loop): broks: %d, notifications: %d, checks: %d, internal checks: %d, event handlers: %d, external commands: %d', self.nb_broks, self.nb_notifications, self.nb_checks, self.nb_internal_checks, self.nb_event_handlers, self.nb_external_commands) # depends on [control=['if'], data=[]]
statsmgr.gauge('activity.checks', self.nb_checks)
statsmgr.gauge('activity.internal_checks', self.nb_internal_checks)
statsmgr.gauge('activity.launched_checks', self.nb_checks_launched)
statsmgr.gauge('activity.checks_results', self.nb_checks_results)
statsmgr.gauge('activity.checks_results_timeout', self.nb_checks_results_timeout)
statsmgr.gauge('activity.checks_results_active', self.nb_checks_results_active)
statsmgr.gauge('activity.checks_results_passive', self.nb_checks_results_passive)
statsmgr.gauge('activity.launched_actions', self.nb_actions_launched)
statsmgr.gauge('activity.actions_results', self.nb_actions_results)
statsmgr.gauge('activity.actions_results_timeout', self.nb_actions_results_timeout)
statsmgr.gauge('activity.broks', self.nb_broks)
statsmgr.gauge('activity.external_commands', self.nb_external_commands)
statsmgr.gauge('activity.notifications', self.nb_notifications)
statsmgr.gauge('activity.event_handlers', self.nb_event_handlers)
if self.my_daemon.need_dump_environment:
_ts = time.time()
logger.debug('I must dump my memory...')
self.my_daemon.dump_environment()
self.my_daemon.need_dump_environment = False
statsmgr.timer('loop.memory_dump', time.time() - _ts) # depends on [control=['if'], data=[]]
if self.my_daemon.need_objects_dump:
_ts = time.time()
logger.debug('I must dump my objects...')
self.dump_objects()
self.dump_config()
self.my_daemon.need_objects_dump = False
statsmgr.timer('loop.objects_dump', time.time() - _ts) # depends on [control=['if'], data=[]]
_ts = time.time()
self.hook_point('scheduler_tick')
statsmgr.timer('loop.hook-tick', time.time() - _ts)
if self.my_daemon.log_loop:
elapsed_time = time.time() - self.my_daemon.start_time
logger.debug('Check average (total) = %d checks results, %.2f checks/s', self.nb_checks, self.nb_checks / elapsed_time) # depends on [control=['if'], data=[]]
if self.nb_checks_dropped > 0 or self.nb_broks_dropped > 0 or self.nb_actions_dropped > 0:
logger.warning('We dropped %d checks, %d broks and %d actions', self.nb_checks_dropped, self.nb_broks_dropped, self.nb_actions_dropped)
statsmgr.gauge('activity.broks_dropped', self.nb_broks_dropped)
statsmgr.gauge('activity.checks_dropped', self.nb_checks_dropped)
statsmgr.gauge('activity.actions_dropped', self.nb_actions_dropped)
self.nb_checks_dropped = self.nb_broks_dropped = self.nb_actions_dropped = 0 # depends on [control=['if'], data=[]] |
def _api_config(self):
    """Glances API RESTful implementation.

    Return the JSON representation of the Glances configuration file
    HTTP/200 if OK
    HTTP/404 if others error
    """
    response.content_type = 'application/json; charset=utf-8'
    try:
        # Serialize the configuration dict to a JSON string.
        config_json = json.dumps(self.config.as_dict())
    except Exception as e:
        # Any serialization/access failure turns into an HTTP 404.
        abort(404, "Cannot get config (%s)" % str(e))
    else:
        return config_json
constant[Glances API RESTful implementation.
Return the JSON representation of the Glances configuration file
HTTP/200 if OK
HTTP/404 if others error
]
name[response].content_type assign[=] constant[application/json; charset=utf-8]
<ast.Try object at 0x7da1b1c21fc0>
return[name[args_json]] | keyword[def] identifier[_api_config] ( identifier[self] ):
literal[string]
identifier[response] . identifier[content_type] = literal[string]
keyword[try] :
identifier[args_json] = identifier[json] . identifier[dumps] ( identifier[self] . identifier[config] . identifier[as_dict] ())
keyword[except] identifier[Exception] keyword[as] identifier[e] :
identifier[abort] ( literal[int] , literal[string] % identifier[str] ( identifier[e] ))
keyword[return] identifier[args_json] | def _api_config(self):
"""Glances API RESTful implementation.
Return the JSON representation of the Glances configuration file
HTTP/200 if OK
HTTP/404 if others error
"""
response.content_type = 'application/json; charset=utf-8'
try:
# Get the JSON value of the config' dict
args_json = json.dumps(self.config.as_dict()) # depends on [control=['try'], data=[]]
except Exception as e:
abort(404, 'Cannot get config (%s)' % str(e)) # depends on [control=['except'], data=['e']]
return args_json |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.