code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
#
# Copyright (c) 2018, 2022 Oracle and/or its affiliates. All rights reserved.
#
# Licensed under the Universal Permissive License v 1.0 as shown at
# https://oss.oracle.com/licenses/upl/
#
from logging import DEBUG
from multiprocessing import pool
from platform import python_version
from requests import Session
from sys import version_info
from threading import Lock
from time import time
from .common import (
ByteOutputStream, CheckValue, HttpConstants, LogUtils, SSLAdapter,
TableLimits, synchronized)
from .config import DefaultRetryHandler
from .exception import (IllegalArgumentException,
OperationNotSupportedException, RequestSizeLimitException)
from .http import RateLimiterMap, RequestUtils
from .kv import StoreAccessTokenProvider
from .operations import GetTableRequest, QueryResult, TableRequest, WriteRequest
from .query import QueryDriver
from .serde import BinaryProtocol
from .version import __version__
class Client(object):
    """HTTP driver client for the Oracle NoSQL Database proxy.

    Owns the HTTP session, request serialization, retry handling, and
    (cloud only) client-side rate limiting state used to execute requests.
    """
    DEFAULT_MAX_CONTENT_LENGTH = 32 * 1024 * 1024
    # How long cached table limits stay fresh before a background refresh
    # is triggered: 600 seconds, expressed in nanoseconds.
    LIMITER_REFRESH_NANOS = 600000000000
    TRACE_LEVEL = 0

    # The HTTP driver client.
    def __init__(self, config, logger):
        """Build the client from a NoSQLHandleConfig and a logger.

        :param config: the handle configuration (service URL, pools, proxy,
            retry handler, auth provider, rate-limiting settings).
        :param logger: logger used for driver diagnostics.
        :raises IllegalArgumentException: if no AuthorizationProvider is
            configured, or https is requested without an SSLContext.
        """
        self._logutils = LogUtils(logger)
        self._config = config
        self._url = config.get_service_url()
        self._request_uri = self._url.geturl() + HttpConstants.NOSQL_DATA_PATH
        self._pool_connections = config.get_pool_connections()
        self._pool_maxsize = config.get_pool_maxsize()
        max_content_length = config.get_max_content_length()
        # 0 means "use the driver default" (32 MB).
        self._max_content_length = (
            Client.DEFAULT_MAX_CONTENT_LENGTH if max_content_length == 0
            else max_content_length)
        self._request_id = 1
        self._proxy_host = config.get_proxy_host()
        self._proxy_port = config.get_proxy_port()
        self._proxy_username = config.get_proxy_username()
        self._proxy_password = config.get_proxy_password()
        self._retry_handler = config.get_retry_handler()
        if self._retry_handler is None:
            self._retry_handler = DefaultRetryHandler()
        self._shut_down = False
        self._user_agent = self._make_user_agent()
        self._auth_provider = config.get_authorization_provider()
        if self._auth_provider is None:
            raise IllegalArgumentException(
                'Must configure AuthorizationProvider.')
        self._sess = Session()
        ssl_ctx = None
        url_scheme = self._url.scheme
        if url_scheme == 'https':
            ssl_ctx = config.get_ssl_context()
            if ssl_ctx is None:
                raise IllegalArgumentException(
                    'Unable to configure https: SSLContext is missing from ' +
                    'config.')
        adapter = SSLAdapter(
            ssl_ctx, pool_connections=self._pool_connections,
            pool_maxsize=self._pool_maxsize, max_retries=5, pool_block=True)
        self._sess.mount(self._url.scheme + '://', adapter)
        if self._proxy_host is not None:
            self._check_and_set_proxy(self._sess)
        self.serial_version = BinaryProtocol.DEFAULT_SERIAL_VERSION
        # StoreAccessTokenProvider means onprem
        self._is_cloud = not isinstance(
            self._auth_provider, StoreAccessTokenProvider)
        if config.get_rate_limiting_enabled() and self._is_cloud:
            self._logutils.log_debug(
                'Starting client with rate limiting enabled')
            self._rate_limiter_map = RateLimiterMap()
            self._table_limit_update_map = dict()
            # Single worker is enough: it only refreshes table limits.
            self._threadpool = pool.ThreadPool(1)
        else:
            self._logutils.log_debug('Starting client with no rate limiting')
            self._rate_limiter_map = None
            self._table_limit_update_map = None
            self._threadpool = None
        self.lock = Lock()
        self._ratelimiter_duration_seconds = 30
        self._one_time_messages = {}

    @synchronized
    def background_update_limiters(self, table_name):
        # Query table limits and create rate limiters for a table in a
        # short-lived background thread.
        if not self._table_needs_refresh(table_name):
            return
        self._set_table_needs_refresh(table_name, False)
        try:
            # Bug fix: pass the actual table name to the worker; the
            # original passed the literal string 'table_name'.
            self._threadpool.map(self._update_table_limiters, [table_name])
        except RuntimeError:
            # Pool may be shut down; allow a later retry.
            self._set_table_needs_refresh(table_name, True)

    def enable_rate_limiting(self, enable, use_percent):
        """
        Internal use only.

        Allow tests to enable/disable rate limiting. This method is not thread
        safe, and should only be executed by one thread when no other operations
        are in progress.

        :param enable: whether to enable rate limiting.
        :param use_percent: percentage of table limits to use.
        """
        self._config.set_default_rate_limiting_percentage(use_percent)
        if enable and self._rate_limiter_map is None:
            self._rate_limiter_map = RateLimiterMap()
            self._table_limit_update_map = dict()
            self._threadpool = pool.ThreadPool(1)
        elif not enable and self._rate_limiter_map is not None:
            self._rate_limiter_map.clear()
            self._rate_limiter_map = None
            self._table_limit_update_map.clear()
            self._table_limit_update_map = None
            if self._threadpool is not None:
                self._threadpool.close()
                self._threadpool = None

    def execute(self, request):
        """
        Execute the KV request and return the response. This is the top-level
        method for request execution.

        This method handles exceptions to distinguish between what can be
        retried and what cannot, making sure that root cause exceptions are
        kept. Examples:

            can't connect (host, port, etc)\n
            throttling exceptions\n
            general networking issues, IOError\n

        RequestTimeoutException needs a cause, or at least needs to include the
        message from the causing exception.

        :param request: the request to be executed by the server.
        :type request: Request
        :returns: the result of the request.
        :rtype: Result
        :raises IllegalArgumentException: raises the exception if request is
            None.
        """
        CheckValue.check_not_none(request, 'request')
        request.set_defaults(self._config)
        request.validate()
        if request.is_query_request():
            """
            The following 'if' may be True for advanced queries only. For such
            queries, the 'if' will be True (i.e., the QueryRequest will be bound
            with a QueryDriver) if and only if this is not the 1st execute()
            call for this query. In this case we just return a new, empty
            QueryResult. Actual computation of a result batch will take place
            when the app calls get_results() on the QueryResult.
            """
            if request.has_driver():
                self._trace('QueryRequest has QueryDriver', 2)
                return QueryResult(request, False)
            """
            If it is an advanced query and we are here, then this must be the
            1st execute() call for the query. If the query has been prepared
            before, we create a QueryDriver and bind it with the QueryRequest.
            Then, we create and return an empty QueryResult. Actual computation
            of a result batch will take place when the app calls get_results()
            on the QueryResult.
            """
            if request.is_prepared() and not request.is_simple_query():
                self._trace(
                    'QueryRequest has no QueryDriver, but is prepared', 2)
                driver = QueryDriver(request)
                driver.set_client(self)
                driver.set_topology_info(request.topology_info())
                return QueryResult(request, False)
            """
            If we are here, then this is either (a) a simple query or (b) an
            advanced query that has not been prepared already, which also
            implies that this is the 1st execute() call on this query. For a
            non-prepared advanced query, the effect of this 1st execute() call
            is to send the query to the proxy for compilation, get back the
            prepared query, but no query results, create a QueryDriver, and bind
            it with the QueryRequest (see QueryRequestSerializer.deserialize()),
            and return an empty QueryResult.
            """
            self._trace(
                'QueryRequest has no QueryDriver and is not prepared', 2)
        timeout_ms = request.get_timeout()
        headers = {'Host': self._url.hostname,
                   'Content-Type': 'application/octet-stream',
                   'Connection': 'keep-alive',
                   'Accept': 'application/octet-stream',
                   'User-Agent': self._user_agent}
        # We expressly check size limit below based on onprem versus cloud. Set
        # the request to not check size limit inside self._write_content().
        request.set_check_request_size(False)
        content = self.serialize_request(request, headers)
        content_len = len(content)
        # If on-premise the auth_provider will always be a
        # StoreAccessTokenProvider. If so, check against configurable limit.
        # Otherwise check against internal hardcoded cloud limit.
        if isinstance(self._auth_provider, StoreAccessTokenProvider):
            if content_len > self._max_content_length:
                raise RequestSizeLimitException(
                    'The request size of ' + str(content_len) + ' exceeded ' +
                    'the limit of ' + str(self._max_content_length))
        else:
            request.set_check_request_size(True)
            BinaryProtocol.check_request_size_limit(request, content_len)
        if request.get_compartment() is None:
            request.set_compartment_internal(
                self._config.get_default_compartment())
        if self._logutils.is_enabled_for(DEBUG):
            self._logutils.log_debug('Request: ' + request.__class__.__name__)
        request_id = self._next_request_id()
        headers[HttpConstants.REQUEST_ID_HEADER] = request_id
        self.check_request(request)
        # TODO: look at avoiding creating this object on each request
        request_utils = RequestUtils(
            self._sess, self._logutils, request, self._retry_handler, self,
            self._rate_limiter_map)
        return request_utils.do_post_request(
            self._request_uri, headers, content, timeout_ms)

    def check_request(self, request):
        # warn if using features not implemented at the connected server
        # currently cloud does not support Durability
        if self.serial_version < 3 or self._is_cloud:
            if isinstance(request, WriteRequest) and request.get_durability() is not None:
                self.one_time_message('The requested feature is not supported ' +
                                      'by the connected server: Durability')
        # ondemand tables are not available in V2 or onprem
        if self.serial_version < 3 or not self._is_cloud:
            if (isinstance(request, TableRequest) and
                    request.get_table_limits() is not None and
                    request.get_table_limits().get_mode() ==
                    TableLimits.CAPACITY_MODE.ON_DEMAND):
                raise OperationNotSupportedException(
                    'The requested feature is not supported ' +
                    'by the connected server: on demand capacity table')

    @synchronized
    def _next_request_id(self):
        """
        Get the next client-scoped request id. It really needs to be combined
        with a client id to obtain a globally unique scope but is sufficient
        for most purposes
        """
        self._request_id += 1
        return str(self._request_id)

    def get_auth_provider(self):
        """Return the configured AuthorizationProvider."""
        return self._auth_provider

    # for test use
    def get_is_cloud(self):
        """Return True if the client talks to the cloud service."""
        return self._is_cloud

    @synchronized
    def one_time_message(self, message):
        """Log a warning message at most once per client instance."""
        if self._one_time_messages.get(message) is None:
            self._one_time_messages[message] = "1"
            self._logutils.log_warning(message)

    def reset_rate_limiters(self, table_name):
        """
        Internal use only.

        Allow tests to reset limiters in map.

        :param table_name: name or OCID of the table.
        :type table_name: str
        """
        if self._rate_limiter_map is not None:
            self._rate_limiter_map.reset(table_name)

    def set_ratelimiter_duration_seconds(self, duration_seconds):
        # Allow tests to override this hardcoded setting
        self._ratelimiter_duration_seconds = duration_seconds

    def shut_down(self):
        # Shutdown the client: close auth provider, HTTP session and the
        # background limiter-refresh pool. Idempotent.
        self._logutils.log_debug('Shutting down driver http client')
        if self._shut_down:
            return
        self._shut_down = True
        if self._auth_provider is not None:
            self._auth_provider.close()
        if self._sess is not None:
            self._sess.close()
        if self._threadpool is not None:
            self._threadpool.close()

    def update_rate_limiters(self, table_name, limits):
        """
        Add or update rate limiters for a table.

        Cloud only.

        :param table_name: the table name or OCID of table.
        :type table_name: str
        :param limits: read/write limits for table.
        :type limits: TableLimits
        :returns: whether the update is succeed.
        """
        if self._rate_limiter_map is None:
            return False
        self._set_table_needs_refresh(table_name, False)
        if (limits is None or limits.get_read_units() <= 0 and
                limits.get_write_units() <= 0):
            self._rate_limiter_map.remove(table_name)
            self._logutils.log_info(
                'Removing rate limiting from table: ' + table_name)
            return False
        """
        Create or update rate limiters in map

        Note: NoSQL cloud service has a "burst" availability of 300 seconds. But
        we don't know if or how many other clients may have been using this
        table, and a duration of 30 seconds allows for more predictable usage.
        Also, it's better to use a reasonable hardcoded value here than to try
        to explain the subtleties of it in docs for configuration. In the end
        this setting is probably fine for all uses.
        """
        read_units = limits.get_read_units()
        write_units = limits.get_write_units()
        # If there's a specified rate limiter percentage, use that.
        rl_percent = self._config.get_default_rate_limiting_percentage()
        if rl_percent > 0.0:
            read_units = read_units * rl_percent / 100.0
            write_units = write_units * rl_percent / 100.0
        self._rate_limiter_map.update(
            table_name, float(read_units), float(write_units),
            self._ratelimiter_duration_seconds)
        msg = str.format('Updated table "{0}" to have RUs={1} and WUs={2} ' +
                         'per second.', table_name, str(read_units),
                         str(write_units))
        self._logutils.log_info(msg)
        return True

    def _check_and_set_proxy(self, sess):
        # Validate the proxy configuration and install it on the session.
        # Host and port must be set together; likewise user name and password.
        if (self._proxy_host is not None and self._proxy_port == 0 or
                self._proxy_host is None and self._proxy_port != 0):
            raise IllegalArgumentException(
                'To configure an HTTP proxy, both host and port are required.')
        if (self._proxy_username is not None and self._proxy_password is None or
                self._proxy_username is None and
                self._proxy_password is not None):
            raise IllegalArgumentException(
                'To configure HTTP proxy authentication, both user name and ' +
                'password are required')
        if self._proxy_host is not None:
            if self._proxy_username is None:
                proxy_url = (
                    'http://' + self._proxy_host + ':' + str(self._proxy_port))
            else:
                assert self._proxy_password is not None
                proxy_url = (
                    'http://' + self._proxy_username + ':' +
                    self._proxy_password + '@' + self._proxy_host + ':' +
                    str(self._proxy_port))
            sess.proxies = {'http': proxy_url, 'https': proxy_url}

    @staticmethod
    def _make_user_agent():
        # Build the User-Agent header; the else branch is Python 2 legacy.
        if version_info.major >= 3:
            pyversion = python_version()
        else:
            pyversion = '%s.%s.%s' % (version_info.major, version_info.minor,
                                      version_info.micro)
        return '%s/%s (Python %s)' % ('NoSQL-PythonSDK', __version__, pyversion)

    def _set_table_needs_refresh(self, table_name, needs_refresh):
        # Set the status of a table needing limits refresh now.
        if self._table_limit_update_map is None:
            return
        now_nanos = int(round(time() * 1000000000))
        if needs_refresh:
            # A timestamp just in the past forces a refresh on next check.
            self._table_limit_update_map[table_name] = now_nanos - 1
        else:
            self._table_limit_update_map[table_name] = (
                now_nanos + Client.LIMITER_REFRESH_NANOS)

    def _table_needs_refresh(self, table_name):
        # Return True if table needs limits refresh.
        if self._table_limit_update_map is None:
            return False
        then = self._table_limit_update_map.get(table_name)
        now_nanos = int(round(time() * 1000000000))
        if then is not None and then > now_nanos:
            return False
        return True

    @staticmethod
    def _trace(msg, level):
        # Print a driver trace message when tracing is enabled at this level.
        if level <= Client.TRACE_LEVEL:
            print('DRIVER: ' + msg)

    def _update_table_limiters(self, table_name):
        # This is meant to be run in a background thread: fetch the table's
        # current limits and install/update its rate limiters.
        req = GetTableRequest().set_table_name(table_name).set_timeout(1000)
        res = None
        try:
            self._logutils.log_debug(
                'Starting GetTableRequest for table "' + table_name + '"')
            res = self.execute(req)
        except Exception as e:
            self._logutils.log_error(
                'GetTableRequest for table "' + table_name +
                '" returned exception: ' + str(e))
        if res is None:
            # table doesn't exist? other error?
            self._logutils.log_error(
                'GetTableRequest for table "' + table_name + '" returned None')
            then = self._table_limit_update_map.get(table_name)
            if then is not None:
                # Allow retry after 100ms.
                self._table_limit_update_map[table_name] = (
                    int(round(time() * 1000000000)) + 100000000)
            return
        self._logutils.log_debug(
            'GetTableRequest completed for table "' + table_name + '"')
        # Update/add rate limiters for table.
        if self.update_rate_limiters(table_name, res.get_table_limits()):
            self._logutils.log_info(
                'Background thread added limiters for table "' + table_name +
                '"')

    def decrement_serial_version(self):
        """
        Decrements the serial version, if it is greater than the minimum.
        For internal use only.

        The current minimum value is 2.

        :returns: true if the version was decremented, false otherwise.
        :rtype: bool
        """
        if self.serial_version > 2:
            self.serial_version -= 1
            return True
        return False

    def _write_content(self, request):
        """
        Serializes the request payload, sent as http content.

        :param request: the request to be executed by the server.
        :type request: Request
        :returns: the bytearray that contains the content.
        :rtype: bytearray
        """
        content = bytearray()
        bos = ByteOutputStream(content)
        BinaryProtocol.write_serial_version(bos, self.serial_version)
        request.create_serializer().serialize(
            request, bos, self.serial_version)
        return content

    def serialize_request(self, request, headers):
        """
        Serializes the request payload and sets the Content-Length
        header to the correct value.

        :param request: the request to be executed by the server.
        :type request: Request
        :param headers: the http headers
        :type headers: Dictionary
        :returns: the bytearray that contains the content.
        :rtype: bytearray
        """
        content = self._write_content(request)
        headers.update({'Content-Length': str(len(content))})
        return content
| [
"requests.Session",
"threading.Lock",
"multiprocessing.pool.ThreadPool",
"time.time",
"platform.python_version"
] | [((2401, 2410), 'requests.Session', 'Session', ([], {}), '()\n', (2408, 2410), False, 'from requests import Session\n'), ((3824, 3830), 'threading.Lock', 'Lock', ([], {}), '()\n', (3828, 3830), False, 'from threading import Lock\n'), ((3567, 3585), 'multiprocessing.pool.ThreadPool', 'pool.ThreadPool', (['(1)'], {}), '(1)\n', (3582, 3585), False, 'from multiprocessing import pool\n'), ((4966, 4984), 'multiprocessing.pool.ThreadPool', 'pool.ThreadPool', (['(1)'], {}), '(1)\n', (4981, 4984), False, 'from multiprocessing import pool\n'), ((16665, 16681), 'platform.python_version', 'python_version', ([], {}), '()\n', (16679, 16681), False, 'from platform import python_version\n'), ((17203, 17209), 'time.time', 'time', ([], {}), '()\n', (17207, 17209), False, 'from time import time\n'), ((18015, 18021), 'time.time', 'time', ([], {}), '()\n', (18019, 18021), False, 'from time import time\n'), ((19228, 19234), 'time.time', 'time', ([], {}), '()\n', (19232, 19234), False, 'from time import time\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import random_seed
from tensorflow.python.ops.ragged import ragged_tensor
from tfmiss.ops import tfmiss_ops
def cbow_context(source, window, empty, name=None):
    """Generates `Continuous bag-of-words` contexts for inference from batched list of tokens.

    Args:
        source: `2-D` string `Tensor` or `RaggedTensor`, batched lists of tokens [sentences, tokens].
        window: `int`, size of context before and after target token, must be > 0.
        empty: value forwarded to the `miss_cbow_context` op — presumably the
            filler token used when a context position is empty; TODO confirm
            against the tfmiss op documentation.
        name: `string`, a name for the operation (optional).

    Returns:
        `2-D` string `RaggedTensor`: context tokens.
        `2-D` int32 `RaggedTensor`: context positions.
    """
    with tf.name_scope(name or 'cbow_context'):
        # Normalize input to a ragged_rank-1 RaggedTensor of shape
        # [sentences, tokens] before calling the custom op.
        source = ragged_tensor.convert_to_tensor_or_ragged_tensor(source, name='source')
        if source.shape.rank != 2:
            raise ValueError('Rank of `source` must equals 2')

        if not ragged_tensor.is_ragged(source):
            source = ragged_tensor.RaggedTensor.from_tensor(source, ragged_rank=1)
        if source.ragged_rank != 1:
            raise ValueError('Ragged rank of `source` must equals 1')

        # The op returns flat values plus shared row splits; rebuild the
        # ragged outputs from those splits.
        context_values, context_splits, context_positions = tfmiss_ops.miss_cbow_context(
            source_values=source.values,
            source_splits=source.row_splits,
            window=window,
            empty=empty
        )

        context = tf.RaggedTensor.from_row_splits(context_values, context_splits)
        position = tf.RaggedTensor.from_row_splits(context_positions, context_splits)

        return context, position
def cont_bow(source, window, seed=None, name=None):
    """Generates `Continuous bag-of-words` target and context pairs from batched list of tokens.

    Args:
        source: `2-D` string `Tensor` or `RaggedTensor`, batched lists of tokens [sentences, tokens].
        window: `int`, size of context before and after target token, must be > 0.
        seed: `int`, used to create a random seed (optional).
            See @{tf.random.set_seed} for behavior.
        name: `string`, a name for the operation (optional).

    Returns:
        `1-D` string `Tensor`: target tokens.
        `2-D` string `RaggedTensor`: context tokens.
        `2-D` int32 `RaggedTensor`: context positions.
    """
    with tf.name_scope(name or 'cont_bow'):
        # Coerce the input into a ragged_rank-1 RaggedTensor.
        batched = ragged_tensor.convert_to_tensor_or_ragged_tensor(source, name='source')
        if batched.shape.rank != 2:
            raise ValueError('Rank of `source` must equals 2')
        if not ragged_tensor.is_ragged(batched):
            batched = ragged_tensor.RaggedTensor.from_tensor(batched, ragged_rank=1)
        if batched.ragged_rank != 1:
            raise ValueError('Ragged rank of `source` must equals 1')

        # Derive the op-level seed pair from the optional user seed.
        op_seed, op_seed2 = random_seed.get_seed(seed)
        target, ctx_values, ctx_splits, ctx_positions = tfmiss_ops.miss_cont_bow(
            source_values=batched.values,
            source_splits=batched.row_splits,
            window=window,
            seed=op_seed,
            seed2=op_seed2)

        # Context tokens and their positions share the same row splits.
        return (
            target,
            tf.RaggedTensor.from_row_splits(ctx_values, ctx_splits),
            tf.RaggedTensor.from_row_splits(ctx_positions, ctx_splits))
def skip_gram(source, window, seed=None, name=None):
    """Generates `Skip-Gram` target and context pairs from batched list of tokens.

    Args:
        source: `2-D` string `Tensor` or `RaggedTensor`, batched lists of tokens [sentences, tokens].
        window: `int`, size of context before and after target token, must be > 0.
        seed: `int`, used to create a random seed (optional).
            See @{tf.random.set_seed} for behavior.
        name: `string`, a name for the operation (optional).

    Returns:
        Two `1-D` string `Tensor`s: target and context tokens.
    """
    with tf.name_scope(name or 'skip_gram'):
        # Coerce the input into a ragged_rank-1 RaggedTensor.
        tokens = ragged_tensor.convert_to_tensor_or_ragged_tensor(source, name='source')
        if tokens.shape.rank != 2:
            raise ValueError('Rank of `source` must equals 2')
        if not ragged_tensor.is_ragged(tokens):
            tokens = ragged_tensor.RaggedTensor.from_tensor(tokens, ragged_rank=1)
        if tokens.ragged_rank != 1:
            raise ValueError('Ragged rank of `source` must equals 1')

        # Derive the op-level seed pair and emit (target, context) directly.
        op_seed, op_seed2 = random_seed.get_seed(seed)
        return tfmiss_ops.miss_skip_gram(
            source_values=tokens.values,
            source_splits=tokens.row_splits,
            window=window,
            seed=op_seed,
            seed2=op_seed2)
def spaces_after(source, name=None):
    """Separates spaces from tokens.

    Args:
        source: `2-D` string `Tensor` or `RaggedTensor`, batched lists of "tokens with spaces" [sentences, tokens].
        name: `string`, a name for the operation (optional).

    Returns:
        `2-D` string `RaggedTensor`: tokens.
        `2-D` string `RaggedTensor`: spaces.
    """
    with tf.name_scope(name or 'spaces_after'):
        # Coerce the input into a ragged_rank-1 RaggedTensor.
        raw = ragged_tensor.convert_to_tensor_or_ragged_tensor(source, name='source')
        if raw.shape.rank != 2:
            raise ValueError('Rank of `source` must equals 2')
        if not ragged_tensor.is_ragged(raw):
            raw = ragged_tensor.RaggedTensor.from_tensor(raw, ragged_rank=1)
        if raw.ragged_rank != 1:
            raise ValueError('Ragged rank of `source` must equals 1')

        # The op splits each value into token and trailing-space parts
        # that share a single set of row splits.
        token_vals, space_vals, splits = tfmiss_ops.miss_spaces_after(
            source_values=raw.values,
            source_splits=raw.row_splits)

        return (
            tf.RaggedTensor.from_row_splits(token_vals, splits),
            tf.RaggedTensor.from_row_splits(space_vals, splits))
| [
"tensorflow.python.ops.ragged.ragged_tensor.convert_to_tensor_or_ragged_tensor",
"tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_tensor",
"tensorflow.python.ops.ragged.ragged_tensor.is_ragged",
"tensorflow.RaggedTensor.from_row_splits",
"tfmiss.ops.tfmiss_ops.miss_spaces_after",
"tensorflow.... | [((820, 857), 'tensorflow.name_scope', 'tf.name_scope', (["(name or 'cbow_context')"], {}), "(name or 'cbow_context')\n", (833, 857), True, 'import tensorflow as tf\n'), ((876, 947), 'tensorflow.python.ops.ragged.ragged_tensor.convert_to_tensor_or_ragged_tensor', 'ragged_tensor.convert_to_tensor_or_ragged_tensor', (['source'], {'name': '"""source"""'}), "(source, name='source')\n", (924, 947), False, 'from tensorflow.python.ops.ragged import ragged_tensor\n'), ((1347, 1470), 'tfmiss.ops.tfmiss_ops.miss_cbow_context', 'tfmiss_ops.miss_cbow_context', ([], {'source_values': 'source.values', 'source_splits': 'source.row_splits', 'window': 'window', 'empty': 'empty'}), '(source_values=source.values, source_splits=\n source.row_splits, window=window, empty=empty)\n', (1375, 1470), False, 'from tfmiss.ops import tfmiss_ops\n'), ((1543, 1606), 'tensorflow.RaggedTensor.from_row_splits', 'tf.RaggedTensor.from_row_splits', (['context_values', 'context_splits'], {}), '(context_values, context_splits)\n', (1574, 1606), True, 'import tensorflow as tf\n'), ((1626, 1692), 'tensorflow.RaggedTensor.from_row_splits', 'tf.RaggedTensor.from_row_splits', (['context_positions', 'context_splits'], {}), '(context_positions, context_splits)\n', (1657, 1692), True, 'import tensorflow as tf\n'), ((2434, 2467), 'tensorflow.name_scope', 'tf.name_scope', (["(name or 'cont_bow')"], {}), "(name or 'cont_bow')\n", (2447, 2467), True, 'import tensorflow as tf\n'), ((2486, 2557), 'tensorflow.python.ops.ragged.ragged_tensor.convert_to_tensor_or_ragged_tensor', 'ragged_tensor.convert_to_tensor_or_ragged_tensor', (['source'], {'name': '"""source"""'}), "(source, name='source')\n", (2534, 2557), False, 'from tensorflow.python.ops.ragged import ragged_tensor\n'), ((2920, 2946), 'tensorflow.python.framework.random_seed.get_seed', 'random_seed.get_seed', (['seed'], {}), '(seed)\n', (2940, 2946), False, 'from tensorflow.python.framework import random_seed\n'), ((3016, 3147), 
'tfmiss.ops.tfmiss_ops.miss_cont_bow', 'tfmiss_ops.miss_cont_bow', ([], {'source_values': 'source.values', 'source_splits': 'source.row_splits', 'window': 'window', 'seed': 'seed1', 'seed2': 'seed2'}), '(source_values=source.values, source_splits=source.\n row_splits, window=window, seed=seed1, seed2=seed2)\n', (3040, 3147), False, 'from tfmiss.ops import tfmiss_ops\n'), ((3232, 3295), 'tensorflow.RaggedTensor.from_row_splits', 'tf.RaggedTensor.from_row_splits', (['context_values', 'context_splits'], {}), '(context_values, context_splits)\n', (3263, 3295), True, 'import tensorflow as tf\n'), ((3315, 3381), 'tensorflow.RaggedTensor.from_row_splits', 'tf.RaggedTensor.from_row_splits', (['context_positions', 'context_splits'], {}), '(context_positions, context_splits)\n', (3346, 3381), True, 'import tensorflow as tf\n'), ((4025, 4059), 'tensorflow.name_scope', 'tf.name_scope', (["(name or 'skip_gram')"], {}), "(name or 'skip_gram')\n", (4038, 4059), True, 'import tensorflow as tf\n'), ((4078, 4149), 'tensorflow.python.ops.ragged.ragged_tensor.convert_to_tensor_or_ragged_tensor', 'ragged_tensor.convert_to_tensor_or_ragged_tensor', (['source'], {'name': '"""source"""'}), "(source, name='source')\n", (4126, 4149), False, 'from tensorflow.python.ops.ragged import ragged_tensor\n'), ((4512, 4538), 'tensorflow.python.framework.random_seed.get_seed', 'random_seed.get_seed', (['seed'], {}), '(seed)\n', (4532, 4538), False, 'from tensorflow.python.framework import random_seed\n'), ((4566, 4698), 'tfmiss.ops.tfmiss_ops.miss_skip_gram', 'tfmiss_ops.miss_skip_gram', ([], {'source_values': 'source.values', 'source_splits': 'source.row_splits', 'window': 'window', 'seed': 'seed1', 'seed2': 'seed2'}), '(source_values=source.values, source_splits=source\n .row_splits, window=window, seed=seed1, seed2=seed2)\n', (4591, 4698), False, 'from tfmiss.ops import tfmiss_ops\n'), ((5180, 5217), 'tensorflow.name_scope', 'tf.name_scope', (["(name or 'spaces_after')"], {}), "(name or 
'spaces_after')\n", (5193, 5217), True, 'import tensorflow as tf\n'), ((5236, 5307), 'tensorflow.python.ops.ragged.ragged_tensor.convert_to_tensor_or_ragged_tensor', 'ragged_tensor.convert_to_tensor_or_ragged_tensor', (['source'], {'name': '"""source"""'}), "(source, name='source')\n", (5284, 5307), False, 'from tensorflow.python.ops.ragged import ragged_tensor\n'), ((5699, 5794), 'tfmiss.ops.tfmiss_ops.miss_spaces_after', 'tfmiss_ops.miss_spaces_after', ([], {'source_values': 'source.values', 'source_splits': 'source.row_splits'}), '(source_values=source.values, source_splits=\n source.row_splits)\n', (5727, 5794), False, 'from tfmiss.ops import tfmiss_ops\n'), ((5842, 5902), 'tensorflow.RaggedTensor.from_row_splits', 'tf.RaggedTensor.from_row_splits', (['token_values', 'common_splits'], {}), '(token_values, common_splits)\n', (5873, 5902), True, 'import tensorflow as tf\n'), ((5920, 5980), 'tensorflow.RaggedTensor.from_row_splits', 'tf.RaggedTensor.from_row_splits', (['space_values', 'common_splits'], {}), '(space_values, common_splits)\n', (5951, 5980), True, 'import tensorflow as tf\n'), ((1063, 1094), 'tensorflow.python.ops.ragged.ragged_tensor.is_ragged', 'ragged_tensor.is_ragged', (['source'], {}), '(source)\n', (1086, 1094), False, 'from tensorflow.python.ops.ragged import ragged_tensor\n'), ((1117, 1178), 'tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_tensor', 'ragged_tensor.RaggedTensor.from_tensor', (['source'], {'ragged_rank': '(1)'}), '(source, ragged_rank=1)\n', (1155, 1178), False, 'from tensorflow.python.ops.ragged import ragged_tensor\n'), ((2673, 2704), 'tensorflow.python.ops.ragged.ragged_tensor.is_ragged', 'ragged_tensor.is_ragged', (['source'], {}), '(source)\n', (2696, 2704), False, 'from tensorflow.python.ops.ragged import ragged_tensor\n'), ((2727, 2788), 'tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_tensor', 'ragged_tensor.RaggedTensor.from_tensor', (['source'], {'ragged_rank': '(1)'}), '(source, 
ragged_rank=1)\n', (2765, 2788), False, 'from tensorflow.python.ops.ragged import ragged_tensor\n'), ((4265, 4296), 'tensorflow.python.ops.ragged.ragged_tensor.is_ragged', 'ragged_tensor.is_ragged', (['source'], {}), '(source)\n', (4288, 4296), False, 'from tensorflow.python.ops.ragged import ragged_tensor\n'), ((4319, 4380), 'tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_tensor', 'ragged_tensor.RaggedTensor.from_tensor', (['source'], {'ragged_rank': '(1)'}), '(source, ragged_rank=1)\n', (4357, 4380), False, 'from tensorflow.python.ops.ragged import ragged_tensor\n'), ((5423, 5454), 'tensorflow.python.ops.ragged.ragged_tensor.is_ragged', 'ragged_tensor.is_ragged', (['source'], {}), '(source)\n', (5446, 5454), False, 'from tensorflow.python.ops.ragged import ragged_tensor\n'), ((5477, 5538), 'tensorflow.python.ops.ragged.ragged_tensor.RaggedTensor.from_tensor', 'ragged_tensor.RaggedTensor.from_tensor', (['source'], {'ragged_rank': '(1)'}), '(source, ragged_rank=1)\n', (5515, 5538), False, 'from tensorflow.python.ops.ragged import ragged_tensor\n')] |
import sys
from typing import NoReturn, Optional, Type
from traceback_with_variables.print import print_exc, Format
def global_print_exc(fmt: Optional[Format] = None) -> NoReturn:
sys.excepthook = lambda e_cls, e, tb: print_exc(e=e, fmt=fmt)
def global_print_exc_in_ipython(fmt: Optional[Format] = None) -> None:
    """Patch IPython's traceback display to print variable values.

    Annotation fixed from ``NoReturn`` to ``None``: the function patches
    the shell and returns normally.

    :param fmt: optional output format for the printed traceback.
    :raises ValueError: if IPython is not installed.
    """
    try:
        import IPython
    except ModuleNotFoundError:
        raise ValueError("IPython not found")

    # Replace the shell-level traceback renderer; skip one frame so the
    # patched showtraceback itself is not included in the output.
    IPython.core.interactiveshell.InteractiveShell.showtraceback = \
        lambda self, *args, **kwargs: print_exc(num_skipped_frames=1, fmt=fmt)
def is_ipython_global(name: str, type_: Type, filename: str, is_global: bool) -> bool:
    """Return True if *name* looks like an IPython-injected global.

    Only global-scope names qualify: IPython's bookkeeping names
    ('In', 'Out', 'get_ipython', 'exit', 'quit') and any name that
    starts with an underscore.
    """
    if not is_global:
        return False
    ipython_builtins = ('In', 'Out', 'get_ipython', 'exit', 'quit')
    return name in ipython_builtins or name.startswith('_')
| [
"traceback_with_variables.print.print_exc"
] | [((232, 255), 'traceback_with_variables.print.print_exc', 'print_exc', ([], {'e': 'e', 'fmt': 'fmt'}), '(e=e, fmt=fmt)\n', (241, 255), False, 'from traceback_with_variables.print import print_exc, Format\n'), ((561, 601), 'traceback_with_variables.print.print_exc', 'print_exc', ([], {'num_skipped_frames': '(1)', 'fmt': 'fmt'}), '(num_skipped_frames=1, fmt=fmt)\n', (570, 601), False, 'from traceback_with_variables.print import print_exc, Format\n')] |
"""This is the main module"""
import random
import webhandler
import sentimentanalyser
def main():
    """Run client"""
    # Fail fast if the user has not configured an API key.
    if not webhandler.API_KEY:
        raise ValueError("Set your API_KEY in webhandler.py! Find it on https://devrecruitmentchallenge.com/account")
    analyser = sentimentanalyser.SentimentAnalyser()
    print("Getting challenge list")
    challenges = webhandler.get_challenge_list()
    print("There are {} challenges".format(len(challenges)))
    # Fetch and dispatch every challenge to the matching handler.
    for info in challenges:
        print("Solving challenge {} - {}".format(info.cid, info.challenge_type))
        challenge = webhandler.get_challenge(info.cid)
        kind = info.challenge_type
        if kind == "pertweet":
            handle_pertweet(challenge, analyser, False)
        elif kind == "aggregated":
            handle_aggregated(challenge, analyser)
        else:
            print("Unrecognised challenge type '{}'".format(kind))
def handle_pertweet(challenge, analyser, verbose=False):
    """Handle a per-tweet challenge"""
    # Map each tweet id to the list of per-subject sentiment records.
    sentiments = {
        tweet.tid: [
            {'subject': subject, 'sentiment': sentiment}
            for (subject, sentiment) in analyser.analyse_tweet(tweet.tweet, True, verbose)
        ]
        for tweet in challenge.tweets
    }
    submission = {'challengeId': challenge.info.cid, 'perTweetSentiment': sentiments}
    result = webhandler.post_pertweet_submission(submission)
    print("Mark = {}%".format(result.mark))
def handle_aggregated(challenge, analyser):
    """Handle an aggregated challenge: bucket per-company sentiment by time
    slot, then carry the last known average forward through empty slots."""
    sentiments = {}
    # Just guess
    # Time buckets span the full [min_time, max_time] range seen in the tweets.
    min_time = min(t.time for t in challenge.tweets)
    max_time = max(t.time for t in challenge.tweets)
    for tweet in challenge.tweets:
        result_list = analyser.analyse_tweet(tweet.tweet, True)
        mult = 1
        # Tweets from "Verified" sources get 1.5x weight.
        if (tweet.source.startswith("Verified")):
            mult = 1.5
        for (company,result) in result_list:
            # First mention of a company: create an empty bucket per time slot.
            if (not (company in sentiments)):
                sentiments[company] = {}
                for i in range(min_time,max_time+1):
                    sentiments[company][i] = []
            sentiments[company][tweet.time].append(mult*result);
    sols = {}
    # Reduce buckets to a single value per slot; empty slots reuse the last
    # computed average (buckets iterate in insertion order: min..max).
    for company in sentiments:
        lastval = 0
        for time in sentiments[company]:
            if (not(company in sols)):
                sols[company] = {}
            if (len(sentiments[company][time]) == 0):
                sols[company][time] = lastval
            else:
                # 1.0* forces float division of the weighted sum.
                lastval = 1.0*sum(sentiments[company][time])/len(sentiments[company][time])
                sols[company][time] = lastval
    submission = {'challengeId': challenge.info.cid, 'sentiments': sols}
    result = webhandler.post_aggregated_submission(submission)
    print ("Mark = {}%".format(result.mark))


if __name__ == "__main__":
    main()
| [
"webhandler.get_challenge",
"webhandler.get_challenge_list",
"webhandler.post_pertweet_submission",
"webhandler.post_aggregated_submission",
"sentimentanalyser.SentimentAnalyser"
] | [((286, 323), 'sentimentanalyser.SentimentAnalyser', 'sentimentanalyser.SentimentAnalyser', ([], {}), '()\n', (321, 323), False, 'import sentimentanalyser\n'), ((382, 413), 'webhandler.get_challenge_list', 'webhandler.get_challenge_list', ([], {}), '()\n', (411, 413), False, 'import webhandler\n'), ((1414, 1461), 'webhandler.post_pertweet_submission', 'webhandler.post_pertweet_submission', (['submission'], {}), '(submission)\n', (1449, 1461), False, 'import webhandler\n'), ((2746, 2795), 'webhandler.post_aggregated_submission', 'webhandler.post_aggregated_submission', (['submission'], {}), '(submission)\n', (2783, 2795), False, 'import webhandler\n'), ((613, 647), 'webhandler.get_challenge', 'webhandler.get_challenge', (['info.cid'], {}), '(info.cid)\n', (637, 647), False, 'import webhandler\n')] |
from django import forms
class CreateListForm(forms.Form):
    """Form with a single required text field for naming a new list."""
    # max_length caps the name at 300 characters at form-validation time.
    name = forms.CharField(label="Name ", max_length=300)
| [
"django.forms.CharField"
] | [((71, 117), 'django.forms.CharField', 'forms.CharField', ([], {'label': '"""Name """', 'max_length': '(300)'}), "(label='Name ', max_length=300)\n", (86, 117), False, 'from django import forms\n')] |
from services.rabbitmq_wrapper import RabbitMqWrapper
import yaml
def test_given_validRabbitConfigurations_when_openChannelCalled_then_expectSuccessfulConnection(configs):
    """A channel opened with a valid queue configuration reports itself as open."""
    mq = RabbitMqWrapper(configs["queue"])
    channel = mq.open_channel()
    # Capture the state before tearing the connection down, then assert.
    channel_was_open = channel.is_open
    mq.close_connection()
    assert channel_was_open
"services.rabbitmq_wrapper.RabbitMqWrapper"
] | [((187, 220), 'services.rabbitmq_wrapper.RabbitMqWrapper', 'RabbitMqWrapper', (["configs['queue']"], {}), "(configs['queue'])\n", (202, 220), False, 'from services.rabbitmq_wrapper import RabbitMqWrapper\n')] |
#!/usr/bin/env python
import os
import shutil
import sys
from setuptools import setup, find_packages
# Package version string; also embedded in setup_info below.
VERSION = '0.0.1'
long_description = """
Fast CPU/CUDA Solid Harmonic 3D Scattering implementation
Numpy + PyTorch + FFTW / cuFFT implementation
"""
# Keyword arguments forwarded to setuptools.setup() below.
setup_info = dict(
    # Metadata
    name='scatharm',
    version=VERSION,
    author='<NAME>',
    author_email='louis(dot)thiry<at>outlook(dot)fr',
    url='https://github.com/louity/pyscatharm',
    description='Fast CPU/CUDA Solid Harmonic 3D Scattering implementation',
    long_description=long_description,
    license='BSD',
    # Package info
    packages=find_packages(exclude=('test',)),
    zip_safe=True,
    install_requires=[
        'torch',
        'six'
    ]
)
# Importing or running this module registers/builds the package.
setup(**setup_info)
| [
"setuptools.find_packages",
"setuptools.setup"
] | [((739, 758), 'setuptools.setup', 'setup', ([], {}), '(**setup_info)\n', (744, 758), False, 'from setuptools import setup, find_packages\n'), ((621, 653), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "('test',)"}), "(exclude=('test',))\n", (634, 653), False, 'from setuptools import setup, find_packages\n')] |
import argparse
from aquosRemote.aquos import AquosTV
def main():
    """Parse the TV's IP address from the command line and run a smoke test."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--ip-address", type=str,
                        help="IP address of AQUOS TV", required=True)
    args = parser.parse_args()
    # Example/Test: connect, power the set on, and dump its info.
    tv = AquosTV(args.ip_address, setup=True, verbose=True)
    tv.on()
    print(tv.get_info())


if __name__ == "__main__":
    main()
| [
"aquosRemote.aquos.AquosTV",
"argparse.ArgumentParser"
] | [((81, 106), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (104, 106), False, 'import argparse\n'), ((296, 346), 'aquosRemote.aquos.AquosTV', 'AquosTV', (['args.ip_address'], {'setup': '(True)', 'verbose': '(True)'}), '(args.ip_address, setup=True, verbose=True)\n', (303, 346), False, 'from aquosRemote.aquos import AquosTV\n')] |
# For importing keys
import sys
sys.path.append("../")
sys.path.append("DONOTPUSH/")
import api_keys
# My modules
import database as db
# Libraries
import json
from time import sleep
# from github import Github
from alpha_vantage.timeseries import TimeSeries
import logging
# Constants
REPO_NAME = "My-All-Weather-Strategy"  # GitHub repo name (unused in the code shown)
STRATEGY = "../strategy.json"          # strategy definition consumed by main()
LOG = "DONOTPUSH/errors.log"           # error log written by the __main__ guard
# Global objects
client = None  # placeholder for a shared client handle; not assigned in the code shown
def main(argv):
    """Read the strategy file, record asset performance and update the portfolio.

    Fix: removed the dead pre-initialisations (``portfolio_json`` was never
    used; the other ``= None`` assignments were immediately overwritten).
    """
    # Parse the strategy JSON (name, rebalance band, asset list).
    with open(STRATEGY, 'r') as f:
        strategy_json = json.loads(f.read())
    strategy_name = strategy_json['Name']
    band_threshold = strategy_json['Percent Band Threshold']
    assets = strategy_json['Portfolio']
    # Keep only valid assets (non-empty ticker and non-zero allocation),
    # keyed by asset name; 'Name' is popped out of each entry.
    valid = {}
    for asset in assets:
        if asset['Ticker'] != '' and asset['Percent Allocation'] != 0:
            valid[asset.pop('Name')] = asset
    assets = valid
    # Fetch current price and percent change for every asset.
    assets = get_asset_changes(assets)
    # Persist each asset's performance.
    for name, asset in assets.items():
        fields = {'Price': asset['Price'], 'Percent Change': asset['Percent Change']}
        db.write_asset_price(name, asset['Ticker'], fields)
    # Update the portfolio and persist the new balances.
    balances, balanced = update_portfolio(strategy_name, assets, band_threshold)
    db.write_balance(strategy_name, balances, balanced=balanced)
def get_asset_changes(assets):
    """
    Fetch the current price for every asset and compute its percent change
    versus the previous price stored in the database.

    Mutates each asset dict in place, adding 'Price' and 'Percent Change'
    keys, and returns the same ``assets`` mapping for ease of use.
    When no previous price exists the change is recorded as 0 and an
    error is logged.
    """
    for name in assets.keys():
        asset = assets[name]
        ticker = asset['Ticker']
        price = get_price_curr(ticker)
        prev_price = db.get_price_prev(ticker)
        if prev_price == None:
            percent_change = 0
            logging.error("Could not pull value for ticker " + ticker + " from database")
        else:
            # NOTE(review): a stored prev_price of exactly 0 would raise
            # ZeroDivisionError here — confirm the database never holds 0.
            percent_change = (price - prev_price) / prev_price * 100
        asset['Price'] = price
        asset['Percent Change'] = percent_change
    return assets
def get_price_curr(ticker):
    """
    Pull the latest quote for *ticker* from Alpha Vantage.

    Returns the price as a float. On a ValueError (non-numeric payload,
    presumably due to rate limiting — TODO confirm) it waits 61 seconds and
    retries once. Fix: if the retry also fails the error is now re-raised
    instead of silently falling off the end and returning None, which made
    callers crash later with an unrelated TypeError.
    """
    ts = TimeSeries(api_keys.AV_KEY)
    max_attempts = 2
    for attempt in range(1, max_attempts + 1):
        try:
            data = ts.get_quote_endpoint(symbol=ticker)
            return float(data[0]['05. price'])
        except ValueError:
            if attempt == max_attempts:
                raise  # out of retries: surface the failure to the caller
            sleep(61)  # wait out the per-minute request limit before retrying
def update_portfolio(strategy_name, assets, band_threshold):
    """
    Handles all processing required to update the portfolio.
    Returns back the balances for the updated portfolio
    and whether the portfolio was created or rebalanced (balanced).
    As a tuple (balances_dict, bool).
    """
    old = db.get_balance_prev(strategy_name)
    new = {}
    balanced = False
    # NOTE(review): prefer ``old is None`` — ``== None`` relies on __eq__.
    if old == None:
        # No stored portfolio yet: create one with the default size.
        new = create_portfolio(assets)
        balanced = True
    else:
        # Grow each holding by its percent change and re-total the balance.
        total_balance = 0
        for name in old.keys():
            if name != 'Balance':
                balance = old[name]
                change = assets[name]['Percent Change'] / 100
                new_balance = balance * (1 + change)
                new[name] = new_balance
                total_balance += new_balance
        new['Balance'] = total_balance
        balanced = rebalance_portfolio(new, assets, band_threshold)
    return (new, balanced)
def create_portfolio(assets, size=1.0):
    """
    Build a portfolio dict {'Balance': size, asset_name: balance, ...}
    from each asset's 'Percent Allocation'.

    Fix: renamed the accumulator so it no longer shadows the ``sum`` builtin.

    :raises ValueError: if the allocations do not sum to exactly 100.
    """
    total_allocation = 0
    for name in assets.keys():
        total_allocation += assets[name]['Percent Allocation']
    if total_allocation != 100:
        raise ValueError("Portfolio allocation does not sum to 100%")
    portfolio = {'Balance': size}
    for name in assets.keys():
        portfolio[name] = size * (assets[name]['Percent Allocation'] / 100)
    return portfolio


def rebalance_portfolio(portfolio, assets, band_threshold):
    """
    Rebalance *portfolio* in place when any asset's current allocation
    drifts at least ``band_threshold`` percentage points from its target.

    Returns True if a rebalance happened, False otherwise.
    """
    total_balance = portfolio['Balance']
    for name in portfolio:
        if name == 'Balance':
            continue
        target = assets[name]['Percent Allocation']
        current = portfolio[name] / total_balance * 100
        if abs(current - target) >= band_threshold:
            # Bug fix: the rebuilt portfolio used to be discarded, so the
            # function reported a rebalance without actually performing one.
            portfolio.update(create_portfolio(assets, size=total_balance))
            return True
    return False
if __name__ == "__main__":
    try:
        # Log everything (DEBUG and up) to the private error log.
        logging.basicConfig(filename=LOG, level=logging.DEBUG,
                            format='%(asctime)s %(levelname)s %(name)s %(message)s')
        main(sys.argv[1:])
    except Exception as e:
        # Fix: logging.exception records the full traceback; logging.error(e)
        # only logged the message, losing the stack needed for debugging.
        logging.exception(e)
"logging.basicConfig",
"database.write_balance",
"database.get_price_prev",
"database.get_balance_prev",
"time.sleep",
"database.write_asset_price",
"alpha_vantage.timeseries.TimeSeries",
"sys.path.append",
"logging.error"
] | [((32, 54), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (47, 54), False, 'import sys\n'), ((55, 84), 'sys.path.append', 'sys.path.append', (['"""DONOTPUSH/"""'], {}), "('DONOTPUSH/')\n", (70, 84), False, 'import sys\n'), ((1646, 1706), 'database.write_balance', 'db.write_balance', (['strategy_name', 'balances'], {'balanced': 'balanced'}), '(strategy_name, balances, balanced=balanced)\n', (1662, 1706), True, 'import database as db\n'), ((2637, 2664), 'alpha_vantage.timeseries.TimeSeries', 'TimeSeries', (['api_keys.AV_KEY'], {}), '(api_keys.AV_KEY)\n', (2647, 2664), False, 'from alpha_vantage.timeseries import TimeSeries\n'), ((3214, 3248), 'database.get_balance_prev', 'db.get_balance_prev', (['strategy_name'], {}), '(strategy_name)\n', (3233, 3248), True, 'import database as db\n'), ((1452, 1494), 'database.write_asset_price', 'db.write_asset_price', (['name', 'ticker', 'fields'], {}), '(name, ticker, fields)\n', (1472, 1494), True, 'import database as db\n'), ((2125, 2150), 'database.get_price_prev', 'db.get_price_prev', (['ticker'], {}), '(ticker)\n', (2142, 2150), True, 'import database as db\n'), ((4987, 5103), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'LOG', 'level': 'logging.DEBUG', 'format': '"""%(asctime)s %(levelname)s %(name)s %(message)s"""'}), "(filename=LOG, level=logging.DEBUG, format=\n '%(asctime)s %(levelname)s %(name)s %(message)s')\n", (5006, 5103), False, 'import logging\n'), ((2225, 2302), 'logging.error', 'logging.error', (["('Could not pull value for ticker ' + ticker + ' from database')"], {}), "('Could not pull value for ticker ' + ticker + ' from database')\n", (2238, 2302), False, 'import logging\n'), ((5176, 5192), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (5189, 5192), False, 'import logging\n'), ((2888, 2897), 'time.sleep', 'sleep', (['(61)'], {}), '(61)\n', (2893, 2897), False, 'from time import sleep\n')] |
from apartment import Apartment, Apartments, Price, Area, Year, Vastike, Floor, Rooms, Zone, K, Parameter
import pytest
import unittest
class TestParameter(unittest.TestCase):
    """Unit tests for the Parameter/Price value objects and the K constant."""

    def test_K(self):
        # K is the thousands multiplier used throughout the apartment module.
        self.assertEqual(K, 1000)

    def test_Parameter_Constructor_DefaultValues(self):
        """A Parameter built with only a value uses the documented defaults."""
        p = Parameter(150*K)
        self.assertEqual(p.value, 150000)
        self.assertEqual(p.is_increasing_better, True)
        self.assertEqual(p.unit, "")
        self.assertEqual(p.name, "")
        self.assertEqual(p.range, None)
        self.assertEqual(p.weight, 1.0)
        self.assertEqual(p.normalized_value, 0.0)

    def test_Parameter_Throw_Errors(self):
        """Invalid ranges (inverted bounds, value outside range) raise ValueError."""
        with pytest.raises(ValueError):
            p = Parameter(10, range=(100,10))
        with pytest.raises(ValueError):
            p = Parameter(0, range=(10,100))

    def test_Parameter_Constructor_CustomValues(self):
        """All positional constructor arguments are stored as given."""
        p = Parameter(120*K, False, "euro", "price", (100*K, 400*K), 2.0)
        self.assertEqual(p.value, 120*K)
        self.assertEqual(p.is_increasing_better, False)
        self.assertEqual(p.unit, "euro")
        self.assertEqual(p.name, "price")
        self.assertEqual(p.range, (100*K, 400*K))

    def test_Price(self):
        """Price is a decreasing-is-better Parameter; normalize() maps the
        value linearly into [0, 1] over the given bounds."""
        price = Price(value=150 *K, range = None)
        self.assertEqual(price.is_increasing_better, False)
        self.assertEqual(price.value, 150 *K)
        self.assertEqual(price.normalized_value, 0.0)
        self.assertEqual(price.calculate_weighted_value(), 0.0)
        price.normalize(150*K, 350*K)
        self.assertEqual(price.normalized_value, 1.0)
        self.assertEqual(price.calculate_weighted_value(), 1.0)
        price.normalize(100*K, 200*K)
        self.assertEqual(price.normalized_value, 0.5)
        self.assertEqual(price.calculate_weighted_value(), 0.5)
| [
"apartment.Parameter",
"pytest.raises",
"apartment.Price"
] | [((302, 320), 'apartment.Parameter', 'Parameter', (['(150 * K)'], {}), '(150 * K)\n', (311, 320), False, 'from apartment import Apartment, Apartments, Price, Area, Year, Vastike, Floor, Rooms, Zone, K, Parameter\n'), ((904, 971), 'apartment.Parameter', 'Parameter', (['(120 * K)', '(False)', '"""euro"""', '"""price"""', '(100 * K, 400 * K)', '(2.0)'], {}), "(120 * K, False, 'euro', 'price', (100 * K, 400 * K), 2.0)\n", (913, 971), False, 'from apartment import Apartment, Apartments, Price, Area, Year, Vastike, Floor, Rooms, Zone, K, Parameter\n'), ((1240, 1272), 'apartment.Price', 'Price', ([], {'value': '(150 * K)', 'range': 'None'}), '(value=150 * K, range=None)\n', (1245, 1272), False, 'from apartment import Apartment, Apartments, Price, Area, Year, Vastike, Floor, Rooms, Zone, K, Parameter\n'), ((677, 702), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (690, 702), False, 'import pytest\n'), ((720, 750), 'apartment.Parameter', 'Parameter', (['(10)'], {'range': '(100, 10)'}), '(10, range=(100, 10))\n', (729, 750), False, 'from apartment import Apartment, Apartments, Price, Area, Year, Vastike, Floor, Rooms, Zone, K, Parameter\n'), ((764, 789), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (777, 789), False, 'import pytest\n'), ((807, 836), 'apartment.Parameter', 'Parameter', (['(0)'], {'range': '(10, 100)'}), '(0, range=(10, 100))\n', (816, 836), False, 'from apartment import Apartment, Apartments, Price, Area, Year, Vastike, Floor, Rooms, Zone, K, Parameter\n')] |
from trezor.crypto.hashlib import sha256
from apps.common import HARDENED
def get_address_from_public_key(pubkey):
    """Derive the address string for *pubkey*: the first 8 bytes of its
    SHA-256 digest read as a little-endian integer, suffixed with 'L'."""
    digest = sha256(pubkey).digest()
    numeric = int.from_bytes(digest[:8], "little")
    return "{}L".format(numeric)
def get_votes_count(votes):
plus, minus = 0, 0
for vote in votes:
if vote.startswith("+"):
plus += 1
else:
minus += 1
return plus, minus
def get_vote_tx_text(votes):
plus, minus = get_votes_count(votes)
text = []
if plus > 0:
text.append(_text_with_plural("Add", plus))
if minus > 0:
text.append(_text_with_plural("Remove", minus))
return text
def _text_with_plural(txt, value):
return "%s %s %s" % (txt, value, ("votes" if value != 1 else "vote"))
def validate_full_path(path: list) -> bool:
    """
    Validates derivation path to equal 44'/134'/a',
    where `a` is an account index from 0 to 1 000 000.
    """
    if len(path) != 3:
        return False
    if path[0] != 44 | HARDENED or path[1] != 134 | HARDENED:
        return False
    return HARDENED <= path[2] <= 1000000 | HARDENED
| [
"trezor.crypto.hashlib.sha256"
] | [((135, 149), 'trezor.crypto.hashlib.sha256', 'sha256', (['pubkey'], {}), '(pubkey)\n', (141, 149), False, 'from trezor.crypto.hashlib import sha256\n')] |
from torch import nn
import torch.nn.functional as F
from model.basic import DownSampling, SSnbt, APN
class LEDNet(nn.Module):
    """LEDNet segmentation network: an SSnbt/DownSampling encoder followed by
    an APN decoder, with the output upsampled back to the input resolution."""

    def __init__(self, nclass, drop=0.1):
        # nclass: number of output channels/classes produced by the decoder.
        # drop: base dropout rate; the early (shallow) stages use 0.1 * drop,
        # the 128-channel stages use the full rate.
        super(LEDNet, self).__init__()
        self.encoder = nn.Sequential(
            DownSampling(3, 29), SSnbt(32, 1, 0.1 * drop), SSnbt(32, 1, 0.1 * drop), SSnbt(32, 1, 0.1 * drop),
            DownSampling(32, 32), SSnbt(64, 1, 0.1 * drop), SSnbt(64, 1, 0.1 * drop),
            # The second SSnbt argument grows (1, 2, 5, 9, 17) — presumably a
            # dilation schedule to widen the receptive field; confirm in model.basic.
            DownSampling(64, 64), SSnbt(128, 1, drop), SSnbt(128, 2, drop), SSnbt(128, 5, drop),
            SSnbt(128, 9, drop), SSnbt(128, 2, drop), SSnbt(128, 5, drop), SSnbt(128, 9, drop), SSnbt(128, 17, drop)
        )
        self.decoder = APN(128, nclass)

    def forward(self, x):
        # Remember the input spatial size so the logits can be upsampled back.
        _, _, h, w = x.shape
        x = self.encoder(x)
        x = self.decoder(x)
        return F.interpolate(x, size=(h, w), mode='bilinear', align_corners=True)
if __name__ == '__main__':
    # Smoke test: run a random batch through a 21-class model and print
    # the output shape (should match the input's spatial size).
    net = LEDNet(21)
    import torch
    a = torch.randn(2, 3, 554, 253)
    out = net(a)
    print(out.shape)
| [
"model.basic.SSnbt",
"model.basic.APN",
"model.basic.DownSampling",
"torch.nn.functional.interpolate",
"torch.randn"
] | [((979, 1006), 'torch.randn', 'torch.randn', (['(2)', '(3)', '(554)', '(253)'], {}), '(2, 3, 554, 253)\n', (990, 1006), False, 'import torch\n'), ((692, 708), 'model.basic.APN', 'APN', (['(128)', 'nclass'], {}), '(128, nclass)\n', (695, 708), False, 'from model.basic import DownSampling, SSnbt, APN\n'), ((836, 902), 'torch.nn.functional.interpolate', 'F.interpolate', (['x'], {'size': '(h, w)', 'mode': '"""bilinear"""', 'align_corners': '(True)'}), "(x, size=(h, w), mode='bilinear', align_corners=True)\n", (849, 902), True, 'import torch.nn.functional as F\n'), ((260, 279), 'model.basic.DownSampling', 'DownSampling', (['(3)', '(29)'], {}), '(3, 29)\n', (272, 279), False, 'from model.basic import DownSampling, SSnbt, APN\n'), ((281, 305), 'model.basic.SSnbt', 'SSnbt', (['(32)', '(1)', '(0.1 * drop)'], {}), '(32, 1, 0.1 * drop)\n', (286, 305), False, 'from model.basic import DownSampling, SSnbt, APN\n'), ((307, 331), 'model.basic.SSnbt', 'SSnbt', (['(32)', '(1)', '(0.1 * drop)'], {}), '(32, 1, 0.1 * drop)\n', (312, 331), False, 'from model.basic import DownSampling, SSnbt, APN\n'), ((333, 357), 'model.basic.SSnbt', 'SSnbt', (['(32)', '(1)', '(0.1 * drop)'], {}), '(32, 1, 0.1 * drop)\n', (338, 357), False, 'from model.basic import DownSampling, SSnbt, APN\n'), ((371, 391), 'model.basic.DownSampling', 'DownSampling', (['(32)', '(32)'], {}), '(32, 32)\n', (383, 391), False, 'from model.basic import DownSampling, SSnbt, APN\n'), ((393, 417), 'model.basic.SSnbt', 'SSnbt', (['(64)', '(1)', '(0.1 * drop)'], {}), '(64, 1, 0.1 * drop)\n', (398, 417), False, 'from model.basic import DownSampling, SSnbt, APN\n'), ((419, 443), 'model.basic.SSnbt', 'SSnbt', (['(64)', '(1)', '(0.1 * drop)'], {}), '(64, 1, 0.1 * drop)\n', (424, 443), False, 'from model.basic import DownSampling, SSnbt, APN\n'), ((457, 477), 'model.basic.DownSampling', 'DownSampling', (['(64)', '(64)'], {}), '(64, 64)\n', (469, 477), False, 'from model.basic import DownSampling, SSnbt, APN\n'), ((479, 498), 
'model.basic.SSnbt', 'SSnbt', (['(128)', '(1)', 'drop'], {}), '(128, 1, drop)\n', (484, 498), False, 'from model.basic import DownSampling, SSnbt, APN\n'), ((500, 519), 'model.basic.SSnbt', 'SSnbt', (['(128)', '(2)', 'drop'], {}), '(128, 2, drop)\n', (505, 519), False, 'from model.basic import DownSampling, SSnbt, APN\n'), ((521, 540), 'model.basic.SSnbt', 'SSnbt', (['(128)', '(5)', 'drop'], {}), '(128, 5, drop)\n', (526, 540), False, 'from model.basic import DownSampling, SSnbt, APN\n'), ((554, 573), 'model.basic.SSnbt', 'SSnbt', (['(128)', '(9)', 'drop'], {}), '(128, 9, drop)\n', (559, 573), False, 'from model.basic import DownSampling, SSnbt, APN\n'), ((575, 594), 'model.basic.SSnbt', 'SSnbt', (['(128)', '(2)', 'drop'], {}), '(128, 2, drop)\n', (580, 594), False, 'from model.basic import DownSampling, SSnbt, APN\n'), ((596, 615), 'model.basic.SSnbt', 'SSnbt', (['(128)', '(5)', 'drop'], {}), '(128, 5, drop)\n', (601, 615), False, 'from model.basic import DownSampling, SSnbt, APN\n'), ((617, 636), 'model.basic.SSnbt', 'SSnbt', (['(128)', '(9)', 'drop'], {}), '(128, 9, drop)\n', (622, 636), False, 'from model.basic import DownSampling, SSnbt, APN\n'), ((638, 658), 'model.basic.SSnbt', 'SSnbt', (['(128)', '(17)', 'drop'], {}), '(128, 17, drop)\n', (643, 658), False, 'from model.basic import DownSampling, SSnbt, APN\n')] |
#!/usr/bin/env python
# Copyright (c) 2016 The UUV Simulator Authors.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
PKG = 'uuv_manipulators_kinematics'
import roslib; roslib.load_manifest(PKG)
import sys
import unittest
import numpy as np
from uuv_manipulator_interfaces import ArmInterface
class TestArmInterface(unittest.TestCase):
    """Integration tests for the ArmInterface ROS wrapper.

    Fix: replaced the deprecated ``assertEquals``/``assertNotEquals``
    aliases with ``assertEqual``/``assertNotEqual`` (deprecated since
    Python 3.2); messages and logic are unchanged.
    """

    def test_init_interface(self):
        """The interface exposes the expected namespace, names and joint maps."""
        arm = ArmInterface()
        # Test if the namespace and arm name are correct
        self.assertEqual(arm.namespace, '/rexrov/', 'Invalid robot namespace')
        self.assertEqual(arm.arm_name, 'oberon7', 'Invalid arm name')
        self.assertEqual(arm.base_link, 'oberon7/base', 'Invalid manipulator base name')
        self.assertEqual(arm.tip_link, 'oberon7/end_effector', 'Invalid end-effector link name')
        self.assertNotEqual(len(arm.joint_names), 0, 'The list of joint names is empty')
        self.assertEqual(arm.n_links, 6, 'Invalid number of links, n_links=' + str(arm.n_links))
        for name in arm.joint_names:
            self.assertIn(name, arm.joint_angles, 'Joint name %s not listed in the joint positions dictionary' % name)
            self.assertIn(name, arm.joint_velocities, 'Joint name %s not listed in the joint velocities dictionary' % name)
            self.assertIn(name, arm.joint_efforts, 'Joint name %s not listed in the joint efforts dictionary' % name)

    def test_joints_to_kdl(self):
        """KDL joint arrays grow by one row per additional joint."""
        arm = ArmInterface()
        for idx, name in zip(range(len(arm.joint_names)), arm.joint_names):
            for t in ['positions', 'torques']:
                jnt_array = arm.joints_to_kdl(t, last_joint=name)
                self.assertEqual(jnt_array.rows(), idx + 1,
                                 'Invalid number of joints, joint_idx=%d, last_joint=%s, n_joints=%d' % (idx, name, jnt_array.rows()))

    def test_jacobian(self):
        """Jacobian matrices have consistent shapes for every end link."""
        arm = ArmInterface()
        jac = arm.jacobian()
        self.assertIsNotNone(jac, 'Jacobian matrix is invalid')
        self.assertEqual(jac.shape, (arm.n_links, 6), 'The full Jacobian matrix has the wrong size')
        for idx, name in zip(range(len(arm.link_names)), arm.link_names):
            self.assertEqual(arm.jacobian(end_link=name).shape, (arm.n_links, 6))
            self.assertEqual(arm.jacobian_transpose(end_link=name).shape, (6, arm.n_links))

    def test_home_config(self):
        """A home configuration is defined for the arm."""
        arm = ArmInterface()
        self.assertIsNotNone(arm.home, 'Home configuration is invalid')
if __name__ == '__main__':
    # Run the suite under rosunit so results integrate with the ROS test tooling.
    import rosunit
    rosunit.unitrun(PKG, 'test_arm_interface', TestArmInterface)
| [
"rosunit.unitrun",
"uuv_manipulator_interfaces.ArmInterface",
"roslib.load_manifest"
] | [((728, 753), 'roslib.load_manifest', 'roslib.load_manifest', (['PKG'], {}), '(PKG)\n', (748, 753), False, 'import roslib\n'), ((3098, 3158), 'rosunit.unitrun', 'rosunit.unitrun', (['PKG', '"""test_arm_interface"""', 'TestArmInterface'], {}), "(PKG, 'test_arm_interface', TestArmInterface)\n", (3113, 3158), False, 'import rosunit\n'), ((948, 962), 'uuv_manipulator_interfaces.ArmInterface', 'ArmInterface', ([], {}), '()\n', (960, 962), False, 'from uuv_manipulator_interfaces import ArmInterface\n'), ((2006, 2020), 'uuv_manipulator_interfaces.ArmInterface', 'ArmInterface', ([], {}), '()\n', (2018, 2020), False, 'from uuv_manipulator_interfaces import ArmInterface\n'), ((2451, 2465), 'uuv_manipulator_interfaces.ArmInterface', 'ArmInterface', ([], {}), '()\n', (2463, 2465), False, 'from uuv_manipulator_interfaces import ArmInterface\n'), ((2959, 2973), 'uuv_manipulator_interfaces.ArmInterface', 'ArmInterface', ([], {}), '()\n', (2971, 2973), False, 'from uuv_manipulator_interfaces import ArmInterface\n')] |
"""
Feature Selection
Test 3
Random Forest, heatmap
"""
import matplotlib.pyplot as plt
from mlxtend.plotting import scatterplotmatrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import train_test_split
from mlxtend.plotting import heatmap
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
# Reading data
df = pd.read_csv('NewBioDegWCols.csv')
# Assign the QSAR biodegradation descriptor names; 'TAR' is the target column.
df.columns = ['SpMax_L','J_Dz','nHM','F01','F04','NssssC','nCb-','C%','nCp',
              'n0','F03CN','SdssC','HyWi_B','LOC','SM6_L','F03CO','Me','Mi',
              'nN-N','nArN02','nCRX3','SpPosA_B','nCIR','B01','B03','N-073',
              'SpMax_A','Psi_i_1d','B04','Sd0','TI2_L','nCrt','c-026','F02',
              'nHDon','SpMax_B','Psi_i_A','nN','SM6_B','nArCOOR','nX','TAR']
# Encode the target: ready-biodegradable (RB) -> 1, not (NRB) -> 0.
df['TAR'] = df['TAR'].replace(['RB', 'NRB'], [1, 0])
# Turn literal 'NaN' strings into real NaN, then impute with column means.
df.replace(to_replace='NaN', value=np.nan, regex=True, inplace=True)
# df.mean(), df.median()
df.fillna(df.mean(), inplace=True)
X = df[[i for i in list(df.columns) if i != 'TAR']]
y = df['TAR']
feat_labels = X.columns
## Random Forest Feature Selection ##
# Standardise the features before fitting the forest.
stdsc = StandardScaler()
X = stdsc.fit_transform(X)
forest = RandomForestClassifier(n_estimators=500,
                                random_state=1)
forest.fit(X, y)
importances = forest.feature_importances_
# Feature indices sorted by decreasing importance.
indices = np.argsort(importances)[::-1]
for f in range(X.shape[1]):
    print("%2d) %-*s %f" % (f + 1, 30,
                            feat_labels[indices[f]],
                            importances[indices[f]]))
plt.title('Feature Importance')
plt.bar(range(X.shape[1]),
        importances[indices],
        align='center')
plt.xticks(range(X.shape[1]),
           feat_labels[indices], rotation=90)
plt.xlim([-1, X.shape[1]])
plt.tight_layout()
plt.savefig("rf_selection.png")
plt.show()
# SelectFromModel's default threshold is the mean importance.
sfm = SelectFromModel(forest, prefit=True)
X_selected = sfm.transform(X)
print('Number of features that meet this threshold criterion:',
      X_selected.shape[1])
print("Threshold %f" % np.mean(importances))
# Because `indices` is sorted descending, the first X_selected.shape[1]
# entries are exactly the features above the mean-importance threshold.
cols = []
for f in range(X_selected.shape[1]):
    cols.append(feat_labels[indices[f]])
    print("%2d) %-*s %f" % (f + 1, 30,
                            feat_labels[indices[f]],
                            importances[indices[f]]))
## HEAT MAP using the above features ##
cols.append('TAR')
cm = np.corrcoef(df[cols].values.T)
hm = heatmap(cm, row_names=cols, column_names=cols)
plt.show()
| [
"numpy.mean",
"matplotlib.pyplot.savefig",
"pandas.read_csv",
"sklearn.feature_selection.SelectFromModel",
"numpy.corrcoef",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.preprocessing.StandardScaler",
"numpy.argsort",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotli... | [((442, 475), 'pandas.read_csv', 'pd.read_csv', (['"""NewBioDegWCols.csv"""'], {}), "('NewBioDegWCols.csv')\n", (453, 475), True, 'import pandas as pd\n'), ((1183, 1199), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1197, 1199), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1237, 1293), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(500)', 'random_state': '(1)'}), '(n_estimators=500, random_state=1)\n', (1259, 1293), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1605, 1636), 'matplotlib.pyplot.title', 'plt.title', (['"""Feature Importance"""'], {}), "('Feature Importance')\n", (1614, 1636), True, 'import matplotlib.pyplot as plt\n'), ((1797, 1823), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-1, X.shape[1]]'], {}), '([-1, X.shape[1]])\n', (1805, 1823), True, 'import matplotlib.pyplot as plt\n'), ((1824, 1842), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1840, 1842), True, 'import matplotlib.pyplot as plt\n'), ((1843, 1874), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""rf_selection.png"""'], {}), "('rf_selection.png')\n", (1854, 1874), True, 'import matplotlib.pyplot as plt\n'), ((1875, 1885), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1883, 1885), True, 'import matplotlib.pyplot as plt\n'), ((1893, 1929), 'sklearn.feature_selection.SelectFromModel', 'SelectFromModel', (['forest'], {'prefit': '(True)'}), '(forest, prefit=True)\n', (1908, 1929), False, 'from sklearn.feature_selection import SelectFromModel\n'), ((2404, 2434), 'numpy.corrcoef', 'np.corrcoef', (['df[cols].values.T'], {}), '(df[cols].values.T)\n', (2415, 2434), True, 'import numpy as np\n'), ((2440, 2486), 'mlxtend.plotting.heatmap', 'heatmap', (['cm'], {'row_names': 'cols', 'column_names': 'cols'}), '(cm, row_names=cols, column_names=cols)\n', (2447, 2486), False, 'from mlxtend.plotting import heatmap\n'), ((2487, 2497), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2495, 2497), True, 'import matplotlib.pyplot as plt\n'), ((1397, 1420), 'numpy.argsort', 'np.argsort', (['importances'], {}), '(importances)\n', (1407, 1420), True, 'import numpy as np\n'), ((2075, 2095), 'numpy.mean', 'np.mean', (['importances'], {}), '(importances)\n', (2082, 2095), True, 'import numpy as np\n')] |
"""
autograd package provides automatic differentiation for all operations on Tensors.
"""
import torch
from torch.autograd import Variable
"""
autograd.Variable wraps around Tensor and
supports (almost) all ops defined on it.
One can directly call .backward() and have
all gradients calculated automatically.
"""
# NOTE(review): torch.autograd.Variable is deprecated since PyTorch 0.4 —
# tensors created with requires_grad=True serve the same purpose.
x = Variable(torch.ones(2,2), requires_grad=True)
print(x)
y = x + 2
print(y)
"""
There is also another important class called Function
Variable and Function are interconnected and build up
an acyclic graph, that encodes complete history of
computation. Each variable has .grad_fn attribute that
references a Function that has created the Variable
(For user created Variable, grad_fn is None)
"""
z = y * y * 3
out = z.mean()
print(z, out)
"""
GRADIENTS
"""
# Backprop from the scalar `out`; gradients accumulate into x.grad.
out.backward()
print(x.grad)
x = torch.randn(3)
x = Variable(x, requires_grad=True)
y = x * 2
# Keep doubling y until its norm reaches 1000.
while y.data.norm() < 1000:
    y = y * 2
print(y)
# For a non-scalar output, backward() needs an explicit gradient argument.
gradients = torch.FloatTensor([0.1, 1.0, 0.0001])
y.backward(gradients)
print(x.grad)
| [
"torch.FloatTensor",
"torch.autograd.Variable",
"torch.randn",
"torch.ones"
] | [((816, 830), 'torch.randn', 'torch.randn', (['(3)'], {}), '(3)\n', (827, 830), False, 'import torch\n'), ((835, 866), 'torch.autograd.Variable', 'Variable', (['x'], {'requires_grad': '(True)'}), '(x, requires_grad=True)\n', (843, 866), False, 'from torch.autograd import Variable\n'), ((943, 980), 'torch.FloatTensor', 'torch.FloatTensor', (['[0.1, 1.0, 0.0001]'], {}), '([0.1, 1.0, 0.0001])\n', (960, 980), False, 'import torch\n'), ((331, 347), 'torch.ones', 'torch.ones', (['(2)', '(2)'], {}), '(2, 2)\n', (341, 347), False, 'import torch\n')] |
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.style as style
import numpy as np
import os
style.use('ggplot')
# GRID identifiers of the universities whose trajectories are plotted below.
grid_list = ['grid.168010.e', 'grid.1032.0', 'grid.7177.6', 'grid.194645.b', 'grid.6571.5']
dirname = os.getcwd()
dirname = dirname + '/Data/'
# ARWU (Shanghai) rankings: one CSV per edition year, keyed by GRID id.
df_ARWU2018 = pd.read_csv(dirname + 'ARWU/ARWURanking_2018_grid.csv')
df_ARWU2017 = pd.read_csv(dirname + 'ARWU/ARWURanking_2017_grid.csv')
df_ARWU2016 = pd.read_csv(dirname + 'ARWU/ARWURanking_2016_grid.csv')
df_ARWU2015 = pd.read_csv(dirname + 'ARWU/ARWURanking_2015_grid.csv')
df_ARWU2014 = pd.read_csv(dirname + 'ARWU/ARWURanking_2014_grid.csv')
df_ARWU2013 = pd.read_csv(dirname + 'ARWU/ARWURanking_2013_grid.csv')
df_ARWU2012 = pd.read_csv(dirname + 'ARWU/ARWURanking_2012_grid.csv')
# Keep only the GRID id plus the five ARWU score indicators; drop incomplete rows.
ARWU_DATA = ["GRID_ID", "Alumni", "Award", "HiCi", "NS", "PUB"]
df_ARWU2012 = df_ARWU2012[ARWU_DATA].dropna()
df_ARWU2013 = df_ARWU2013[ARWU_DATA].dropna()
df_ARWU2014 = df_ARWU2014[ARWU_DATA].dropna()
df_ARWU2015 = df_ARWU2015[ARWU_DATA].dropna()
df_ARWU2016 = df_ARWU2016[ARWU_DATA].dropna()
df_ARWU2017 = df_ARWU2017[ARWU_DATA].dropna()
df_ARWU2018 = df_ARWU2018[ARWU_DATA].dropna()
# THE rankings: note the file-name year is one ahead of the variable name
# (the "2013" edition file is used as the 2012 data point).
df_THE2012 = pd.read_csv(dirname + 'THE/THERanking2013__grid.csv')
df_THE2013 = pd.read_csv(dirname + 'THE/THERanking2014__grid.csv')
df_THE2014 = pd.read_csv(dirname + 'THE/THERanking2015__grid.csv')
df_THE2015 = pd.read_csv(dirname + 'THE/THERanking2016__grid.csv')
df_THE2016 = pd.read_csv(dirname + 'THE/THERanking2017__grid.csv')
df_THE2017 = pd.read_csv(dirname + 'THE/THERanking2018__grid.csv')
df_THE2018 = pd.read_csv(dirname + 'THE/THERanking2019__grid.csv')
THE_DATA = ["GRID_ID", "Teaching", "Rechearch", "Citations", "Industry_Income", "Internationals_Outlook"]
df_THE2012 = df_THE2012[THE_DATA].dropna()
df_THE2013 = df_THE2013[THE_DATA].dropna()
df_THE2014 = df_THE2014[THE_DATA].dropna()
df_THE2015 = df_THE2015[THE_DATA].dropna()
df_THE2016 = df_THE2016[THE_DATA].dropna()
df_THE2017 = df_THE2017[THE_DATA].dropna()
df_THE2018 = df_THE2018[THE_DATA].dropna()
# QS rankings: same one-year offset between file name and variable name.
df_QS2012 = pd.read_csv(dirname + 'QS/qs2013_grid.csv')
df_QS2013 = pd.read_csv(dirname + 'QS/qs2014_grid.csv')
df_QS2014 = pd.read_csv(dirname + 'QS/qs2015_grid.csv')
df_QS2015 = pd.read_csv(dirname + 'QS/qs2016_grid.csv')
df_QS2016 = pd.read_csv(dirname + 'QS/qs2017_grid.csv')
df_QS2017 = pd.read_csv(dirname + 'QS/qs2018_grid.csv')
df_QS2018 = pd.read_csv(dirname + 'QS/qs2019_grid.csv')
QS_DATA = ["GRID_ID", "Academic_reputation", "Employer_reputation", "Faculty_Student", "International_Faculty",
           "International_Students", "Citations"]
# QS encodes missing scores as 0; convert to NaN so dropna() removes those rows.
df_QS2018 = df_QS2018.replace(0, np.nan)
df_QS2017 = df_QS2017.replace(0, np.nan)
df_QS2016 = df_QS2016.replace(0, np.nan)
df_QS2015 = df_QS2015.replace(0, np.nan)
df_QS2014 = df_QS2014.replace(0, np.nan)
df_QS2013 = df_QS2013.replace(0, np.nan)
df_QS2012 = df_QS2012.replace(0, np.nan)
df_QS2018 = df_QS2018[QS_DATA].dropna()
df_QS2017 = df_QS2017[QS_DATA].dropna()
df_QS2016 = df_QS2016[QS_DATA].dropna()
df_QS2015 = df_QS2015[QS_DATA].dropna()
df_QS2014 = df_QS2014[QS_DATA].dropna()
df_QS2013 = df_QS2013[QS_DATA].dropna()
df_QS2012 = df_QS2012[QS_DATA].dropna()
def create_constructs(df_ARWU, df_THE, df_QS, year):
    """Build per-ranking (Reputation, Publication) construct tables for one year.

    For each ranking system the reputation construct is the mean of its
    reputation indicators and the publication construct is the mean of its
    publication indicators (a single indicator maps to itself).

    The original implementation repeated the same five steps three times and
    ended each branch with a no-op ``df.columns = [...]`` assignment; this
    version factors the shared logic into one helper.

    Note: like the original, the construct columns are also added in place to
    the frames passed in.

    Returns:
        (df_ARWU, df_THE, df_QS) reduced to the columns
        ['GRID_ID', 'year', 'Reputation_<R>', 'Publication_<R>'].
    """
    def _build(df, suffix, rep_cols, pub_cols):
        # Mean over the listed indicator columns; a one-element list (e.g.
        # Citations for THE/QS) reduces to the column itself.
        df['Reputation_' + suffix] = sum(df[c] for c in rep_cols) / len(rep_cols)
        df['Publication_' + suffix] = sum(df[c] for c in pub_cols) / len(pub_cols)
        df = df[['GRID_ID', 'Reputation_' + suffix, 'Publication_' + suffix]]
        df['year'] = year
        return df[['GRID_ID', 'year', 'Reputation_' + suffix, 'Publication_' + suffix]]

    df_ARWU = _build(df_ARWU, 'ARWU', ['Alumni', 'Award'], ['HiCi', 'NS', 'PUB'])
    df_THE = _build(df_THE, 'THE', ['Teaching', 'Rechearch'], ['Citations'])
    df_QS = _build(df_QS, 'QS',
                   ['Academic_reputation', 'Employer_reputation'], ['Citations'])
    return df_ARWU, df_THE, df_QS
def add_arrow(line, position=None, direction='right', size=20, color=None):
    """Annotate a matplotlib line with an arrow between each pair of consecutive points.

    NOTE(review): the `position` argument is overwritten inside the loop, so a
    caller-supplied value is never used.
    direction: 'right' draws arrows toward the next point, anything else toward
    the previous one.
    """
    if color is None:
        color = line.get_color()
    xdata = line.get_xdata()
    ydata = line.get_ydata()
    for i in range(len(xdata) -1):
        position = xdata[i]
        # Index of the x value closest to `position` (equals i when x values are unique).
        start_ind = np.argmin(np.absolute(xdata - position))
        if direction == 'right':
            end_ind = start_ind + 1
        else:
            end_ind = start_ind - 1
        # Empty text: only the arrowhead between the two points is drawn.
        line.axes.annotate('',
                           xytext=(xdata[start_ind], ydata[start_ind]),
                           xy=(xdata[end_ind], ydata[end_ind]),
                           arrowprops=dict(arrowstyle="-|>", color=color),
                           size=size
                           )
# Build the per-ranking construct tables for every edition year.
df_ARWU2018, df_THE2018, df_QS2018 = create_constructs(df_ARWU2018, df_THE2018, df_QS2018, '2018')
df_ARWU2017, df_THE2017, df_QS2017 = create_constructs(df_ARWU2017, df_THE2017, df_QS2017, '2017')
df_ARWU2016, df_THE2016, df_QS2016 = create_constructs(df_ARWU2016, df_THE2016, df_QS2016, '2016')
df_ARWU2015, df_THE2015, df_QS2015 = create_constructs(df_ARWU2015, df_THE2015, df_QS2015, '2015')
df_ARWU2014, df_THE2014, df_QS2014 = create_constructs(df_ARWU2014, df_THE2014, df_QS2014, '2014')
df_ARWU2013, df_THE2013, df_QS2013 = create_constructs(df_ARWU2013, df_THE2013, df_QS2013, '2013')
df_ARWU2012, df_THE2012, df_QS2012 = create_constructs(df_ARWU2012, df_THE2012, df_QS2012, '2012')
# Stack the yearly tables into one long frame per ranking.  pd.concat replaces
# DataFrame.append, which was deprecated in pandas 1.4 and removed in 2.0;
# row order (2018 first, then 2017..2012) and the default ignore_index=False
# behaviour are unchanged.
df_ARWU = pd.concat([df_ARWU2018, df_ARWU2017, df_ARWU2016, df_ARWU2015,
                     df_ARWU2014, df_ARWU2013, df_ARWU2012])
df_THE = pd.concat([df_THE2018, df_THE2017, df_THE2016, df_THE2015,
                    df_THE2014, df_THE2013, df_THE2012])
df_QS = pd.concat([df_QS2018, df_QS2017, df_QS2016, df_QS2015,
                   df_QS2014, df_QS2013, df_QS2012])
def create_uni_df(ARWU, THE, QS, GRID):
    """Return the rows of each ranking frame that belong to one GRID id."""
    def _only(frame):
        # Boolean-mask selection keeps column structure, possibly zero rows.
        return frame[frame['GRID_ID'] == GRID]
    return _only(ARWU), _only(THE), _only(QS)
# One subplot per ranking system; each university's yearly trajectory is drawn
# as a line in reputation-vs-publication space with direction arrows.
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 5))
for i in grid_list:
    df_stanford_ARWU, df_stanford_THE, df_stanford_QS = create_uni_df(df_ARWU, df_THE, df_QS, i)
    line = ax1.plot(df_stanford_ARWU['Reputation_ARWU'], df_stanford_ARWU['Publication_ARWU'])[0]
    add_arrow(line)
    line = ax2.plot(df_stanford_THE['Reputation_THE'], df_stanford_THE['Publication_THE'])[0]
    add_arrow(line)
    line = ax3.plot(df_stanford_QS['Reputation_QS'], df_stanford_QS['Publication_QS'])[0]
    add_arrow(line)
ax1.set_title('ARWU')
ax2.set_title('THE')
ax3.set_title('QS')
# Shared axis labels placed on the figure rather than per-axes.
fig.text(0.5, 0.04, 'Reputation', ha='center', va='center', fontsize=15)
fig.text(0.09, 0.5, 'Publication', ha='center', va='center', rotation='vertical', fontsize=15)
ax3.legend(grid_list, loc='right',
           bbox_to_anchor=(1.5, 0.5), ncol=1, fontsize='large', frameon=False)
plt.show()
| [
"pandas.read_csv",
"numpy.absolute",
"os.getcwd",
"matplotlib.style.use",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((115, 134), 'matplotlib.style.use', 'style.use', (['"""ggplot"""'], {}), "('ggplot')\n", (124, 134), True, 'import matplotlib.style as style\n'), ((239, 250), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (248, 250), False, 'import os\n'), ((295, 350), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'ARWU/ARWURanking_2018_grid.csv')"], {}), "(dirname + 'ARWU/ARWURanking_2018_grid.csv')\n", (306, 350), True, 'import pandas as pd\n'), ((365, 420), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'ARWU/ARWURanking_2017_grid.csv')"], {}), "(dirname + 'ARWU/ARWURanking_2017_grid.csv')\n", (376, 420), True, 'import pandas as pd\n'), ((435, 490), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'ARWU/ARWURanking_2016_grid.csv')"], {}), "(dirname + 'ARWU/ARWURanking_2016_grid.csv')\n", (446, 490), True, 'import pandas as pd\n'), ((505, 560), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'ARWU/ARWURanking_2015_grid.csv')"], {}), "(dirname + 'ARWU/ARWURanking_2015_grid.csv')\n", (516, 560), True, 'import pandas as pd\n'), ((575, 630), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'ARWU/ARWURanking_2014_grid.csv')"], {}), "(dirname + 'ARWU/ARWURanking_2014_grid.csv')\n", (586, 630), True, 'import pandas as pd\n'), ((645, 700), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'ARWU/ARWURanking_2013_grid.csv')"], {}), "(dirname + 'ARWU/ARWURanking_2013_grid.csv')\n", (656, 700), True, 'import pandas as pd\n'), ((715, 770), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'ARWU/ARWURanking_2012_grid.csv')"], {}), "(dirname + 'ARWU/ARWURanking_2012_grid.csv')\n", (726, 770), True, 'import pandas as pd\n'), ((1173, 1226), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'THE/THERanking2013__grid.csv')"], {}), "(dirname + 'THE/THERanking2013__grid.csv')\n", (1184, 1226), True, 'import pandas as pd\n'), ((1240, 1293), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'THE/THERanking2014__grid.csv')"], {}), "(dirname + 'THE/THERanking2014__grid.csv')\n", (1251, 1293), True, 'import 
pandas as pd\n'), ((1307, 1360), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'THE/THERanking2015__grid.csv')"], {}), "(dirname + 'THE/THERanking2015__grid.csv')\n", (1318, 1360), True, 'import pandas as pd\n'), ((1374, 1427), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'THE/THERanking2016__grid.csv')"], {}), "(dirname + 'THE/THERanking2016__grid.csv')\n", (1385, 1427), True, 'import pandas as pd\n'), ((1441, 1494), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'THE/THERanking2017__grid.csv')"], {}), "(dirname + 'THE/THERanking2017__grid.csv')\n", (1452, 1494), True, 'import pandas as pd\n'), ((1508, 1561), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'THE/THERanking2018__grid.csv')"], {}), "(dirname + 'THE/THERanking2018__grid.csv')\n", (1519, 1561), True, 'import pandas as pd\n'), ((1575, 1628), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'THE/THERanking2019__grid.csv')"], {}), "(dirname + 'THE/THERanking2019__grid.csv')\n", (1586, 1628), True, 'import pandas as pd\n'), ((2051, 2094), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'QS/qs2013_grid.csv')"], {}), "(dirname + 'QS/qs2013_grid.csv')\n", (2062, 2094), True, 'import pandas as pd\n'), ((2107, 2150), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'QS/qs2014_grid.csv')"], {}), "(dirname + 'QS/qs2014_grid.csv')\n", (2118, 2150), True, 'import pandas as pd\n'), ((2163, 2206), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'QS/qs2015_grid.csv')"], {}), "(dirname + 'QS/qs2015_grid.csv')\n", (2174, 2206), True, 'import pandas as pd\n'), ((2219, 2262), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'QS/qs2016_grid.csv')"], {}), "(dirname + 'QS/qs2016_grid.csv')\n", (2230, 2262), True, 'import pandas as pd\n'), ((2275, 2318), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'QS/qs2017_grid.csv')"], {}), "(dirname + 'QS/qs2017_grid.csv')\n", (2286, 2318), True, 'import pandas as pd\n'), ((2331, 2374), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'QS/qs2018_grid.csv')"], {}), "(dirname + 
'QS/qs2018_grid.csv')\n", (2342, 2374), True, 'import pandas as pd\n'), ((2387, 2430), 'pandas.read_csv', 'pd.read_csv', (["(dirname + 'QS/qs2019_grid.csv')"], {}), "(dirname + 'QS/qs2019_grid.csv')\n", (2398, 2430), True, 'import pandas as pd\n'), ((6482, 6517), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(15, 5)'}), '(1, 3, figsize=(15, 5))\n', (6494, 6517), True, 'import matplotlib.pyplot as plt\n'), ((7330, 7340), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7338, 7340), True, 'import matplotlib.pyplot as plt\n'), ((4736, 4765), 'numpy.absolute', 'np.absolute', (['(xdata - position)'], {}), '(xdata - position)\n', (4747, 4765), True, 'import numpy as np\n')] |
from fest import utils
class SomeClass:
    """Bare fixture class; test_logger checks the logger name derived from it."""
    pass
def test_future():
    """Future.filter should drop items failing the predicate; execute() collects the rest."""
    outcome = utils.Future(iter('abcdefg')).filter(lambda ch: ch < 'e').execute()
    assert outcome == list('abcd')
def test_digest():
    """digest() of a known mapping yields a stable hex digest."""
    payload = {'fizz': 'buzz'}
    expected = 'f45195aef08daea1be5dbb1c7feb5763c5bc7b37'
    assert utils.digest(payload) == expected
def test_logger():
    """logger() should name the logger after the object's module and class."""
    instance = SomeClass()
    log = utils.logger(instance)
    assert log.name == 'tests.utils_test.SomeClass'
| [
"fest.utils.digest",
"fest.utils.logger"
] | [((383, 400), 'fest.utils.logger', 'utils.logger', (['obj'], {}), '(obj)\n', (395, 400), False, 'from fest import utils\n'), ((266, 283), 'fest.utils.digest', 'utils.digest', (['ret'], {}), '(ret)\n', (278, 283), False, 'from fest import utils\n')] |
import asyncio
import fasmga
import os
# API token is read from the environment; None if FGA_TOKEN is unset.
client = fasmga.Client(os.getenv("FGA_TOKEN"))
@client.on("ready")
async def main():
    """Demo flow: shorten a URL, wait, edit its target/password, then close."""
    url = await client.shorten("http://example.com", "your-url-id")
    # change "your-url-id" with the url ID you want,
    # or remove it if you want it to generate a random one.
    print("Your shortened URL is:", url)
    print("It will redirect to", url.uri)
    await asyncio.sleep(10)  # wait for 10 seconds if you want to try the shortened site
    await url.edit(
        url="https://google.com", password="<PASSWORD>"
    )  # edit the redirect URL and add a password
    print(f"Your URL {url} has been edited.")
    print("Now it redirects to", url.uri)
    print("Remember: Passwords aren't stored in URL instances.")
    await client.close()  # closes the client, if you want to keep the event loop running you can comment this line
client.start()
| [
"os.getenv",
"asyncio.sleep"
] | [((63, 85), 'os.getenv', 'os.getenv', (['"""FGA_TOKEN"""'], {}), "('FGA_TOKEN')\n", (72, 85), False, 'import os\n'), ((401, 418), 'asyncio.sleep', 'asyncio.sleep', (['(10)'], {}), '(10)\n', (414, 418), False, 'import asyncio\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8-*-
"""\
Get CMIP6 Citation info and save as a JSON file.
---
This script gets Citation info from the citation service.
You have to specify MIP(`activity_id`), model(`source_id`),
institution(`institution_id`), and experiment(`experiment_id`) to get
info.
"""
from utils import getJSON
import json
# import certifi
# import urllib3
import argparse
__author__ = 'T.Inoue'
__credits__ = 'Copyright (c) 2019 JAMSTEC'
__version__ = 'v20191213'
__date__ = '2019/12/13'
# mip = 'CMIP'
# model = 'MIROC6'
institution = 'MIROC'
# experiment = 'historical'
# mip = 'HighResMIP'
# model = 'NICAM16-7S'
# institution = 'MIROC'
# experiment = 'highresSST-present'
desc, epilog = __doc__.split('---')
def my_parser():
    """Build the argument parser for the CMIP6 citation fetcher.

    Uses the module-level `desc`/`epilog` strings and the default
    `institution` constant.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=desc, epilog=epilog, )
    add = parser.add_argument
    add('-v', '--verbose',
        dest='verbose', action='store_true',
        default=False, help='be verbose.')
    add('-a', '--mip', '--activity_id',
        type=str, default=None, help='MIP(activity_id)')
    add('-i', '--inst', '--institution_id',
        metavar='inst', type=str, default=institution,
        help='inst(institution_id),default="%(default)s"')
    add('-s', '--model', '--source_id',
        type=str, default=None, help='model(source_id)')
    add('-e', '--exp', '--experiment_id',
        metavar='exp', type=str, default=None,
        help='experiments to submit')
    return parser
def main():
    """Fetch the citation record selected on the command line and save it as JSON.

    Returns 0 on success; exits with status 1 when the service returns nothing.
    """
    parser = my_parser()
    opts = parser.parse_args()
    if opts.verbose:
        print('Configuration:')
        print('	mip:', opts.mip)
        print('	model:', opts.model)
        print('	institution:', opts.inst)
        print('	experiments:', opts.exp)
    base = getJSON(source_id=opts.model, activity_id=opts.mip,
                   institution_id=opts.inst, experiment_id=opts.exp)
    if base is None:
        parser.print_help()
        exit(1)
    base_title = base['titles'][0]
    base_subject = base['subjects'][0]['subject']
    if opts.verbose:
        print('base title:', base_title)
        print('base subject:', base_subject)
    # The subject string doubles as the output file name.
    fname = base_subject + '.json'
    with open(fname, 'w') as f:
        print('Saving base data to:', fname)
        json.dump(base, f, indent=2)
    print('Done.')
    return 0
# Propagate main()'s return value as the process exit status.
if __name__ == '__main__':
    exit(main())
| [
"json.dump",
"utils.getJSON",
"argparse.ArgumentParser"
] | [((770, 885), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'formatter_class': 'argparse.RawDescriptionHelpFormatter', 'description': 'desc', 'epilog': 'epilog'}), '(formatter_class=argparse.\n RawDescriptionHelpFormatter, description=desc, epilog=epilog)\n', (793, 885), False, 'import argparse\n'), ((2167, 2260), 'utils.getJSON', 'getJSON', ([], {'source_id': 'a.model', 'activity_id': 'a.mip', 'institution_id': 'a.inst', 'experiment_id': 'a.exp'}), '(source_id=a.model, activity_id=a.mip, institution_id=a.inst,\n experiment_id=a.exp)\n', (2174, 2260), False, 'from utils import getJSON\n'), ((2657, 2685), 'json.dump', 'json.dump', (['base', 'f'], {'indent': '(2)'}), '(base, f, indent=2)\n', (2666, 2685), False, 'import json\n')] |
import time
from umqtt.robust import MQTTClient
def sub_cb(topic, msg):
    """MQTT subscribe callback: print each (topic, payload) pair as received."""
    print((topic, msg))
c = MQTTClient("umqtt_client", "localhost")
# Print diagnostic messages when retries/reconnects happens
c.DEBUG = True
c.set_callback(sub_cb)
# Connect to server, requesting not to clean session for this
# client. If there was no existing session (False return value
# from connect() method), we perform the initial setup of client
# session - subscribe to needed topics. Afterwards, these
# subscriptions will be stored server-side, and will be persistent,
# (as we use clean_session=False).
#
# There can be a problem when a session for a given client exists,
# but doesn't have subscriptions a particular application expects.
# In this case, a session needs to be cleaned first. See
# example_reset_session.py for an obvious way how to do that.
#
# In an actual application, it's up to its developer how to
# manage these issues. One extreme is to have external "provisioning"
# phase, where initial session setup, and any further management of
# a session, is done by external tools. This allows to save resources
# on a small embedded device. Another extreme is to have an application
# to perform auto-setup (e.g., clean session, then re-create session
# on each restart). This example shows mid-line between these 2
# approaches, where initial setup of session is done by application,
# but if anything goes wrong, there's an external tool to clean session.
if not c.connect(clean_session=False):
print("New session being set up")
c.subscribe(b"foo_topic")
while 1:
c.wait_msg()
c.disconnect()
| [
"umqtt.robust.MQTTClient"
] | [((104, 143), 'umqtt.robust.MQTTClient', 'MQTTClient', (['"""umqtt_client"""', '"""localhost"""'], {}), "('umqtt_client', 'localhost')\n", (114, 143), False, 'from umqtt.robust import MQTTClient\n')] |
from shutil import which
from typing import Optional
from wlanpi_core.models.validation_error import ValidationError
from .helpers import get_phy80211_interfaces, run_cli_async
async def executable_exists(name: str) -> bool:
    """Report whether an executable called *name* can be located on PATH."""
    located = which(name)
    return located is not None
async def test_wifi_interface(interface: str) -> dict:
    """Gather basic facts about one wireless interface from /sys/class/net.

    Returns a dict with keys: name, mac, driver, operstate, mode.
    """
    info = {}
    info["name"] = interface
    raw_mac = await run_cli_async(f"cat /sys/class/net/{interface}/address")
    info["mac"] = raw_mac.strip()
    # The sysfs driver symlink resolves to .../drivers/<name>; keep the last path part.
    driver_link = await run_cli_async(
        f"readlink -f /sys/class/net/{interface}/device/driver"
    )
    info["driver"] = driver_link.strip().rsplit("/", 1)[1]
    # RFC 2863 operational state, one of: "unknown", "notpresent", "down",
    # "lowerlayerdown", "testing", "dormant", "up"
    # (see Documentation/ABI/testing/sysfs-class-net, /sys/class/net/<iface>/operstate).
    raw_state = await run_cli_async(f"cat /sys/class/net/{interface}/operstate")
    info["operstate"] = raw_state.strip()
    # ARPHRD_* link type: 1 = Ethernet (managed); 801-803 are 802.11 monitor
    # variants (see include/uapi/linux/if_arp.h).
    arp_type = int(await run_cli_async(f"cat /sys/class/net/{interface}/type"))
    if arp_type == 1:
        info["mode"] = "managed"
    elif arp_type in (801, 802, 803):
        info["mode"] = "monitor"
    else:
        info["mode"] = "unknown"
    return info
async def get_diagnostics():
    """
    Return diagnostic tests for probe
    """
    diag = {}
    regdomain = await run_cli_async("iw reg get")
    diag["regdomain"] = [line for line in regdomain.split("\n") if "country" in line]
    # (tool, version command, whether the version banner must be read from stderr)
    version_probes = (
        ("tcpdump", "tcpdump --version", True),
        ("iw", "iw --version", False),
        ("ip", "ip -V", False),
        ("ifconfig", "ifconfig --version", False),
    )
    # Presence checks; airmon-ng has no version probe, so it only appears here.
    executable = {}
    for tool in ("tcpdump", "iw", "ip", "ifconfig", "airmon-ng"):
        executable[tool] = await executable_exists(tool)
    # add executable tests to diag
    diag["tools"] = executable
    # Version lookups, table-driven instead of the previous copy-pasted
    # if/else block per tool; missing tools report "unknown".
    tool_versions = {}
    for tool, command, want_stderr in version_probes:
        if not executable[tool]:
            tool_versions[tool] = "unknown"
        elif want_stderr:
            # tcpdump writes its version banner to stderr
            tool_versions[tool] = await run_cli_async(command, want_stderr=True)
        else:
            tool_versions[tool] = await run_cli_async(command)
    # add version tests to diag
    diag["versions"] = tool_versions
    return diag
async def get_interface_diagnostics(interface: Optional[str] = None):
    """Probe one named phy80211 interface, or all of them when none is given.

    Raises ValidationError (400) when the requested interface is unknown.
    """
    interfaces = get_phy80211_interfaces()
    if not interface:
        probed = []
        for candidate in interfaces:
            probed.append(await test_wifi_interface(candidate))
        return {"interfaces": probed}
    if interface not in interfaces:
        raise ValidationError(
            status_code=400, error_msg=f"wlan interface {interface} not found"
        )
    return {"interfaces": [await test_wifi_interface(interface)]}
| [
"shutil.which",
"wlanpi_core.models.validation_error.ValidationError"
] | [((318, 329), 'shutil.which', 'which', (['name'], {}), '(name)\n', (323, 329), False, 'from shutil import which\n'), ((3402, 3490), 'wlanpi_core.models.validation_error.ValidationError', 'ValidationError', ([], {'status_code': '(400)', 'error_msg': 'f"""wlan interface {interface} not found"""'}), "(status_code=400, error_msg=\n f'wlan interface {interface} not found')\n", (3417, 3490), False, 'from wlanpi_core.models.validation_error import ValidationError\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
import multiprocessing
from multiprocessing import cpu_count
import math
class ProcessorsScheduler(object):
    """Splits a list into contiguous slices and applies a function to each
    slice in a pool of worker processes."""

    # Default worker count: one process per logical CPU.
    process_num = cpu_count()

    def __init__(self, cpu_num_workers=None):
        """cpu_num_workers: positive int to override the CPU-count default;
        None or non-positive values keep the default."""
        # `is not None` instead of `!= None` (identity check is the Python idiom).
        if cpu_num_workers is not None and cpu_num_workers > 0:
            self.process_num = cpu_num_workers

    def run_data_parallel(self, func, func_args):
        """Apply `func` to contiguous slices of func_args[0] in parallel.

        func_args is a tuple whose first element is the data sequence; any
        remaining elements are passed unchanged to every call of `func`.
        Returns a list of (slice_index, AsyncResult) pairs sorted by index;
        call .get() on each AsyncResult for the slice's result.
        """
        data, rest_args = func_args[0], func_args[1:]
        res = []
        process_p = multiprocessing.Pool(self.process_num)
        data_length = len(data)
        # Ceiling division so all items are covered; the last slice may be short.
        size = math.ceil(data_length / self.process_num)
        for i in range(self.process_num):
            start = size * i
            end = min((i + 1) * size, data_length)
            args = (data[start:end],) + rest_args
            res.append((i, process_p.apply_async(func, args=args)))
        process_p.close()
        process_p.join()
        res = sorted(res, key=lambda x: x[0])
        return res
| [
"math.ceil",
"multiprocessing.Pool",
"multiprocessing.cpu_count"
] | [((222, 233), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (231, 233), False, 'from multiprocessing import cpu_count\n'), ((616, 654), 'multiprocessing.Pool', 'multiprocessing.Pool', (['self.process_num'], {}), '(self.process_num)\n', (636, 654), False, 'import multiprocessing\n'), ((702, 743), 'math.ceil', 'math.ceil', (['(data_length / self.process_num)'], {}), '(data_length / self.process_num)\n', (711, 743), False, 'import math\n')] |
#!/bin/python3
# encoding: utf-8
import sys
import numpy as np
from time import time
'''
x
[0, 2] => idx start 0, end 3
[3, 5] => idx start 3, end 6
[6, 8] => idx start 6, end 9
((0 + (r_idx // 3 * 3)): (3 + (r_idx // 3 * 3)), (0 + (c_idx // 3 * 3)): (3 + (c_idx // 3 * 3)))
np.random.randint(1, 10)
'''
# padding() recurses once per guess/rollback, so raise the recursion limit.
sys.setrecursionlimit(10 ** 7)
# Seed numpy's RNG from the current time.
np.random.seed(int(time() % 1000))
# Stack of (row, col, value) guesses used by padding() for backtracking;
# seeded with a (0, 0, 0) sentinel.
TRIALS = [(0, 0, 0)]
def padding(input_values, rollback=False):
    """Fill the zero cells of a Sudoku grid in-place via DFS with backtracking.

    Guesses are pushed on the module-level TRIALS stack as (row, col, value);
    rollback=True pops the latest guess and retries with larger digits.
    Raises Exception when TRIALS empties with no candidate left.
    Returns the (mutated) input_values array once no zero cell remains.
    """
    MAX_ROW, MAX_COL = input_values.shape
    # if it is rollback
    if rollback:
        if len(TRIALS) == 0:
            raise Exception('No possible result!')
        # Revisit the most recent guess; only digits above its value remain untried.
        i, j, prev_val = TRIALS.pop()
        valid_digit = False
        for num in range(prev_val+1, 10):
            input_values[i, j] = num
            valid_digit = value_chk(input_values, i, j)
            if valid_digit: # if value fits current position
                TRIALS.append((i, j, num))
                return padding(input_values)
        if not valid_digit: # if not updated
            # clear value
            input_values[i, j] = 0
            # and rollback again
            return padding(input_values, True)
    else:
        # if new position
        # Scan row-major for the first empty (zero) cell and try digits 1..9.
        for i in range(MAX_ROW):
            for j in range(MAX_COL):
                if input_values[i, j] == 0:
                    valid_digit = False
                    for num in range(1, 10):
                        input_values[i, j] = num
                        valid_digit = value_chk(input_values, i, j)
                        if valid_digit: # if value fits current position
                            TRIALS.append((i, j, num))
                            return padding(input_values)
                    # if no digit fits, rollback
                    if not valid_digit:
                        input_values[i, j] = 0
                        return padding(input_values, True)
    # No zero cell found: the grid is solved.
    return input_values
def value_chk(val_mtx, row_idx, col_idx):
    """Return True if the value at (row_idx, col_idx) appears exactly once in
    its row, its column, and its 3x3 box.

    val_mtx: 9x9 numpy array (indexed as val_mtx[r, c]).
    """
    val = val_mtx[row_idx, col_idx]
    # Top-left corner of the 3x3 box containing the cell; the original
    # repeated this arithmetic inline four times.
    row0 = row_idx // 3 * 3
    col0 = col_idx // 3 * 3
    box = val_mtx[row0:row0 + 3, col0:col0 + 3].flatten()
    return (dup_cnt(val_mtx[row_idx, :], val) == 1
            and dup_cnt(val_mtx[:, col_idx], val) == 1
            and dup_cnt(box, val) == 1)
def dup_cnt(tar_arr, val):
    """Count how many entries of tar_arr equal val."""
    # sum over a generator replaces the hand-rolled counting loop.
    return sum(1 for element in tar_arr if element == val)
if __name__ == '__main__':
    # Sample puzzle: 0 marks an empty cell to be filled by padding().
    i1 = np.array([
        [5, 3, 0, 0, 7, 0, 0, 0, 0],
        [6, 0, 0, 1, 9, 5, 0, 0, 0],
        [0, 9, 8, 0, 0, 0, 0, 6, 0],
        [8, 0, 0, 0, 6, 0, 0, 0, 3],
        [4, 0, 0, 8, 0, 3, 0, 0, 1],
        [7, 0, 0, 0, 2, 0, 0, 0, 6],
        [0, 6, 0, 0, 0, 0, 2, 8, 0],
        [0, 0, 0, 4, 1, 9, 0, 0, 5],
        [0, 0, 0, 0, 8, 0, 0, 7, 9]
    ])
    print('Original input:\n', i1)
    result = padding(i1)
    print('Result:\n', result)
    # result check
    # Verify every cell is unique in its row, column and box.
    for i in range(result.shape[0]):
        for j in range(result.shape[1]):
            if not value_chk(result, i, j):
                raise Exception("Unvalid result! ({}, {})".format(i, j))
| [
"sys.setrecursionlimit",
"numpy.array",
"time.time"
] | [((309, 339), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(10 ** 7)'], {}), '(10 ** 7)\n', (330, 339), False, 'import sys\n'), ((2394, 2678), 'numpy.array', 'np.array', (['[[5, 3, 0, 0, 7, 0, 0, 0, 0], [6, 0, 0, 1, 9, 5, 0, 0, 0], [0, 9, 8, 0, 0, \n 0, 0, 6, 0], [8, 0, 0, 0, 6, 0, 0, 0, 3], [4, 0, 0, 8, 0, 3, 0, 0, 1],\n [7, 0, 0, 0, 2, 0, 0, 0, 6], [0, 6, 0, 0, 0, 0, 2, 8, 0], [0, 0, 0, 4, \n 1, 9, 0, 0, 5], [0, 0, 0, 0, 8, 0, 0, 7, 9]]'], {}), '([[5, 3, 0, 0, 7, 0, 0, 0, 0], [6, 0, 0, 1, 9, 5, 0, 0, 0], [0, 9, \n 8, 0, 0, 0, 0, 6, 0], [8, 0, 0, 0, 6, 0, 0, 0, 3], [4, 0, 0, 8, 0, 3, 0,\n 0, 1], [7, 0, 0, 0, 2, 0, 0, 0, 6], [0, 6, 0, 0, 0, 0, 2, 8, 0], [0, 0,\n 0, 4, 1, 9, 0, 0, 5], [0, 0, 0, 0, 8, 0, 0, 7, 9]])\n', (2402, 2678), True, 'import numpy as np\n'), ((360, 366), 'time.time', 'time', ([], {}), '()\n', (364, 366), False, 'from time import time\n')] |
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from launch import LaunchDescription
from launch.actions import DeclareLaunchArgument
from launch.conditions import IfCondition
import launch.substitutions
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node
from launch_ros.substitutions import FindPackageShare
from tracetools_launch.action import Trace
from tracetools_trace.tools.names import DEFAULT_EVENTS_ROS
def generate_launch_description():
    """Assemble the pendulum demo launch graph.

    Declares the CLI launch arguments, the demo / robot_state_publisher /
    RViz nodes, and two optional LTTng tracing sessions (plain and
    memory-usage), then returns them all in one LaunchDescription.
    """
    # Get the bringup directory
    bringup_dir = FindPackageShare('pendulum_bringup').find('pendulum_bringup')
    # Set robot description parameters
    urdf_file = os.path.join(bringup_dir, 'urdf', 'pendulum.urdf')
    with open(urdf_file, 'r') as infp:
        robot_desc = infp.read()
    rsp_params = {'robot_description': robot_desc}
    # Set parameter file path
    param_file_path = os.path.join(bringup_dir, 'params', 'pendulum.param.yaml')
    param_file = launch.substitutions.LaunchConfiguration('params', default=[param_file_path])
    # Set rviz config path
    rviz_cfg_path = os.path.join(bringup_dir, 'rviz/pendulum.rviz')
    # Create the launch configuration variables
    autostart_param = DeclareLaunchArgument(
        name='autostart',
        default_value='True',
        description='Automatically start lifecycle nodes')
    priority_param = DeclareLaunchArgument(
        name='priority',
        default_value='0',
        description='Set process priority')
    cpu_affinity_param = DeclareLaunchArgument(
        name='cpu-affinity',
        default_value='0',
        description='Set process CPU affinity')
    with_lock_memory_param = DeclareLaunchArgument(
        name='lock-memory',
        default_value='False',
        description='Lock the process memory')
    lock_memory_size_param = DeclareLaunchArgument(
        name='lock-memory-size',
        default_value='0',
        description='Set lock memory size in MB')
    config_child_threads_param = DeclareLaunchArgument(
        name='config-child-threads',
        default_value='False',
        description='Configure process child threads (typically DDS threads)')
    driver_enable_param = DeclareLaunchArgument(
        name='driver-enable',
        default_value='True',
        description='Enable/disable pendulum driver nodes')
    controller_enable_param = DeclareLaunchArgument(
        name='controller-enable',
        default_value='True',
        description='Enable/disable controller driver nodes')
    with_rviz_param = DeclareLaunchArgument(
        'rviz',
        default_value='False',
        description='Launch RVIZ2 in addition to other nodes'
    )
    trace_param = DeclareLaunchArgument(
        'trace',
        default_value='False',
        description='Launch ROS tracing action'
    )
    trace_memory_param = DeclareLaunchArgument(
        'trace-memory',
        default_value='False',
        description='Launch ROS tracing action with memory functions tracing enabled'
    )
    # Node definitions
    # Main demo executable; every tuning option above is forwarded as a CLI flag.
    pendulum_demo_runner = Node(
        package='pendulum_demo',
        executable='pendulum_demo',
        output='screen',
        parameters=[param_file],
        arguments=[
            '--autostart', LaunchConfiguration('autostart'),
            '--priority', LaunchConfiguration('priority'),
            '--cpu-affinity', LaunchConfiguration('cpu-affinity'),
            '--lock-memory', LaunchConfiguration('lock-memory'),
            '--lock-memory-size', LaunchConfiguration('lock-memory-size'),
            '--config-child-threads', LaunchConfiguration('config-child-threads'),
            '--driver-enable', LaunchConfiguration('driver-enable'),
            '--controller-enable', LaunchConfiguration('controller-enable'),
        ]
    )
    # Publishes TF from the URDF; started only when the 'rviz' argument is true.
    robot_state_publisher_runner = Node(
        package='robot_state_publisher',
        executable='robot_state_publisher',
        output='screen',
        parameters=[rsp_params],
        condition=IfCondition(LaunchConfiguration('rviz'))
    )
    rviz_runner = Node(
        package='rviz2',
        executable='rviz2',
        name='rviz2',
        arguments=['-d', str(rviz_cfg_path)],
        condition=IfCondition(LaunchConfiguration('rviz'))
    )
    # Create tracing runners
    ros_tracing = Trace(
        session_name='pendulum',
        events_kernel=[],
        condition=IfCondition(LaunchConfiguration('trace'))
    )
    # Separate session that additionally traces libc allocation calls (UST)
    # and kernel page alloc/free events.
    ros_tracing_memory_usage = Trace(
        session_name='pendulum-memory-usage',
        events_ust=[
            'lttng_ust_libc:malloc',
            'lttng_ust_libc:calloc',
            'lttng_ust_libc:realloc',
            'lttng_ust_libc:free',
            'lttng_ust_libc:memalign',
            'lttng_ust_libc:posix_memalign',
        ] + DEFAULT_EVENTS_ROS,
        events_kernel=[
            'kmem_mm_page_alloc',
            'kmem_mm_page_free',
        ],
        condition=IfCondition(LaunchConfiguration('trace-memory'))
    )
    return LaunchDescription([
        trace_param,
        trace_memory_param,
        ros_tracing,
        ros_tracing_memory_usage,
        autostart_param,
        priority_param,
        cpu_affinity_param,
        with_lock_memory_param,
        lock_memory_size_param,
        config_child_threads_param,
        driver_enable_param,
        controller_enable_param,
        with_rviz_param,
        robot_state_publisher_runner,
        pendulum_demo_runner,
        rviz_runner
    ])
| [
"launch.substitutions.LaunchConfiguration",
"os.path.join",
"launch_ros.substitutions.FindPackageShare",
"launch.LaunchDescription",
"launch.actions.DeclareLaunchArgument"
] | [((1189, 1239), 'os.path.join', 'os.path.join', (['bringup_dir', '"""urdf"""', '"""pendulum.urdf"""'], {}), "(bringup_dir, 'urdf', 'pendulum.urdf')\n", (1201, 1239), False, 'import os\n'), ((1416, 1474), 'os.path.join', 'os.path.join', (['bringup_dir', '"""params"""', '"""pendulum.param.yaml"""'], {}), "(bringup_dir, 'params', 'pendulum.param.yaml')\n", (1428, 1474), False, 'import os\n'), ((1618, 1665), 'os.path.join', 'os.path.join', (['bringup_dir', '"""rviz/pendulum.rviz"""'], {}), "(bringup_dir, 'rviz/pendulum.rviz')\n", (1630, 1665), False, 'import os\n'), ((1737, 1854), 'launch.actions.DeclareLaunchArgument', 'DeclareLaunchArgument', ([], {'name': '"""autostart"""', 'default_value': '"""True"""', 'description': '"""Automatically start lifecycle nodes"""'}), "(name='autostart', default_value='True', description=\n 'Automatically start lifecycle nodes')\n", (1758, 1854), False, 'from launch.actions import DeclareLaunchArgument\n'), ((1896, 1994), 'launch.actions.DeclareLaunchArgument', 'DeclareLaunchArgument', ([], {'name': '"""priority"""', 'default_value': '"""0"""', 'description': '"""Set process priority"""'}), "(name='priority', default_value='0', description=\n 'Set process priority')\n", (1917, 1994), False, 'from launch.actions import DeclareLaunchArgument\n'), ((2040, 2146), 'launch.actions.DeclareLaunchArgument', 'DeclareLaunchArgument', ([], {'name': '"""cpu-affinity"""', 'default_value': '"""0"""', 'description': '"""Set process CPU affinity"""'}), "(name='cpu-affinity', default_value='0', description=\n 'Set process CPU affinity')\n", (2061, 2146), False, 'from launch.actions import DeclareLaunchArgument\n'), ((2196, 2303), 'launch.actions.DeclareLaunchArgument', 'DeclareLaunchArgument', ([], {'name': '"""lock-memory"""', 'default_value': '"""False"""', 'description': '"""Lock the process memory"""'}), "(name='lock-memory', default_value='False',\n description='Lock the process memory')\n", (2217, 2303), False, 'from launch.actions import 
DeclareLaunchArgument\n'), ((2354, 2465), 'launch.actions.DeclareLaunchArgument', 'DeclareLaunchArgument', ([], {'name': '"""lock-memory-size"""', 'default_value': '"""0"""', 'description': '"""Set lock memory size in MB"""'}), "(name='lock-memory-size', default_value='0',\n description='Set lock memory size in MB')\n", (2375, 2465), False, 'from launch.actions import DeclareLaunchArgument\n'), ((2520, 2668), 'launch.actions.DeclareLaunchArgument', 'DeclareLaunchArgument', ([], {'name': '"""config-child-threads"""', 'default_value': '"""False"""', 'description': '"""Configure process child threads (typically DDS threads)"""'}), "(name='config-child-threads', default_value='False',\n description='Configure process child threads (typically DDS threads)')\n", (2541, 2668), False, 'from launch.actions import DeclareLaunchArgument\n'), ((2716, 2837), 'launch.actions.DeclareLaunchArgument', 'DeclareLaunchArgument', ([], {'name': '"""driver-enable"""', 'default_value': '"""True"""', 'description': '"""Enable/disable pendulum driver nodes"""'}), "(name='driver-enable', default_value='True',\n description='Enable/disable pendulum driver nodes')\n", (2737, 2837), False, 'from launch.actions import DeclareLaunchArgument\n'), ((2889, 3016), 'launch.actions.DeclareLaunchArgument', 'DeclareLaunchArgument', ([], {'name': '"""controller-enable"""', 'default_value': '"""True"""', 'description': '"""Enable/disable controller driver nodes"""'}), "(name='controller-enable', default_value='True',\n description='Enable/disable controller driver nodes')\n", (2910, 3016), False, 'from launch.actions import DeclareLaunchArgument\n'), ((3060, 3172), 'launch.actions.DeclareLaunchArgument', 'DeclareLaunchArgument', (['"""rviz"""'], {'default_value': '"""False"""', 'description': '"""Launch RVIZ2 in addition to other nodes"""'}), "('rviz', default_value='False', description=\n 'Launch RVIZ2 in addition to other nodes')\n", (3081, 3172), False, 'from launch.actions import 
DeclareLaunchArgument\n'), ((3216, 3315), 'launch.actions.DeclareLaunchArgument', 'DeclareLaunchArgument', (['"""trace"""'], {'default_value': '"""False"""', 'description': '"""Launch ROS tracing action"""'}), "('trace', default_value='False', description=\n 'Launch ROS tracing action')\n", (3237, 3315), False, 'from launch.actions import DeclareLaunchArgument\n'), ((3366, 3510), 'launch.actions.DeclareLaunchArgument', 'DeclareLaunchArgument', (['"""trace-memory"""'], {'default_value': '"""False"""', 'description': '"""Launch ROS tracing action with memory functions tracing enabled"""'}), "('trace-memory', default_value='False', description=\n 'Launch ROS tracing action with memory functions tracing enabled')\n", (3387, 3510), False, 'from launch.actions import DeclareLaunchArgument\n'), ((5532, 5900), 'launch.LaunchDescription', 'LaunchDescription', (['[trace_param, trace_memory_param, ros_tracing, ros_tracing_memory_usage,\n autostart_param, priority_param, cpu_affinity_param,\n with_lock_memory_param, lock_memory_size_param,\n config_child_threads_param, driver_enable_param,\n controller_enable_param, with_rviz_param, robot_state_publisher_runner,\n pendulum_demo_runner, rviz_runner]'], {}), '([trace_param, trace_memory_param, ros_tracing,\n ros_tracing_memory_usage, autostart_param, priority_param,\n cpu_affinity_param, with_lock_memory_param, lock_memory_size_param,\n config_child_threads_param, driver_enable_param,\n controller_enable_param, with_rviz_param, robot_state_publisher_runner,\n pendulum_demo_runner, rviz_runner])\n', (5549, 5900), False, 'from launch import LaunchDescription\n'), ((1071, 1107), 'launch_ros.substitutions.FindPackageShare', 'FindPackageShare', (['"""pendulum_bringup"""'], {}), "('pendulum_bringup')\n", (1087, 1107), False, 'from launch_ros.substitutions import FindPackageShare\n'), ((3766, 3798), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""autostart"""'], {}), "('autostart')\n", (3785, 3798), False, 
'from launch.substitutions import LaunchConfiguration\n'), ((3825, 3856), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""priority"""'], {}), "('priority')\n", (3844, 3856), False, 'from launch.substitutions import LaunchConfiguration\n'), ((3887, 3922), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""cpu-affinity"""'], {}), "('cpu-affinity')\n", (3906, 3922), False, 'from launch.substitutions import LaunchConfiguration\n'), ((3952, 3986), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""lock-memory"""'], {}), "('lock-memory')\n", (3971, 3986), False, 'from launch.substitutions import LaunchConfiguration\n'), ((4021, 4060), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""lock-memory-size"""'], {}), "('lock-memory-size')\n", (4040, 4060), False, 'from launch.substitutions import LaunchConfiguration\n'), ((4099, 4142), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""config-child-threads"""'], {}), "('config-child-threads')\n", (4118, 4142), False, 'from launch.substitutions import LaunchConfiguration\n'), ((4174, 4210), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""driver-enable"""'], {}), "('driver-enable')\n", (4193, 4210), False, 'from launch.substitutions import LaunchConfiguration\n'), ((4246, 4286), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""controller-enable"""'], {}), "('controller-enable')\n", (4265, 4286), False, 'from launch.substitutions import LaunchConfiguration\n'), ((4522, 4549), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""rviz"""'], {}), "('rviz')\n", (4541, 4549), False, 'from launch.substitutions import LaunchConfiguration\n'), ((4733, 4760), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""rviz"""'], {}), "('rviz')\n", (4752, 4760), False, 'from launch.substitutions import LaunchConfiguration\n'), ((4912, 4940), 
'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""trace"""'], {}), "('trace')\n", (4931, 4940), False, 'from launch.substitutions import LaunchConfiguration\n'), ((5477, 5512), 'launch.substitutions.LaunchConfiguration', 'LaunchConfiguration', (['"""trace-memory"""'], {}), "('trace-memory')\n", (5496, 5512), False, 'from launch.substitutions import LaunchConfiguration\n')] |
# Generated by Django 2.1.15 on 2020-01-22 22:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Alter Topic.committee into a cascading FK with reverse name 'topics'."""

    # Must run after the previous committees migration.
    dependencies = [
        ('committees', '0008_auto_20200114_1807'),
    ]

    operations = [
        migrations.AlterField(
            model_name='topic',
            name='committee',
            # CASCADE: deleting a Committee also deletes its Topics;
            # reverse accessor becomes committee.topics.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='topics', to='committees.Committee'),
        ),
    ]
| [
"django.db.models.ForeignKey"
] | [((391, 508), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""topics"""', 'to': '"""committees.Committee"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='topics', to='committees.Committee')\n", (408, 508), False, 'from django.db import migrations, models\n')] |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for interaction_rdms.py."""
from __future__ import absolute_import
import unittest
from fermilib.config import *
from fermilib.utils import MolecularData
from fermilib.transforms import jordan_wigner
class InteractionRDMTest(unittest.TestCase):

    def setUp(self):
        """Load the H2/STO-3G singlet molecule and cache derived quantities."""
        bond = [('H', (0., 0., 0.)), ('H', (0., 0., 0.7414))]
        data_file = os.path.join(THIS_DIRECTORY, 'data', 'H2_sto-3g_singlet')
        self.molecule = MolecularData(bond, 'sto-3g', 1, filename=data_file)
        self.molecule.load()
        self.cisd_energy = self.molecule.cisd_energy
        self.rdm = self.molecule.get_molecular_rdm()
        self.hamiltonian = self.molecule.get_molecular_hamiltonian()

    def test_get_qubit_expectations(self):
        """Energy rebuilt from qubit expectation values matches CISD energy."""
        qubit_operator = jordan_wigner(self.hamiltonian)
        expectations = self.rdm.get_qubit_expectations(qubit_operator)
        # Identity coefficient plus the expectation-weighted sum of all terms.
        test_energy = qubit_operator.terms[()] + sum(
            qubit_operator.terms[term] * expectations.terms[term]
            for term in expectations.terms)
        self.assertLess(abs(test_energy - self.cisd_energy), EQ_TOLERANCE)

    def test_get_molecular_operator_expectation(self):
        """RDM expectation of the molecular Hamiltonian equals CISD energy."""
        self.assertAlmostEqual(self.rdm.expectation(self.hamiltonian),
                               self.cisd_energy, places=7)
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main",
"fermilib.transforms.jordan_wigner",
"fermilib.utils.MolecularData"
] | [((2101, 2116), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2114, 2116), False, 'import unittest\n'), ((1060, 1123), 'fermilib.utils.MolecularData', 'MolecularData', (['geometry', 'basis', 'multiplicity'], {'filename': 'filename'}), '(geometry, basis, multiplicity, filename=filename)\n', (1073, 1123), False, 'from fermilib.utils import MolecularData\n'), ((1410, 1441), 'fermilib.transforms.jordan_wigner', 'jordan_wigner', (['self.hamiltonian'], {}), '(self.hamiltonian)\n', (1423, 1441), False, 'from fermilib.transforms import jordan_wigner\n')] |
import sys
import asyncio
import zmq
import zmq.asyncio
from zmq.auth import Authenticator
from zmq.auth.thread import _inherit_docstrings, ThreadAuthenticator, \
AuthenticationThread
# Copying code from zqm classes since no way to inject these dependencies
class MultiZapAuthenticator(Authenticator):
    """
    `Authenticator` supports only one ZAP socket in a single process, this lets
    you have multiple ZAP sockets
    """
    count = 0

    def __init__(self, context=None, encoding='utf-8', log=None):
        # Class-wide counter: every instance claims the next ZAP endpoint index.
        MultiZapAuthenticator.count += 1
        super().__init__(context=context, encoding=encoding, log=log)

    def start(self):
        """Create and bind the ZAP socket"""
        endpoint = 'inproc://zeromq.zap.{}'.format(MultiZapAuthenticator.count)
        self.zap_socket = self.context.socket(zmq.REP)
        self.zap_socket.linger = 1
        self.zap_socket.bind(endpoint)
        self.log.debug('Starting ZAP at {}'.format(endpoint))

    def stop(self):
        """Close the ZAP socket"""
        if self.zap_socket:
            self.log.debug(
                'Stopping ZAP at {}'.format(self.zap_socket.LAST_ENDPOINT))
        super().stop()
@_inherit_docstrings
class ThreadMultiZapAuthenticator(ThreadAuthenticator):
    def start(self):
        """Start the authentication thread.

        Creates the PAIR control pipe, spawns an `AuthenticationThread` backed
        by a `MultiZapAuthenticator` (so multiple ZAP sockets can coexist in
        one process), and waits up to 10 seconds for the thread to come up.

        Raises:
            RuntimeError: if the authenticator thread fails to start in time.
        """
        # create a socket to communicate with auth thread.
        self.pipe = self.context.socket(zmq.PAIR)
        self.pipe.linger = 1
        self.pipe.bind(self.pipe_endpoint)
        authenticator = MultiZapAuthenticator(self.context, encoding=self.encoding,
                                              log=self.log)
        self.thread = AuthenticationThread(self.context, self.pipe_endpoint,
                                           encoding=self.encoding, log=self.log,
                                           authenticator=authenticator)
        self.thread.start()
        # The old Python 2.6 fallback (Event.wait returning None) was dead
        # code: this module requires Python 3 (no-arg super(), async def),
        # where wait() reliably returns a boolean.
        if not self.thread.started.wait(timeout=10):
            raise RuntimeError("Authenticator thread failed to start")
class AsyncioAuthenticator(MultiZapAuthenticator):
    """ZAP authentication for use in the asyncio IO loop"""

    def __init__(self, context=None, loop=None):
        """
        :param context: optional zmq context (Authenticator creates one if None)
        :param loop: asyncio event loop; defaults to the current event loop
        """
        super().__init__(context)
        self.loop = loop or asyncio.get_event_loop()
        self.__poller = None
        self.__task = None

    async def __handle_zap(self):
        # Service ZAP requests until the task is cancelled from stop().
        while True:
            events = await self.__poller.poll()
            if self.zap_socket in dict(events):
                msg = await self.zap_socket.recv_multipart()
                self.handle_zap_message(msg)

    def start(self):
        """Start ZAP authentication"""
        super().start()
        self.__poller = zmq.asyncio.Poller()
        self.__poller.register(self.zap_socket, zmq.POLLIN)
        self.__task = asyncio.ensure_future(self.__handle_zap())

    def stop(self):
        """Stop ZAP authentication"""
        if self.__task:
            self.__task.cancel()
        if self.__poller:
            self.__poller.unregister(self.zap_socket)
            self.__poller = None
        super().stop()
| [
"asyncio.get_event_loop",
"zmq.asyncio.Poller",
"zmq.auth.thread.AuthenticationThread"
] | [((1674, 1800), 'zmq.auth.thread.AuthenticationThread', 'AuthenticationThread', (['self.context', 'self.pipe_endpoint'], {'encoding': 'self.encoding', 'log': 'self.log', 'authenticator': 'authenticator'}), '(self.context, self.pipe_endpoint, encoding=self.\n encoding, log=self.log, authenticator=authenticator)\n', (1694, 1800), False, 'from zmq.auth.thread import _inherit_docstrings, ThreadAuthenticator, AuthenticationThread\n'), ((3250, 3270), 'zmq.asyncio.Poller', 'zmq.asyncio.Poller', ([], {}), '()\n', (3268, 3270), False, 'import zmq\n'), ((2458, 2482), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2480, 2482), False, 'import asyncio\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
**Project Name:** MakeHuman
**Product Home Page:** http://www.makehumancommunity.org/
**Github Code Home Page:** https://github.com/makehumancommunity/
**Authors:** <NAME>, <NAME>
**Copyright(c):** MakeHuman Team 2001-2019
**Licensing:** AGPL3
This file is part of MakeHuman Community (www.makehumancommunity.org).
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Abstract
--------
TODO
"""
import os
import io
import math
import numpy as np
from core import G
import getpath
import log
from collections import OrderedDict
import makehuman
import material
import json
#
# Proxy types. Loop over simple proxy types to do all proxies.
# Some code use lowercase proxy types instead.
#
SimpleProxyTypes = ['Hair', 'Eyes', 'Eyebrows', 'Eyelashes', 'Teeth', 'Tongue']
ProxyTypes = ['Proxymeshes', 'Clothes'] + SimpleProxyTypes
SimpleProxyTypesLower = []
for name in SimpleProxyTypes:
SimpleProxyTypesLower.append(name.lower())
_A7converter = None
Unit3 = np.identity(3,float)
class Proxy:
    """A mesh (clothes, hair, eyes, ... or an alternative body topology) fitted
    onto the base human mesh.

    Each proxy vertex is expressed as a weighted combination of up to three
    human vertices plus an offset vector (see getCoords()).
    """

    def __init__(self, file, type, human):
        log.debug("Loading proxy file: %s.", file)
        import makehuman
        # Default asset name derived from the file basename.
        name = os.path.splitext(os.path.basename(file))[0]
        self.name = name.capitalize().replace(" ","_")
        self.license = makehuman.getAssetLicense()
        self.description = ""
        self.type = type  # one of ProxyTypes
        self.object = None
        self.human = human
        if not human:
            raise RuntimeError("Proxy constructor expects a valid human object.")
        self.file = file
        if file:
            self.mtime = os.path.getmtime(file)
        else:
            self.mtime = None
        self.uuid = None
        self.basemesh = makehuman.getBasemeshVersion()
        self.tags = []
        self.version = 110

        self.ref_vIdxs = None  # (Vidx1,Vidx2,Vidx3) list with references to human vertex indices, indexed by proxy vert
        self.weights = None  # (w1,w2,w3) list, with weights per human vertex (mapped by ref_vIdxs), indexed by proxy vert
        self.vertWeights = {}  # (proxy-vert, weight) list for each parent vert (reverse mapping of self.weights, indexed by human vertex)
        self.offsets = None  # (x,y,z) list of vertex offsets, indexed by proxy vert

        self.vertexBoneWeights = None  # Explicitly defined custom vertex-to-bone weights, connecting the proxy mesh to the reference skeleton (optional)
        # Not to be confused with the vertex weights assigned for mapping the proxy mesh geometry to the base mesh

        self.tmatrix = TMatrix()  # Offset transformation matrix. Replaces scale

        self.z_depth = -1  # Render order depth for the proxy object. Also used to determine which proxy object should mask others (delete faces)
        self.max_pole = None  # Signifies the maximum number of faces per vertex on the mesh topology. Set to none for default.

        self.special_pose = {}  # Special poses that should be set on the human when this proxy is active to make it look good

        self.uvLayers = {}  # TODO what is this used for?

        self.material = material.Material(self.name)

        self._obj_file = None
        self._vertexBoneWeights_file = None
        self._material_file = None

        self.deleteVerts = np.zeros(human.meshData.getVertexCount(), bool)

        # Cached result of getVertexWeights() and the skeleton it was built for.
        self.weightsCache = None
        self.cacheSkel = None

    @property
    def material_file(self):
        """Resolved path of the material (.mhmat) file, or None."""
        folder = os.path.dirname(self.file) if self.file else None
        return _getFilePath(self._material_file, folder)

    @property
    def obj_file(self):
        """Resolved path of the mesh file; compiled .npz preferred over .obj."""
        folder = os.path.dirname(self.file) if self.file else None
        return _getFilePath(self._obj_file, folder, ['npz', 'obj'])

    @property
    def vertexBoneWeights_file(self):
        """Resolved path of the optional custom vertex weights (.jsonw) file."""
        folder = os.path.dirname(self.file) if self.file else None
        return _getFilePath(self._vertexBoneWeights_file, folder)

    def __repr__(self):
        return ("<Proxy %s %s %s %s>" % (self.name, self.type, self.file, self.uuid))

    def getSeedMesh(self):
        """Return the unsubdivided (seed) mesh this proxy is attached to."""
        for pxy in self.human.getProxies():
            if self == pxy:
                return pxy.object.getSeedMesh()

        if self.type == "Proxymeshes":
            if not self.human.proxy:
                return None
            return self.human.getProxyMesh()
        elif self.type in ["Converter"]:
            return None
        else:
            raise NameError("Unknown proxy type %s" % self.type)

    def getMesh(self):
        """Return this proxy's mesh, or None if no object is attached yet."""
        if not self.object:
            return None
        return self.object.mesh

    def loadMeshAndObject(self, human):
        """Load the proxy mesh from obj_file and wrap it in a scene object.

        Returns (mesh, obj); obj is False when the mesh failed to load.
        """
        import files3d
        import guicommon
        obj = False
        mesh = files3d.loadMesh(self.obj_file, maxFaces = self.max_pole)
        if not mesh:
            log.error("Failed to load %s", self.obj_file)
        else:
            mesh.priority = self.z_depth # Set render order
            mesh.setCameraProjection(0) # Set to model camera
            obj = self.object = guicommon.Object(mesh, human.getPosition())
            obj.proxy = self
            obj.material = self.material
            obj.setRotation(human.getRotation())
            obj.setSolid(human.solid) # Set to wireframe if human is in wireframe
            # TODO perhaps other properties should be copied from human to object, such as subdivision state. For other hints, and duplicate code, see guicommon Object.setProxy()

        # TODO why return both obj and mesh if you can access the mesh easily through obj.mesh?
        return mesh,obj

    def _finalize(self, refVerts):
        """
        Final step in parsing/loading a proxy file. Initializes numpy structures
        for performance improvement.
        """
        self.weights = np.asarray([v._weights for v in refVerts], dtype=np.float32)
        self.ref_vIdxs = np.asarray([v._verts for v in refVerts], dtype=np.uint32)
        self.offsets = np.asarray([v._offset for v in refVerts], dtype=np.float32)

    def _reloadReverseMapping(self):
        """
        Reconstruct reverse vertex (and weights) mapping
        """
        self.vertWeights = {}
        for pxy_vIdx in range(self.ref_vIdxs.shape[0]):
            _addProxyVertWeight(self.vertWeights, self.ref_vIdxs[pxy_vIdx, 0], pxy_vIdx, self.weights[pxy_vIdx, 0])
            _addProxyVertWeight(self.vertWeights, self.ref_vIdxs[pxy_vIdx, 1], pxy_vIdx, self.weights[pxy_vIdx, 1])
            _addProxyVertWeight(self.vertWeights, self.ref_vIdxs[pxy_vIdx, 2], pxy_vIdx, self.weights[pxy_vIdx, 2])

    def getCoords(self, fit_to_posed=False):
        """Compute the proxy's vertex coordinates from the human mesh.

        Each proxy vertex is the weighted sum of its (up to) three reference
        human vertices plus a transformed offset. When fit_to_posed is True
        the posed coordinates are used instead of the rest pose.
        """
        if fit_to_posed:
            hcoord = self.human.meshData.coord
        else:
            hcoord = self.human.getRestposeCoordinates()
        matrix = self.tmatrix.getMatrix(hcoord)

        ref_vIdxs = self.ref_vIdxs
        weights = self.weights

        coord = (
            hcoord[ref_vIdxs[:,0]] * weights[:,0,None] +
            hcoord[ref_vIdxs[:,1]] * weights[:,1,None] +
            hcoord[ref_vIdxs[:,2]] * weights[:,2,None] +
            np.dot(matrix, self.offsets.transpose()).transpose()
            )

        return coord

    def update(self, mesh, fit_to_posed=False):
        """Refit *mesh* (the proxy mesh) to the current human shape."""
        #log.debug("Updating proxy %s.", self.name)
        coords = self.getCoords(fit_to_posed)
        mesh.changeCoords(coords)
        mesh.calcNormals()

    def getUuid(self):
        """Return the proxy's uuid, falling back to its name when unset."""
        if self.uuid:
            return self.uuid
        else:
            return self.name

    def hasCustomVertexWeights(self):
        """
        Determines whether this proxy explicitly defines its own set of vertex
        to bone weights (defined on the bones of the reference skeleton).
        Returns True if this proxy has custom vertex weights, False if it does
        not, in which case vertex weights will be derived from the weights of
        the basemesh, mapped through the vertex mapping of the proxy.
        """
        return self.vertexBoneWeights is not None

    def getVertexWeights(self, humanWeights, skel=None, allowCache=False):
        """
        Map armature weights mapped to the human to the proxy mesh through the
        proxy mapping.
        humanWeights is expected to be an animation.VertexBoneWeights object.

        Only when this proxy has custom weights:
        Optionally remaps the weights to fit a user-selected skeleton when a
        skel is supplied as argument. If no skel argument is provided, the
        weights for the base skeleton are returned.

        Note: these vertex weights are intended for rigging and are not to be
        confused with getWeights() which returns the weights of the proxy
        mapping to the basemesh.
        """
        # Override proxy weights mapping behaviour if this proxy has its own
        # bone weights defined explicitly.
        # This requires remapping the vertex weights of the proxy, defined on
        # the bones of the reference skeleton, to those of the current skeleton.
        # The current skeleton is retrieved from the human object linked to this
        # proxy.
        import time
        import log

        if self.hasCustomVertexWeights():
            # TODO we could introduce caching of weights here as long as the skeleton is not changed
            if skel is None:
                return self.human.getBaseSkeleton().getVertexWeights(self.vertexBoneWeights, force_remap=True)
            else:
                return skel.getVertexWeights(self.vertexBoneWeights, force_remap=True)

        # Remap weights through proxy mapping
        WEIGHT_THRESHOLD = 1e-4  # Threshold for including bone weight

        # The cache is reused only when caching is allowed, a cached result
        # exists, and the requested skeleton equals the cached one.
        recalculate = True
        weights = OrderedDict()
        if not allowCache:
            pass
            #print("Caching not allowed")
        else:
            if self.weightsCache is None:
                pass
                #print("There is no cache")
            else:
                if not skel is None:
                    if skel == self.cacheSkel:
                        recalculate = False
                    else:
                        log.debug("The skeleton is different")

        if recalculate:
            log.debug("remapping weights for proxy " + self.name)
            start = time.perf_counter()
            for bname, (indxs, wghts) in list(humanWeights.data.items()):
                vgroup = []
                empty = True
                for (v,wt) in zip(indxs, wghts):
                    try:
                        vlist = self.vertWeights[v]
                    except KeyError:
                        vlist = []
                    for (pv, w) in vlist:
                        # Combine bone weight with proxy-mapping weight;
                        # drop negligible contributions.
                        pw = w*wt
                        if (pw > WEIGHT_THRESHOLD):
                            vgroup.append((pv, pw))
                            empty = False
                if not empty:
                    weights[bname] = vgroup
            stop = time.perf_counter()
            hw = humanWeights.create(weights)
            if allowCache:
                self.weightsCache = hw
                self.cacheSkel = skel
            else:
                self.weightsCache = None
                self.cacheSkel = None
            log.debug("remapping weights for %s took %.5f seconds", self.name, stop - start)
        else:
            hw = self.weightsCache
        return hw
# Parser states for loadTextProxy(): which section of the ASCII proxy file
# is currently being read.
doRefVerts = 1
doWeights = 2
doDeleteVerts = 3
def loadProxy(human, path, type="Clothes"):
    """Load a proxy for *human* from *path*.

    Prefers the compiled binary cache (.mhpxy); when it is missing or older
    than the ASCII source, falls back to parsing the ASCII file and recompiles
    the cache (only inside the user data path). Returns the Proxy, or None on
    failure.
    """
    try:
        npzpath = os.path.splitext(path)[0] + '.mhpxy'
        asciipath = os.path.splitext(path)[0] + getAsciiFileExtension(type)
        try:
            if not os.path.isfile(npzpath):
                log.message('compiled proxy file missing: %s', npzpath)
                raise RuntimeError('compiled proxy file missing: %s', npzpath)
            if os.path.isfile(asciipath) and os.path.getmtime(asciipath) > os.path.getmtime(npzpath):
                log.message('compiled proxy file out of date: %s', npzpath)
                raise RuntimeError('compiled file out of date: %s', npzpath)
            proxy = loadBinaryProxy(npzpath, human, type)
        except Exception as e:
            # RuntimeError above is an expected cache miss; anything else
            # gets a full traceback in the log.
            showTrace = not isinstance(e, RuntimeError)
            log.warning("Problem loading binary proxy: %s", e, exc_info=showTrace)
            proxy = loadTextProxy(human, asciipath, type) # TODO perhaps proxy type should be stored in .mhclo file too
            if getpath.isSubPath(npzpath, getpath.getPath()):
                # Only write compiled binary proxies to user data path
                try:
                    log.message('Compiling binary proxy file %s', npzpath)
                    saveBinaryProxy(proxy, npzpath)
                except Exception:
                    log.notice('unable to save compiled proxy: %s', npzpath, exc_info=True)
                    if os.path.isfile(npzpath):
                        # Remove file again, in case an empty file is left
                        try:
                            os.remove(npzpath)
                        except Exception as e:
                            log.warning("Could not remove empty file %s that was left behind (%s).", npzpath, e)
            else:
                log.debug('Not writing compiled proxies to system paths (%s).', npzpath)
    except:
        log.error('Unable to load proxy file: %s', path, exc_info=True)
        return None
    return proxy
def loadTextProxy(human, filepath, type="Clothes"):
    """Parse an ASCII proxy file (.mhclo/.proxy) and return a Proxy.

    The file is a line-oriented format: keyword lines set metadata, and the
    'verts', 'weights' and 'delete_verts' keywords switch the parser into the
    corresponding section state. Returns None when the file cannot be opened.
    """
    import io
    try:
        fp = io.open(filepath, "r", encoding="utf-8")
    except IOError:
        log.error("*** Cannot open %s", filepath)
        return None

    folder = os.path.realpath(os.path.expanduser(os.path.dirname(filepath)))
    proxy = Proxy(filepath, type, human)
    proxy.max_pole = 8;
    refVerts = []

    # Current parser section (0 = header keywords).
    status = 0
    vnum = 0  # index of the next proxy vertex in the 'verts' section
    for line in fp:
        words = line.split()

        if len(words) == 0:
            # Reset status on empty line
            #status = 0
            continue

        if words[0].startswith('#'):
            # Comment
            # Try interpreting comment attributes as license info
            proxy.license.updateFromComment(line)
            continue

        key = words[0]

        if key == 'name':
            proxy.name = " ".join(words[1:])
        elif key == 'uuid':
            proxy.uuid = " ".join(words[1:])
        elif key == 'description':
            proxy.description = " ".join(words[1:])
        elif key in ['author', 'license', 'homepage']:
            proxy.license.updateFromComment(words)
        elif key == 'tag':
            proxy.tags.append( " ".join(words[1:]).lower() )
        elif key == 'version':
            proxy.version = int(words[1])
        elif key == 'z_depth':
            proxy.z_depth = int(words[1])
        elif key == 'max_pole':
            proxy.max_pole = int(words[1])
        elif key == 'special_pose':
            proxy.special_pose[words[1]] = words[2]
        elif key == 'verts':
            status = doRefVerts
        elif key == 'weights':
            status = doWeights
            if proxy.weights is None:
                proxy.weights = {}
            weights = []
            proxy.weights[words[1]] = weights
        elif key == "delete_verts":
            status = doDeleteVerts
        elif key == 'obj_file':
            proxy._obj_file = _getFileName(folder, words[1], ".obj")
        elif key == 'material':
            matFile = _getFileName(folder, words[1], ".mhmat")
            proxy._material_file = matFile
            proxy.material.fromFile(proxy.material_file)
        elif key == 'vertexboneweights_file':
            from animation import VertexBoneWeights
            proxy._vertexBoneWeights_file = _getFileName(folder, words[1], ".jsonw")
            proxy.vertexBoneWeights = VertexBoneWeights.fromFile(proxy.vertexBoneWeights_file)
        elif key == 'backface_culling':
            # TODO remove in future
            log.warning('Deprecated parameter "backface_culling" used in proxy file. Set property backfaceCull in material instead.')
        elif key == 'transparent':
            # TODO remove in future
            log.warning('Deprecated parameter "transparent" used in proxy file. Set property in material file instead.')
        elif key == 'uvLayer':
            # TODO is this still used?
            if len(words) > 2:
                layer = int(words[1])
                uvFile = words[2]
            else:
                layer = 0
                uvFile = words[1]
            #uvMap = material.UVMap(proxy.name+"UV"+str(layer))
            #uvMap.read(proxy.mesh, _getFileName(folder, uvFile, ".mhuv"))
            # Delayed load, only store path here
            proxy.uvLayers[layer] = _getFileName(folder, uvFile, ".mhuv")
        elif key == 'x_scale':
            proxy.tmatrix.getScaleData(words, 0)
        elif key == 'y_scale':
            proxy.tmatrix.getScaleData(words, 1)
        elif key == 'z_scale':
            proxy.tmatrix.getScaleData(words, 2)
        elif key == 'shear_x':
            proxy.tmatrix.getShearData(words, 0, None)
        elif key == 'shear_y':
            proxy.tmatrix.getShearData(words, 1, None)
        elif key == 'shear_z':
            proxy.tmatrix.getShearData(words, 2, None)
        elif key == 'l_shear_x':
            proxy.tmatrix.getShearData(words, 0, 'Left')
        elif key == 'l_shear_y':
            proxy.tmatrix.getShearData(words, 1, 'Left')
        elif key == 'l_shear_z':
            proxy.tmatrix.getShearData(words, 2, 'Left')
        elif key == 'r_shear_x':
            proxy.tmatrix.getShearData(words, 0, 'Right')
        elif key == 'r_shear_y':
            proxy.tmatrix.getShearData(words, 1, 'Right')
        elif key == 'r_shear_z':
            proxy.tmatrix.getShearData(words, 2, 'Right')
        elif key == 'basemesh':
            proxy.basemesh = words[1]
        elif key in ['shapekey', 'subsurf', 'shrinkwrap', 'solidify', 'objfile_layer', 'uvtex_layer', 'use_projection', 'mask_uv_layer', 'texture_uv_layer', 'delete', 'vertexgroup_file']:
            log.warning('Deprecated parameter "%s" used in proxy file. Please remove.', key)

        elif status == doRefVerts:
            refVert = ProxyRefVert(human)
            refVerts.append(refVert)
            if len(words) == 1:
                refVert.fromSingle(words, vnum, proxy.vertWeights)
            else:
                refVert.fromTriple(words, vnum, proxy.vertWeights)
            vnum += 1

        elif status == doWeights:
            v = int(words[0])
            w = float(words[1])
            weights.append((v,w))

        elif status == doDeleteVerts:
            # Vertex indices, with "a - b" denoting an inclusive range.
            # NOTE(review): a file whose delete_verts section starts with "-"
            # would reference v0 before assignment — assumed never to occur
            # in well-formed files.
            sequence = False
            for v in words:
                if v == "-":
                    sequence = True
                else:
                    v1 = int(v)
                    if sequence:
                        for vn in range(v0,v1+1):
                            proxy.deleteVerts[vn] = True
                        sequence = False
                    else:
                        proxy.deleteVerts[v1] = True
                    v0 = v1

        else:
            log.warning('Unknown keyword %s found in proxy file %s', key, filepath)

    if proxy.z_depth == -1:
        log.warning('Proxy file %s does not specify a Z depth. Using 50.', filepath)
        proxy.z_depth = 50

    # since max-pole is used for the calculation of neighboring planes we have to double it initially
    proxy.max_pole *= 2

    proxy._finalize(refVerts)

    return proxy
def saveBinaryProxy(proxy, path):
    """Write *proxy* to a compressed binary .npz archive at *path*.

    Strings are stored as S1 byte arrays; file paths referenced by the proxy
    are jailed relative to the archive's folder so it stays relocatable.
    """
    # Must be assigned before _properPath is ever called. In the original
    # code `folder` was set only after the uvLayers were packed, so any
    # proxy with UV layers raised NameError.
    folder = os.path.dirname(path)

    def _properPath(p):
        # Store asset paths relative to the npz folder (jailed).
        return getpath.getJailedPath(p, folder)

    def _toByteArray(text):
        # np.fromstring on a str is deprecated (removed in numpy >= 1.24);
        # np.frombuffer over the encoded bytes is the supported equivalent.
        return np.frombuffer(text.encode('utf-8'), dtype='S1')

    tagStr, tagIdx = _packStringList(proxy.tags)
    uvStr, uvIdx = _packStringList(
        [_properPath(proxy.uvLayers[k]) for k in sorted(proxy.uvLayers.keys())])
    licStr, licIdx = proxy.license.toNumpyString()

    vars_ = dict(
        #proxyType = np.fromstring(proxy.type, dtype='S1'),  # TODO store proxy type?
        name = _toByteArray(proxy.name),
        uuid = _toByteArray(proxy.uuid),
        description = _toByteArray(proxy.description),
        basemesh = _toByteArray(proxy.basemesh),
        tags_str = tagStr,
        tags_idx = tagIdx,
        lic_str = licStr,
        lic_idx = licIdx,
        uvLayers_str = uvStr,
        uvLayers_idx = uvIdx,
        obj_file = _toByteArray(_properPath(proxy.obj_file)),
        version = np.asarray(proxy.version, dtype=np.int32)
    )

    if proxy.material_file:
        vars_["material_file"] = _toByteArray(_properPath(proxy.material_file))

    if np.any(proxy.deleteVerts):
        vars_["deleteVerts"] = proxy.deleteVerts

    if proxy.z_depth is not None and proxy.z_depth != -1:
        vars_["z_depth"] = np.asarray(proxy.z_depth, dtype=np.int32)

    if proxy.max_pole:
        vars_["max_pole"] = np.asarray(proxy.max_pole, dtype=np.uint32)

    proxy.tmatrix.toNumpyStruct(vars_)

    special_poses = []
    for posetype, posename in list(proxy.special_pose.items()):
        special_poses.append(posetype)
        special_poses.append(posename)
    specialposeStr, specialposeIdx = _packStringList(special_poses)
    vars_["special_pose_str"] = specialposeStr
    vars_["special_pose_idx"] = specialposeIdx

    if proxy.weights[:,1:].any():
        # 3 ref verts used in this proxy
        num_refverts = 3
        vars_["ref_vIdxs"] = proxy.ref_vIdxs
        vars_["offsets"] = proxy.offsets
        vars_["weights"] = proxy.weights
    else:
        # Proxy uses exact fitting exclusively: store npz file more compactly
        num_refverts = 1
        vars_["ref_vIdxs"] = proxy.ref_vIdxs[:,0]
        vars_["weights"] = proxy.weights[:,0]
    vars_['num_refverts'] = np.asarray(num_refverts, dtype=np.int32)

    if proxy.vertexBoneWeights_file:
        vars_['vertexBoneWeights_file'] = _toByteArray(
            _properPath(proxy.vertexBoneWeights_file))

    # Context manager guarantees the handle is closed even if savez fails.
    with io.open(path, 'wb') as fp:
        np.savez_compressed(fp, **vars_)
    os.utime(path, None)  # Ensure modification time is updated
def loadBinaryProxy(path, human, type):
    """Load a Proxy from a compiled binary (npz) proxy file.

    Inverse of saveBinaryProxy.  `type` intentionally shadows the builtin:
    it is part of the existing call signature and kept for compatibility.

    Fix: ndarray.tostring() (deprecated since NumPy 1.19 and removed in
    NumPy 2.0) is replaced by its exact equivalent ndarray.tobytes().
    """
    log.debug("Loading binary proxy %s.", path)

    npzfile = np.load(path)
    #if type is None:
    #    proxyType = npzfile['proxyType'].tostring()
    #else:
    proxyType = type

    proxy = Proxy(path, proxyType, human)

    proxy.name = str(npzfile['name'].tobytes(), 'utf8')
    proxy.uuid = str(npzfile['uuid'].tobytes(), 'utf8')
    proxy.basemesh = str(npzfile['basemesh'].tobytes(), 'utf8')

    # Optional metadata fields; absent in older compiled files.
    if 'description' in npzfile:
        proxy.description = str(npzfile['description'].tobytes(), 'utf8')

    if 'version' in npzfile:
        proxy.version = int(npzfile['version'])

    if 'lic_str' in npzfile and 'lic_idx' in npzfile:
        proxy.license.fromNumpyString(npzfile['lic_str'], npzfile['lic_idx'])

    proxy.tags = set(_unpackStringList(npzfile['tags_str'], npzfile['tags_idx']))

    if 'z_depth' in npzfile:
        proxy.z_depth = int(npzfile['z_depth'])

    if 'max_pole' in npzfile:
        proxy.max_pole = int(npzfile['max_pole'])

    if 'special_pose_str' in npzfile:
        # Stored as an alternating flat [type, name, type, name, ...] list.
        special_poses = _unpackStringList(npzfile['special_pose_str'],
                                          npzfile['special_pose_idx'])
        for idx in range(0, len(special_poses), 2):
            proxy.special_pose[special_poses[idx]] = special_poses[idx+1]

    num_refverts = int(npzfile['num_refverts'])
    if num_refverts == 3:
        proxy.ref_vIdxs = npzfile['ref_vIdxs']
        proxy.offsets = npzfile['offsets']
        proxy.weights = npzfile['weights']
    else:
        # Exact fitting was stored compactly (one column); expand back to
        # the (n, 3) runtime layout with zero offsets/secondary weights.
        num_refs = npzfile['ref_vIdxs'].shape[0]
        proxy.ref_vIdxs = np.zeros((num_refs,3), dtype=np.uint32)
        proxy.ref_vIdxs[:,0] = npzfile['ref_vIdxs']
        proxy.offsets = np.zeros((num_refs,3), dtype=np.float32)
        proxy.weights = np.zeros((num_refs,3), dtype=np.float32)
        proxy.weights[:,0] = npzfile['weights']

    if "deleteVerts" in npzfile:
        proxy.deleteVerts = npzfile['deleteVerts']

    # Reconstruct reverse vertex (and weights) mapping
    proxy._reloadReverseMapping()

    proxy.tmatrix.fromNumpyStruct(npzfile)

    proxy.uvLayers = {}
    for uvIdx, uvName in enumerate(_unpackStringList(npzfile['uvLayers_str'],
                                                     npzfile['uvLayers_idx'])):
        proxy.uvLayers[uvIdx] = uvName

    proxy.material = material.Material(proxy.name)
    if 'material_file' in npzfile:
        proxy._material_file = str(npzfile['material_file'].tobytes(), 'utf8')
    if proxy.material_file:
        proxy.material.fromFile(proxy.material_file)

    proxy._obj_file = str(npzfile['obj_file'].tobytes(), 'utf8')

    if 'vertexBoneWeights_file' in npzfile:
        proxy._vertexBoneWeights_file = str(
            npzfile['vertexBoneWeights_file'].tobytes(), 'utf8')
        if proxy.vertexBoneWeights_file:
            from animation import VertexBoneWeights
            proxy.vertexBoneWeights = VertexBoneWeights.fromFile(
                proxy.vertexBoneWeights_file)

    if proxy.z_depth == -1:
        log.warning('Proxy file %s does not specify a Z depth. Using 50.', path)
        proxy.z_depth = 50

    return proxy
#
# class ProxyRefVert:
#
class ProxyRefVert:
    """One proxy vertex expressed in terms of (up to three) body vertices.

    Holds the referenced body vertex indices, their barycentric weights and
    an offset vector; getCoord() evaluates the fitted position against the
    human's rest pose.
    """

    def __init__(self, human):
        self.human = human

    def fromSingle(self, words, vnum, vertWeights):
        # TODO store the number of reference verts in proxy so that we can
        # efficiently save and load them.
        vertIdx = int(words[0])
        self._verts = (vertIdx, 0, 1)
        self._weights = (1.0, 0.0, 0.0)
        self._offset = np.zeros(3, float)
        _addProxyVertWeight(vertWeights, vertIdx, vnum, 1)
        return self

    def fromTriple(self, words, vnum, vertWeights):
        idxA, idxB, idxC = int(words[0]), int(words[1]), int(words[2])
        wA, wB, wC = float(words[3]), float(words[4]), float(words[5])
        # Optional explicit offset; defaults to the zero vector.
        if len(words) > 6:
            offset = (float(words[6]), float(words[7]), float(words[8]))
        else:
            offset = (0, 0, 0)

        self._verts = (idxA, idxB, idxC)
        self._weights = (wA, wB, wC)
        self._offset = np.array(offset, float)

        for bodyVert, weight in ((idxA, wA), (idxB, wB), (idxC, wC)):
            _addProxyVertWeight(vertWeights, bodyVert, vnum, weight)
        return self

    def getWeights(self):
        return self._weights

    def getCoord(self, matrix):
        restCoords = self.human.getRestposeCoordinates()
        weighted = np.dot(restCoords[self._verts], self._weights)
        return weighted + np.dot(matrix, self._offset)
def _addProxyVertWeight(vertWeights, v, pv, w):
    """Record that proxy vertex pv references body vertex v with weight w.

    vertWeights maps body vertex index -> list of (proxy vertex, weight)
    pairs; the list is created on first use.
    """
    vertWeights.setdefault(v, []).append((pv, w))
#
# class TMatrix:
# Transformation matrix. Replaces previous scale
#
class TMatrix:
    """
    Transformation matrix. Replaces previous scale.

    Holds optional fit data used to build a 3x3 transform from reference
    vertices on the human mesh: either per-axis scale data, or shear data
    (unsided, left or right).  Each data set is a 3-element list (one entry
    per axis), where an entry is None or a tuple of reference vertex
    indices plus fit parameters (see getScaleData / getShearData).
    """
    def __init__(self):
        # Each is None until the corresponding proxy-file keywords are seen.
        self.scaleData = None
        self.shearData = None
        self.lShearData = None
        self.rShearData = None

    def toNumpyStruct(self, npzfile, prefix=""):
        """Serialize TMatrix in npz file"""
        def _nan_array(size):
            # NOTE(review): currently unused -- NaN sentinels are written
            # inline by the packing helpers below.
            return np.repeat(float('nan'), size).astype(np.float32)
        def _pack_scales(scaleData):
            # Pack [(vidx1, vidx2, scale) | None] * 3 into two flat arrays;
            # a missing axis entry is stored as scale=NaN with vidxs (0, 0).
            scales = list()
            vidxs = list()
            for e_idx, entry in enumerate(scaleData):
                # Should be 3 entries
                if entry is None:
                    scales.append(float('nan'))
                    vidxs.extend([0, 0])
                else:
                    vidx1, vidx2, scale = entry
                    scales.append(scale)
                    vidxs.extend([vidx1, vidx2])
            return (np.asarray(scales, dtype=np.float32),
                    np.asarray(vidxs, dtype=np.uint32))
        def _pack_shears(shearData):
            # Pack [(vidx1, vidx2, shear1, shear2) | None] * 3; a missing
            # axis entry is stored as two NaN shears with vidxs (0, 0).
            shears = list()
            vidxs = list()
            for e_idx, entry in enumerate(shearData):
                # Should be 3 entries
                if entry is None:
                    shears.extend([float('nan'), float('nan')])
                    vidxs.extend([0, 0])
                else:
                    vidx1, vidx2, shear1, shear2 = entry
                    shears.extend([shear1, shear2])
                    vidxs.extend([vidx1, vidx2])
            return (np.asarray(shears, dtype=np.float32),
                    np.asarray(vidxs, dtype=np.uint32))
        if prefix:
            prefix += "_"
        # Only data sets that are present are written; fromNumpyStruct
        # mirrors this by testing for each pair of keys.
        if self.scaleData:
            scales, vidxs = _pack_scales(self.scaleData)
            npzfile[prefix+"tmat_scale"] = scales
            npzfile[prefix+"tmat_scale_idx"] = vidxs
        if self.shearData:
            shears, vidxs = _pack_shears(self.shearData)
            npzfile[prefix+"tmat_shear"] = shears
            npzfile[prefix+"tmat_shear_idx"] = vidxs
        if self.lShearData:
            shears, vidxs = _pack_shears(self.lShearData)
            npzfile[prefix+"tmat_lshear"] = shears
            npzfile[prefix+"tmat_lshear_idx"] = vidxs
        if self.rShearData:
            shears, vidxs = _pack_shears(self.rShearData)
            npzfile[prefix+"tmat_rshear"] = shears
            npzfile[prefix+"tmat_rshear_idx"] = vidxs

    def fromNumpyStruct(self, npzfile, prefix=""):
        """Deserialize TMatrix from npz file"""
        def _unpack_scales(scales, vidxs):
            # Inverse of _pack_scales: a NaN scale marks a missing axis
            # entry and leaves that slot as None.
            scaleData = [None, None, None]
            for i in range(3):
                if i >= min(len(scales), len(vidxs)//2):
                    break
                scale = scales[i]
                if not math.isnan(scale):
                    vidx1, vidx2 = vidxs[i*2], vidxs[i*2+1]
                    scaleData[i] = (int(vidx1), int(vidx2), float(scale))
            return scaleData
        def _unpack_shears(shears, vidxs):
            # Inverse of _pack_shears.  NOTE(review): unlike _unpack_scales,
            # NaN shear values are not filtered out here.
            shearData = [None, None, None]
            for i in range(3):
                if i >= min(len(shears)//2, len(vidxs)//2):
                    break
                shear1, shear2 = shears[i*2], shears[i*2+1]
                vidx1, vidx2 = vidxs[i*2], vidxs[i*2+1]
                shearData[i] = (int(vidx1), int(vidx2), float(shear1), float(shear2))
            return shearData
        if prefix:
            prefix += "_"
        if prefix+'tmat_scale' in npzfile and prefix+'tmat_scale_idx' in npzfile:
            scales = npzfile[prefix+'tmat_scale']
            vidxs = npzfile[prefix+'tmat_scale_idx']
            self.scaleData = _unpack_scales(scales, vidxs)
        if prefix+'tmat_shear' in npzfile and prefix+'tmat_shear_idx' in npzfile:
            shears = npzfile[prefix+'tmat_shear']
            vidxs = npzfile[prefix+'tmat_shear_idx']
            self.shearData = _unpack_shears(shears, vidxs)
        if prefix+'tmat_lshear' in npzfile and prefix+'tmat_lshear_idx' in npzfile:
            shears = npzfile[prefix+'tmat_lshear']
            vidxs = npzfile[prefix+'tmat_lshear_idx']
            self.lShearData = _unpack_shears(shears, vidxs)
        if prefix+'tmat_rshear' in npzfile and prefix+'tmat_rshear_idx' in npzfile:
            shears = npzfile[prefix+'tmat_rshear']
            vidxs = npzfile[prefix+'tmat_rshear_idx']
            self.rShearData = _unpack_shears(shears, vidxs)

    def getScaleData(self, words, idx):
        # Parse one scale line from a proxy file: two reference vertex
        # indices and a denominator, stored for axis idx (0..2).
        vn1 = int(words[1])
        vn2 = int(words[2])
        den = float(words[3])
        if not self.scaleData:
            self.scaleData = [None, None, None]
        self.scaleData[idx] = (vn1, vn2, den)

    def getShearData(self, words, idx, side):
        # Parse one shear line: two reference vertex indices and two shear
        # values; side ('Left'/'Right'/other) selects which data set.
        vn1 = int(words[1])
        vn2 = int(words[2])
        x1 = float(words[3])
        x2 = float(words[4])
        bbdata = (vn1, vn2, x1, x2)
        if side == "Left":
            if not self.lShearData:
                self.lShearData = [None, None, None]
            self.lShearData[idx] = bbdata
        elif side == "Right":
            if not self.rShearData:
                self.rShearData = [None, None, None]
            self.rShearData[idx] = bbdata
        else:
            if not self.shearData:
                self.shearData = [None, None, None]
            self.shearData[idx] = bbdata

    def getMatrix(self, hcoord):
        # Build the 3x3 fit matrix from whichever data set is present,
        # evaluated against hcoord (coordinate array indexed by vertex
        # number).  Priority: scale, then shear, then l/r shear.
        if self.scaleData:
            matrix = np.identity(3, float)
            for n in range(3):
                (vn1, vn2, den) = self.scaleData[n]
                co1 = hcoord[vn1]
                co2 = hcoord[vn2]
                num = abs(co1[n] - co2[n])
                matrix[n][n] = (num/den)
            return matrix

        elif self.shearData:
            return self.matrixFromShear(self.shearData, hcoord)
        elif self.lShearData:
            return self.matrixFromShear(self.lShearData, hcoord)
        elif self.rShearData:
            return self.matrixFromShear(self.rShearData, hcoord)
        else:
            # Unit3: presumably a module-level 3x3 identity defined
            # elsewhere in this file (not visible here).
            return Unit3

    def matrixFromShear(self, shear, hcoord):
        from transformations import affine_matrix_from_points
        # sfaces and tfaces are the face coordinates
        sfaces = np.zeros((3,2), float)
        tfaces = np.zeros((3,2), float)
        for n in range(3):
            (vn1, vn2, sfaces[n,0], sfaces[n,1]) = shear[n]
            tfaces[n,0] = hcoord[vn1][n]
            tfaces[n,1] = hcoord[vn2][n]

        # sverts and tverts are the vertex coordinates
        sverts = []
        tverts = []
        for i in [0,1]:
            for j,k in [(0,0),(0,1),(1,1),(1,0)]:
                sverts.append( np.array((sfaces[0,i], sfaces[1,j], sfaces[2,k])) )
                tverts.append( np.array((tfaces[0,i], tfaces[1,j], tfaces[2,k])) )

        sbox = vertsToNumpy(sverts)
        tbox = vertsToNumpy(tverts)
        mat = affine_matrix_from_points(sbox, tbox)
        # Only the 3x3 linear part is used; the translation row/col of the
        # affine matrix is dropped.
        return mat[:3,:3]
def vertsToNumpy(verts):
    """Convert a sequence of n 3D vertices into a (3, n) float32 array,
    one row per coordinate axis (x, y, z)."""
    columns = np.asarray(verts)
    axisRows = [columns[:, axis] for axis in range(3)]
    return np.asarray(axisRows, dtype=np.float32)
def _getFileName(folder, file, suffix):
    """Return folder/file, appending suffix when file has no extension.

    Bug fix: the original called os.path.split (directory/basename split),
    so `ext` held the basename and was almost always truthy -- the suffix
    branch was effectively dead.  os.path.splitext is the intended call.
    """
    (name, ext) = os.path.splitext(file)
    if ext:
        return os.path.join(folder, file)
    else:
        return os.path.join(folder, file + suffix)
def transferVertexMaskToProxy(vertsMask, proxy):
    """
    Transfer a vertex mask defined on the parent mesh to a proxy using the
    proxy mapping to this parent mesh.
    A vertex mask defines for each vertex if it should be hidden, only faces
    that have all vertices hidden will be hidden.
    True in vertex mask means: show vertex, false means hide (masked)
    """
    # Convert basemesh vertex mask to local mask for proxy vertices
    proxyVertMask = np.ones(len(proxy.ref_vIdxs), dtype=bool)

    # Proxy verts that use exact (single ref vert) mapping
    exact_mask = ~np.any(proxy.weights[:,1:], axis=1)

    # Rows (proxy verts) and cols (ref slots) whose referenced body vert is
    # unmasked (visible).
    unmasked_row_col = np.nonzero(vertsMask[proxy.ref_vIdxs])
    unmasked_rows = unmasked_row_col[0]

    if len(unmasked_rows) > 0:
        # Count unmasked body verts per proxy vert.  minlength is essential:
        # without it np.bincount truncates at the highest row that has an
        # unmasked vert, so proxy verts whose referenced verts are ALL
        # masked never appeared in the count and incorrectly stayed visible.
        unmasked_count = np.bincount(unmasked_rows, minlength=len(proxyVertMask))

        # only hide/mask a vertex if at least two referenced body verts are
        # hidden/masked
        masked_idxs = np.nonzero(unmasked_count < 2)
        proxyVertMask[masked_idxs] = False
    else:
        # All verts are masked
        proxyVertMask[:] = False

    # Directly map exactly mapped proxy verts
    proxyVertMask[exact_mask] = vertsMask[proxy.ref_vIdxs[exact_mask,0]]

    return proxyVertMask
def getAsciiFileExtension(proxyType):
    """
    The file extension used for ASCII (non-compiled) proxy source files
    of the specified proxy type.
    """
    if proxyType == 'Proxymeshes':
        return '.proxy'
    return '.mhclo'
def peekMetadata(proxyFilePath, proxyType=None):
    """
    Read UUID and tags from proxy file, and return as soon as vertex data
    begins. Reads only the necessary lines of the proxy file from disk, not
    the entire proxy file is loaded in memory.

    Fixes: the ASCII file handle is now closed via a context manager even
    when parsing raises; tostring() replaced by tobytes() (removed in
    NumPy 2.0).
    """
    #import zipfile
    #if zipfile.is_zipfile(proxyFilePath):
    # Using the filename extension is faster (and will have to do):
    if os.path.splitext(proxyFilePath)[1][1:].lower() == 'mhpxy':
        try:
            if proxyType is not None:
                asciipath = os.path.splitext(proxyFilePath)[0] + getAsciiFileExtension(proxyType)
                if os.path.isfile(asciipath) and os.path.getmtime(asciipath) > os.path.getmtime(proxyFilePath):
                    # ASCII source is newer: fall back to it via the
                    # exception handler below.
                    _npzpath = proxyFilePath
                    proxyFilePath = asciipath
                    raise RuntimeError('compiled file out of date: %s', _npzpath)

            # Binary proxy file
            npzfile = np.load(proxyFilePath)
            uuid = str(npzfile['uuid'].tobytes(), 'utf8')
            tags = set(_unpackStringList(npzfile['tags_str'], npzfile['tags_idx']))
            return (uuid, tags)
        except Exception as e:
            # Deliberate fall-through to ASCII parsing below.
            showTrace = not isinstance(e, RuntimeError)
            log.warning("Problem loading metadata from binary proxy, trying ASCII file: %s", e, exc_info=showTrace)

    # ASCII proxy file
    import io
    uuid = None
    tags = set()
    with io.open(proxyFilePath, 'r', encoding="utf-8") as fp:
        for line in fp:
            words = line.split()
            if len(words) == 0:
                pass
            elif words[0] == 'uuid':
                uuid = words[1]
            elif words[0] == 'tag':
                tags.add(" ".join(words[1:]).lower())
            elif words[0] == 'verts':
                # Vertex data begins: metadata is complete.
                break
    return (uuid, tags)
def _packStringList(strings):
    """Pack a list of strings into (text, index): a flat numpy 'S1' byte
    array of the concatenated utf-8 text, and a uint32 array of the byte
    offset at which each string starts.  Counterpart of _unpackStringList.

    Fixes: np.fromstring was deprecated and is removed in NumPy 2.0; use
    np.frombuffer over an explicit utf-8 encoding instead, which also
    matches the utf-8 byte offsets recorded in the index and the utf8
    decode performed on unpack.  The quadratic `text += string` /
    re-encode-per-iteration loop is replaced by a running byte offset.
    """
    parts = []
    index = []
    offset = 0
    for string in strings:
        index.append(offset)
        offset += len(string.encode('utf-8'))
        parts.append(string)
    text = np.frombuffer(''.join(parts).encode('utf-8'), dtype='S1')
    return text, np.array(index, dtype=np.uint32)
def _unpackStringList(text, index):
    """Inverse of _packStringList: slice the flat 'S1' byte array `text` at
    the start offsets in `index` and decode each piece as utf-8.

    Fix: ndarray.tostring() (removed in NumPy 2.0) replaced by its exact
    equivalent ndarray.tobytes().
    """
    strings = []
    starts = list(index)
    for pos, start in enumerate(starts):
        # The last string runs to the end of the buffer (stop=None).
        stop = starts[pos + 1] if pos + 1 < len(starts) else None
        strings.append(str(text[start:stop].tobytes(), 'utf8'))
    return strings
def _getFilePath(filename, folder = None, altExtensions=None):
    """Resolve filename to an existing path.

    When altExtensions is given, first look for an existing file with one
    of those extensions; on a hit, return that path with the original
    extension restored.  Otherwise search for filename itself (within
    folder, when given).  Non-string or empty filenames are returned
    unchanged.
    """
    import getpath

    if altExtensions is not None:
        # Search for an existing path with an alternative file extension.
        for altExt in altExtensions:
            if altExt.startswith('.'):
                altExt = altExt[1:]
            candidate = os.path.splitext(filename)[0] + '.' + altExt
            resolved = _getFilePath(candidate, folder, altExtensions=None)
            if os.path.isfile(resolved):
                # Found: report the result with the original extension.
                originalExt = os.path.splitext(filename)[1]
                restored = os.path.splitext(resolved)[0] + originalExt
                return getpath.formatPath(restored)

    if not filename or not isinstance(filename, str):
        return filename

    # Search within current folder (when one was supplied).
    searchFolders = [folder] if folder else []
    return getpath.thoroughFindFile(filename, searchFolders)
| [
"getpath.thoroughFindFile",
"transformations.affine_matrix_from_points",
"material.Material",
"io.open",
"numpy.array",
"makehuman.getAssetLicense",
"getpath.formatPath",
"animation.VertexBoneWeights.fromFile",
"os.remove",
"log.notice",
"numpy.asarray",
"time.perf_counter",
"os.path.split",... | [((1684, 1705), 'numpy.identity', 'np.identity', (['(3)', 'float'], {}), '(3, float)\n', (1695, 1705), True, 'import numpy as np\n'), ((20405, 20424), 'io.open', 'io.open', (['path', '"""wb"""'], {}), "(path, 'wb')\n", (20412, 20424), False, 'import io\n'), ((20649, 20670), 'os.path.dirname', 'os.path.dirname', (['path'], {}), '(path)\n', (20664, 20670), False, 'import os\n'), ((21455, 21480), 'numpy.any', 'np.any', (['proxy.deleteVerts'], {}), '(proxy.deleteVerts)\n', (21461, 21480), True, 'import numpy as np\n'), ((22588, 22628), 'numpy.asarray', 'np.asarray', (['num_refverts'], {'dtype': 'np.int32'}), '(num_refverts, dtype=np.int32)\n', (22598, 22628), True, 'import numpy as np\n'), ((22783, 22815), 'numpy.savez_compressed', 'np.savez_compressed', (['fp'], {}), '(fp, **vars_)\n', (22802, 22815), True, 'import numpy as np\n'), ((22835, 22855), 'os.utime', 'os.utime', (['path', 'None'], {}), '(path, None)\n', (22843, 22855), False, 'import os\n'), ((22940, 22983), 'log.debug', 'log.debug', (['"""Loading binary proxy %s."""', 'path'], {}), "('Loading binary proxy %s.', path)\n", (22949, 22983), False, 'import log\n'), ((22999, 23012), 'numpy.load', 'np.load', (['path'], {}), '(path)\n', (23006, 23012), True, 'import numpy as np\n'), ((25144, 25173), 'material.Material', 'material.Material', (['proxy.name'], {}), '(proxy.name)\n', (25161, 25173), False, 'import material\n'), ((34523, 34540), 'numpy.asarray', 'np.asarray', (['verts'], {}), '(verts)\n', (34533, 34540), True, 'import numpy as np\n'), ((34552, 34624), 'numpy.asarray', 'np.asarray', (['[result[:, 0], result[:, 1], result[:, 2]]'], {'dtype': 'np.float32'}), '([result[:, 0], result[:, 1], result[:, 2]], dtype=np.float32)\n', (34562, 34624), True, 'import numpy as np\n'), ((34683, 34702), 'os.path.split', 'os.path.split', (['file'], {}), '(file)\n', (34696, 34702), False, 'import os\n'), ((35492, 35530), 'numpy.nonzero', 'np.nonzero', (['vertsMask[proxy.ref_vIdxs]'], {}), 
'(vertsMask[proxy.ref_vIdxs])\n', (35502, 35530), True, 'import numpy as np\n'), ((37731, 37776), 'io.open', 'io.open', (['proxyFilePath', '"""r"""'], {'encoding': '"""utf-8"""'}), "(proxyFilePath, 'r', encoding='utf-8')\n", (37738, 37776), False, 'import io\n'), ((38337, 38368), 'numpy.fromstring', 'np.fromstring', (['text'], {'dtype': '"""S1"""'}), "(text, dtype='S1')\n", (38350, 38368), True, 'import numpy as np\n'), ((38381, 38413), 'numpy.array', 'np.array', (['index'], {'dtype': 'np.uint32'}), '(index, dtype=np.uint32)\n', (38389, 38413), True, 'import numpy as np\n'), ((39667, 39714), 'getpath.thoroughFindFile', 'getpath.thoroughFindFile', (['filename', 'searchPaths'], {}), '(filename, searchPaths)\n', (39691, 39714), False, 'import getpath\n'), ((1771, 1813), 'log.debug', 'log.debug', (['"""Loading proxy file: %s."""', 'file'], {}), "('Loading proxy file: %s.', file)\n", (1780, 1813), False, 'import log\n'), ((1977, 2004), 'makehuman.getAssetLicense', 'makehuman.getAssetLicense', ([], {}), '()\n', (2002, 2004), False, 'import makehuman\n'), ((2401, 2431), 'makehuman.getBasemeshVersion', 'makehuman.getBasemeshVersion', ([], {}), '()\n', (2429, 2431), False, 'import makehuman\n'), ((3856, 3884), 'material.Material', 'material.Material', (['self.name'], {}), '(self.name)\n', (3873, 3884), False, 'import material\n'), ((5452, 5507), 'files3d.loadMesh', 'files3d.loadMesh', (['self.obj_file'], {'maxFaces': 'self.max_pole'}), '(self.obj_file, maxFaces=self.max_pole)\n', (5468, 5507), False, 'import files3d\n'), ((6538, 6598), 'numpy.asarray', 'np.asarray', (['[v._weights for v in refVerts]'], {'dtype': 'np.float32'}), '([v._weights for v in refVerts], dtype=np.float32)\n', (6548, 6598), True, 'import numpy as np\n'), ((6624, 6681), 'numpy.asarray', 'np.asarray', (['[v._verts for v in refVerts]'], {'dtype': 'np.uint32'}), '([v._verts for v in refVerts], dtype=np.uint32)\n', (6634, 6681), True, 'import numpy as np\n'), ((6705, 6764), 'numpy.asarray', 'np.asarray', 
(['[v._offset for v in refVerts]'], {'dtype': 'np.float32'}), '([v._offset for v in refVerts], dtype=np.float32)\n', (6715, 6764), True, 'import numpy as np\n'), ((10438, 10451), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10449, 10451), False, 'from collections import OrderedDict\n'), ((14240, 14280), 'io.open', 'io.open', (['filepath', '"""r"""'], {'encoding': '"""utf-8"""'}), "(filepath, 'r', encoding='utf-8')\n", (14247, 14280), False, 'import io\n'), ((20002, 20078), 'log.warning', 'log.warning', (['"""Proxy file %s does not specify a Z depth. Using 50."""', 'filepath'], {}), "('Proxy file %s does not specify a Z depth. Using 50.', filepath)\n", (20013, 20078), False, 'import log\n'), ((20359, 20394), 'getpath.getJailedPath', 'getpath.getJailedPath', (['path', 'folder'], {}), '(path, folder)\n', (20380, 20394), False, 'import getpath\n'), ((21617, 21658), 'numpy.asarray', 'np.asarray', (['proxy.z_depth'], {'dtype': 'np.int32'}), '(proxy.z_depth, dtype=np.int32)\n', (21627, 21658), True, 'import numpy as np\n'), ((21711, 21754), 'numpy.asarray', 'np.asarray', (['proxy.max_pole'], {'dtype': 'np.uint32'}), '(proxy.max_pole, dtype=np.uint32)\n', (21721, 21754), True, 'import numpy as np\n'), ((24464, 24504), 'numpy.zeros', 'np.zeros', (['(num_refs, 3)'], {'dtype': 'np.uint32'}), '((num_refs, 3), dtype=np.uint32)\n', (24472, 24504), True, 'import numpy as np\n'), ((24580, 24621), 'numpy.zeros', 'np.zeros', (['(num_refs, 3)'], {'dtype': 'np.float32'}), '((num_refs, 3), dtype=np.float32)\n', (24588, 24621), True, 'import numpy as np\n'), ((24645, 24686), 'numpy.zeros', 'np.zeros', (['(num_refs, 3)'], {'dtype': 'np.float32'}), '((num_refs, 3), dtype=np.float32)\n', (24653, 24686), True, 'import numpy as np\n'), ((25806, 25878), 'log.warning', 'log.warning', (['"""Proxy file %s does not specify a Z depth. Using 50."""', 'path'], {}), "('Proxy file %s does not specify a Z depth. 
Using 50.', path)\n", (25817, 25878), False, 'import log\n'), ((26313, 26331), 'numpy.zeros', 'np.zeros', (['(3)', 'float'], {}), '(3, float)\n', (26321, 26331), True, 'import numpy as np\n'), ((26892, 26921), 'numpy.array', 'np.array', (['(d0, d1, d2)', 'float'], {}), '((d0, d1, d2), float)\n', (26900, 26921), True, 'import numpy as np\n'), ((33764, 33787), 'numpy.zeros', 'np.zeros', (['(3, 2)', 'float'], {}), '((3, 2), float)\n', (33772, 33787), True, 'import numpy as np\n'), ((33804, 33827), 'numpy.zeros', 'np.zeros', (['(3, 2)', 'float'], {}), '((3, 2), float)\n', (33812, 33827), True, 'import numpy as np\n'), ((34419, 34456), 'transformations.affine_matrix_from_points', 'affine_matrix_from_points', (['sbox', 'tbox'], {}), '(sbox, tbox)\n', (34444, 34456), False, 'from transformations import affine_matrix_from_points\n'), ((34730, 34756), 'os.path.join', 'os.path.join', (['folder', 'file'], {}), '(folder, file)\n', (34742, 34756), False, 'import os\n'), ((34782, 34817), 'os.path.join', 'os.path.join', (['folder', '(file + suffix)'], {}), '(folder, file + suffix)\n', (34794, 34817), False, 'import os\n'), ((35384, 35420), 'numpy.any', 'np.any', (['proxy.weights[:, 1:]'], {'axis': '(1)'}), '(proxy.weights[:, 1:], axis=1)\n', (35390, 35420), True, 'import numpy as np\n'), ((35627, 35653), 'numpy.bincount', 'np.bincount', (['unmasked_rows'], {}), '(unmasked_rows)\n', (35638, 35653), True, 'import numpy as np\n'), ((35807, 35837), 'numpy.nonzero', 'np.nonzero', (['(unmasked_count < 2)'], {}), '(unmasked_count < 2)\n', (35817, 35837), True, 'import numpy as np\n'), ((2285, 2307), 'os.path.getmtime', 'os.path.getmtime', (['file'], {}), '(file)\n', (2301, 2307), False, 'import os\n'), ((4196, 4222), 'os.path.dirname', 'os.path.dirname', (['self.file'], {}), '(self.file)\n', (4211, 4222), False, 'import os\n'), ((4359, 4385), 'os.path.dirname', 'os.path.dirname', (['self.file'], {}), '(self.file)\n', (4374, 4385), False, 'import os\n'), ((4547, 4573), 'os.path.dirname', 
'os.path.dirname', (['self.file'], {}), '(self.file)\n', (4562, 4573), False, 'import os\n'), ((5543, 5588), 'log.error', 'log.error', (['"""Failed to load %s"""', 'self.obj_file'], {}), "('Failed to load %s', self.obj_file)\n", (5552, 5588), False, 'import log\n'), ((10932, 10985), 'log.debug', 'log.debug', (["('remapping weights for proxy ' + self.name)"], {}), "('remapping weights for proxy ' + self.name)\n", (10941, 10985), False, 'import log\n'), ((11006, 11025), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (11023, 11025), False, 'import time\n'), ((11670, 11689), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (11687, 11689), False, 'import time\n'), ((11951, 12036), 'log.debug', 'log.debug', (['"""remapping weights for %s took %.5f seconds"""', 'self.name', '(stop - start)'], {}), "('remapping weights for %s took %.5f seconds', self.name, stop - start\n )\n", (11960, 12036), False, 'import log\n'), ((14049, 14112), 'log.error', 'log.error', (['"""Unable to load proxy file: %s"""', 'path'], {'exc_info': '(True)'}), "('Unable to load proxy file: %s', path, exc_info=True)\n", (14058, 14112), False, 'import log\n'), ((14309, 14350), 'log.error', 'log.error', (['"""*** Cannot open %s"""', 'filepath'], {}), "('*** Cannot open %s', filepath)\n", (14318, 14350), False, 'import log\n'), ((14421, 14446), 'os.path.dirname', 'os.path.dirname', (['filepath'], {}), '(filepath)\n', (14436, 14446), False, 'import os\n'), ((20795, 20832), 'numpy.fromstring', 'np.fromstring', (['proxy.name'], {'dtype': '"""S1"""'}), "(proxy.name, dtype='S1')\n", (20808, 20832), True, 'import numpy as np\n'), ((20849, 20886), 'numpy.fromstring', 'np.fromstring', (['proxy.uuid'], {'dtype': '"""S1"""'}), "(proxy.uuid, dtype='S1')\n", (20862, 20886), True, 'import numpy as np\n'), ((20910, 20954), 'numpy.fromstring', 'np.fromstring', (['proxy.description'], {'dtype': '"""S1"""'}), "(proxy.description, dtype='S1')\n", (20923, 20954), True, 'import numpy as np\n'), 
((20975, 21016), 'numpy.fromstring', 'np.fromstring', (['proxy.basemesh'], {'dtype': '"""S1"""'}), "(proxy.basemesh, dtype='S1')\n", (20988, 21016), True, 'import numpy as np\n'), ((21277, 21318), 'numpy.asarray', 'np.asarray', (['proxy.version'], {'dtype': 'np.int32'}), '(proxy.version, dtype=np.int32)\n', (21287, 21318), True, 'import numpy as np\n'), ((25711, 25767), 'animation.VertexBoneWeights.fromFile', 'VertexBoneWeights.fromFile', (['proxy.vertexBoneWeights_file'], {}), '(proxy.vertexBoneWeights_file)\n', (25737, 25767), False, 'from animation import VertexBoneWeights\n'), ((27277, 27319), 'numpy.dot', 'np.dot', (['hcoord[self._verts]', 'self._weights'], {}), '(hcoord[self._verts], self._weights)\n', (27283, 27319), True, 'import numpy as np\n'), ((27334, 27362), 'numpy.dot', 'np.dot', (['matrix', 'self._offset'], {}), '(matrix, self._offset)\n', (27340, 27362), True, 'import numpy as np\n'), ((32977, 32998), 'numpy.identity', 'np.identity', (['(3)', 'float'], {}), '(3, float)\n', (32988, 32998), True, 'import numpy as np\n'), ((37282, 37304), 'numpy.load', 'np.load', (['proxyFilePath'], {}), '(proxyFilePath)\n', (37289, 37304), True, 'import numpy as np\n'), ((39217, 39238), 'os.path.isfile', 'os.path.isfile', (['aPath'], {}), '(aPath)\n', (39231, 39238), False, 'import os\n'), ((1872, 1894), 'os.path.basename', 'os.path.basename', (['file'], {}), '(file)\n', (1888, 1894), False, 'import os\n'), ((12229, 12251), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (12245, 12251), False, 'import os\n'), ((12286, 12308), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (12302, 12308), False, 'import os\n'), ((12374, 12397), 'os.path.isfile', 'os.path.isfile', (['npzpath'], {}), '(npzpath)\n', (12388, 12397), False, 'import os\n'), ((12415, 12470), 'log.message', 'log.message', (['"""compiled proxy file missing: %s"""', 'npzpath'], {}), "('compiled proxy file missing: %s', npzpath)\n", (12426, 12470), False, 'import log\n'), 
((12565, 12590), 'os.path.isfile', 'os.path.isfile', (['asciipath'], {}), '(asciipath)\n', (12579, 12590), False, 'import os\n'), ((12668, 12727), 'log.message', 'log.message', (['"""compiled proxy file out of date: %s"""', 'npzpath'], {}), "('compiled proxy file out of date: %s', npzpath)\n", (12679, 12727), False, 'import log\n'), ((12962, 13032), 'log.warning', 'log.warning', (['"""Problem loading binary proxy: %s"""', 'e'], {'exc_info': 'showTrace'}), "('Problem loading binary proxy: %s', e, exc_info=showTrace)\n", (12973, 13032), False, 'import log\n'), ((28458, 28494), 'numpy.asarray', 'np.asarray', (['scales'], {'dtype': 'np.float32'}), '(scales, dtype=np.float32)\n', (28468, 28494), True, 'import numpy as np\n'), ((28517, 28551), 'numpy.asarray', 'np.asarray', (['vidxs'], {'dtype': 'np.uint32'}), '(vidxs, dtype=np.uint32)\n', (28527, 28551), True, 'import numpy as np\n'), ((29077, 29113), 'numpy.asarray', 'np.asarray', (['shears'], {'dtype': 'np.float32'}), '(shears, dtype=np.float32)\n', (29087, 29113), True, 'import numpy as np\n'), ((29136, 29170), 'numpy.asarray', 'np.asarray', (['vidxs'], {'dtype': 'np.uint32'}), '(vidxs, dtype=np.uint32)\n', (29146, 29170), True, 'import numpy as np\n'), ((37580, 37688), 'log.warning', 'log.warning', (['"""Problem loading metadata from binary proxy, trying ASCII file: %s"""', 'e'], {'exc_info': 'showTrace'}), "('Problem loading metadata from binary proxy, trying ASCII file: %s'\n , e, exc_info=showTrace)\n", (37591, 37688), False, 'import log\n'), ((39443, 39467), 'getpath.formatPath', 'getpath.formatPath', (['path'], {}), '(path)\n', (39461, 39467), False, 'import getpath\n'), ((12595, 12622), 'os.path.getmtime', 'os.path.getmtime', (['asciipath'], {}), '(asciipath)\n', (12611, 12622), False, 'import os\n'), ((12625, 12650), 'os.path.getmtime', 'os.path.getmtime', (['npzpath'], {}), '(npzpath)\n', (12641, 12650), False, 'import os\n'), ((13198, 13215), 'getpath.getPath', 'getpath.getPath', ([], {}), '()\n', (13213, 
13215), False, 'import getpath\n'), ((13956, 14028), 'log.debug', 'log.debug', (['"""Not writing compiled proxies to system paths (%s)."""', 'npzpath'], {}), "('Not writing compiled proxies to system paths (%s).', npzpath)\n", (13965, 14028), False, 'import log\n'), ((30332, 30349), 'math.isnan', 'math.isnan', (['scale'], {}), '(scale)\n', (30342, 30349), False, 'import math\n'), ((34197, 34249), 'numpy.array', 'np.array', (['(sfaces[0, i], sfaces[1, j], sfaces[2, k])'], {}), '((sfaces[0, i], sfaces[1, j], sfaces[2, k]))\n', (34205, 34249), True, 'import numpy as np\n'), ((34280, 34332), 'numpy.array', 'np.array', (['(tfaces[0, i], tfaces[1, j], tfaces[2, k])'], {}), '((tfaces[0, i], tfaces[1, j], tfaces[2, k]))\n', (34288, 34332), True, 'import numpy as np\n'), ((36961, 36986), 'os.path.isfile', 'os.path.isfile', (['asciipath'], {}), '(asciipath)\n', (36975, 36986), False, 'import os\n'), ((39333, 39359), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (39349, 39359), False, 'import os\n'), ((10856, 10894), 'log.debug', 'log.debug', (['"""The skeleton is different"""'], {}), "('The skeleton is different')\n", (10865, 10894), False, 'import log\n'), ((13330, 13384), 'log.message', 'log.message', (['"""Compiling binary proxy file %s"""', 'npzpath'], {}), "('Compiling binary proxy file %s', npzpath)\n", (13341, 13384), False, 'import log\n'), ((36734, 36765), 'os.path.splitext', 'os.path.splitext', (['proxyFilePath'], {}), '(proxyFilePath)\n', (36750, 36765), False, 'import os\n'), ((36872, 36903), 'os.path.splitext', 'os.path.splitext', (['proxyFilePath'], {}), '(proxyFilePath)\n', (36888, 36903), False, 'import os\n'), ((36991, 37018), 'os.path.getmtime', 'os.path.getmtime', (['asciipath'], {}), '(asciipath)\n', (37007, 37018), False, 'import os\n'), ((37021, 37052), 'os.path.getmtime', 'os.path.getmtime', (['proxyFilePath'], {}), '(proxyFilePath)\n', (37037, 37052), False, 'import os\n'), ((39095, 39121), 'os.path.splitext', 
'os.path.splitext', (['filename'], {}), '(filename)\n', (39111, 39121), False, 'import os\n'), ((39386, 39409), 'os.path.splitext', 'os.path.splitext', (['aPath'], {}), '(aPath)\n', (39402, 39409), False, 'import os\n'), ((13491, 13562), 'log.notice', 'log.notice', (['"""unable to save compiled proxy: %s"""', 'npzpath'], {'exc_info': '(True)'}), "('unable to save compiled proxy: %s', npzpath, exc_info=True)\n", (13501, 13562), False, 'import log\n'), ((13586, 13609), 'os.path.isfile', 'os.path.isfile', (['npzpath'], {}), '(npzpath)\n', (13600, 13609), False, 'import os\n'), ((13743, 13761), 'os.remove', 'os.remove', (['npzpath'], {}), '(npzpath)\n', (13752, 13761), False, 'import os\n'), ((13837, 13925), 'log.warning', 'log.warning', (['"""Could not remove empty file %s that was left behind (%s)."""', 'npzpath', 'e'], {}), "('Could not remove empty file %s that was left behind (%s).',\n npzpath, e)\n", (13848, 13925), False, 'import log\n'), ((16540, 16596), 'animation.VertexBoneWeights.fromFile', 'VertexBoneWeights.fromFile', (['proxy.vertexBoneWeights_file'], {}), '(proxy.vertexBoneWeights_file)\n', (16566, 16596), False, 'from animation import VertexBoneWeights\n'), ((16686, 16817), 'log.warning', 'log.warning', (['"""Deprecated parameter "backface_culling" used in proxy file. Set property backfaceCull in material instead."""'], {}), '(\n \'Deprecated parameter "backface_culling" used in proxy file. Set property backfaceCull in material instead.\'\n )\n', (16697, 16817), False, 'import log\n'), ((16891, 17009), 'log.warning', 'log.warning', (['"""Deprecated parameter "transparent" used in proxy file. Set property in material file instead."""'], {}), '(\n \'Deprecated parameter "transparent" used in proxy file. Set property in material file instead.\'\n )\n', (16902, 17009), False, 'import log\n'), ((18829, 18914), 'log.warning', 'log.warning', (['"""Deprecated parameter "%s" used in proxy file. 
Please remove."""', 'key'], {}), '(\'Deprecated parameter "%s" used in proxy file. Please remove.\', key\n )\n', (18840, 18914), False, 'import log\n'), ((19893, 19964), 'log.warning', 'log.warning', (['"""Unknown keyword %s found in proxy file %s"""', 'key', 'filepath'], {}), "('Unknown keyword %s found in proxy file %s', key, filepath)\n", (19904, 19964), False, 'import log\n')] |
import numpy as np
import unittest
import pytest
from pysph.base.particle_array import ParticleArray
import pysph.tools.mesh_tools as G
from pysph.base.utils import get_particle_array
# Data of a unit length cube
def cube_data():
points = np.array([[0., 0., 0.],
[0., 1., 0.],
[1., 1., 0.],
[1., 0., 0.],
[0., 0., 1.],
[0., 1., 1.],
[1., 0., 1.],
[1., 1., 1.]])
x_cube, y_cube, z_cube = points.T
cells = np.array([[0, 1, 2],
[0, 2, 3],
[0, 4, 5],
[0, 5, 1],
[0, 3, 6],
[0, 6, 4],
[4, 6, 7],
[4, 7, 5],
[3, 2, 7],
[3, 7, 6],
[1, 5, 7],
[1, 7, 2]])
normals = np.array([[0., 0., -1.],
[0., 0., -1.],
[-1., 0., 0.],
[-1., 0., 0.],
[0., -1., 0.],
[0., -1., 0.],
[0., 0., 1.],
[0., 0., 1.],
[1., 0., 0.],
[1., 0., 0.],
[0., 1., 0.],
[0., 1., 0.]])
vectors = np.zeros((len(cells), 3, 3))
for i, cell in enumerate(cells):
idx1, idx2, idx3 = cell
vector = np.array([[x_cube[idx1], y_cube[idx1], z_cube[idx1]],
[x_cube[idx2], y_cube[idx2], z_cube[idx2]],
[x_cube[idx3], y_cube[idx3], z_cube[idx3]]])
vectors[i] = vector
return x_cube, y_cube, z_cube, cells, normals, vectors
class TestGeometry(unittest.TestCase):
    """Tests for the surface-mesh helpers in ``pysph.tools.mesh_tools``."""
    def test_in_triangle(self):
        """One point inside and one point outside a given triangle."""
        assert(G._in_triangle(0.5, 0.5, 0.0, 0.0, 1.5, 0.0, 0.0, 1.5) is True)
        assert(G._in_triangle(1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0) is False)
    def test_interp_2d(self):
        # Check interpolation between two points on line y=x
        dx = 0.1
        r = G._interp_2d(np.array([0., 0.]), np.array([1., 1.]), dx)
        # Check if all points satisfy y=x
        np.testing.assert_array_almost_equal(
            r[:, 0] - r[:, 1], np.zeros(r.shape[0]))
        # Check if distance between consecutive points is lesser than dx
        np.testing.assert_array_less(np.linalg.norm(r[1:] - r[0:-1], axis=1),
                                     np.ones(r.shape[0] - 1) * dx)
    def test_fill_triangle(self):
        """Filled points stay inside the right triangle, in the z=0 plane."""
        triangle = np.array([[0., 0., 0.],
                             [1., 0., 0.],
                             [0., 1., 0.]])
        dx_triangle = 0.1
        x, y, z = G._fill_triangle(triangle, dx_triangle)
        EPS = np.finfo(float).eps
        # Every point satisfies x >= 0, y >= 0 and x + y <= 1 (within EPS).
        np.testing.assert_array_less(-x, np.zeros(x.shape[0]) + EPS)
        np.testing.assert_array_less(-y, np.zeros(x.shape[0]) + EPS)
        np.testing.assert_array_less(-(x + y), np.ones(x.shape[0]) + EPS)
        np.testing.assert_almost_equal(z, np.zeros(x.shape[0]))
    def test_fill_triangle_throws_zero_area_triangle_exception(self):
        """A degenerate (zero-area) triangle must be rejected."""
        self.assertRaises(G.ZeroAreaTriangleException, G._fill_triangle,
                          np.zeros((3, 3)), 0.5)
    def test_fill_triangle_throws_polygon_mesh_error(self):
        """More than three vertices is not a valid triangle."""
        self.assertRaises(G.PolygonMeshError, G._fill_triangle,
                          np.zeros((4, 3)), 0.5)
    def test_get_points_from_mgrid(self):
        """Find neighbouring particles around a unit cube"""
        h = 0.1
        x_cube, y_cube, z_cube, cells, normals, vectors = cube_data()
        x, y, z, x_list, y_list, z_list, vectors = \
            G._get_surface_mesh(x_cube, y_cube, z_cube, cells, h, uniform=True)
        pa_mesh = ParticleArray(name='mesh', x=x, y=y, z=z, h=h)
        offset = h
        # Regular grid of candidate points surrounding the mesh by `offset`.
        x_grid, y_grid, z_grid = np.meshgrid(
            np.arange(x.min() - offset, x.max() + offset, h),
            np.arange(y.min() - offset, y.max() + offset, h),
            np.arange(z.min() - offset, z.max() + offset, h))
        pa_grid = ParticleArray(name='grid', x=x_grid, y=y_grid, z=z_grid, h=h)
        x_grid, y_grid, z_grid = G.get_points_from_mgrid(
            pa_grid, pa_mesh, x_list, y_list, z_list, 1, h, vectors, normals
        )
        # Surviving points must stay within distance 2 of the origin.
        for i in range(x.shape[0]):
            assert((x[i] ** 2 + y[i] ** 2 + z[i] ** 2) <= 4)
    def _cube_assert(self, x, y, z, h):
        """Check if x,y,z lie within surface of thickness `h` of a unit cube"""
        # Within h of the x=0 or x=1 face, while inside the cube extent
        # in y and z.
        def surface1(x, y, z): return min(abs(x), abs(1 - x)) < h and \
            y > -h and y < 1 + h and z > -h and z < 1 + h
        # Near any of the six faces (argument permutations cover y and z).
        def on_surface(x, y, z): return surface1(x, y, z) or \
            surface1(y, x, z) or surface1(z, x, y)
        for i in range(x.shape[0]):
            assert on_surface(x[i], y[i], z[i])
    def test_get_surface_mesh(self):
        """Check if mesh is generated correctly for unit cube"""
        x_cube, y_cube, z_cube, cells, normals, vectors = cube_data()
        x, y, z = G._get_surface_mesh(x_cube, y_cube, z_cube, cells, 0.1)
        h = np.finfo(float).eps
        self._cube_assert(x, y, z, h)
    def test_get_surface_points(self):
        """Check if surface is generated correctly for unit cube"""
        h = 0.1
        x_cube, y_cube, z_cube, cells, normals, vectors = cube_data()
        x, y, z = G.surface_points(x_cube, y_cube, z_cube, cells, h)
        self._cube_assert(x, y, z, h)
    def test_get_surface_points_uniform(self):
        """Check if uniform surface is generated correctly for unit cube"""
        h = 0.1
        x_cube, y_cube, z_cube, cells, normals, vectors = cube_data()
        x, y, z = G.surf_points_uniform(x_cube, y_cube, z_cube,
                                      cells, normals, 1.0, 1.0)
        self._cube_assert(x, y, z, h)
    def test_prism(self):
        """Prism construction yields the expected normals/points/centres."""
        tri_normal = np.array([0, -1, 0])
        tri_points = np.array([[0, 0, 0], [1, 0, 0], [0, 0, 1]])
        h = 1/1.5
        prism_normals, prism_points, prism_face_centres = \
            G.prism(tri_normal, tri_points, h)
        # NOTE(review): `in` on a 2-D ndarray tests elementwise membership,
        # not whole-row membership -- these checks are weaker than they look.
        assert np.array([-1, 0, 0]) in prism_normals
        assert np.array([0, 1, 0]) in prism_points
        assert np.array([0.5, 0.5, 0]) in prism_face_centres
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| [
"pysph.tools.mesh_tools.get_points_from_mgrid",
"numpy.ones",
"pysph.tools.mesh_tools.prism",
"pysph.tools.mesh_tools.surface_points",
"pysph.base.particle_array.ParticleArray",
"pysph.tools.mesh_tools.surf_points_uniform",
"pysph.tools.mesh_tools._get_surface_mesh",
"pysph.tools.mesh_tools._in_triang... | [((245, 396), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, \n 0.0, 1.0], [0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0]]'], {}), '([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0], [1.0, 0.0, 0.0\n ], [0.0, 0.0, 1.0], [0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0]])\n', (253, 396), True, 'import numpy as np\n'), ((581, 727), 'numpy.array', 'np.array', (['[[0, 1, 2], [0, 2, 3], [0, 4, 5], [0, 5, 1], [0, 3, 6], [0, 6, 4], [4, 6, 7\n ], [4, 7, 5], [3, 2, 7], [3, 7, 6], [1, 5, 7], [1, 7, 2]]'], {}), '([[0, 1, 2], [0, 2, 3], [0, 4, 5], [0, 5, 1], [0, 3, 6], [0, 6, 4],\n [4, 6, 7], [4, 7, 5], [3, 2, 7], [3, 7, 6], [1, 5, 7], [1, 7, 2]])\n', (589, 727), True, 'import numpy as np\n'), ((981, 1210), 'numpy.array', 'np.array', (['[[0.0, 0.0, -1.0], [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [-1.0, 0.0, 0.0], [\n 0.0, -1.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [\n 1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 0.0]]'], {}), '([[0.0, 0.0, -1.0], [0.0, 0.0, -1.0], [-1.0, 0.0, 0.0], [-1.0, 0.0,\n 0.0], [0.0, -1.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, \n 1.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 1.0, 0.0]])\n', (989, 1210), True, 'import numpy as np\n'), ((6425, 6440), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6438, 6440), False, 'import unittest\n'), ((1581, 1728), 'numpy.array', 'np.array', (['[[x_cube[idx1], y_cube[idx1], z_cube[idx1]], [x_cube[idx2], y_cube[idx2],\n z_cube[idx2]], [x_cube[idx3], y_cube[idx3], z_cube[idx3]]]'], {}), '([[x_cube[idx1], y_cube[idx1], z_cube[idx1]], [x_cube[idx2], y_cube\n [idx2], z_cube[idx2]], [x_cube[idx3], y_cube[idx3], z_cube[idx3]]])\n', (1589, 1728), True, 'import numpy as np\n'), ((2689, 2750), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])\n', (2697, 2750), True, 
'import numpy as np\n'), ((2844, 2883), 'pysph.tools.mesh_tools._fill_triangle', 'G._fill_triangle', (['triangle', 'dx_triangle'], {}), '(triangle, dx_triangle)\n', (2860, 2883), True, 'import pysph.tools.mesh_tools as G\n'), ((3813, 3880), 'pysph.tools.mesh_tools._get_surface_mesh', 'G._get_surface_mesh', (['x_cube', 'y_cube', 'z_cube', 'cells', 'h'], {'uniform': '(True)'}), '(x_cube, y_cube, z_cube, cells, h, uniform=True)\n', (3832, 3880), True, 'import pysph.tools.mesh_tools as G\n'), ((3899, 3945), 'pysph.base.particle_array.ParticleArray', 'ParticleArray', ([], {'name': '"""mesh"""', 'x': 'x', 'y': 'y', 'z': 'z', 'h': 'h'}), "(name='mesh', x=x, y=y, z=z, h=h)\n", (3912, 3945), False, 'from pysph.base.particle_array import ParticleArray\n'), ((4215, 4276), 'pysph.base.particle_array.ParticleArray', 'ParticleArray', ([], {'name': '"""grid"""', 'x': 'x_grid', 'y': 'y_grid', 'z': 'z_grid', 'h': 'h'}), "(name='grid', x=x_grid, y=y_grid, z=z_grid, h=h)\n", (4228, 4276), False, 'from pysph.base.particle_array import ParticleArray\n'), ((4310, 4403), 'pysph.tools.mesh_tools.get_points_from_mgrid', 'G.get_points_from_mgrid', (['pa_grid', 'pa_mesh', 'x_list', 'y_list', 'z_list', '(1)', 'h', 'vectors', 'normals'], {}), '(pa_grid, pa_mesh, x_list, y_list, z_list, 1, h,\n vectors, normals)\n', (4333, 4403), True, 'import pysph.tools.mesh_tools as G\n'), ((5163, 5218), 'pysph.tools.mesh_tools._get_surface_mesh', 'G._get_surface_mesh', (['x_cube', 'y_cube', 'z_cube', 'cells', '(0.1)'], {}), '(x_cube, y_cube, z_cube, cells, 0.1)\n', (5182, 5218), True, 'import pysph.tools.mesh_tools as G\n'), ((5501, 5551), 'pysph.tools.mesh_tools.surface_points', 'G.surface_points', (['x_cube', 'y_cube', 'z_cube', 'cells', 'h'], {}), '(x_cube, y_cube, z_cube, cells, h)\n', (5517, 5551), True, 'import pysph.tools.mesh_tools as G\n'), ((5818, 5889), 'pysph.tools.mesh_tools.surf_points_uniform', 'G.surf_points_uniform', (['x_cube', 'y_cube', 'z_cube', 'cells', 'normals', '(1.0)', '(1.0)'], 
{}), '(x_cube, y_cube, z_cube, cells, normals, 1.0, 1.0)\n', (5839, 5889), True, 'import pysph.tools.mesh_tools as G\n'), ((6016, 6036), 'numpy.array', 'np.array', (['[0, -1, 0]'], {}), '([0, -1, 0])\n', (6024, 6036), True, 'import numpy as np\n'), ((6058, 6101), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 0, 0], [0, 0, 1]]'], {}), '([[0, 0, 0], [1, 0, 0], [0, 0, 1]])\n', (6066, 6101), True, 'import numpy as np\n'), ((6192, 6226), 'pysph.tools.mesh_tools.prism', 'G.prism', (['tri_normal', 'tri_points', 'h'], {}), '(tri_normal, tri_points, h)\n', (6199, 6226), True, 'import pysph.tools.mesh_tools as G\n'), ((1954, 2008), 'pysph.tools.mesh_tools._in_triangle', 'G._in_triangle', (['(0.5)', '(0.5)', '(0.0)', '(0.0)', '(1.5)', '(0.0)', '(0.0)', '(1.5)'], {}), '(0.5, 0.5, 0.0, 0.0, 1.5, 0.0, 0.0, 1.5)\n', (1968, 2008), True, 'import pysph.tools.mesh_tools as G\n'), ((2033, 2087), 'pysph.tools.mesh_tools._in_triangle', 'G._in_triangle', (['(1.0)', '(1.0)', '(0.0)', '(0.0)', '(1.0)', '(0.0)', '(0.0)', '(1.0)'], {}), '(1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0)\n', (2047, 2087), True, 'import pysph.tools.mesh_tools as G\n'), ((2232, 2252), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (2240, 2252), True, 'import numpy as np\n'), ((2252, 2272), 'numpy.array', 'np.array', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (2260, 2272), True, 'import numpy as np\n'), ((2395, 2415), 'numpy.zeros', 'np.zeros', (['r.shape[0]'], {}), '(r.shape[0])\n', (2403, 2415), True, 'import numpy as np\n'), ((2527, 2566), 'numpy.linalg.norm', 'np.linalg.norm', (['(r[1:] - r[0:-1])'], {'axis': '(1)'}), '(r[1:] - r[0:-1], axis=1)\n', (2541, 2566), True, 'import numpy as np\n'), ((2898, 2913), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (2906, 2913), True, 'import numpy as np\n'), ((3172, 3192), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (3180, 3192), True, 'import numpy as np\n'), ((3364, 3380), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 
3))\n', (3372, 3380), True, 'import numpy as np\n'), ((3538, 3554), 'numpy.zeros', 'np.zeros', (['(4, 3)'], {}), '((4, 3))\n', (3546, 3554), True, 'import numpy as np\n'), ((5231, 5246), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (5239, 5246), True, 'import numpy as np\n'), ((6242, 6262), 'numpy.array', 'np.array', (['[-1, 0, 0]'], {}), '([-1, 0, 0])\n', (6250, 6262), True, 'import numpy as np\n'), ((6295, 6314), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (6303, 6314), True, 'import numpy as np\n'), ((6346, 6369), 'numpy.array', 'np.array', (['[0.5, 0.5, 0]'], {}), '([0.5, 0.5, 0])\n', (6354, 6369), True, 'import numpy as np\n'), ((2605, 2628), 'numpy.ones', 'np.ones', (['(r.shape[0] - 1)'], {}), '(r.shape[0] - 1)\n', (2612, 2628), True, 'import numpy as np\n'), ((2959, 2979), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (2967, 2979), True, 'import numpy as np\n'), ((3028, 3048), 'numpy.zeros', 'np.zeros', (['x.shape[0]'], {}), '(x.shape[0])\n', (3036, 3048), True, 'import numpy as np\n'), ((3103, 3122), 'numpy.ones', 'np.ones', (['x.shape[0]'], {}), '(x.shape[0])\n', (3110, 3122), True, 'import numpy as np\n')] |
"""
CentralService.rest_api.dataservice
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module handles the interactions with the dataservice models. Takes care
of all the CRUD operations on dataservices. Each dataservice will have a list
of buildings and admins that belong to it.
@copyright: (c) 2016 SynergyLabs
@license: UCSD License. See License file for details.
"""
from flask import request, jsonify
from flask.views import MethodView
from .. import responses
from ..helper import xstr, gen_update
from ...models.cs_models import DataService
from ... import oauth
from ...auth.access_control import super_required
class DataserviceService(MethodView):
    """CRUD endpoints for DataService documents.

    POST creates or updates a dataservice, GET returns its details and
    DELETE removes it (refusing while buildings still reference it).
    """
    # Optional fields that may be updated on an existing dataservice.
    params = ['description', 'host', 'port']

    @oauth.require_oauth()
    @super_required
    def post(self):
        """Create a dataservice, or update an existing one's optional fields."""
        try:
            data = request.get_json()['data']
        except (KeyError, TypeError):
            # TypeError covers a missing/invalid JSON body, where
            # request.get_json() returns None and subscripting fails.
            return jsonify(responses.missing_data)
        try:
            name = data['name']
        except KeyError:
            return jsonify(responses.missing_parameters)
        dataservice = DataService.objects(name=name).first()
        if dataservice is None:
            # Use xstr for every optional field so absent values are stored
            # as '' instead of the literal string 'None' (str(None)).
            DataService(name=name,
                        description=xstr(data.get('description')),
                        host=xstr(data.get('host')),
                        port=xstr(data.get('port'))).save()
        else:
            # Partial update of the allowed optional fields only.
            collection = DataService._get_collection()
            collection.update({'name': name}, {'$set': gen_update(self.params, data)})
        return jsonify(responses.success_true)

    @oauth.require_oauth()
    def get(self, name):
        """Return the stored details for dataservice *name*."""
        dataservice = DataService.objects(name=name).first()
        if dataservice is None:
            return jsonify(responses.invalid_dataservice)
        response = dict(responses.success_true)
        response.update({'name': name,
                         'description': xstr(dataservice.description),
                         'host': xstr(dataservice.host),
                         'port': xstr(dataservice.port)})
        return jsonify(response)

    @oauth.require_oauth()
    @super_required
    def delete(self, name):
        """Delete dataservice *name*, refusing while buildings still use it."""
        dataservice = DataService.objects(name=name).first()
        if dataservice is None:
            return jsonify(responses.invalid_dataservice)
        if len(dataservice.buildings) > 0:
            return jsonify(responses.dataservice_in_use)
        dataservice.delete()
        return jsonify(responses.success_true)
| [
"flask.request.get_json",
"flask.jsonify"
] | [((1504, 1535), 'flask.jsonify', 'jsonify', (['responses.success_true'], {}), '(responses.success_true)\n', (1511, 1535), False, 'from flask import request, jsonify\n'), ((2028, 2045), 'flask.jsonify', 'jsonify', (['response'], {}), '(response)\n', (2035, 2045), False, 'from flask import request, jsonify\n'), ((2417, 2448), 'flask.jsonify', 'jsonify', (['responses.success_true'], {}), '(responses.success_true)\n', (2424, 2448), False, 'from flask import request, jsonify\n'), ((1701, 1739), 'flask.jsonify', 'jsonify', (['responses.invalid_dataservice'], {}), '(responses.invalid_dataservice)\n', (1708, 1739), False, 'from flask import request, jsonify\n'), ((2234, 2272), 'flask.jsonify', 'jsonify', (['responses.invalid_dataservice'], {}), '(responses.invalid_dataservice)\n', (2241, 2272), False, 'from flask import request, jsonify\n'), ((2335, 2372), 'flask.jsonify', 'jsonify', (['responses.dataservice_in_use'], {}), '(responses.dataservice_in_use)\n', (2342, 2372), False, 'from flask import request, jsonify\n'), ((795, 813), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (811, 813), False, 'from flask import request, jsonify\n'), ((866, 897), 'flask.jsonify', 'jsonify', (['responses.missing_data'], {}), '(responses.missing_data)\n', (873, 897), False, 'from flask import request, jsonify\n'), ((988, 1025), 'flask.jsonify', 'jsonify', (['responses.missing_parameters'], {}), '(responses.missing_parameters)\n', (995, 1025), False, 'from flask import request, jsonify\n')] |
from django.contrib.gis.db import models
from django.db import connection
from uk_geo_utils.models import (
AbstractAddress,
AbstractAddressManager,
AbstractOnsudManager,
)
class AddressManager(AbstractAddressManager):
    """Query helpers for address records, keyed by district or postcode."""

    def postcodes_for_district(self, district):
        """Return the distinct postcodes of all addresses inside *district*."""
        matches = self.filter(location__within=district.area)
        return list(matches.values_list("postcode", flat=True).distinct())

    def points_for_postcode(self, postcode):
        """Return the location point of every address with *postcode*."""
        return list(
            self.filter(postcode=postcode).values_list("location", flat=True)
        )
class Address(AbstractAddress):
    """Concrete address model; all queries go through AddressManager."""
    objects = AddressManager()
class UprnToCouncil(models.Model):
    """Maps a UPRN (unique property reference number) to a local authority."""
    class Meta:
        # Named index to speed up lookups by local authority district.
        indexes = [models.Index(fields=["lad",], name="lookup_lad_idx")]
    objects = AbstractOnsudManager()
    # The UPRN is the natural primary key for a property.
    uprn = models.CharField(primary_key=True, max_length=12)
    # Local authority district identifier; may be blank.
    lad = models.CharField(blank=True, max_length=9)
class Blacklist(models.Model):
    """
    Model for storing postcodes containing UPRNs in >1 local authorities
    This is intentionally de-normalised for performance reasons
    Ideally ('postcode', 'lad') should be a composite PK,
    but django's ORM doesn't support them.
    """
    # Indexed for direct lookups by postcode.
    postcode = models.CharField(blank=False, max_length=15, db_index=True)
    # One row per (postcode, lad) pair -- see unique_together below.
    lad = models.CharField(blank=False, max_length=9)
    class Meta:
        unique_together = ("postcode", "lad")
def get_uprn_hash_table(council_id):
    """Return a ``{uprn: {address, postcode, location}}`` dict for a council.

    Args:
        council_id: Local authority district code to look up.

    Returns:
        dict keyed by UPRN; each value holds the address string, the
        postcode with spaces stripped, and the location geometry.
    """
    # get all the UPRNs in target local auth
    # NB we miss ~25 over the country because lighthouses etc.
    # Use the cursor as a context manager so it is always closed, even if
    # the query raises.
    with connection.cursor() as cursor:
        cursor.execute(
            """
            SELECT
                a.uprn,
                a.address,
                REPLACE(a.postcode, ' ', ''),
                a.location
            FROM addressbase_address a
            JOIN addressbase_uprntocouncil u ON a.uprn=u.uprn
            WHERE u.lad=%s;
        """,
            [council_id],
        )
        # return result a hash table keyed by UPRN
        return {
            row[0]: {"address": row[1], "postcode": row[2], "location": row[3]}
            for row in cursor.fetchall()
        }
| [
"uk_geo_utils.models.AbstractOnsudManager",
"django.db.connection.cursor",
"django.contrib.gis.db.models.CharField",
"django.contrib.gis.db.models.Index"
] | [((795, 817), 'uk_geo_utils.models.AbstractOnsudManager', 'AbstractOnsudManager', ([], {}), '()\n', (815, 817), False, 'from uk_geo_utils.models import AbstractAddress, AbstractAddressManager, AbstractOnsudManager\n'), ((830, 879), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'primary_key': '(True)', 'max_length': '(12)'}), '(primary_key=True, max_length=12)\n', (846, 879), False, 'from django.contrib.gis.db import models\n'), ((890, 932), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(9)'}), '(blank=True, max_length=9)\n', (906, 932), False, 'from django.contrib.gis.db import models\n'), ((1236, 1295), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'blank': '(False)', 'max_length': '(15)', 'db_index': '(True)'}), '(blank=False, max_length=15, db_index=True)\n', (1252, 1295), False, 'from django.contrib.gis.db import models\n'), ((1306, 1349), 'django.contrib.gis.db.models.CharField', 'models.CharField', ([], {'blank': '(False)', 'max_length': '(9)'}), '(blank=False, max_length=9)\n', (1322, 1349), False, 'from django.contrib.gis.db import models\n'), ((1573, 1592), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (1590, 1592), False, 'from django.db import connection\n'), ((726, 777), 'django.contrib.gis.db.models.Index', 'models.Index', ([], {'fields': "['lad']", 'name': '"""lookup_lad_idx"""'}), "(fields=['lad'], name='lookup_lad_idx')\n", (738, 777), False, 'from django.contrib.gis.db import models\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Script for running unittests
unittests.py launches all or selected unittests.
Examples:
Setting the MySQL account for running tests
shell> python unittests.py -uroot -D dbtests
Executing only the cursor tests
shell> python unittests.py -t cursor
unittests.py has exit status 0 when tests were ran succesful, 1 otherwise.
"""
import sys
import os
import tempfile
import threading
import unittest
import logging
from optparse import OptionParser
# Prepend the implementation directory matching the running interpreter
# (separate Python 2 / Python 3 source trees) to sys.path.
if sys.version_info >= (2,4) and sys.version_info < (3,0):
    sys.path = ['python2/'] + sys.path
elif sys.version_info >= (3,1):
    sys.path = ['python3/'] + sys.path
else:
    raise RuntimeError("Python v%d.%d is not supported" %\
        sys.version_info[0:2])
    # NOTE(review): unreachable -- the raise above already aborts.
    sys.exit(1)
import tests
from tests import mysqld
# Shared logger for the test driver.
logger = logging.getLogger(tests.LOGGER_NAME)
# Option-file template for the throwaway MySQL server bootstrapped for the
# test run; the %(...)s placeholders are substituted later.
MY_CNF = """
# MySQL option file for MySQL Connector/Python tests
[mysqld]
basedir = %(mysqld_basedir)s
datadir = %(mysqld_datadir)s
tmpdir = %(mysqld_tmpdir)s
port = %(mysqld_port)d
socket = %(mysqld_socket)s
bind_address = %(mysqld_bind_address)s
skip_name_resolve
server_id = 19771406
sql_mode = ""
default_time_zone = +00:00
log-error = myconnpy_mysqld.err
log-bin = myconnpy_bin
general_log = ON
local_infile = ON
ssl
"""
# Append SSL certificate paths; Windows paths need escaped backslashes.
if os.name == 'nt':
    MY_CNF += '\n'.join((
        "ssl-ca = %(ssl_dir)s\\\\tests_CA_cert.pem",
        "ssl-cert = %(ssl_dir)s\\\\tests_server_cert.pem",
        "ssl-key = %(ssl_dir)s\\\\tests_server_key.pem",
    ))
else:
    MY_CNF += '\n'.join((
        "ssl-ca = %(ssl_dir)s/tests_CA_cert.pem",
        "ssl-cert = %(ssl_dir)s/tests_server_cert.pem",
        "ssl-key = %(ssl_dir)s/tests_server_key.pem",
    ))
def _add_options(p):
    """Register every command line option of the test driver on parser *p*."""
    # Default bootstrap directory lives next to this script.
    default_topdir = os.path.join(os.path.dirname(
        os.path.abspath(__file__)), 'mysql_myconnpy')
    p.add_option('-t','--test', dest='testcase', metavar='NAME',
        help='Tests to execute, one of %s' % tests.get_test_names())
    p.add_option('-T','--one-test', dest='onetest', metavar='NAME',
        help='Particular test to execute, format: '\
        '<module>[.<class>[.<method>]]. '\
        'For example, to run a particular '\
        'test BugOra13392739.test_reconnect() from the tests.test_bugs '\
        'module, use following value for the -T option: '\
        ' tests.test_bugs.BugOra13392739.test_reconnect')
    p.add_option('-l','--log', dest='logfile', metavar='NAME',
        default=None,
        help='Log file location (if not given, logging is disabled)')
    p.add_option('','--force', dest='force', action="store_true",
        default=False,
        help='Remove previous MySQL test installation.')
    p.add_option('','--keep', dest='keep', action="store_true",
        default=False,
        help='Keep MySQL installation (i.e. for debugging)')
    p.add_option('','--debug', dest='debug', action="store_true",
        default=False,
        help='Show/Log debugging messages')
    p.add_option('','--verbosity', dest='verbosity', metavar='NUMBER',
        default='0', type="int",
        help='Verbosity of unittests (default 0)')
    p.add_option('','--mysql-basedir', dest='mysql_basedir',
        metavar='NAME', default='/usr/local/mysql',
        help='Where MySQL is installed. This is used to bootstrap and '\
        'run a MySQL server which is used for unittesting only.')
    p.add_option('','--mysql-topdir', dest='mysql_topdir',
        metavar='NAME',
        default=default_topdir,
        help='Where to bootstrap the new MySQL instance for testing. '\
        'Defaults to current ./mysql_myconnpy')
    p.add_option('','--bind-address', dest='bind_address', metavar='NAME',
        default='127.0.0.1',
        help='IP address to bind to')
    p.add_option('-H', '--host', dest='host', metavar='NAME',
        default='127.0.0.1',
        help='Hostname or IP address for TCP/IP connections.')
    p.add_option('-P', '--port', dest='port', metavar='NUMBER',
        default=33770, type="int",
        help='Port to use for TCP/IP connections.')
    p.add_option('', '--unix-socket', dest='unix_socket', metavar='NAME',
        default=os.path.join(default_topdir, 'myconnpy_mysql.sock'),
        help='Unix socket location.')
def _set_config(options):
    """Copy the parsed connection options into tests.MYSQL_CONFIG.

    Only truthy host/port/unix_socket values override the existing
    configuration; the test account credentials and schema name are
    always (re)set.
    """
    for key in ('host', 'port', 'unix_socket'):
        value = getattr(options, key)
        if value:
            tests.MYSQL_CONFIG[key] = value
    tests.MYSQL_CONFIG['user'] = 'root'
    tests.MYSQL_CONFIG['password'] = ''
    tests.MYSQL_CONFIG['database'] = 'myconnpy'
def _show_help(msg=None, parser=None, exit=0):
    """Print a message and the parser usage, then terminate the process.

    Args:
        msg: Optional text handed to tests.printmsg before the usage.
        parser: OptionParser whose help text should be shown, if any.
        exit: Process exit status; pass a negative value to return
            instead of exiting.
    """
    tests.printmsg(msg)
    if parser is not None:
        parser.print_help()
    if exit > -1:
        sys.exit(exit)
def main():
    """Drive a full test run: parse options, bootstrap a scratch MySQL
    server, execute the selected unittests, clean up, and exit with a
    status reflecting the test outcome."""
    usage = 'usage: %prog [options]'
    parser = OptionParser()
    _add_options(parser)
    # Set options
    (options, args) = parser.parse_args()
    option_file = os.path.join(options.mysql_topdir,'myconnpy_my.cnf')
    _set_config(options)
    # Init the MySQL Server object
    mysql_server = mysqld.MySQLInit(
        options.mysql_basedir,
        options.mysql_topdir,
        MY_CNF,
        option_file,
        options.bind_address,
        options.port,
        options.unix_socket,
        os.path.abspath(tests.SSL_DIR))
    mysql_server._debug = options.debug
    tests.MYSQL_VERSION = mysql_server.version
    # Check if we can test IPv6
    if options.bind_address.strip() != '::':
        tests.IPV6_AVAILABLE = False
    # Force removal of previous test data
    if options.force is True:
        mysql_server.remove()
    # Which tests cases to run
    if options.testcase is not None:
        if options.testcase in tests.get_test_names():
            testcases = [ 'tests.test_%s' % options.testcase ]
        else:
            msg = "Test case is not one of %s" % tests.get_test_names()
            _show_help(msg=msg,parser=parser,exit=1)
        testsuite = unittest.TestLoader().loadTestsFromNames(testcases)
    elif options.onetest is not None:
        # A single fully qualified test, e.g. tests.test_bugs.Bug....test_x
        testsuite = unittest.TestLoader().loadTestsFromName(options.onetest)
    else:
        testcases = tests.active_testcases
        testsuite = unittest.TestLoader().loadTestsFromNames(testcases)
    # Enabling logging
    formatter = logging.Formatter("%(asctime)s [%(name)s:%(levelname)s] %(message)s")
    myconnpy_logger = logging.getLogger('myconnpy')
    fh = None
    if options.logfile is not None:
        fh = logging.FileHandler(options.logfile)
    else:
        fh = logging.StreamHandler()
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    if options.debug is True:
        logger.setLevel(logging.DEBUG)
        myconnpy_logger.setLevel(logging.DEBUG)
    else:
        myconnpy_logger.setLevel(logging.INFO)
    myconnpy_logger.addHandler(fh)
    myconnpy_logger.info(
        "MySQL Connector/Python unittest started: "
        "Python v%s ; MySQL v%s" % (
        '.'.join([ str(v) for v in sys.version_info[0:3]]),
        '.'.join([ str(v) for v in mysql_server.version[0:3]])))
    # Bootstrap and start a MySQL server
    myconnpy_logger.info("Bootstrapping a MySQL server")
    mysql_server.bootstrap()
    myconnpy_logger.info("Starting a MySQL server")
    mysql_server.start()
    myconnpy_logger.info("Starting unit tests")
    was_successful = False
    try:
        # Run test cases
        result = unittest.TextTestRunner(verbosity=options.verbosity).run(
            testsuite)
        was_successful = result.wasSuccessful()
    except KeyboardInterrupt:
        logger.info("Unittesting was interrupted")
        was_successful = False
    # Log messages added by test cases
    for msg in tests.MESSAGES['WARNINGS']:
        myconnpy_logger.warning(msg)
    for msg in tests.MESSAGES['INFO']:
        myconnpy_logger.info(msg)
    # Clean up
    if not options.keep:
        mysql_server.stop()
        mysql_server.remove()
        myconnpy_logger.info("MySQL server stopped and cleaned up")
    else:
        myconnpy_logger.info("MySQL server kept running on %s:%d" %
            (options.bind_address, options.port))
    txt = ""
    if not was_successful:
        txt = "not "
    logger.info("MySQL Connector/Python unittests were %ssuccessful" % txt)
    # Return result of tests as exit code
    sys.exit(not was_successful)
# Script entry point: run the full unittest driver.
if __name__ == '__main__':
    main()
| [
"logging.getLogger",
"logging.StreamHandler",
"tests.get_test_names",
"logging.Formatter",
"os.path.join",
"optparse.OptionParser",
"tests.printmsg",
"logging.FileHandler",
"sys.exit",
"os.path.abspath",
"unittest.TextTestRunner",
"unittest.TestLoader"
] | [((1966, 2002), 'logging.getLogger', 'logging.getLogger', (['tests.LOGGER_NAME'], {}), '(tests.LOGGER_NAME)\n', (1983, 2002), False, 'import logging\n'), ((5846, 5865), 'tests.printmsg', 'tests.printmsg', (['msg'], {}), '(msg)\n', (5860, 5865), False, 'import tests\n'), ((6029, 6043), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (6041, 6043), False, 'from optparse import OptionParser\n'), ((6148, 6201), 'os.path.join', 'os.path.join', (['options.mysql_topdir', '"""myconnpy_my.cnf"""'], {}), "(options.mysql_topdir, 'myconnpy_my.cnf')\n", (6160, 6201), False, 'import os\n'), ((7513, 7582), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s [%(name)s:%(levelname)s] %(message)s"""'], {}), "('%(asctime)s [%(name)s:%(levelname)s] %(message)s')\n", (7530, 7582), False, 'import logging\n'), ((7605, 7634), 'logging.getLogger', 'logging.getLogger', (['"""myconnpy"""'], {}), "('myconnpy')\n", (7622, 7634), False, 'import logging\n'), ((9567, 9595), 'sys.exit', 'sys.exit', (['(not was_successful)'], {}), '(not was_successful)\n', (9575, 9595), False, 'import sys\n'), ((1905, 1916), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1913, 1916), False, 'import sys\n'), ((5947, 5961), 'sys.exit', 'sys.exit', (['exit'], {}), '(exit)\n', (5955, 5961), False, 'import sys\n'), ((6490, 6520), 'os.path.abspath', 'os.path.abspath', (['tests.SSL_DIR'], {}), '(tests.SSL_DIR)\n', (6505, 6520), False, 'import os\n'), ((7698, 7734), 'logging.FileHandler', 'logging.FileHandler', (['options.logfile'], {}), '(options.logfile)\n', (7717, 7734), False, 'import logging\n'), ((7758, 7781), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (7779, 7781), False, 'import logging\n'), ((2942, 2967), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2957, 2967), False, 'import os\n'), ((5316, 5367), 'os.path.join', 'os.path.join', (['default_topdir', '"""myconnpy_mysql.sock"""'], {}), "(default_topdir, 'myconnpy_mysql.sock')\n", (5328, 
5367), False, 'import os\n'), ((6931, 6953), 'tests.get_test_names', 'tests.get_test_names', ([], {}), '()\n', (6951, 6953), False, 'import tests\n'), ((3098, 3120), 'tests.get_test_names', 'tests.get_test_names', ([], {}), '()\n', (3118, 3120), False, 'import tests\n'), ((7081, 7103), 'tests.get_test_names', 'tests.get_test_names', ([], {}), '()\n', (7101, 7103), False, 'import tests\n'), ((7177, 7198), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (7196, 7198), False, 'import unittest\n'), ((8636, 8688), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': 'options.verbosity'}), '(verbosity=options.verbosity)\n', (8659, 8688), False, 'import unittest\n'), ((7287, 7308), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (7306, 7308), False, 'import unittest\n'), ((7417, 7438), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (7436, 7438), False, 'import unittest\n')] |
import unittest
from solver import look_and_say
class TestSolver(unittest.TestCase):
    """Unit tests for the look-and-say sequence generator."""

    def test_look_and_say(self):
        """The first five terms match the canonical look-and-say sequence."""
        expected_terms = {1: "1", 2: "11", 3: "21", 4: "1211", 5: "111221"}
        for term_index, expected in expected_terms.items():
            self.assertEqual(look_and_say(term_index), expected)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| [
"unittest.main",
"solver.look_and_say"
] | [((374, 389), 'unittest.main', 'unittest.main', ([], {}), '()\n', (387, 389), False, 'import unittest\n'), ((138, 153), 'solver.look_and_say', 'look_and_say', (['(1)'], {}), '(1)\n', (150, 153), False, 'from solver import look_and_say\n'), ((181, 196), 'solver.look_and_say', 'look_and_say', (['(2)'], {}), '(2)\n', (193, 196), False, 'from solver import look_and_say\n'), ((225, 240), 'solver.look_and_say', 'look_and_say', (['(3)'], {}), '(3)\n', (237, 240), False, 'from solver import look_and_say\n'), ((269, 284), 'solver.look_and_say', 'look_and_say', (['(4)'], {}), '(4)\n', (281, 284), False, 'from solver import look_and_say\n'), ((315, 330), 'solver.look_and_say', 'look_and_say', (['(5)'], {}), '(5)\n', (327, 330), False, 'from solver import look_and_say\n')] |
from rest_framework import generics
from rest_framework.response import Response
from .serializers import CreatePostSerializer, ListPostSerializer
from .models import Post
class PostView(generics.GenericAPIView):
    """Create a post on behalf of the authenticated user."""

    def post(self, request):
        """Validate the payload and persist a new Post.

        The requesting user's id is injected into the payload so clients
        cannot create posts as someone else. Returns a 201 response with
        the serialized post; invalid payloads raise a DRF ValidationError
        (rendered as a 400 response).
        """
        data = {**request.data, "user": request.user.id}
        serializer = CreatePostSerializer(data=data)
        # BUG FIX: the original called `serializer.is_valid(self)`, passing the
        # view instance as the (historically positional) `raise_exception`
        # flag. On modern DRF the flag is keyword-only, so that call raises
        # TypeError. Use the explicit keyword form.
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data, status=201)
class PostListView(generics.ListAPIView):
    """List every post except those authored by the requesting user."""

    serializer_class = ListPostSerializer

    def get_queryset(self):
        # Hide the requester's own posts from the listing.
        requester_id = self.request.user.id
        return Post.objects.exclude(user=requester_id)
| [
"rest_framework.response.Response"
] | [((437, 474), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': '(201)'}), '(serializer.data, status=201)\n', (445, 474), False, 'from rest_framework.response import Response\n')] |
from discord.ext import commands
import os
import sys
import asyncio
from ..core.cog_config import CogExtension
from typing import Tuple
# function for cogs management
def find_cog(bot, target_cog: str, mode: str) -> Tuple[bool, str]:
    """Locate a cog file under ``./bot/cogs`` (one sub-package level deep)
    and load/unload/reload it on *bot*.

    *mode* must be one of ``'load'``, ``'unload'``, ``'reload'``.

    Returns ``(True, success_message)`` when a file matching
    ``<target_cog>*.py`` was found and handled, else ``(False, '')``.
    """
    def load_ext(full_path: str):
        # Dispatch to the matching discord.py extension operation.
        if mode == 'load':
            bot.load_extension(full_path)
        if mode == 'unload':
            bot.unload_extension(full_path)
        if mode == 'reload':
            bot.reload_extension(full_path)

    for find_filename in os.listdir('./bot/cogs'):
        # Entries with an extension are treated as plain cog files.
        if '.' in find_filename:
            if find_filename.startswith(target_cog) and find_filename.endswith('.py'):
                load_ext(f'bot.cogs.{find_filename[:-3]}')
                return True, f':white_check_mark: Extension {find_filename} {mode}ed!'
        # ROBUSTNESS FIX: only recurse into real directories. The original
        # assumed every extension-less entry was a directory and crashed
        # with NotADirectoryError on e.g. a Makefile or LICENSE file.
        elif os.path.isdir(f'./bot/cogs/{find_filename}'):
            for find_sub_filename in os.listdir(f'./bot/cogs/{find_filename}'):
                if find_sub_filename.startswith(target_cog) and find_sub_filename.endswith('.py'):
                    load_ext(f'bot.cogs.{find_filename}.{find_sub_filename[:-3]}')
                    return True, f':white_check_mark: Extension {find_sub_filename} {mode}ed!'

    return False, ''
class Cogs(CogExtension):
    """Administrative commands for managing bot extensions (cogs)."""

    @commands.group()
    @commands.has_any_role('總召', 'Administrator')
    async def cogs(self, ctx):
        # Parent command group; the sub-commands below do the actual work.
        pass

    async def _manage_cog(self, ctx, target_cog: str, mode: str):
        # Shared implementation for load/unload/reload; the three commands
        # previously duplicated this block verbatim.
        find, msg = find_cog(self.bot, target_cog, mode)
        if find:
            return await ctx.send(msg)
        return await ctx.send(
            f':exclamation: There are no extension called {target_cog}!'
        )

    @cogs.command()
    async def load(self, ctx, target_cog: str):
        """cmd
        加載 插件<target_cog>。
        """
        return await self._manage_cog(ctx, target_cog, 'load')

    @cogs.command()
    async def unload(self, ctx, target_cog: str):
        """cmd
        卸載 插件<target_cog>。
        """
        return await self._manage_cog(ctx, target_cog, 'unload')

    @cogs.command()
    async def reload(self, ctx, target_cog: str):
        """cmd
        重新加載 插件<target_cog>。
        """
        return await self._manage_cog(ctx, target_cog, 'reload')

    @commands.command(aliases=['logout', 'shutdown'])
    @commands.has_any_role('總召', 'Administrator')
    async def shut_down(self, ctx):
        """cmd
        安全關閉機器人。
        """
        # Confirm, disconnect, give the logout a moment to flush, then exit.
        await ctx.send(':white_check_mark: The bot is shutting down...')
        await self.bot.logout()
        await asyncio.sleep(1)
        sys.exit(0)
def setup(bot):
    """discord.py extension hook: register the Cogs cog on the bot."""
    cog = Cogs(bot)
    bot.add_cog(cog)
| [
"os.listdir",
"asyncio.sleep",
"discord.ext.commands.group",
"sys.exit",
"discord.ext.commands.has_any_role",
"discord.ext.commands.command"
] | [((512, 536), 'os.listdir', 'os.listdir', (['"""./bot/cogs"""'], {}), "('./bot/cogs')\n", (522, 536), False, 'import os\n'), ((1264, 1280), 'discord.ext.commands.group', 'commands.group', ([], {}), '()\n', (1278, 1280), False, 'from discord.ext import commands\n'), ((1286, 1330), 'discord.ext.commands.has_any_role', 'commands.has_any_role', (['"""總召"""', '"""Administrator"""'], {}), "('總召', 'Administrator')\n", (1307, 1330), False, 'from discord.ext import commands\n'), ((2450, 2498), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "['logout', 'shutdown']"}), "(aliases=['logout', 'shutdown'])\n", (2466, 2498), False, 'from discord.ext import commands\n'), ((2504, 2548), 'discord.ext.commands.has_any_role', 'commands.has_any_role', (['"""總召"""', '"""Administrator"""'], {}), "('總召', 'Administrator')\n", (2525, 2548), False, 'from discord.ext import commands\n'), ((2773, 2784), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2781, 2784), False, 'import sys\n'), ((890, 931), 'os.listdir', 'os.listdir', (['f"""./bot/cogs/{find_filename}"""'], {}), "(f'./bot/cogs/{find_filename}')\n", (900, 931), False, 'import os\n'), ((2748, 2764), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (2761, 2764), False, 'import asyncio\n')] |
"""
A copy from the existing FK code.
This is not a real/pure DG method, I mean, the demagnetisation fields including the magnetic
potential are not totally computed by DG methods, such as IP method or
the mixed form using BDM and DG space. The idea is actually even we use DG space
to represent the effective field and magnetisation, we still can use CG space to compute
the magnetic potential -- this is the simplest way to extend, maybe we could try the
mixed form later.
"""
import numpy as np
import dolfin as df
from finmag.util.consts import mu0
from finmag.native.llg import compute_bem_fk
from finmag.util.meshes import nodal_volume
from finmag.util.timings import Timings, default_timer, timed, mtimed
from finmag.util import helpers
def prepared_timed(measurement_group, timer_to_use):
    """Build a one-argument wrapper around :func:`timed`.

    The returned callable takes only a measurement name; the measurement
    group and the timer are captured from this call.
    """
    def bound_timed(measurement_name):
        return timed(measurement_name, measurement_group, timer_to_use)
    return bound_timed
# Module-level timer dedicated to the FK demag computation, plus a `timed`
# factory pre-bound to the "FKDemag" measurement group.
fk_timer = Timings()
fk_timed = prepared_timed("FKDemag", fk_timer)
class FKDemagDG(object):
    """
    Computation of the demagnetising field using the Fredkin-Koehler hybrid FEM/BEM technique.
    "`Hybrid method for computing demagnetizing fields`_",
    IEEE Transactions on Magnetics, vol.26, no.2, pp.415-417, Mar 1990.
    .. _Hybrid method for computing demagnetizing fields: http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=106342
    """
    def __init__(self, name='DemagDG'):
        """
        Create a new FKDemag instance.

        The attribute `parameters` is a dict that contains the settings for the
        solvers for the Neumann (potential phi_1) and Laplace (potential phi_2)
        problems.

        Setting the method used by the solvers:
            Change the entries `phi_1_solver` and `phi_2_solver` to a value from
            `df.list_krylov_solver_methods()`. Default is dolfin's default.
        Setting the preconditioners:
            Change the entries `phi_1_preconditioner` and `phi_2_preconditioner` to
            a value from `df.list_krylov_solver_preconditioners()`. Default is dolfin's default.
        Setting the tolerances:
            Change the existing entries inside `phi_1` and `phi_2` which are themselves dicts.
            You can add new entries to these dicts as well. Everything which is
            understood by `df.KrylovSolver` is valid.
        """
        self.name = name
        # This interaction does not contribute to the Jacobian of the LLG solver.
        self.in_jacobian = False
        default_parameters = {
            'absolute_tolerance': 1e-6,
            'relative_tolerance': 1e-6,
            'maximum_iterations': int(1e4)
        }
        self.parameters = {
            'phi_1_solver': 'default',
            'phi_1_preconditioner': 'default',
            'phi_1': default_parameters,
            'phi_2_solver': 'default',
            'phi_2_preconditioner': 'default',
            # copy() so phi_2 tolerances can be tuned independently of phi_1
            'phi_2': default_parameters.copy()
        }
    @mtimed(default_timer)
    def setup(self, DG3, m, Ms, unit_length=1):
        """
        Setup the FKDemag instance. Usually called automatically by the Simulation object.

        *Arguments*
        DG3: dolfin.VectorFunctionSpace (DG)
            The finite element space the magnetisation is defined on.
        m: dolfin.Function on DG3
            The unit magnetisation.
        Ms: float
            The saturation magnetisation in A/m.
        unit_length: float
            The length (in m) represented by one unit on the mesh. Default 1.
        """
        self.m = m
        self.Ms = Ms
        self.unit_length = unit_length
        mesh = DG3.mesh()
        # CG spaces used for the scalar potential (S1) and the CG field (S3);
        # the magnetisation/effective field themselves live in the DG space.
        self.S1 = df.FunctionSpace(mesh, "Lagrange", 1)
        self.S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1)
        self.dim = mesh.topology().dim()
        self.n = df.FacetNormal(mesh)
        self.DG3 = DG3
        self._test1 = df.TestFunction(self.S1)
        self._trial1 = df.TrialFunction(self.S1)
        self._test3 = df.TestFunction(self.S3)
        self._trial3 = df.TrialFunction(self.S3)
        self._test_dg3 = df.TestFunction(self.DG3)
        self._trial_dg3 = df.TrialFunction(self.DG3)
        # for computation of energy
        self._nodal_volumes = nodal_volume(self.S1, unit_length)
        self._H_func = df.Function(DG3)  # we will copy field into this when we need the energy
        # E_d = -1/2 mu0 integral(H . M) dx
        self._E_integrand = -0.5 * mu0 * df.dot(self._H_func, self.m * self.Ms)
        self._E = self._E_integrand * df.dx
        self._nodal_E = df.dot(self._E_integrand, self._test1) * df.dx
        self._nodal_E_func = df.Function(self.S1)
        # for computation of field and scalar magnetic potential
        # NOTE: this assignment shadows the _poisson_matrix *method* with the
        # assembled matrix on the instance (the method is only called once here).
        self._poisson_matrix = self._poisson_matrix()
        self._poisson_solver = df.KrylovSolver(self._poisson_matrix,
            self.parameters['phi_1_solver'], self.parameters['phi_1_preconditioner'])
        self._poisson_solver.parameters.update(self.parameters['phi_1'])
        self._laplace_zeros = df.Function(self.S1).vector()
        self._laplace_solver = df.KrylovSolver(
            self.parameters['phi_2_solver'], self.parameters['phi_2_preconditioner'])
        self._laplace_solver.parameters.update(self.parameters['phi_2'])
        self._laplace_solver.parameters["preconditioner"]["same_nonzero_pattern"] = True
        # The BEM matrix is expensive; skip it if precomputed_bem() supplied one.
        with fk_timed('compute BEM'):
            if not hasattr(self, "_bem"):
                self._bem, self._b2g_map = compute_bem_fk(df.BoundaryMesh(mesh, 'exterior', False))
        self._phi_1 = df.Function(self.S1)  # solution of inhomogeneous Neumann problem
        self._phi_2 = df.Function(self.S1)  # solution of Laplace equation inside domain
        self._phi = df.Function(self.S1)  # magnetic potential phi_1 + phi_2
        # To be applied to the vector field m as first step of computation of _phi_1.
        # This gives us div(M), which is equal to Laplace(_phi_1), equation
        # which is then solved using _poisson_solver.
        self._Ms_times_divergence = df.assemble(self.Ms * df.inner(self._trial_dg3, df.grad(self._test1)) * df.dx)
        self._setup_gradient_computation()
    @mtimed(default_timer)
    def precomputed_bem(self, bem, b2g_map):
        """
        If the BEM and a boundary to global vertices map are known, they can be
        passed to the FKDemag object with this method so it will skip
        re-computing them.
        """
        self._bem, self._b2g_map = bem, b2g_map
    @mtimed(default_timer)
    def compute_potential(self):
        """
        Compute the magnetic potential.

        *Returns*
            df.Function
                The magnetic potential.
        """
        self._compute_magnetic_potential()
        return self._phi
    @mtimed(default_timer)
    def compute_field(self):
        """
        Compute the demagnetising field.

        *Returns*
            numpy.ndarray
                The demagnetising field.
        """
        self._compute_magnetic_potential()
        return self._compute_gradient()
    def average_field(self):
        """
        Compute the average demag field.
        """
        return helpers.average_field(self.compute_field())
    @mtimed(default_timer)
    def compute_energy(self):
        """
        Compute the total energy of the field.

        .. math::
            E_\\mathrm{d} = -\\frac12 \\mu_0 \\int_\\Omega
            H_\\mathrm{d} \\cdot \\vec M \\mathrm{d}x

        *Returns*
            Float
                The energy of the demagnetising field.
        """
        self._H_func.vector()[:] = self.compute_field()
        # scale by unit_length^dim to convert from mesh units to SI volume
        return df.assemble(self._E) * self.unit_length ** self.dim
    @mtimed(default_timer)
    def energy_density(self):
        """
        Compute the energy density in the field.

        .. math::
            \\rho = \\frac{E_{\\mathrm{d}, i}}{V_i},

        where V_i is the volume associated with the node i.

        *Returns*
            numpy.ndarray
                The energy density of the demagnetising field.
        """
        self._H_func.vector()[:] = self.compute_field()
        nodal_E = df.assemble(self._nodal_E).array() * self.unit_length ** self.dim
        return nodal_E / self._nodal_volumes
    @mtimed(default_timer)
    def energy_density_function(self):
        """
        Returns the energy density in the field as a dolfin function to allow probing.

        *Returns*
            dolfin.Function
                The energy density of the demagnetising field.
        """
        self._nodal_E_func.vector()[:] = self.energy_density()
        return self._nodal_E_func
    @mtimed(fk_timer)
    def _poisson_matrix(self):
        # Assemble the weak form of the Laplacian on the CG space S1.
        A = df.dot(df.grad(self._trial1), df.grad(self._test1)) * df.dx
        return df.assemble(A)  # stiffness matrix for Poisson equation
    def _compute_magnetic_potential(self):
        # compute _phi_1 on the whole domain
        g_1 = self._Ms_times_divergence * self.m.vector()
        with fk_timed("first linear solve"):
            self._poisson_solver.solve(self._phi_1.vector(), g_1)
        # compute _phi_2 on the boundary using the Dirichlet boundary
        # conditions we get from BEM * _phi_1 on the boundary.
        with fk_timed("using boundary conditions"):
            phi_1 = self._phi_1.vector()[self._b2g_map]
            self._phi_2.vector()[self._b2g_map[:]] = np.dot(self._bem, phi_1.array())
            boundary_condition = df.DirichletBC(self.S1, self._phi_2, df.DomainBoundary())
            A = self._poisson_matrix.copy()
            b = self._laplace_zeros
            boundary_condition.apply(A, b)
        # compute _phi_2 on the whole domain
        with fk_timed("second linear solve"):
            self._laplace_solver.solve(A, self._phi_2.vector(), b)
        # add _phi_1 and _phi_2 to obtain magnetic potential
        self._phi.vector()[:] = self._phi_1.vector() + self._phi_2.vector()
    @mtimed(fk_timer)
    def _setup_gradient_computation(self):
        """
        Prepare the discretised gradient to use in :py:meth:`FKDemag._compute_gradient`.

        We don't need the gradient field as a continuous field, we are only
        interested in the values at specific points. It is thus a waste of
        computational effort to use a projection of the gradient field, since
        it performs the fairly large operation of assembling a matrix and
        solving a linear system of equations.
        """
        A = df.inner(self._test_dg3, - df.grad(self._trial1)) * df.dx
        # This can be applied to scalar functions.
        self._gradient = df.assemble(A)
        # The `A` above is in fact not quite the gradient, since we integrated
        # over the volume as well. We will divide by the volume later, after
        # the multiplication of the scalar magnetic potential. Since the two
        # operations are symmetric (multiplying by volume, dividing by volume)
        # we don't have to care for the units, i.e. unit_length.
        b = df.dot(self._test_dg3, df.Constant((1, 1, 1))) * df.dx
        self._nodal_volumes_S3_no_units = df.assemble(b).array()
    @mtimed(fk_timer)
    def _compute_gradient(self):
        """
        Get the demagnetising field from the magnetic scalar potential.

        .. math::
            \\vec{H}_{\\mathrm{d}} = - \\nabla \\phi (\\vec{r})

        Using dolfin, we would translate this to
        .. sourcecode::
            H_d = df.project(- df.grad(self._phi), self.S3)
        but the method used here is computationally less expensive.
        """
        H = self._gradient * self._phi.vector()
        return H.array() / self._nodal_volumes_S3_no_units
| [
"dolfin.grad",
"finmag.util.timings.timed",
"dolfin.KrylovSolver",
"dolfin.Function",
"dolfin.VectorFunctionSpace",
"dolfin.dot",
"dolfin.BoundaryMesh",
"finmag.util.timings.Timings",
"dolfin.assemble",
"dolfin.TestFunction",
"dolfin.TrialFunction",
"dolfin.FunctionSpace",
"dolfin.Constant",... | [((946, 955), 'finmag.util.timings.Timings', 'Timings', ([], {}), '()\n', (953, 955), False, 'from finmag.util.timings import Timings, default_timer, timed, mtimed\n'), ((2898, 2919), 'finmag.util.timings.mtimed', 'mtimed', (['default_timer'], {}), '(default_timer)\n', (2904, 2919), False, 'from finmag.util.timings import Timings, default_timer, timed, mtimed\n'), ((6080, 6101), 'finmag.util.timings.mtimed', 'mtimed', (['default_timer'], {}), '(default_timer)\n', (6086, 6101), False, 'from finmag.util.timings import Timings, default_timer, timed, mtimed\n'), ((6403, 6424), 'finmag.util.timings.mtimed', 'mtimed', (['default_timer'], {}), '(default_timer)\n', (6409, 6424), False, 'from finmag.util.timings import Timings, default_timer, timed, mtimed\n'), ((6680, 6701), 'finmag.util.timings.mtimed', 'mtimed', (['default_timer'], {}), '(default_timer)\n', (6686, 6701), False, 'from finmag.util.timings import Timings, default_timer, timed, mtimed\n'), ((7126, 7147), 'finmag.util.timings.mtimed', 'mtimed', (['default_timer'], {}), '(default_timer)\n', (7132, 7147), False, 'from finmag.util.timings import Timings, default_timer, timed, mtimed\n'), ((7604, 7625), 'finmag.util.timings.mtimed', 'mtimed', (['default_timer'], {}), '(default_timer)\n', (7610, 7625), False, 'from finmag.util.timings import Timings, default_timer, timed, mtimed\n'), ((8162, 8183), 'finmag.util.timings.mtimed', 'mtimed', (['default_timer'], {}), '(default_timer)\n', (8168, 8183), False, 'from finmag.util.timings import Timings, default_timer, timed, mtimed\n'), ((8548, 8564), 'finmag.util.timings.mtimed', 'mtimed', (['fk_timer'], {}), '(fk_timer)\n', (8554, 8564), False, 'from finmag.util.timings import Timings, default_timer, timed, mtimed\n'), ((9842, 9858), 'finmag.util.timings.mtimed', 'mtimed', (['fk_timer'], {}), '(fk_timer)\n', (9848, 9858), False, 'from finmag.util.timings import Timings, default_timer, timed, mtimed\n'), ((11043, 11059), 
'finmag.util.timings.mtimed', 'mtimed', (['fk_timer'], {}), '(fk_timer)\n', (11049, 11059), False, 'from finmag.util.timings import Timings, default_timer, timed, mtimed\n'), ((856, 912), 'finmag.util.timings.timed', 'timed', (['measurement_name', 'measurement_group', 'timer_to_use'], {}), '(measurement_name, measurement_group, timer_to_use)\n', (861, 912), False, 'from finmag.util.timings import Timings, default_timer, timed, mtimed\n'), ((3596, 3633), 'dolfin.FunctionSpace', 'df.FunctionSpace', (['mesh', '"""Lagrange"""', '(1)'], {}), "(mesh, 'Lagrange', 1)\n", (3612, 3633), True, 'import dolfin as df\n'), ((3652, 3695), 'dolfin.VectorFunctionSpace', 'df.VectorFunctionSpace', (['mesh', '"""Lagrange"""', '(1)'], {}), "(mesh, 'Lagrange', 1)\n", (3674, 3695), True, 'import dolfin as df\n'), ((3763, 3783), 'dolfin.FacetNormal', 'df.FacetNormal', (['mesh'], {}), '(mesh)\n', (3777, 3783), True, 'import dolfin as df\n'), ((3831, 3855), 'dolfin.TestFunction', 'df.TestFunction', (['self.S1'], {}), '(self.S1)\n', (3846, 3855), True, 'import dolfin as df\n'), ((3879, 3904), 'dolfin.TrialFunction', 'df.TrialFunction', (['self.S1'], {}), '(self.S1)\n', (3895, 3904), True, 'import dolfin as df\n'), ((3927, 3951), 'dolfin.TestFunction', 'df.TestFunction', (['self.S3'], {}), '(self.S3)\n', (3942, 3951), True, 'import dolfin as df\n'), ((3975, 4000), 'dolfin.TrialFunction', 'df.TrialFunction', (['self.S3'], {}), '(self.S3)\n', (3991, 4000), True, 'import dolfin as df\n'), ((4026, 4051), 'dolfin.TestFunction', 'df.TestFunction', (['self.DG3'], {}), '(self.DG3)\n', (4041, 4051), True, 'import dolfin as df\n'), ((4078, 4104), 'dolfin.TrialFunction', 'df.TrialFunction', (['self.DG3'], {}), '(self.DG3)\n', (4094, 4104), True, 'import dolfin as df\n'), ((4172, 4206), 'finmag.util.meshes.nodal_volume', 'nodal_volume', (['self.S1', 'unit_length'], {}), '(self.S1, unit_length)\n', (4184, 4206), False, 'from finmag.util.meshes import nodal_volume\n'), ((4230, 4246), 'dolfin.Function', 
'df.Function', (['DG3'], {}), '(DG3)\n', (4241, 4246), True, 'import dolfin as df\n'), ((4527, 4547), 'dolfin.Function', 'df.Function', (['self.S1'], {}), '(self.S1)\n', (4538, 4547), True, 'import dolfin as df\n'), ((4699, 4815), 'dolfin.KrylovSolver', 'df.KrylovSolver', (['self._poisson_matrix', "self.parameters['phi_1_solver']", "self.parameters['phi_1_preconditioner']"], {}), "(self._poisson_matrix, self.parameters['phi_1_solver'], self\n .parameters['phi_1_preconditioner'])\n", (4714, 4815), True, 'import dolfin as df\n'), ((4987, 5081), 'dolfin.KrylovSolver', 'df.KrylovSolver', (["self.parameters['phi_2_solver']", "self.parameters['phi_2_preconditioner']"], {}), "(self.parameters['phi_2_solver'], self.parameters[\n 'phi_2_preconditioner'])\n", (5002, 5081), True, 'import dolfin as df\n'), ((5458, 5478), 'dolfin.Function', 'df.Function', (['self.S1'], {}), '(self.S1)\n', (5469, 5478), True, 'import dolfin as df\n'), ((5546, 5566), 'dolfin.Function', 'df.Function', (['self.S1'], {}), '(self.S1)\n', (5557, 5566), True, 'import dolfin as df\n'), ((5633, 5653), 'dolfin.Function', 'df.Function', (['self.S1'], {}), '(self.S1)\n', (5644, 5653), True, 'import dolfin as df\n'), ((8683, 8697), 'dolfin.assemble', 'df.assemble', (['A'], {}), '(A)\n', (8694, 8697), True, 'import dolfin as df\n'), ((10512, 10526), 'dolfin.assemble', 'df.assemble', (['A'], {}), '(A)\n', (10523, 10526), True, 'import dolfin as df\n'), ((4344, 4382), 'dolfin.dot', 'df.dot', (['self._H_func', '(self.m * self.Ms)'], {}), '(self._H_func, self.m * self.Ms)\n', (4350, 4382), True, 'import dolfin as df\n'), ((4451, 4489), 'dolfin.dot', 'df.dot', (['self._E_integrand', 'self._test1'], {}), '(self._E_integrand, self._test1)\n', (4457, 4489), True, 'import dolfin as df\n'), ((7546, 7566), 'dolfin.assemble', 'df.assemble', (['self._E'], {}), '(self._E)\n', (7557, 7566), True, 'import dolfin as df\n'), ((4926, 4946), 'dolfin.Function', 'df.Function', (['self.S1'], {}), '(self.S1)\n', (4937, 4946), True, 
'import dolfin as df\n'), ((8615, 8636), 'dolfin.grad', 'df.grad', (['self._trial1'], {}), '(self._trial1)\n', (8622, 8636), True, 'import dolfin as df\n'), ((8638, 8658), 'dolfin.grad', 'df.grad', (['self._test1'], {}), '(self._test1)\n', (8645, 8658), True, 'import dolfin as df\n'), ((9395, 9414), 'dolfin.DomainBoundary', 'df.DomainBoundary', ([], {}), '()\n', (9412, 9414), True, 'import dolfin as df\n'), ((10940, 10962), 'dolfin.Constant', 'df.Constant', (['(1, 1, 1)'], {}), '((1, 1, 1))\n', (10951, 10962), True, 'import dolfin as df\n'), ((11014, 11028), 'dolfin.assemble', 'df.assemble', (['b'], {}), '(b)\n', (11025, 11028), True, 'import dolfin as df\n'), ((5394, 5434), 'dolfin.BoundaryMesh', 'df.BoundaryMesh', (['mesh', '"""exterior"""', '(False)'], {}), "(mesh, 'exterior', False)\n", (5409, 5434), True, 'import dolfin as df\n'), ((8045, 8071), 'dolfin.assemble', 'df.assemble', (['self._nodal_E'], {}), '(self._nodal_E)\n', (8056, 8071), True, 'import dolfin as df\n'), ((10405, 10426), 'dolfin.grad', 'df.grad', (['self._trial1'], {}), '(self._trial1)\n', (10412, 10426), True, 'import dolfin as df\n'), ((5991, 6011), 'dolfin.grad', 'df.grad', (['self._test1'], {}), '(self._test1)\n', (5998, 6011), True, 'import dolfin as df\n')] |
from discord.ext import commands
from library.MessageContent import MessageContent
from library.Utils import getTeamInfo
from library.InputParser import InputParser
from library.SendList import SendList
class Commands(commands.Cog):
    # Bot commands for querying game schedules and live scores.

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_ready(self):
        # Log basic identity information once the bot is connected.
        await self.bot.wait_until_ready()
        # await bot.change_presence(activity=discord.Game('-'))
        separator = '--------------------------'
        print(separator)
        print(f'Logged in as: {self.bot.user.name}')
        print(f'With ID: {self.bot.user.id}')
        print(separator)

    @commands.command()
    async def all(self, ctx, *, league=""):
        # Send an embed listing every game of the given league.
        embed = MessageContent(league).returnAllGame()
        await ctx.send(embed=embed)

    @commands.command()
    async def team(self, ctx, league="", *, team_date=""):
        # Send an embed with one team's game on a given date.
        team, date = InputParser(team_date).parseTeamDate()
        embed = MessageContent(league).returnTeamGame(team, date)
        await ctx.send(embed=embed)

    @commands.command()
    async def live(self, ctx, league="", *, team=""):
        # Send a live-game embed and register the message for interval updates.
        team_info = getTeamInfo(league, team)
        embed = MessageContent(league).returnLiveGame(team_info)
        sent_message = await ctx.send(embed=embed)
        # add to db so the score can be refreshed later
        SendList().add_interval_update(league, team_info[0], sent_message)

    @commands.command()
    async def update(self, ctx, league="", *, team=""):
        # Placeholder: manual update is not implemented yet.
        pass

    # test command
    @commands.command()
    async def ping(self, ctx):
        await ctx.send("pong!")
def setup(bot):
    # discord.py extension hook: attach the Commands cog to the bot.
    cog = Commands(bot)
    bot.add_cog(cog)
| [
"discord.ext.commands.Cog.listener",
"library.SendList.SendList",
"library.InputParser.InputParser",
"library.MessageContent.MessageContent",
"library.Utils.getTeamInfo",
"discord.ext.commands.command"
] | [((293, 316), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (314, 316), False, 'from discord.ext import commands\n'), ((646, 664), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (662, 664), False, 'from discord.ext import commands\n'), ((798, 816), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (814, 816), False, 'from discord.ext import commands\n'), ((1038, 1056), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (1054, 1056), False, 'from discord.ext import commands\n'), ((1349, 1367), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (1365, 1367), False, 'from discord.ext import commands\n'), ((1462, 1480), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (1478, 1480), False, 'from discord.ext import commands\n'), ((1131, 1156), 'library.Utils.getTeamInfo', 'getTeamInfo', (['league', 'team'], {}), '(league, team)\n', (1142, 1156), False, 'from library.Utils import getTeamInfo\n'), ((721, 743), 'library.MessageContent.MessageContent', 'MessageContent', (['league'], {}), '(league)\n', (735, 743), False, 'from library.MessageContent import MessageContent\n'), ((899, 921), 'library.InputParser.InputParser', 'InputParser', (['team_date'], {}), '(team_date)\n', (910, 921), False, 'from library.InputParser import InputParser\n'), ((950, 972), 'library.MessageContent.MessageContent', 'MessageContent', (['league'], {}), '(league)\n', (964, 972), False, 'from library.MessageContent import MessageContent\n'), ((1169, 1191), 'library.MessageContent.MessageContent', 'MessageContent', (['league'], {}), '(league)\n', (1183, 1191), False, 'from library.MessageContent import MessageContent\n'), ((1285, 1295), 'library.SendList.SendList', 'SendList', ([], {}), '()\n', (1293, 1295), False, 'from library.SendList import SendList\n')] |
from typing import Dict
from .common import mean, ValueBuffer
from datetime import datetime, timezone, timedelta
class TemperatureGroup:
    """Aggregate temperature readings from a named group of devices.

    Each device's readings are buffered in a ``ValueBuffer``; the group is
    only considered refreshable after ``time_read_sec`` seconds have passed
    since the last update.
    """

    def __init__(self, name, time_read_sec=1):
        self.name = name
        self.data: Dict[str, ValueBuffer] = {}
        # Start "in the past" so the very first read is immediately allowed.
        self.last_update = datetime.now(tz=timezone.utc) - timedelta(seconds=10)
        self.time_read = timedelta(seconds=time_read_sec)

    def updatable(self) -> bool:
        """Return True when at least ``time_read`` has elapsed since the last update."""
        # IDIOM FIX: return the comparison directly instead of
        # `if cond: return True / return False`.
        return datetime.now(timezone.utc) - self.last_update > self.time_read

    def update(self, name, device):
        """Record a reading for *device* under key *name* and refresh the timestamp."""
        if name not in self.data:
            # Lazily create a buffer, seeded with the 35-degree fallback value.
            self.data[name] = ValueBuffer(name, 35)
        self.data[name].update(device)
        self.last_update = datetime.now(timezone.utc)

    def mean(self, device) -> float:
        """Mean temperature of *device*, or of all buffered devices when *device* is None.

        Falls back to 35.0 when the device is unknown or no data is buffered.
        """
        try:
            if device is None:
                return mean(buffer.mean() for buffer in self.data.values())
            return self.data[device].mean()
        except (KeyError, ZeroDivisionError):
            return 35.0
| [
"datetime.datetime.now",
"datetime.timedelta"
] | [((366, 398), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'time_read_sec'}), '(seconds=time_read_sec)\n', (375, 398), False, 'from datetime import datetime, timezone, timedelta\n'), ((736, 762), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (748, 762), False, 'from datetime import datetime, timezone, timedelta\n'), ((287, 316), 'datetime.datetime.now', 'datetime.now', ([], {'tz': 'timezone.utc'}), '(tz=timezone.utc)\n', (299, 316), False, 'from datetime import datetime, timezone, timedelta\n'), ((319, 340), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(10)'}), '(seconds=10)\n', (328, 340), False, 'from datetime import datetime, timezone, timedelta\n'), ((436, 462), 'datetime.datetime.now', 'datetime.now', (['timezone.utc'], {}), '(timezone.utc)\n', (448, 462), False, 'from datetime import datetime, timezone, timedelta\n')] |
from copy import copy

import cartopy
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from cartopy.util import add_cyclic_point
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
import numpy as np
import pandas as pd
import shapely.geometry as sgeom
import xarray as xr
# Define functions for plotting
def find_side(ls, side):
    """Return the requested side of a rectangular shapely LineString.

    *ls* is assumed rectangular; *side* is one of 'left', 'right',
    'bottom' or 'top'.
    """
    left, bottom, right, top = ls.bounds
    endpoints = {
        'left': [(left, bottom), (left, top)],
        'right': [(right, bottom), (right, top)],
        'bottom': [(left, bottom), (right, bottom)],
        'top': [(left, top), (right, top)],
    }
    return sgeom.LineString(endpoints[side])
def lambert_xticks(ax, ticks):
    """
    Draw ticks on the bottom x-axis of a Lambert Conformal projection.
    """
    def tick_extractor(xy):
        # The x coordinate marks a bottom-axis tick position.
        return xy[0]

    def line_constructor(t, n, b):
        # Vertical line of n points at constant x = t spanning the y extent.
        return np.vstack((np.zeros(n) + t, np.linspace(b[2], b[3], n))).T

    xticks, xticklabels = _lambert_ticks(ax, ticks, 'bottom', line_constructor, tick_extractor)
    ax.xaxis.tick_bottom()
    ax.set_xticks(xticks)
    formatter = ax.xaxis.get_major_formatter()
    ax.set_xticklabels([formatter(label) for label in xticklabels])
def lambert_yticks(ax, ticks):
    """
    Draw ticks on the left y-axis of a Lambert Conformal projection.
    """
    def tick_extractor(xy):
        # The y coordinate marks a left-axis tick position.
        return xy[1]

    def line_constructor(t, n, b):
        # Horizontal line of n points at constant y = t spanning the x extent.
        return np.vstack((np.linspace(b[0], b[1], n), np.zeros(n) + t)).T

    yticks, yticklabels = _lambert_ticks(ax, ticks, 'left', line_constructor, tick_extractor)
    ax.yaxis.tick_left()
    ax.set_yticks(yticks)
    formatter = ax.yaxis.get_major_formatter()
    ax.set_yticklabels([formatter(label) for label in yticklabels])
def _lambert_ticks(ax, ticks, tick_location, line_constructor, tick_extractor):
    """
    Get the tick locations and labels for an axis of a Lambert Conformal projection.
    """
    # NOTE(review): `ax.outline_patch` was deprecated in cartopy 0.18 and
    # later removed (replaced by `ax.spines['geo']`) -- confirm the pinned
    # cartopy version still supports it.
    outline_patch = sgeom.LineString(ax.outline_patch.get_path().vertices.tolist())
    axis = find_side(outline_patch, tick_location)
    n_steps = 30
    extent = ax.get_extent(ccrs.PlateCarree())
    _ticks = []
    for t in ticks:
        # Build a gridline for this tick value and project it into axes space.
        xy = line_constructor(t, n_steps, extent)
        proj_xyz = ax.projection.transform_points(ccrs.Geodetic(), xy[:, 0], xy[:, 1])
        xyt = proj_xyz[..., :2]
        ls = sgeom.LineString(xyt.tolist())
        # Where the gridline crosses the requested axis is the tick position.
        locs = axis.intersection(ls)
        if not locs:
            tick = [None]
        else:
            tick = tick_extractor(locs.xy)
        _ticks.append(tick[0])
    # Remove ticks that aren't visible:
    # NOTE(review): `copy(ticks)` + `.pop()` assumes `ticks` is a list;
    # an ndarray argument would fail here if any tick is invisible -- verify
    # against the callers.
    ticklabels = copy(ticks)
    while True:
        try:
            index = _ticks.index(None)
        except ValueError:
            break
        _ticks.pop(index)
        ticklabels.pop(index)
    return _ticks, ticklabels
def plot_250hPa_winds(lon, lat, u, v, wspd, mode):
    """
    Plot filled contours of 250 hPa wind speed overlaid with wind vectors.

    Input
    -------
    lon  = lon values extracted from xarray dataset (1-D)
    lat  = lat values extracted from xarray dataset (1-D)
    u    = U-wind at 250 hPa, shape = lon X lat
    v    = V-wind at 250 hPa, shape = lon X lat
    wspd = Wind speed at 250 hPa, shape = lon X lat
    mode = 'A' for anomaly data, 'LM' for long term means, and 'EM' for
           extreme precipitation days

    Output
    --------
    matplotlib figure with filled contours of wind speed overlayed with wind
    vectors; the figure is also saved to a mode-dependent png file.

    Raises
    ------
    ValueError if *mode* is not one of 'A', 'LM', 'EM'.
    """
    # BUG FIX: validate `mode` up front. Previously an unknown mode skipped
    # both plotting branches and crashed later with NameError on `pname`.
    if mode not in ('A', 'LM', 'EM'):
        raise ValueError(f"mode must be 'A', 'LM' or 'EM', got {mode!r}")

    # change data and lon to cyclic coordinates
    # (add_cyclic_point comes from cartopy.util -- the import was missing
    # from the original file's import block)
    u, lon_new = add_cyclic_point(u.values, coord=lon.values)
    v, lon_new = add_cyclic_point(v.values, coord=lon.values)
    wspd, lon = add_cyclic_point(wspd.values, coord=lon.values)

    fig = plt.figure(figsize=(10, 5))
    # Set the GeoAxes to the PlateCarree projection
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.coastlines('50m', linewidth=0.8)

    data = wspd
    if mode == 'A':
        # Anomalies: symmetric normalisation about zero with a diverging map.
        maxval, minval = np.abs(np.amax(data)), np.abs(np.amin(data))
        normmax = np.amax([maxval, minval])
        norm = mpl.colors.Normalize(vmin=-normmax, vmax=normmax)
        plt.contourf(lon, lat, data, 20, transform=ccrs.PlateCarree(),
                     norm=norm, cmap=get_cmap("RdBu_r"))
        ref_speed, ref_label = 3., '3 m/s'
    else:
        # 'EM'/'LM': plain sequential colour map.
        plt.contourf(lon, lat, data, 20, transform=ccrs.PlateCarree(),
                     cmap=get_cmap("viridis"))
        ref_speed, ref_label = 20., '20 m/s'

    # Colorbar and vectors were duplicated across both branches; hoisted here.
    cbar = plt.colorbar(ax=ax, shrink=.75)
    cbar.ax.set_ylabel('m/s', fontsize=18)

    # Plot the vectors and reference vector (regrid every rd-th point).
    rd = 5  # regrid_delta
    quiver = plt.quiver(lon[::rd], lat[::rd], u[::rd, ::rd], v[::rd, ::rd],
                       transform=ccrs.PlateCarree(), headwidth=5., headlength=5.)
    ax.quiverkey(quiver, X=0.9, Y=1.03, U=ref_speed, label=ref_label,
                 coordinates='axes', labelpos='E')

    # *must* call draw in order to get the axis boundary used to add ticks:
    fig.canvas.draw()
    # Add the tick marks
    xticks = np.arange(0., 360., 30.)
    yticks = np.arange(-90., 100., 15.)
    # Label the end-points of the gridlines using the custom tick makers:
    ax.xaxis.set_major_formatter(LONGITUDE_FORMATTER)
    ax.yaxis.set_major_formatter(LATITUDE_FORMATTER)
    lambert_xticks(ax, xticks)
    lambert_yticks(ax, yticks)

    # Mode-dependent subtitle and output file name.
    titles = {
        'LM': ('long term mean', 'p250_longterm.png'),
        'EM': ('extreme precipitation days', 'p250_extreme.png'),
        'A': ('anomaly fields', 'p250_anom.png'),
    }
    subtitle, pname = titles[mode]
    plt.title('250 hPa Winds' + '\n' + subtitle, fontsize=18)

    ax.set_global()
    ax.gridlines()
    plt.tight_layout()
    plt.savefig(pname, bbox_inches='tight')
    plt.show()
def plot_500hPa_winds_geopot(lon, lat, u, v, z, mode):
    """
    Plot filled contours of 500 hPa geopotential height overlayed with wind vectors.

    Input
    -------
    lon = lon values extracted from xarray dataset (1-D)
    lat = lat values extracted from xarray dataset (1-D)
    u = U-wind at 500 hPa, shape = lon X lat
    v = V-wind at 500 hPa, shape = lon X lat
    z = Geopotential height at 500 hPa, shape = lon X lat
    mode = 'A' for anomaly data, 'LM' for long term means, and 'EM' for extreme precipitation days

    Output
    --------
    matplotlib figure with filled contours of geopotential height overlayed with wind vectors,
    saved as 'p500_<mode>.png' and shown interactively.

    Raises
    --------
    ValueError if mode is not one of 'A', 'LM', 'EM'.
    """
    # Make the fields cyclic in longitude so contours close at the dateline.
    # The extra longitude coordinate from the u/v calls is identical to the
    # one returned for z, so it is discarded.
    u, _ = add_cyclic_point(u.values, coord=lon.values)
    v, _ = add_cyclic_point(v.values, coord=lon.values)
    z, lon = add_cyclic_point(z.values, coord=lon.values)
    # Create a figure with GeoAxes in the PlateCarree projection
    fig = plt.figure(figsize=(10, 5))
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.coastlines('50m', linewidth=0.8)
    # Assign data for filled contour
    data = z
    if mode in ('EM', 'LM'):
        plt.contourf(lon, lat, data, 20, transform=ccrs.PlateCarree(),
                     cmap=get_cmap("viridis"))
        ref_speed, ref_label = 10., '10 m/s'
    elif mode == 'A':
        # Symmetric normalization so zero anomalies map to the colormap center.
        maxval, minval = np.abs(np.amax(data)), np.abs(np.amin(data))
        normmax = np.amax([maxval, minval])
        norm = mpl.colors.Normalize(vmin=-normmax, vmax=normmax)
        plt.contourf(lon, lat, data, 20, transform=ccrs.PlateCarree(),
                     norm=norm, cmap=get_cmap("RdBu_r"))
        ref_speed, ref_label = 3., '3 m/s'
    else:
        raise ValueError("mode must be 'A', 'LM' or 'EM', got %r" % (mode,))
    # Add a color bar
    cbar = plt.colorbar(ax=ax, shrink=.75)
    cbar.ax.set_ylabel('m', fontsize=18)
    # Plot the vectors, thinned so the arrows stay readable, plus a reference vector
    rd = 5  # regrid delta
    quiver = plt.quiver(lon[::rd], lat[::rd], u[::rd, ::rd], v[::rd, ::rd],
                       transform=ccrs.PlateCarree(), headwidth=5., headlength=5.)
    ax.quiverkey(quiver, X=0.9, Y=1.03, U=ref_speed, label=ref_label,
                 coordinates='axes', labelpos='E')
    # *must* call draw in order to get the axis boundary used to add ticks:
    fig.canvas.draw()
    # Add the tick marks
    xticks = np.arange(0., 360., 30.)
    yticks = np.arange(-90., 100., 15.)
    # Label the end-points of the gridlines using the custom tick makers:
    ax.xaxis.set_major_formatter(LONGITUDE_FORMATTER)
    ax.yaxis.set_major_formatter(LATITUDE_FORMATTER)
    lambert_xticks(ax, xticks)
    lambert_yticks(ax, yticks)
    # Set title and figure name
    if mode == 'LM':
        plt.title('500 hPa Winds, GPH'+'\n'+'long term mean', fontsize=18)
        pname = 'p500_longterm.png'
    elif mode == 'EM':
        plt.title('500 hPa Winds, GPH'+'\n'+'extreme precipitation days', fontsize=18)
        pname = 'p500_extreme.png'
    else:  # mode == 'A' (validated above)
        plt.title('500 hPa Winds, GPH'+'\n'+'anomaly fields', fontsize=18)
        pname = 'p500_anom.png'
    ax.set_global()
    ax.gridlines()
    plt.tight_layout()
    plt.savefig(pname, bbox_inches='tight')
    plt.show()
def plot_850hPa(lon, lat, u, v, t, q, mode):
    """
    Plot filled contours overlayed with contours and vectors.

    Input
    -------
    lon = lon values extracted from xarray dataset (1-D)
    lat = lat values extracted from xarray dataset (1-D)
    u = U-wind at 850 hPa, shape = lon X lat
    v = V-wind at 850 hPa, shape = lon X lat
    t = Temperature at 850 hPa, shape = lon X lat
    q = Specific humidity at 850 hPa, shape = lon X lat
    mode = 'A' for anomaly data, 'LM' for long term means, and 'EM' for extreme precipitation days

    Output
    --------
    matplotlib figure with filled contours of temperature overlayed with contours
    of spec humidity and wind vectors, saved as 'p850_<mode>.png' and shown.

    Raises
    --------
    ValueError if mode is not one of 'A', 'LM', 'EM'.
    """
    # Make the fields cyclic in longitude; the redundant cyclic longitude
    # returned for u/v/q is discarded (it matches the one returned for t).
    u, _ = add_cyclic_point(u.values, coord=lon.values)
    v, _ = add_cyclic_point(v.values, coord=lon.values)
    q, _ = add_cyclic_point(q.values, coord=lon.values)
    t, lon = add_cyclic_point(t.values, coord=lon.values)
    # Create a figure with GeoAxes in the PlateCarree projection
    fig = plt.figure(figsize=(10, 5))
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.coastlines('50m', linewidth=0.8)
    # Assign data for filled contour
    data = t
    if mode in ('EM', 'LM'):
        plt.contourf(lon, lat, data, 20, transform=ccrs.PlateCarree(),
                     cmap=get_cmap("viridis"))
        # White humidity contours read well over the viridis colormap
        plt.contour(lon, lat, q, transform=ccrs.PlateCarree(), colors='w')
        ref_speed, ref_label = 8., '8 m/s'
    elif mode == 'A':
        # Symmetric normalization so zero anomalies map to the colormap center.
        maxval, minval = np.abs(np.amax(data)), np.abs(np.amin(data))
        normmax = np.amax([maxval, minval])
        norm = mpl.colors.Normalize(vmin=-normmax, vmax=normmax)
        plt.contourf(lon, lat, data, 20, transform=ccrs.PlateCarree(),
                     norm=norm, cmap=get_cmap("RdBu_r"))
        plt.contour(lon, lat, q, transform=ccrs.PlateCarree())
        ref_speed, ref_label = 3., '3 m/s'
    else:
        raise ValueError("mode must be 'A', 'LM' or 'EM', got %r" % (mode,))
    # Add a color bar
    cbar = plt.colorbar(ax=ax, shrink=.75)
    cbar.ax.set_ylabel('$^{o}C$', fontsize=18)
    # Plot the vectors, thinned so the arrows stay readable, plus a reference vector
    rd = 5  # regrid delta
    quiver = plt.quiver(lon[::rd], lat[::rd], u[::rd, ::rd], v[::rd, ::rd],
                       transform=ccrs.PlateCarree(), headwidth=5., headlength=5.)
    ax.quiverkey(quiver, X=0.9, Y=1.03, U=ref_speed, label=ref_label,
                 coordinates='axes', labelpos='E')
    # *must* call draw in order to get the axis boundary used to add ticks:
    fig.canvas.draw()
    # Add the tick marks
    xticks = np.arange(0., 360., 30.)
    yticks = np.arange(-90., 100., 15.)
    # Label the end-points of the gridlines using the custom tick makers:
    ax.xaxis.set_major_formatter(LONGITUDE_FORMATTER)
    ax.yaxis.set_major_formatter(LATITUDE_FORMATTER)
    lambert_xticks(ax, xticks)
    lambert_yticks(ax, yticks)
    # Set title and figure name
    if mode == 'LM':
        plt.title('850 hPa Winds, Temp, Humidity'+'\n'+'long term mean', fontsize=18)
        pname = 'p850_longterm.png'
    elif mode == 'EM':
        plt.title('850 hPa Winds, Temp, Humidity'+'\n'+'extreme precipitation days', fontsize=18)
        pname = 'p850_extreme.png'
    else:  # mode == 'A' (validated above)
        plt.title('850 hPa Winds, Temp, Humidity'+'\n'+'anomaly fields', fontsize=18)
        pname = 'p850_anom.png'
    ax.set_global()
    ax.gridlines()
    plt.tight_layout()
    plt.savefig(pname, bbox_inches='tight')
    plt.show()
def plot_sfc_winds_skt(lonu, latu, u, v, lont, latt, t, mode):
    """
    Plot filled contours of skin temperature overlayed with surface wind vectors.

    Input
    -------
    lonu = lon values extracted from wind dataset (1-D)
    latu = lat values extracted from wind dataset (1-D)
    u = U-wind at surface, shape = lonu X latu
    v = V-wind at surface, shape = lonu X latu
    lont = lon values extracted from skin temperature dataset (1-D)
    latt = lat values extracted from skin temperature dataset (1-D)
    t = Skin temperature, shape = lont X latt
    mode = 'A' for anomaly data, 'LM' for long term means, and 'EM' for extreme precipitation days

    Output
    --------
    matplotlib figure with filled contours of skin temperature overlayed with
    wind vectors, saved as 'sfc_<mode>.png' and shown.

    Raises
    --------
    ValueError if mode is not one of 'A', 'LM', 'EM'.
    """
    # Make both grids cyclic in longitude. The winds and the skin temperature
    # live on different grids, so each keeps its own cyclic longitude.
    u, _ = add_cyclic_point(u.values, coord=lonu.values)
    v, lonu = add_cyclic_point(v.values, coord=lonu.values)
    t, lont = add_cyclic_point(t.values, coord=lont.values)
    # Create a figure with GeoAxes in the PlateCarree projection
    fig = plt.figure(figsize=(10, 5))
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.coastlines('50m', linewidth=0.8)
    # Assign data for filled contour
    data = t
    if mode in ('EM', 'LM'):
        plt.contourf(lont, latt, data, 20, transform=ccrs.PlateCarree(),
                     cmap=get_cmap("viridis"))
        ref_speed, ref_label = 5., '5 m/s'
    elif mode == 'A':
        # Symmetric normalization so zero anomalies map to the colormap center.
        maxval, minval = np.abs(np.amax(data)), np.abs(np.amin(data))
        normmax = np.amax([maxval, minval])
        norm = mpl.colors.Normalize(vmin=-normmax, vmax=normmax)
        plt.contourf(lont, latt, data, 20, transform=ccrs.PlateCarree(),
                     norm=norm, cmap=get_cmap("RdBu_r"))
        ref_speed, ref_label = 3., '3 m/s'
    else:
        raise ValueError("mode must be 'A', 'LM' or 'EM', got %r" % (mode,))
    # Add a color bar
    cbar = plt.colorbar(ax=ax, shrink=.75)
    cbar.ax.set_ylabel('$^{o}C$', fontsize=18)
    # Plot the vectors, thinned so the arrows stay readable, plus a reference vector
    rd = 5  # regrid delta
    quiver = plt.quiver(lonu[::rd], latu[::rd], u[::rd, ::rd], v[::rd, ::rd],
                       transform=ccrs.PlateCarree(), headwidth=5., headlength=5.)
    ax.quiverkey(quiver, X=0.9, Y=1.03, U=ref_speed, label=ref_label,
                 coordinates='axes', labelpos='E')
    # *must* call draw in order to get the axis boundary used to add ticks:
    fig.canvas.draw()
    # Add the tick marks
    xticks = np.arange(0., 360., 30.)
    yticks = np.arange(-80., 80., 20.)
    # Label the end-points of the gridlines using the custom tick makers:
    ax.xaxis.set_major_formatter(LONGITUDE_FORMATTER)
    ax.yaxis.set_major_formatter(LATITUDE_FORMATTER)
    lambert_xticks(ax, xticks)
    lambert_yticks(ax, yticks)
    # Set title and figure name
    if mode == 'LM':
        plt.title('Surface Winds, Skin temp'+'\n'+'long term mean', fontsize=18)
        pname = 'sfc_longterm.png'
    elif mode == 'EM':
        plt.title('Surface Winds, Skin temp'+'\n'+'extreme precipitation days', fontsize=18)
        pname = 'sfc_extreme.png'
    else:  # mode == 'A' (validated above)
        plt.title('Surface Winds, Skin temp'+'\n'+'anomaly fields', fontsize=18)
        pname = 'sfc_anom.png'
    ax.set_global()
    ax.gridlines()
    plt.tight_layout()
    plt.savefig(pname, bbox_inches='tight')
    plt.show()
def plot_TCWV(lon, lat, q, mode):
    """
    Plot filled contours of total column water vapor.

    Input
    -------
    lon = lon values extracted from xarray dataset (1-D)
    lat = lat values extracted from xarray dataset (1-D)
    q = Total column water vapor, shape = lon X lat
    mode = 'A' for anomaly data, 'LM' for long term means, and 'EM' for extreme precipitation days

    Output
    --------
    matplotlib figure with filled contours of total column water vapor,
    saved as 'tcwv_<mode>.png' and shown.

    Raises
    --------
    ValueError if mode is not one of 'A', 'LM', 'EM'.
    """
    # Make the field cyclic in longitude so contours close at the dateline.
    q, lon = add_cyclic_point(q.values, coord=lon.values)
    # Create a figure with GeoAxes in the PlateCarree projection
    fig = plt.figure(figsize=(10, 5))
    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.coastlines('50m', linewidth=0.8)
    # Assign data for filled contour
    data = q
    if mode in ('EM', 'LM'):
        # Cap extreme values so the colormap is not dominated by outliers
        data[data > 80.] = 80.
        plt.contourf(lon, lat, data, 20, transform=ccrs.PlateCarree(),
                     cmap=get_cmap("viridis"))
    elif mode == 'A':
        # Symmetric normalization so zero anomalies map to the colormap center.
        maxval, minval = np.abs(np.amax(data)), np.abs(np.amin(data))
        normmax = np.amax([maxval, minval])
        norm = mpl.colors.Normalize(vmin=-normmax, vmax=normmax)
        plt.contourf(lon, lat, data, 20, transform=ccrs.PlateCarree(),
                     norm=norm, cmap=get_cmap("RdBu_r"))
    else:
        raise ValueError("mode must be 'A', 'LM' or 'EM', got %r" % (mode,))
    # Add a color bar
    cbar = plt.colorbar(ax=ax, shrink=.75)
    cbar.ax.set_ylabel('$mm$', fontsize=18)
    # *must* call draw in order to get the axis boundary used to add ticks:
    fig.canvas.draw()
    # Add the tick marks
    xticks = np.arange(0., 360., 30.)
    yticks = np.arange(-80., 80., 20.)
    # Label the end-points of the gridlines using the custom tick makers:
    ax.xaxis.set_major_formatter(LONGITUDE_FORMATTER)
    ax.yaxis.set_major_formatter(LATITUDE_FORMATTER)
    lambert_xticks(ax, xticks)
    lambert_yticks(ax, yticks)
    # Set title and figure name
    if mode == 'LM':
        plt.title('Total column water vapor'+'\n'+'long term mean', fontsize=18)
        pname = 'tcwv_longterm.png'
    elif mode == 'EM':
        plt.title('Total column water vapor'+'\n'+'extreme precipitation days', fontsize=18)
        pname = 'tcwv_extreme.png'
    else:  # mode == 'A' (validated above)
        plt.title('Total column water vapor'+'\n'+'anomaly field', fontsize=18)
        pname = 'tcwv_anom.png'
    ax.set_global()
    ax.gridlines()
    plt.tight_layout()
    plt.savefig(pname, bbox_inches='tight')
    plt.show()
###############################
# Open datasets and plot data #
###############################
# Driver: for each of the three modes (anomaly 'A', long-term mean 'LM',
# extreme-precipitation mean 'EM'), load the matching NetCDF files and call
# the plotting routines for 250/500/850 hPa, the surface, and TCWV.
# NOTE: `xrdata` is rebound between loads, so statement order matters here.
# Set path to netcdf files
path = 'atms597_proj3/data/'
# First let's plot the anomalies
# 250 hPa anomalies
xrdata = xr.open_dataset(path+'pressure_anomaly.nc')
lat = xrdata['lat']
lon = xrdata['lon']
u = xrdata['u_wind_250']
v = xrdata['v_wind_250']
# NOTE(review): the 250 hPa wind-speed anomaly comes from a separate file
# while u/v come from pressure_anomaly.nc — presumably both files share the
# same grid; confirm.
xrdata = xr.open_dataset('atms597_proj3/data/pressure_anomaly_new.nc')
wspd = xrdata['wind_spd_250']
plot_250hPa_winds(lon, lat, u, v, wspd, 'A')
# 500 hPa anomalies
u = xrdata['u_wind_500']
v = xrdata['v_wind_500']
z = xrdata['height_500']
plot_500hPa_winds_geopot(lon, lat, u, v, z, 'A')
# 850 hPa anomalies
u = xrdata['u_wind_850']
v = xrdata['v_wind_850']
t = xrdata['temp_850']
q = xrdata['q_850']
plot_850hPa(lon, lat, u, v, t, q, 'A')
# Next we move to surface anomalies
xrdata = xr.open_dataset(path+'surface_anomaly.nc')
latu = xrdata['lat']
lonu = xrdata['lon']
u = xrdata['sfc_u_wind_surface']
v = xrdata['sfc_v_wind_surface']
# Skin temperature lives on a separate (Gaussian) grid, hence its own lat/lon.
xrdata = xr.open_dataset(path+'surface_gauss_anomaly.nc')
t = xrdata['skin_temp_surface']-273 #convert to Celcius
latt = xrdata['lat']
lont = xrdata['lon']
plot_sfc_winds_skt(lonu, latu, u, v, lont, latt, t, 'A')
# TCWV anomalies
xrdata = xr.open_dataset(path+'total_column_anomaly.nc')
lat = xrdata['lat']
lon = xrdata['lon']
q = xrdata['total_column_q']
plot_TCWV(lon, lat, q, 'A')
# Next we plot the long term means
# 250 hPa long term means
xrdata = xr.open_dataset(path+'pressure_long_term_mean.nc')
lat = xrdata['lat']
lon = xrdata['lon']
u = xrdata['u_wind_250']
v = xrdata['v_wind_250']
# Unlike the anomaly case, mean wind speed is derived from u/v directly.
wspd = np.sqrt(np.multiply(u, u) + np.multiply(v, v))
plot_250hPa_winds(lon, lat, u, v, wspd, 'LM')
# 500 hPa long term means
u = xrdata['u_wind_500']
v = xrdata['v_wind_500']
z = xrdata['height_500']
plot_500hPa_winds_geopot(lon, lat, u, v, z, 'LM')
# 850 hPa long term means
u = xrdata['u_wind_850']
v = xrdata['v_wind_850']
t = xrdata['temp_850']
q = xrdata['q_850']
plot_850hPa(lon, lat, u, v, t, q, 'LM')
# surface long term means
xrdata = xr.open_dataset(path+'surface_long_term_mean.nc')
latu = xrdata['lat']
lonu = xrdata['lon']
u = xrdata['sfc_u_wind_surface']
v = xrdata['sfc_v_wind_surface']
xrdata = xr.open_dataset(path+'surface_gauss_long_term_mean.nc')
# NOTE(review): the long-term-mean skin temperature is NOT shifted by -273
# here, while the anomaly and extreme cases are — confirm this is intended
# (anomalies are differences, but the LM plot would then be in Kelvin).
t = xrdata['skin_temp_surface']
latt = xrdata['lat']
lont = xrdata['lon']
plot_sfc_winds_skt(lonu, latu, u, v, lont, latt, t, 'LM')
# TCWV long term means
xrdata = xr.open_dataset(path+'total_column_long_term_mean.nc')
lat = xrdata['lat']
lon = xrdata['lon']
q = xrdata['total_column_q']
plot_TCWV(lon, lat, q, 'LM')
# Finally we plot the mean of extreme precipitation days
# 250 hPa extreme means
xrdata = xr.open_dataset(path+'pressure_extreme_precip_mean.nc')
lat = xrdata['lat']
lon = xrdata['lon']
u = xrdata['u_wind_250']
v = xrdata['v_wind_250']
wspd = np.sqrt(np.multiply(u, u) + np.multiply(v, v))
plot_250hPa_winds(lon, lat, u, v, wspd, 'EM')
# 500 hPa extreme means
u = xrdata['u_wind_500']
v = xrdata['v_wind_500']
z = xrdata['height_500']
plot_500hPa_winds_geopot(lon, lat, u, v, z, 'EM')
# 850 hPa extreme means
u = xrdata['u_wind_850']
v = xrdata['v_wind_850']
t = xrdata['temp_850']
q = xrdata['q_850']
plot_850hPa(lon, lat, u, v, t, q, 'EM')
# surface extreme means
xrdata = xr.open_dataset(path+'surface_extreme_precip_mean.nc')
latu = xrdata['lat']
lonu = xrdata['lon']
u = xrdata['sfc_u_wind_surface']
v = xrdata['sfc_v_wind_surface']
xrdata = xr.open_dataset(path+'surface_gauss_extreme_precip_mean.nc')
# convert Kelvin to Celsius
t = xrdata['skin_temp_surface']-273
latt = xrdata['lat']
lont = xrdata['lon']
plot_sfc_winds_skt(lonu, latu, u, v, lont, latt, t, 'EM')
# TCWV extreme means
xrdata = xr.open_dataset(path+'total_column_extreme_precip_mean.nc')
lat = xrdata['lat']
lon = xrdata['lon']
q = xrdata['total_column_q']
plot_TCWV(lon, lat, q, 'EM')
| [
"copy.copy",
"numpy.arange",
"numpy.multiply",
"numpy.linspace",
"matplotlib.cm.get_cmap",
"matplotlib.pyplot.savefig",
"numpy.amin",
"cartopy.crs.PlateCarree",
"shapely.geometry.LineString",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.title",
"xarray.open_dataset",
"cartopy.crs.Geodet... | [((22322, 22367), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'pressure_anomaly.nc')"], {}), "(path + 'pressure_anomaly.nc')\n", (22337, 22367), True, 'import xarray as xr\n'), ((22465, 22526), 'xarray.open_dataset', 'xr.open_dataset', (['"""atms597_proj3/data/pressure_anomaly_new.nc"""'], {}), "('atms597_proj3/data/pressure_anomaly_new.nc')\n", (22480, 22526), True, 'import xarray as xr\n'), ((22946, 22990), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'surface_anomaly.nc')"], {}), "(path + 'surface_anomaly.nc')\n", (22961, 22990), True, 'import xarray as xr\n'), ((23106, 23156), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'surface_gauss_anomaly.nc')"], {}), "(path + 'surface_gauss_anomaly.nc')\n", (23121, 23156), True, 'import xarray as xr\n'), ((23337, 23386), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'total_column_anomaly.nc')"], {}), "(path + 'total_column_anomaly.nc')\n", (23352, 23386), True, 'import xarray as xr\n'), ((23554, 23606), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'pressure_long_term_mean.nc')"], {}), "(path + 'pressure_long_term_mean.nc')\n", (23569, 23606), True, 'import xarray as xr\n'), ((24143, 24194), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'surface_long_term_mean.nc')"], {}), "(path + 'surface_long_term_mean.nc')\n", (24158, 24194), True, 'import xarray as xr\n'), ((24310, 24367), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'surface_gauss_long_term_mean.nc')"], {}), "(path + 'surface_gauss_long_term_mean.nc')\n", (24325, 24367), True, 'import xarray as xr\n'), ((24531, 24587), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'total_column_long_term_mean.nc')"], {}), "(path + 'total_column_long_term_mean.nc')\n", (24546, 24587), True, 'import xarray as xr\n'), ((24776, 24833), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'pressure_extreme_precip_mean.nc')"], {}), "(path + 'pressure_extreme_precip_mean.nc')\n", (24791, 24833), True, 
'import xarray as xr\n'), ((25364, 25420), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'surface_extreme_precip_mean.nc')"], {}), "(path + 'surface_extreme_precip_mean.nc')\n", (25379, 25420), True, 'import xarray as xr\n'), ((25536, 25598), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'surface_gauss_extreme_precip_mean.nc')"], {}), "(path + 'surface_gauss_extreme_precip_mean.nc')\n", (25551, 25598), True, 'import xarray as xr\n'), ((25764, 25825), 'xarray.open_dataset', 'xr.open_dataset', (["(path + 'total_column_extreme_precip_mean.nc')"], {}), "(path + 'total_column_extreme_precip_mean.nc')\n", (25779, 25825), True, 'import xarray as xr\n'), ((833, 863), 'shapely.geometry.LineString', 'sgeom.LineString', (['points[side]'], {}), '(points[side])\n', (849, 863), True, 'import shapely.geometry as sgeom\n'), ((2624, 2635), 'copy.copy', 'copy', (['ticks'], {}), '(ticks)\n', (2628, 2635), False, 'from copy import copy\n'), ((3740, 3767), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (3750, 3767), True, 'import matplotlib.pyplot as plt\n'), ((5793, 5820), 'numpy.arange', 'np.arange', (['(0.0)', '(360.0)', '(30.0)'], {}), '(0.0, 360.0, 30.0)\n', (5802, 5820), True, 'import numpy as np\n'), ((5831, 5860), 'numpy.arange', 'np.arange', (['(-90.0)', '(100.0)', '(15.0)'], {}), '(-90.0, 100.0, 15.0)\n', (5840, 5860), True, 'import numpy as np\n'), ((6585, 6603), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6601, 6603), True, 'import matplotlib.pyplot as plt\n'), ((6727, 6766), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pname'], {'bbox_inches': '"""tight"""'}), "(pname, bbox_inches='tight')\n", (6738, 6766), True, 'import matplotlib.pyplot as plt\n'), ((6773, 6783), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6781, 6783), True, 'import matplotlib.pyplot as plt\n'), ((7707, 7734), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 
5))\n', (7717, 7734), True, 'import matplotlib.pyplot as plt\n'), ((9738, 9765), 'numpy.arange', 'np.arange', (['(0.0)', '(360.0)', '(30.0)'], {}), '(0.0, 360.0, 30.0)\n', (9747, 9765), True, 'import numpy as np\n'), ((9776, 9805), 'numpy.arange', 'np.arange', (['(-90.0)', '(100.0)', '(15.0)'], {}), '(-90.0, 100.0, 15.0)\n', (9785, 9805), True, 'import numpy as np\n'), ((10544, 10562), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10560, 10562), True, 'import matplotlib.pyplot as plt\n'), ((10686, 10725), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pname'], {'bbox_inches': '"""tight"""'}), "(pname, bbox_inches='tight')\n", (10697, 10725), True, 'import matplotlib.pyplot as plt\n'), ((10732, 10742), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10740, 10742), True, 'import matplotlib.pyplot as plt\n'), ((11803, 11830), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (11813, 11830), True, 'import matplotlib.pyplot as plt\n'), ((14056, 14083), 'numpy.arange', 'np.arange', (['(0.0)', '(360.0)', '(30.0)'], {}), '(0.0, 360.0, 30.0)\n', (14065, 14083), True, 'import numpy as np\n'), ((14094, 14123), 'numpy.arange', 'np.arange', (['(-90.0)', '(100.0)', '(15.0)'], {}), '(-90.0, 100.0, 15.0)\n', (14103, 14123), True, 'import numpy as np\n'), ((14902, 14920), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14918, 14920), True, 'import matplotlib.pyplot as plt\n'), ((15044, 15083), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pname'], {'bbox_inches': '"""tight"""'}), "(pname, bbox_inches='tight')\n", (15055, 15083), True, 'import matplotlib.pyplot as plt\n'), ((15090, 15100), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15098, 15100), True, 'import matplotlib.pyplot as plt\n'), ((16171, 16198), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (16181, 16198), True, 'import matplotlib.pyplot as plt\n'), 
((18203, 18230), 'numpy.arange', 'np.arange', (['(0.0)', '(360.0)', '(30.0)'], {}), '(0.0, 360.0, 30.0)\n', (18212, 18230), True, 'import numpy as np\n'), ((18241, 18269), 'numpy.arange', 'np.arange', (['(-80.0)', '(80.0)', '(20.0)'], {}), '(-80.0, 80.0, 20.0)\n', (18250, 18269), True, 'import numpy as np\n'), ((19030, 19048), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19046, 19048), True, 'import matplotlib.pyplot as plt\n'), ((19172, 19211), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pname'], {'bbox_inches': '"""tight"""'}), "(pname, bbox_inches='tight')\n", (19183, 19211), True, 'import matplotlib.pyplot as plt\n'), ((19218, 19228), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19226, 19228), True, 'import matplotlib.pyplot as plt\n'), ((19869, 19896), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (19879, 19896), True, 'import matplotlib.pyplot as plt\n'), ((21109, 21136), 'numpy.arange', 'np.arange', (['(0.0)', '(360.0)', '(30.0)'], {}), '(0.0, 360.0, 30.0)\n', (21118, 21136), True, 'import numpy as np\n'), ((21147, 21175), 'numpy.arange', 'np.arange', (['(-80.0)', '(80.0)', '(20.0)'], {}), '(-80.0, 80.0, 20.0)\n', (21156, 21175), True, 'import numpy as np\n'), ((21936, 21954), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (21952, 21954), True, 'import matplotlib.pyplot as plt\n'), ((22078, 22117), 'matplotlib.pyplot.savefig', 'plt.savefig', (['pname'], {'bbox_inches': '"""tight"""'}), "(pname, bbox_inches='tight')\n", (22089, 22117), True, 'import matplotlib.pyplot as plt\n'), ((22124, 22134), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22132, 22134), True, 'import matplotlib.pyplot as plt\n'), ((2122, 2140), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (2138, 2140), True, 'import cartopy.crs as ccrs\n'), ((4263, 4295), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ax': 'ax', 'shrink': '(0.75)'}), 
'(ax=ax, shrink=0.75)\n', (4275, 4295), True, 'import matplotlib.pyplot as plt\n'), ((6173, 6238), 'matplotlib.pyplot.title', 'plt.title', (["('250 hPa Winds' + '\\n' + 'long term mean')"], {'fontsize': '(18)'}), "('250 hPa Winds' + '\\n' + 'long term mean', fontsize=18)\n", (6182, 6238), True, 'import matplotlib.pyplot as plt\n'), ((8216, 8248), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ax': 'ax', 'shrink': '(0.75)'}), '(ax=ax, shrink=0.75)\n', (8228, 8248), True, 'import matplotlib.pyplot as plt\n'), ((10117, 10187), 'matplotlib.pyplot.title', 'plt.title', (["('500 hPa Winds, GPH' + '\\n' + 'long term mean')"], {'fontsize': '(18)'}), "('500 hPa Winds, GPH' + '\\n' + 'long term mean', fontsize=18)\n", (10126, 10187), True, 'import matplotlib.pyplot as plt\n'), ((12314, 12346), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ax': 'ax', 'shrink': '(0.75)'}), '(ax=ax, shrink=0.75)\n', (12326, 12346), True, 'import matplotlib.pyplot as plt\n'), ((14436, 14521), 'matplotlib.pyplot.title', 'plt.title', (["('850 hPa Winds, Temp, Humidity' + '\\n' + 'long term mean')"], {'fontsize': '(18)'}), "('850 hPa Winds, Temp, Humidity' + '\\n' + 'long term mean',\n fontsize=18)\n", (14445, 14521), True, 'import matplotlib.pyplot as plt\n'), ((16673, 16705), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ax': 'ax', 'shrink': '(0.75)'}), '(ax=ax, shrink=0.75)\n', (16685, 16705), True, 'import matplotlib.pyplot as plt\n'), ((18582, 18658), 'matplotlib.pyplot.title', 'plt.title', (["('Surface Winds, Skin temp' + '\\n' + 'long term mean')"], {'fontsize': '(18)'}), "('Surface Winds, Skin temp' + '\\n' + 'long term mean', fontsize=18)\n", (18591, 18658), True, 'import matplotlib.pyplot as plt\n'), ((20400, 20432), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ax': 'ax', 'shrink': '(0.75)'}), '(ax=ax, shrink=0.75)\n', (20412, 20432), True, 'import matplotlib.pyplot as plt\n'), ((21488, 21564), 'matplotlib.pyplot.title', 'plt.title', (["('Total column water 
vapor' + '\\n' + 'long term mean')"], {'fontsize': '(18)'}), "('Total column water vapor' + '\\n' + 'long term mean', fontsize=18)\n", (21497, 21564), True, 'import matplotlib.pyplot as plt\n'), ((23710, 23727), 'numpy.multiply', 'np.multiply', (['u', 'u'], {}), '(u, u)\n', (23721, 23727), True, 'import numpy as np\n'), ((23730, 23747), 'numpy.multiply', 'np.multiply', (['v', 'v'], {}), '(v, v)\n', (23741, 23747), True, 'import numpy as np\n'), ((24937, 24954), 'numpy.multiply', 'np.multiply', (['u', 'u'], {}), '(u, u)\n', (24948, 24954), True, 'import numpy as np\n'), ((24957, 24974), 'numpy.multiply', 'np.multiply', (['v', 'v'], {}), '(v, v)\n', (24968, 24974), True, 'import numpy as np\n'), ((2278, 2293), 'cartopy.crs.Geodetic', 'ccrs.Geodetic', ([], {}), '()\n', (2291, 2293), True, 'import cartopy.crs as ccrs\n'), ((3858, 3876), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (3874, 3876), True, 'import cartopy.crs as ccrs\n'), ((4893, 4918), 'numpy.amax', 'np.amax', (['[maxval, minval]'], {}), '([maxval, minval])\n', (4900, 4918), True, 'import numpy as np\n'), ((4934, 4983), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(-normmax)', 'vmax': 'normmax'}), '(vmin=-normmax, vmax=normmax)\n', (4954, 4983), True, 'import matplotlib as mpl\n'), ((5173, 5205), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ax': 'ax', 'shrink': '(0.75)'}), '(ax=ax, shrink=0.75)\n', (5185, 5205), True, 'import matplotlib.pyplot as plt\n'), ((6302, 6379), 'matplotlib.pyplot.title', 'plt.title', (["('250 hPa Winds' + '\\n' + 'extreme precipitation days')"], {'fontsize': '(18)'}), "('250 hPa Winds' + '\\n' + 'extreme precipitation days', fontsize=18)\n", (6311, 6379), True, 'import matplotlib.pyplot as plt\n'), ((7825, 7843), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (7841, 7843), True, 'import cartopy.crs as ccrs\n'), ((8840, 8865), 'numpy.amax', 'np.amax', (['[maxval, minval]'], {}), '([maxval, minval])\n', (8847, 
8865), True, 'import numpy as np\n'), ((8881, 8930), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(-normmax)', 'vmax': 'normmax'}), '(vmin=-normmax, vmax=normmax)\n', (8901, 8930), True, 'import matplotlib as mpl\n'), ((9120, 9152), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ax': 'ax', 'shrink': '(0.75)'}), '(ax=ax, shrink=0.75)\n', (9132, 9152), True, 'import matplotlib.pyplot as plt\n'), ((10251, 10337), 'matplotlib.pyplot.title', 'plt.title', (["('500 hPa Winds, GPH' + '\\n' + 'extreme precipitation days')"], {'fontsize': '(18)'}), "('500 hPa Winds, GPH' + '\\n' + 'extreme precipitation days',\n fontsize=18)\n", (10260, 10337), True, 'import matplotlib.pyplot as plt\n'), ((11921, 11939), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (11937, 11939), True, 'import cartopy.crs as ccrs\n'), ((13054, 13079), 'numpy.amax', 'np.amax', (['[maxval, minval]'], {}), '([maxval, minval])\n', (13061, 13079), True, 'import numpy as np\n'), ((13095, 13144), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(-normmax)', 'vmax': 'normmax'}), '(vmin=-normmax, vmax=normmax)\n', (13115, 13144), True, 'import matplotlib as mpl\n'), ((13334, 13366), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ax': 'ax', 'shrink': '(0.75)'}), '(ax=ax, shrink=0.75)\n', (13346, 13366), True, 'import matplotlib.pyplot as plt\n'), ((14583, 14680), 'matplotlib.pyplot.title', 'plt.title', (["('850 hPa Winds, Temp, Humidity' + '\\n' + 'extreme precipitation days')"], {'fontsize': '(18)'}), "('850 hPa Winds, Temp, Humidity' + '\\n' +\n 'extreme precipitation days', fontsize=18)\n", (14592, 14680), True, 'import matplotlib.pyplot as plt\n'), ((16287, 16305), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (16303, 16305), True, 'import cartopy.crs as ccrs\n'), ((17303, 17328), 'numpy.amax', 'np.amax', (['[maxval, minval]'], {}), '([maxval, minval])\n', (17310, 17328), True, 'import numpy as np\n'), ((17344, 
17393), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(-normmax)', 'vmax': 'normmax'}), '(vmin=-normmax, vmax=normmax)\n', (17364, 17393), True, 'import matplotlib as mpl\n'), ((17585, 17617), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ax': 'ax', 'shrink': '(0.75)'}), '(ax=ax, shrink=0.75)\n', (17597, 17617), True, 'import matplotlib.pyplot as plt\n'), ((18723, 18815), 'matplotlib.pyplot.title', 'plt.title', (["('Surface Winds, Skin temp' + '\\n' + 'extreme precipitation days')"], {'fontsize': '(18)'}), "('Surface Winds, Skin temp' + '\\n' + 'extreme precipitation days',\n fontsize=18)\n", (18732, 18815), True, 'import matplotlib.pyplot as plt\n'), ((19985, 20003), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (20001, 20003), True, 'import cartopy.crs as ccrs\n'), ((20601, 20626), 'numpy.amax', 'np.amax', (['[maxval, minval]'], {}), '([maxval, minval])\n', (20608, 20626), True, 'import numpy as np\n'), ((20642, 20691), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': '(-normmax)', 'vmax': 'normmax'}), '(vmin=-normmax, vmax=normmax)\n', (20662, 20691), True, 'import matplotlib as mpl\n'), ((20881, 20913), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {'ax': 'ax', 'shrink': '(0.75)'}), '(ax=ax, shrink=0.75)\n', (20893, 20913), True, 'import matplotlib.pyplot as plt\n'), ((21630, 21722), 'matplotlib.pyplot.title', 'plt.title', (["('Total column water vapor' + '\\n' + 'extreme precipitation days')"], {'fontsize': '(18)'}), "('Total column water vapor' + '\\n' + 'extreme precipitation days',\n fontsize=18)\n", (21639, 21722), True, 'import matplotlib.pyplot as plt\n'), ((4143, 4161), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (4159, 4161), True, 'import cartopy.crs as ccrs\n'), ((4192, 4211), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (4200, 4211), False, 'from matplotlib.cm import get_cmap\n'), ((4555, 4573), 
'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (4571, 4573), True, 'import cartopy.crs as ccrs\n'), ((6441, 6506), 'matplotlib.pyplot.title', 'plt.title', (["('250 hPa Winds' + '\\n' + 'anomaly fields')"], {'fontsize': '(18)'}), "('250 hPa Winds' + '\\n' + 'anomaly fields', fontsize=18)\n", (6450, 6506), True, 'import matplotlib.pyplot as plt\n'), ((8096, 8114), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (8112, 8114), True, 'import cartopy.crs as ccrs\n'), ((8145, 8164), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (8153, 8164), False, 'from matplotlib.cm import get_cmap\n'), ((8506, 8524), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (8522, 8524), True, 'import cartopy.crs as ccrs\n'), ((10395, 10465), 'matplotlib.pyplot.title', 'plt.title', (["('500 hPa Winds, GPH' + '\\n' + 'anomaly fields')"], {'fontsize': '(18)'}), "('500 hPa Winds, GPH' + '\\n' + 'anomaly fields', fontsize=18)\n", (10404, 10465), True, 'import matplotlib.pyplot as plt\n'), ((12194, 12212), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (12210, 12212), True, 'import cartopy.crs as ccrs\n'), ((12243, 12262), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (12251, 12262), False, 'from matplotlib.cm import get_cmap\n'), ((12481, 12499), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (12497, 12499), True, 'import cartopy.crs as ccrs\n'), ((12722, 12740), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (12738, 12740), True, 'import cartopy.crs as ccrs\n'), ((14740, 14825), 'matplotlib.pyplot.title', 'plt.title', (["('850 hPa Winds, Temp, Humidity' + '\\n' + 'anomaly fields')"], {'fontsize': '(18)'}), "('850 hPa Winds, Temp, Humidity' + '\\n' + 'anomaly fields',\n fontsize=18)\n", (14749, 14825), True, 'import matplotlib.pyplot as plt\n'), ((16562, 16580), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', 
(16578, 16580), True, 'import cartopy.crs as ccrs\n'), ((16611, 16630), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (16619, 16630), False, 'from matplotlib.cm import get_cmap\n'), ((16971, 16989), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (16987, 16989), True, 'import cartopy.crs as ccrs\n'), ((18874, 18950), 'matplotlib.pyplot.title', 'plt.title', (["('Surface Winds, Skin temp' + '\\n' + 'anomaly fields')"], {'fontsize': '(18)'}), "('Surface Winds, Skin temp' + '\\n' + 'anomaly fields', fontsize=18)\n", (18883, 18950), True, 'import matplotlib.pyplot as plt\n'), ((20289, 20307), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (20305, 20307), True, 'import cartopy.crs as ccrs\n'), ((20338, 20357), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (20346, 20357), False, 'from matplotlib.cm import get_cmap\n'), ((21781, 21856), 'matplotlib.pyplot.title', 'plt.title', (["('Total column water vapor' + '\\n' + 'anomaly field')"], {'fontsize': '(18)'}), "('Total column water vapor' + '\\n' + 'anomaly field', fontsize=18)\n", (21790, 21856), True, 'import matplotlib.pyplot as plt\n'), ((1068, 1094), 'numpy.linspace', 'np.linspace', (['b[2]', 'b[3]', 'n'], {}), '(b[2], b[3], n)\n', (1079, 1094), True, 'import numpy as np\n'), ((1500, 1526), 'numpy.linspace', 'np.linspace', (['b[0]', 'b[1]', 'n'], {}), '(b[0], b[1], n)\n', (1511, 1526), True, 'import numpy as np\n'), ((4837, 4850), 'numpy.amax', 'np.amax', (['data'], {}), '(data)\n', (4844, 4850), True, 'import numpy as np\n'), ((4860, 4873), 'numpy.amin', 'np.amin', (['data'], {}), '(data)\n', (4867, 4873), True, 'import numpy as np\n'), ((5041, 5059), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (5057, 5059), True, 'import cartopy.crs as ccrs\n'), ((5103, 5121), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""RdBu_r"""'], {}), "('RdBu_r')\n", (5111, 5121), False, 'from matplotlib.cm import 
get_cmap\n'), ((5465, 5483), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (5481, 5483), True, 'import cartopy.crs as ccrs\n'), ((8784, 8797), 'numpy.amax', 'np.amax', (['data'], {}), '(data)\n', (8791, 8797), True, 'import numpy as np\n'), ((8807, 8820), 'numpy.amin', 'np.amin', (['data'], {}), '(data)\n', (8814, 8820), True, 'import numpy as np\n'), ((8988, 9006), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (9004, 9006), True, 'import cartopy.crs as ccrs\n'), ((9050, 9068), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""RdBu_r"""'], {}), "('RdBu_r')\n", (9058, 9068), False, 'from matplotlib.cm import get_cmap\n'), ((9410, 9428), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (9426, 9428), True, 'import cartopy.crs as ccrs\n'), ((12998, 13011), 'numpy.amax', 'np.amax', (['data'], {}), '(data)\n', (13005, 13011), True, 'import numpy as np\n'), ((13021, 13034), 'numpy.amin', 'np.amin', (['data'], {}), '(data)\n', (13028, 13034), True, 'import numpy as np\n'), ((13202, 13220), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (13218, 13220), True, 'import cartopy.crs as ccrs\n'), ((13264, 13282), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""RdBu_r"""'], {}), "('RdBu_r')\n", (13272, 13282), False, 'from matplotlib.cm import get_cmap\n'), ((13501, 13519), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (13517, 13519), True, 'import cartopy.crs as ccrs\n'), ((13728, 13746), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (13744, 13746), True, 'import cartopy.crs as ccrs\n'), ((17247, 17260), 'numpy.amax', 'np.amax', (['data'], {}), '(data)\n', (17254, 17260), True, 'import numpy as np\n'), ((17270, 17283), 'numpy.amin', 'np.amin', (['data'], {}), '(data)\n', (17277, 17283), True, 'import numpy as np\n'), ((17453, 17471), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (17469, 17471), True, 'import cartopy.crs as ccrs\n'), ((17515, 17533), 
'matplotlib.cm.get_cmap', 'get_cmap', (['"""RdBu_r"""'], {}), "('RdBu_r')\n", (17523, 17533), False, 'from matplotlib.cm import get_cmap\n'), ((17875, 17893), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (17891, 17893), True, 'import cartopy.crs as ccrs\n'), ((20545, 20558), 'numpy.amax', 'np.amax', (['data'], {}), '(data)\n', (20552, 20558), True, 'import numpy as np\n'), ((20568, 20581), 'numpy.amin', 'np.amin', (['data'], {}), '(data)\n', (20575, 20581), True, 'import numpy as np\n'), ((20749, 20767), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (20765, 20767), True, 'import cartopy.crs as ccrs\n'), ((20811, 20829), 'matplotlib.cm.get_cmap', 'get_cmap', (['"""RdBu_r"""'], {}), "('RdBu_r')\n", (20819, 20829), False, 'from matplotlib.cm import get_cmap\n'), ((1051, 1062), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1059, 1062), True, 'import numpy as np\n'), ((1528, 1539), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1536, 1539), True, 'import numpy as np\n')] |
#!/usr/bin/python
# coding: utf-8
import numpy as np
from .facet import Facet
class PosRegion():
    """
    Implement the convex polytope built from positive samples.

    The region is stored as its vertices plus the facets of their convex
    hull; points are classified via facet visibility tests.
    """
    def __init__(self, pos_samples):
        """
        Params:
            pos_samples (np.array): dim+1 positive samples to create the
            (dim)-polytope.
        Raises:
            ValueError: if the number of samples is not dim+1.
        """
        self.dim = pos_samples.shape[1]
        if (self.dim+1) != pos_samples.shape[0]:
            raise ValueError("Wrong number of samples")
        self.vertices = pos_samples
        self.facets = []
        self.create_facets()

    def create_facets(self):
        """
        Create the facets of the polytope.

        For each vertex, build the facet spanned by all the other vertices,
        using the left-out vertex as the reference point that orients the
        facet.
        """
        for sample_id in range(self.vertices.shape[0]):
            facet_points = np.delete(self.vertices, sample_id, axis=0)
            self.facets.append(
                Facet(facet_points, self.vertices[sample_id, :])
            )

    def contain(self, point):
        """
        Check if a point is inside the positive region.

        A point is inside the positive region if it is not visible by any of
        the facets of the positive region.

        Params:
            point (np.array): point to check.
        Return:
            (boolean): True if point is inside the positive region.
        """
        return not any(facet.is_visible(point) for facet in self.facets)

    def add_vertex(self, point):
        """
        Add a new vertex on the positive region (incremental hull update).

        Params:
            point (np.array): point to add to the positive region.
        """
        # Step 1: Find the facets that can "see" the new point.
        visible_facets = [
            facet for facet in self.facets if facet.is_visible(point)
        ]
        # If there is no visible facet, the point is inside the positive
        # region, so don't do anything.
        if not visible_facets:
            return None
        # Step 2: Find the horizon ridges, i.e. ridges shared by a visible
        # facet and a hidden one.  They are exactly the ridges that occur
        # only once among the visible facets.  Hashing each ridge's raw
        # bytes avoids ambiguous numpy array comparisons.
        hash_horizon_ridges = []
        for facet in visible_facets:
            self.facets.remove(facet)
            for ridge in facet.get_ridges():
                ridge_hash = hash(ridge.tobytes())
                if ridge_hash in hash_horizon_ridges:
                    hash_horizon_ridges.remove(ridge_hash)
                else:
                    hash_horizon_ridges.append(ridge_hash)
        horizon_ridges = [
            ridge
            for facet in visible_facets
            for ridge in facet.get_ridges()
            if hash(ridge.tobytes()) in hash_horizon_ridges
        ]
        # Step 3: Add facets connecting the new point to each horizon
        # ridge, using any existing vertex not on the ridge as reference.
        for ridge in horizon_ridges:
            for point_id in range(self.vertices.shape[0]):
                if self.vertices[point_id, :] not in ridge:
                    ref = self.vertices[point_id, :]
                    break
            self.facets.append(
                Facet(np.vstack((ridge, point)), ref)
            )
        # Finally, update the vertices of this region.
        self.vertices = np.vstack((
            self.vertices,
            point.reshape((1, -1)),
        ))
        self.clean_vertices()

    def clean_vertices(self):
        """
        Remove vertices that are no longer on any facet.
        """
        to_remove = []
        for vertex_id in range(self.vertices.shape[0]):
            current_vertex = self.vertices[vertex_id, :]
            is_useful = False
            for facet in self.facets:
                if current_vertex in facet.vertices:
                    is_useful = True
            if not is_useful:
                to_remove.append(vertex_id)
        self.vertices = np.delete(self.vertices, to_remove, 0)
| [
"numpy.delete",
"numpy.vstack"
] | [((4221, 4259), 'numpy.delete', 'np.delete', (['self.vertices', 'to_remove', '(0)'], {}), '(self.vertices, to_remove, 0)\n', (4230, 4259), True, 'import numpy as np\n'), ((862, 905), 'numpy.delete', 'np.delete', (['self.vertices', 'sample_id'], {'axis': '(0)'}), '(self.vertices, sample_id, axis=0)\n', (871, 905), True, 'import numpy as np\n'), ((3462, 3487), 'numpy.vstack', 'np.vstack', (['(ridge, point)'], {}), '((ridge, point))\n', (3471, 3487), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import contextlib
import sys
import time
from django.db import models
from django.utils import timezone
from app.authentication.models import Workspace
from app.definitions.models import Datastore
from utils.mixins.models import UUIDModel
class Run(UUIDModel):
    """Represents scan and refresh of a datastore via Revisioner.
    """
    # Aggregate status values derived from timestamps and task counts
    # (see the `status` property below).
    SUCCESS = 'SUCCESS'
    FAILURE = 'FAILURE'
    PENDING = 'PENDING'
    PARTIAL = 'PARTIAL'

    # Owning workspace; deleting the workspace removes its run history.
    workspace = models.ForeignKey(
        to=Workspace,
        on_delete=models.CASCADE,
        related_name='run_history',
    )

    # Datastore being scanned; deleting it removes its run history.
    datastore = models.ForeignKey(
        to=Datastore,
        on_delete=models.CASCADE,
        related_name='run_history',
    )

    created_at = models.DateTimeField(
        auto_now_add=True,
        help_text="Timestamp for when the run was created.",
    )

    started_at = models.DateTimeField(
        default=None,
        null=True,
        help_text="Timestamp for when run queued actual processing tasks.",
    )

    finished_at = models.DateTimeField(
        default=None,
        null=True,
        help_text="Timestamp for when run finished calculating all revisions.",
    )

    # Counters snapshotted from the related RunTask rows in
    # `mark_as_finished`; they back the `status` property.
    tasks_count = models.IntegerField(default=0)
    fails_count = models.IntegerField(default=0)

    @property
    def status(self):
        """Derive the run status from completion state and task counts."""
        if self.finished_at is None:
            return Run.PENDING
        elif self.tasks_count == 1:
            # NOTE(review): any finished run with exactly one task is
            # reported as FAILURE regardless of fails_count — confirm this
            # is intended and not meant to compare fails_count to
            # tasks_count.
            return Run.FAILURE
        elif self.fails_count > 0:
            return Run.PARTIAL
        return Run.SUCCESS

    @property
    def epoch(self):
        """Milliseconds since the Unix epoch for the run's creation *date*
        (midnight, interpreted in the server's local time via mktime)."""
        return int(time.mktime(self.created_at.date().timetuple()) * 1000)

    @property
    def started(self):
        # True once `mark_as_started` has stamped `started_at`.
        return self.started_at is not None

    @property
    def finished(self):
        # True once `mark_as_finished` has stamped `finished_at`.
        return self.finished_at is not None

    @property
    def is_datastore_first_run(self):
        """Check is this run is the first run ever for the datastore.
        """
        return self.datastore.run_history.order_by('created_at').first() == self

    def mark_as_started(self, save=True):
        """Mark the run as started.

        Sets `started_at` to now; persists immediately unless save=False.
        """
        self.started_at = timezone.now()
        if save:
            self.save()

    def mark_as_finished(self, save=True):
        """Mark the run as finished.

        Snapshots the task/failure counters from the related RunTask rows
        and stamps `finished_at`; persists unless save=False.
        """
        self.tasks_count = self.tasks.count()
        self.fails_count = self.tasks.filter(status=RunTask.FAILURE).count()
        self.finished_at = timezone.now()
        if save:
            self.save()
if save:
self.save()
class RunTask(models.Model):
    """Represents a Celery task that must complete before completing the run.
    """
    # Terminal / transitional states for an individual task.
    SUCCESS = 'SUCCESS'
    FAILURE = 'FAILURE'
    PENDING = 'PENDING'
    REVOKED = 'REVOKED'

    STATUS_CHOICES = (
        (SUCCESS, SUCCESS),
        (FAILURE, FAILURE),
        (PENDING, PENDING),
        (REVOKED, REVOKED),
    )

    # Parent run; deleting the run cascades to its tasks.
    run = models.ForeignKey(
        to=Run,
        on_delete=models.CASCADE,
        related_name='tasks',
    )

    meta_task_id = models.CharField(
        max_length=512,
        null=True,
        help_text="Task ID for Celery",
    )

    status = models.CharField(
        max_length=10,
        choices=STATUS_CHOICES,
        null=False,
        blank=False,
        default=PENDING,
    )

    # Failure message captured by `mark_as_failed`; null on success.
    error = models.TextField(null=True)

    started_at = models.DateTimeField(
        default=None,
        null=True,
        help_text="Timestamp for when the task started",
    )

    finished_at = models.DateTimeField(
        default=None,
        null=True,
        help_text="Timestamp for when the task finished",
    )

    # Unique logical identifier for the task within the run pipeline.
    path = models.CharField(max_length=512, unique=True)

    @property
    def finished(self):
        # True once either mark_as_succeeded or mark_as_failed ran.
        return self.finished_at is not None

    def waiting(self):
        # True until a Celery task ID has been attached via mark_as_started.
        return self.meta_task_id is None

    def mark_as_started(self, meta_task_id=None, save=True):
        """Mark the task as started and provide the meta task ID if relevant.
        """
        self.started_at = timezone.now()
        self.meta_task_id = meta_task_id
        if save:
            self.save()

    def mark_as_succeeded(self):
        """Mark the task as finished.

        Sets SUCCESS status, stamps `finished_at` and saves.
        """
        self.status = RunTask.SUCCESS
        self.finished_at = timezone.now()
        self.save()

    def mark_as_failed(self, message=None):
        """Mark the task as finished.

        Sets FAILURE status, records the error message, stamps
        `finished_at` and saves.
        """
        self.status = RunTask.FAILURE
        self.error = message
        self.finished_at = timezone.now()
        self.save()

    @contextlib.contextmanager
    def task_context(self, meta_task_id, on_failure=None):
        """Run some code as a task.

        Marks the task started (without saving), yields, then records
        success or failure.  On failure the optional `on_failure` callback
        receives the exception.  The exception is swallowed except when
        running under `manage.py test`, where it is re-raised so test
        failures surface.
        """
        self.mark_as_started(meta_task_id, save=False)
        try:
            yield self
        except Exception as error:
            self.mark_as_failed(str(error))
            if on_failure and callable(on_failure):
                on_failure(error)
            if len(sys.argv) > 1 and sys.argv[1] == 'test':
                raise
        else:
            self.mark_as_succeeded()
| [
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.ForeignKey",
"django.utils.timezone.now",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((477, 567), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': 'Workspace', 'on_delete': 'models.CASCADE', 'related_name': '"""run_history"""'}), "(to=Workspace, on_delete=models.CASCADE, related_name=\n 'run_history')\n", (494, 567), False, 'from django.db import models\n'), ((611, 701), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'to': 'Datastore', 'on_delete': 'models.CASCADE', 'related_name': '"""run_history"""'}), "(to=Datastore, on_delete=models.CASCADE, related_name=\n 'run_history')\n", (628, 701), False, 'from django.db import models\n'), ((746, 843), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'help_text': '"""Timestamp for when the run was created."""'}), "(auto_now_add=True, help_text=\n 'Timestamp for when the run was created.')\n", (766, 843), False, 'from django.db import models\n'), ((880, 998), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'None', 'null': '(True)', 'help_text': '"""Timestamp for when run queued actual processing tasks."""'}), "(default=None, null=True, help_text=\n 'Timestamp for when run queued actual processing tasks.')\n", (900, 998), False, 'from django.db import models\n'), ((1044, 1166), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'None', 'null': '(True)', 'help_text': '"""Timestamp for when run finished calculating all revisions."""'}), "(default=None, null=True, help_text=\n 'Timestamp for when run finished calculating all revisions.')\n", (1064, 1166), False, 'from django.db import models\n'), ((1212, 1242), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1231, 1242), False, 'from django.db import models\n'), ((1261, 1291), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1280, 1291), False, 'from django.db import models\n'), ((2887, 2960), 'django.db.models.ForeignKey', 
'models.ForeignKey', ([], {'to': 'Run', 'on_delete': 'models.CASCADE', 'related_name': '"""tasks"""'}), "(to=Run, on_delete=models.CASCADE, related_name='tasks')\n", (2904, 2960), False, 'from django.db import models\n'), ((3012, 3087), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)', 'null': '(True)', 'help_text': '"""Task ID for Celery"""'}), "(max_length=512, null=True, help_text='Task ID for Celery')\n", (3028, 3087), False, 'from django.db import models\n'), ((3133, 3235), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'choices': 'STATUS_CHOICES', 'null': '(False)', 'blank': '(False)', 'default': 'PENDING'}), '(max_length=10, choices=STATUS_CHOICES, null=False, blank=\n False, default=PENDING)\n', (3149, 3235), False, 'from django.db import models\n'), ((3291, 3318), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (3307, 3318), False, 'from django.db import models\n'), ((3337, 3436), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'None', 'null': '(True)', 'help_text': '"""Timestamp for when the task started"""'}), "(default=None, null=True, help_text=\n 'Timestamp for when the task started')\n", (3357, 3436), False, 'from django.db import models\n'), ((3482, 3582), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'None', 'null': '(True)', 'help_text': '"""Timestamp for when the task finished"""'}), "(default=None, null=True, help_text=\n 'Timestamp for when the task finished')\n", (3502, 3582), False, 'from django.db import models\n'), ((3621, 3666), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(512)', 'unique': '(True)'}), '(max_length=512, unique=True)\n', (3637, 3666), False, 'from django.db import models\n'), ((2165, 2179), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2177, 2179), False, 'from django.utils import timezone\n'), ((2464, 2478), 
'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (2476, 2478), False, 'from django.utils import timezone\n'), ((3993, 4007), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (4005, 4007), False, 'from django.utils import timezone\n'), ((4239, 4253), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (4251, 4253), False, 'from django.utils import timezone\n'), ((4463, 4477), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (4475, 4477), False, 'from django.utils import timezone\n')] |
# -*- coding: utf-8 -*-
import os
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli.nice_command import NiceCommand
import tccli.error_msg as ErrorMsg
import tccli.help_template as HelpTemplate
from tccli import __version__
from tccli.utils import Utils
from tccli.configure import Configure
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.cme.v20191029 import cme_client as cme_client_v20191029
from tencentcloud.cme.v20191029 import models as models_v20191029
from tccli.services.cme import v20191029
from tccli.services.cme.v20191029 import help as v20191029_help
def doDescribeTasks(argv, arglist):
    """CLI entry for the cme DescribeTasks action.

    Builds the request from parsed command-line options, sends it through
    the SDK client, and prints the formatted JSON response.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeTasks", g_param[OptionsDefine.Version])
        return

    # Request payload assembled from the command-line options.
    param = {
        "Platform": argv.get("--Platform"),
        "ProjectId": argv.get("--ProjectId"),
        "TaskTypeSet": Utils.try_to_json(argv, "--TaskTypeSet"),
        "StatusSet": Utils.try_to_json(argv, "--StatusSet"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
    }

    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")

    client = CLIENT_MAP[g_param[OptionsDefine.Version]].CmeClient(
        cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += "_CLI_" + __version__

    model = MODELS_MAP[g_param[OptionsDefine.Version]].DescribeTasksRequest()
    model.from_json_string(json.dumps(param))
    result = client.DescribeTasks(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeTeams(argv, arglist):
    """CLI entry for the cme DescribeTeams action.

    Builds the request from parsed command-line options, sends it through
    the SDK client, and prints the formatted JSON response.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeTeams", g_param[OptionsDefine.Version])
        return

    # Request payload assembled from the command-line options.
    param = {
        "Platform": argv.get("--Platform"),
        "TeamIds": Utils.try_to_json(argv, "--TeamIds"),
    }

    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")

    client = CLIENT_MAP[g_param[OptionsDefine.Version]].CmeClient(
        cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += "_CLI_" + __version__

    model = MODELS_MAP[g_param[OptionsDefine.Version]].DescribeTeamsRequest()
    model.from_json_string(json.dumps(param))
    result = client.DescribeTeams(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doExportVideoEditProject(argv, arglist):
    """CLI entry for the cme ExportVideoEditProject action.

    Builds the request from parsed command-line options, sends it through
    the SDK client, and prints the formatted JSON response.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("ExportVideoEditProject", g_param[OptionsDefine.Version])
        return

    # Request payload assembled from the command-line options.
    param = {
        "Platform": argv.get("--Platform"),
        "ProjectId": argv.get("--ProjectId"),
        "Definition": Utils.try_to_json(argv, "--Definition"),
        "ExportDestination": argv.get("--ExportDestination"),
        "CMEExportInfo": Utils.try_to_json(argv, "--CMEExportInfo"),
        "VODExportInfo": Utils.try_to_json(argv, "--VODExportInfo"),
    }

    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")

    client = CLIENT_MAP[g_param[OptionsDefine.Version]].CmeClient(
        cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += "_CLI_" + __version__

    model = MODELS_MAP[g_param[OptionsDefine.Version]].ExportVideoEditProjectRequest()
    model.from_json_string(json.dumps(param))
    result = client.ExportVideoEditProject(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeSharedSpace(argv, arglist):
    """CLI entry for the cme DescribeSharedSpace action.

    Builds the request from parsed command-line options, sends it through
    the SDK client, and prints the formatted JSON response.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeSharedSpace", g_param[OptionsDefine.Version])
        return

    # Request payload assembled from the command-line options.
    param = {
        "Platform": argv.get("--Platform"),
        "Authorizee": Utils.try_to_json(argv, "--Authorizee"),
        "Operator": argv.get("--Operator"),
    }

    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")

    client = CLIENT_MAP[g_param[OptionsDefine.Version]].CmeClient(
        cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += "_CLI_" + __version__

    model = MODELS_MAP[g_param[OptionsDefine.Version]].DescribeSharedSpaceRequest()
    model.from_json_string(json.dumps(param))
    result = client.DescribeSharedSpace(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doGrantResourceAuthorization(argv, arglist):
    """CLI entry for the cme GrantResourceAuthorization action.

    Builds the request from parsed command-line options, sends it through
    the SDK client, and prints the formatted JSON response.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("GrantResourceAuthorization", g_param[OptionsDefine.Version])
        return

    # Request payload assembled from the command-line options.
    param = {
        "Platform": argv.get("--Platform"),
        "Owner": Utils.try_to_json(argv, "--Owner"),
        "Resources": Utils.try_to_json(argv, "--Resources"),
        "Authorizees": Utils.try_to_json(argv, "--Authorizees"),
        "Permissions": Utils.try_to_json(argv, "--Permissions"),
        "Operator": argv.get("--Operator"),
    }

    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")

    client = CLIENT_MAP[g_param[OptionsDefine.Version]].CmeClient(
        cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += "_CLI_" + __version__

    model = MODELS_MAP[g_param[OptionsDefine.Version]].GrantResourceAuthorizationRequest()
    model.from_json_string(json.dumps(param))
    result = client.GrantResourceAuthorization(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doSearchMaterial(argv, arglist):
    """CLI entry for the cme SearchMaterial action.

    Builds the request from parsed command-line options, sends it through
    the SDK client, and prints the formatted JSON response.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("SearchMaterial", g_param[OptionsDefine.Version])
        return

    # Request payload assembled from the command-line options.
    param = {
        "Platform": argv.get("--Platform"),
        "SearchScopes": Utils.try_to_json(argv, "--SearchScopes"),
        "MaterialTypes": Utils.try_to_json(argv, "--MaterialTypes"),
        "Text": argv.get("--Text"),
        "Resolution": argv.get("--Resolution"),
        "DurationRange": Utils.try_to_json(argv, "--DurationRange"),
        "CreateTimeRange": Utils.try_to_json(argv, "--CreateTimeRange"),
        "Tags": Utils.try_to_json(argv, "--Tags"),
        "Sort": Utils.try_to_json(argv, "--Sort"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
        "Operator": argv.get("--Operator"),
    }

    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")

    client = CLIENT_MAP[g_param[OptionsDefine.Version]].CmeClient(
        cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += "_CLI_" + __version__

    model = MODELS_MAP[g_param[OptionsDefine.Version]].SearchMaterialRequest()
    model.from_json_string(json.dumps(param))
    result = client.SearchMaterial(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doRevokeResourceAuthorization(argv, arglist):
    """CLI entry for the cme RevokeResourceAuthorization action.

    Builds the request from parsed command-line options, sends it through
    the SDK client, and prints the formatted JSON response.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("RevokeResourceAuthorization", g_param[OptionsDefine.Version])
        return

    # Request payload assembled from the command-line options.
    param = {
        "Platform": argv.get("--Platform"),
        "Owner": Utils.try_to_json(argv, "--Owner"),
        "Resources": Utils.try_to_json(argv, "--Resources"),
        "Authorizees": Utils.try_to_json(argv, "--Authorizees"),
        "Permissions": Utils.try_to_json(argv, "--Permissions"),
        "Operator": argv.get("--Operator"),
    }

    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")

    client = CLIENT_MAP[g_param[OptionsDefine.Version]].CmeClient(
        cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += "_CLI_" + __version__

    model = MODELS_MAP[g_param[OptionsDefine.Version]].RevokeResourceAuthorizationRequest()
    model.from_json_string(json.dumps(param))
    result = client.RevokeResourceAuthorization(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeJoinTeams(argv, arglist):
    """CLI entry for the cme DescribeJoinTeams action.

    Builds the request from parsed command-line options, sends it through
    the SDK client, and prints the formatted JSON response.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeJoinTeams", g_param[OptionsDefine.Version])
        return

    # Request payload assembled from the command-line options.
    param = {
        "Platform": argv.get("--Platform"),
        "MemberId": argv.get("--MemberId"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
    }

    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")

    client = CLIENT_MAP[g_param[OptionsDefine.Version]].CmeClient(
        cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += "_CLI_" + __version__

    model = MODELS_MAP[g_param[OptionsDefine.Version]].DescribeJoinTeamsRequest()
    model.from_json_string(json.dumps(param))
    result = client.DescribeJoinTeams(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeResourceAuthorization(argv, arglist):
    """CLI entry for the cme DescribeResourceAuthorization action.

    Builds the request from parsed command-line options, sends it through
    the SDK client, and prints the formatted JSON response.
    """
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeResourceAuthorization", g_param[OptionsDefine.Version])
        return

    # Request payload assembled from the command-line options.
    param = {
        "Platform": argv.get("--Platform"),
        "Owner": Utils.try_to_json(argv, "--Owner"),
        "Resource": Utils.try_to_json(argv, "--Resource"),
        "Operator": argv.get("--Operator"),
    }

    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")

    client = CLIENT_MAP[g_param[OptionsDefine.Version]].CmeClient(
        cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += "_CLI_" + __version__

    model = MODELS_MAP[g_param[OptionsDefine.Version]].DescribeResourceAuthorizationRequest()
    model.from_json_string(json.dumps(param))
    result = client.DescribeResourceAuthorization(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # python3.3
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doImportMaterial(argv, arglist):
    """CLI handler for ImportMaterial: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("ImportMaterial", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "VodFileId": argv.get("--VodFileId"),
        "Owner": Utils.try_to_json(argv, "--Owner"),
        "Name": argv.get("--Name"),
        "ClassPath": argv.get("--ClassPath"),
        "Tags": Utils.try_to_json(argv, "--Tags"),
        "PreProcessDefinition": Utils.try_to_json(argv, "--PreProcessDefinition"),
        "Operator": argv.get("--Operator"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].ImportMaterialRequest()
    model.from_json_string(json.dumps(param))
    result = client.ImportMaterial(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeTaskDetail(argv, arglist):
    """CLI handler for DescribeTaskDetail: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeTaskDetail", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "TaskId": argv.get("--TaskId"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].DescribeTaskDetailRequest()
    model.from_json_string(json.dumps(param))
    result = client.DescribeTaskDetail(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyTeam(argv, arglist):
    """CLI handler for ModifyTeam: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("ModifyTeam", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "TeamId": argv.get("--TeamId"),
        "Name": argv.get("--Name"),
        "Operator": argv.get("--Operator"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].ModifyTeamRequest()
    model.from_json_string(json.dumps(param))
    result = client.ModifyTeam(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteMaterial(argv, arglist):
    """CLI handler for DeleteMaterial: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DeleteMaterial", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "MaterialId": argv.get("--MaterialId"),
        "Operator": argv.get("--Operator"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].DeleteMaterialRequest()
    model.from_json_string(json.dumps(param))
    result = client.DeleteMaterial(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyMaterial(argv, arglist):
    """CLI handler for ModifyMaterial: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("ModifyMaterial", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "MaterialId": argv.get("--MaterialId"),
        "Owner": Utils.try_to_json(argv, "--Owner"),
        "Name": argv.get("--Name"),
        "Tags": Utils.try_to_json(argv, "--Tags"),
        "ClassPath": argv.get("--ClassPath"),
        "Operator": argv.get("--Operator"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].ModifyMaterialRequest()
    model.from_json_string(json.dumps(param))
    result = client.ModifyMaterial(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteTeam(argv, arglist):
    """CLI handler for DeleteTeam: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DeleteTeam", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "TeamId": argv.get("--TeamId"),
        "Operator": argv.get("--Operator"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].DeleteTeamRequest()
    model.from_json_string(json.dumps(param))
    result = client.DeleteTeam(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doAddTeamMember(argv, arglist):
    """CLI handler for AddTeamMember: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("AddTeamMember", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "TeamId": argv.get("--TeamId"),
        "TeamMembers": Utils.try_to_json(argv, "--TeamMembers"),
        "Operator": argv.get("--Operator"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].AddTeamMemberRequest()
    model.from_json_string(json.dumps(param))
    result = client.AddTeamMember(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyTeamMember(argv, arglist):
    """CLI handler for ModifyTeamMember: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("ModifyTeamMember", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "TeamId": argv.get("--TeamId"),
        "MemberId": argv.get("--MemberId"),
        "Remark": argv.get("--Remark"),
        "Role": argv.get("--Role"),
        "Operator": argv.get("--Operator"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].ModifyTeamMemberRequest()
    model.from_json_string(json.dumps(param))
    result = client.ModifyTeamMember(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteTeamMembers(argv, arglist):
    """CLI handler for DeleteTeamMembers: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DeleteTeamMembers", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "TeamId": argv.get("--TeamId"),
        "MemberIds": Utils.try_to_json(argv, "--MemberIds"),
        "Operator": argv.get("--Operator"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].DeleteTeamMembersRequest()
    model.from_json_string(json.dumps(param))
    result = client.DeleteTeamMembers(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteLoginStatus(argv, arglist):
    """CLI handler for DeleteLoginStatus: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DeleteLoginStatus", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "UserIds": Utils.try_to_json(argv, "--UserIds"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].DeleteLoginStatusRequest()
    model.from_json_string(json.dumps(param))
    result = client.DeleteLoginStatus(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeProjects(argv, arglist):
    """CLI handler for DescribeProjects: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeProjects", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "ProjectIds": Utils.try_to_json(argv, "--ProjectIds"),
        "AspectRatioSet": Utils.try_to_json(argv, "--AspectRatioSet"),
        "CategorySet": Utils.try_to_json(argv, "--CategorySet"),
        "Sort": Utils.try_to_json(argv, "--Sort"),
        "Owner": Utils.try_to_json(argv, "--Owner"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].DescribeProjectsRequest()
    model.from_json_string(json.dumps(param))
    result = client.DescribeProjects(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeLoginStatus(argv, arglist):
    """CLI handler for DescribeLoginStatus: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeLoginStatus", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "UserIds": Utils.try_to_json(argv, "--UserIds"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].DescribeLoginStatusRequest()
    model.from_json_string(json.dumps(param))
    result = client.DescribeLoginStatus(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeMaterials(argv, arglist):
    """CLI handler for DescribeMaterials: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeMaterials", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "MaterialIds": Utils.try_to_json(argv, "--MaterialIds"),
        "Sort": Utils.try_to_json(argv, "--Sort"),
        "Operator": argv.get("--Operator"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].DescribeMaterialsRequest()
    model.from_json_string(json.dumps(param))
    result = client.DescribeMaterials(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeClass(argv, arglist):
    """CLI handler for DescribeClass: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeClass", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "Owner": Utils.try_to_json(argv, "--Owner"),
        "Operator": argv.get("--Operator"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].DescribeClassRequest()
    model.from_json_string(json.dumps(param))
    result = client.DescribeClass(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateTeam(argv, arglist):
    """CLI handler for CreateTeam: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("CreateTeam", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "Name": argv.get("--Name"),
        "OwnerId": argv.get("--OwnerId"),
        "OwnerRemark": argv.get("--OwnerRemark"),
        "TeamId": argv.get("--TeamId"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].CreateTeamRequest()
    model.from_json_string(json.dumps(param))
    result = client.CreateTeam(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doModifyProject(argv, arglist):
    """CLI handler for ModifyProject: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("ModifyProject", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "ProjectId": argv.get("--ProjectId"),
        "Name": argv.get("--Name"),
        "AspectRatio": argv.get("--AspectRatio"),
        "Owner": Utils.try_to_json(argv, "--Owner"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].ModifyProjectRequest()
    model.from_json_string(json.dumps(param))
    result = client.ModifyProject(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateClass(argv, arglist):
    """CLI handler for CreateClass: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("CreateClass", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "Owner": Utils.try_to_json(argv, "--Owner"),
        "ClassPath": argv.get("--ClassPath"),
        "Operator": argv.get("--Operator"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].CreateClassRequest()
    model.from_json_string(json.dumps(param))
    result = client.CreateClass(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doImportMediaToProject(argv, arglist):
    """CLI handler for ImportMediaToProject: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("ImportMediaToProject", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "ProjectId": argv.get("--ProjectId"),
        "VodFileId": argv.get("--VodFileId"),
        "Name": argv.get("--Name"),
        "PreProcessDefinition": Utils.try_to_json(argv, "--PreProcessDefinition"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].ImportMediaToProjectRequest()
    model.from_json_string(json.dumps(param))
    result = client.ImportMediaToProject(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doListMedia(argv, arglist):
    """CLI handler for ListMedia: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("ListMedia", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "ClassPath": argv.get("--ClassPath"),
        "Owner": Utils.try_to_json(argv, "--Owner"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
        "Operator": argv.get("--Operator"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].ListMediaRequest()
    model.from_json_string(json.dumps(param))
    result = client.ListMedia(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doCreateProject(argv, arglist):
    """CLI handler for CreateProject: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("CreateProject", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "Category": argv.get("--Category"),
        "Name": argv.get("--Name"),
        "AspectRatio": argv.get("--AspectRatio"),
        "Owner": Utils.try_to_json(argv, "--Owner"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].CreateProjectRequest()
    model.from_json_string(json.dumps(param))
    result = client.CreateProject(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteClass(argv, arglist):
    """CLI handler for DeleteClass: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DeleteClass", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "Owner": Utils.try_to_json(argv, "--Owner"),
        "ClassPath": argv.get("--ClassPath"),
        "Operator": argv.get("--Operator"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].DeleteClassRequest()
    model.from_json_string(json.dumps(param))
    result = client.DeleteClass(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDeleteProject(argv, arglist):
    """CLI handler for DeleteProject: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DeleteProject", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "ProjectId": argv.get("--ProjectId"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].DeleteProjectRequest()
    model.from_json_string(json.dumps(param))
    result = client.DeleteProject(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doFlattenListMedia(argv, arglist):
    """CLI handler for FlattenListMedia: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("FlattenListMedia", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "ClassPath": argv.get("--ClassPath"),
        "Owner": Utils.try_to_json(argv, "--Owner"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
        "Operator": argv.get("--Operator"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].FlattenListMediaRequest()
    model.from_json_string(json.dumps(param))
    result = client.FlattenListMedia(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeTeamMembers(argv, arglist):
    """CLI handler for DescribeTeamMembers: build the request from argv, call the API, print the JSON reply."""
    g_param = parse_global_arg(argv)
    if "help" in argv:
        show_help("DescribeTeamMembers", g_param[OptionsDefine.Version])
        return
    param = {
        "Platform": argv.get("--Platform"),
        "TeamId": argv.get("--TeamId"),
        "MemberIds": Utils.try_to_json(argv, "--MemberIds"),
        "Offset": Utils.try_to_json(argv, "--Offset"),
        "Limit": Utils.try_to_json(argv, "--Limit"),
        "Operator": argv.get("--Operator"),
    }
    cred = credential.Credential(
        g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    timeout = g_param[OptionsDefine.Timeout]
    http_profile = HttpProfile(
        reqTimeout=60 if timeout is None else int(timeout),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint])
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    version = g_param[OptionsDefine.Version]
    client = CLIENT_MAP[version].CmeClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)  # tag CLI traffic for the backend
    model = MODELS_MAP[version].DescribeTeamMembersRequest()
    model.from_json_string(json.dumps(param))
    result = client.DescribeTeamMembers(model).to_json_string()
    try:
        jsonobj = json.loads(result)
    except TypeError:
        jsonobj = json.loads(result.decode('utf-8'))  # very old interpreters hand back bytes
    FormatOutput.output("action", jsonobj,
                        g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
# Maps an API version string to the SDK client module that implements it.
CLIENT_MAP = {
    "v20191029": cme_client_v20191029,
}

# Maps an API version string to the request/response model module.
MODELS_MAP = {
    "v20191029": models_v20191029,
}

# Maps a CLI action name to its handler function (used by version_merge/register_arg).
ACTION_MAP = {
    "DescribeTasks": doDescribeTasks,
    "DescribeTeams": doDescribeTeams,
    "ExportVideoEditProject": doExportVideoEditProject,
    "DescribeSharedSpace": doDescribeSharedSpace,
    "GrantResourceAuthorization": doGrantResourceAuthorization,
    "SearchMaterial": doSearchMaterial,
    "RevokeResourceAuthorization": doRevokeResourceAuthorization,
    "DescribeJoinTeams": doDescribeJoinTeams,
    "DescribeResourceAuthorization": doDescribeResourceAuthorization,
    "ImportMaterial": doImportMaterial,
    "DescribeTaskDetail": doDescribeTaskDetail,
    "ModifyTeam": doModifyTeam,
    "DeleteMaterial": doDeleteMaterial,
    "ModifyMaterial": doModifyMaterial,
    "DeleteTeam": doDeleteTeam,
    "AddTeamMember": doAddTeamMember,
    "ModifyTeamMember": doModifyTeamMember,
    "DeleteTeamMembers": doDeleteTeamMembers,
    "DeleteLoginStatus": doDeleteLoginStatus,
    "DescribeProjects": doDescribeProjects,
    "DescribeLoginStatus": doDescribeLoginStatus,
    "DescribeMaterials": doDescribeMaterials,
    "DescribeClass": doDescribeClass,
    "CreateTeam": doCreateTeam,
    "ModifyProject": doModifyProject,
    "CreateClass": doCreateClass,
    "ImportMediaToProject": doImportMediaToProject,
    "ListMedia": doListMedia,
    "CreateProject": doCreateProject,
    "DeleteClass": doDeleteClass,
    "DeleteProject": doDeleteProject,
    "FlattenListMedia": doFlattenListMedia,
    "DescribeTeamMembers": doDescribeTeamMembers,
}

# All API versions bundled with this CLI module, in dash-separated form.
AVAILABLE_VERSION_LIST = [
    v20191029.version,
]

# Per-version help metadata: key is 'v' + version with dashes stripped,
# value carries the generated help INFO table and service description.
AVAILABLE_VERSIONS = {
    'v' + v20191029.version.replace('-', ''): {"help": v20191029_help.INFO,"desc": v20191029_help.DESC},
}
def cme_action(argv, arglist):
    """Entry point for `tccli cme`: print service-level help or an arg error.

    Shows the action list for the requested (or newest) API version when
    "help" is present in argv; otherwise prints the too-few-arguments message.
    """
    if "help" not in argv:
        print(ErrorMsg.FEW_ARG)
        return
    known_versions = sorted(AVAILABLE_VERSIONS.keys())
    version_opt = "--" + OptionsDefine.Version
    # Default to the newest version; an explicit --Version overrides it.
    selected = known_versions[-1]
    if version_opt in argv:
        selected = 'v' + argv[version_opt].replace('-', '')
    if selected not in known_versions:
        print("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
        return
    entry = AVAILABLE_VERSIONS[selected]
    # Accumulate "  <action>\n<wrapped desc>" fragments, then join once.
    fragments = []
    for action, info in entry["help"].items():
        fragments.append("  %s\n" % action)
        fragments.append(Utils.split_str("  ", info["desc"], 120))
    print(HelpTemplate.SERVICE % {
        "name": "cme", "desc": entry["desc"], "actions": "".join(fragments)})
def version_merge():
    """Merge per-version action help into one table.

    Returns a dict keyed by action name where each value holds "cb" (the
    handler from ACTION_MAP) and "params" (deduplicated parameter names
    collected across every available API version).
    """
    merged = {}
    for ver in AVAILABLE_VERSIONS:
        actions = AVAILABLE_VERSIONS[ver]["help"]
        for name in actions:
            if name not in merged:
                merged[name] = {"cb": ACTION_MAP[name], "params": []}
            seen = merged[name]["params"]
            for param in actions[name]["params"]:
                if param["name"] not in seen:
                    seen.append(param["name"])
    return merged
def register_arg(command):
    """Register the "cme" command and one subcommand per merged action.

    Each subcommand accepts "help", every parameter the action takes in any
    version, and all of the shared global options.
    """
    service_cmd = NiceCommand("cme", cme_action)
    command.reg_cmd(service_cmd)
    service_cmd.reg_opt("help", "bool")
    service_cmd.reg_opt(OptionsDefine.Version, "string")
    for action_name, meta in version_merge().items():
        sub_cmd = NiceCommand(action_name, meta["cb"])
        service_cmd.reg_cmd(sub_cmd)
        sub_cmd.reg_opt("help", "bool")
        for param in meta["params"]:
            sub_cmd.reg_opt("--" + param, "string")
        for opt in OptionsDefine.ACTION_GLOBAL_OPT:
            sub_cmd.reg_opt("--" + opt, "string")
def parse_global_arg(argv):
    """Resolve the global CLI options for the "cme" service.

    Precedence is: command line > environment variables > profile
    configuration/credential files. Raises Exception when a required value
    (SecretId, SecretKey, Region, version/endpoint) cannot be resolved, or
    when the resolved version is not supported.
    """
    params = {}
    for opt in OptionsDefine.ACTION_GLOBAL_OPT:
        key = "--" + opt
        params[opt] = argv[key] if key in argv else None
    # Normalize a dashed version ("2019-10-29") into the map key ("v20191029").
    if params[OptionsDefine.Version]:
        params[OptionsDefine.Version] = "v" + params[OptionsDefine.Version].replace('-', '')

    config_handle = Configure()
    profile = config_handle.profile
    profile_opt = "--" + OptionsDefine.Profile
    if profile_opt in argv:
        profile = argv[profile_opt]
    is_conexist, conf_path = config_handle._profile_existed(profile + "." + config_handle.configure)
    is_creexist, cred_path = config_handle._profile_existed(profile + "." + config_handle.credential)
    config = config_handle._load_json_msg(conf_path) if is_conexist else {}
    cred = config_handle._load_json_msg(cred_path) if is_creexist else {}

    # Environment variables override whatever the profile files provided.
    env_secret_id = os.environ.get(OptionsDefine.ENV_SECRET_ID)
    if env_secret_id:
        cred[OptionsDefine.SecretId] = env_secret_id
    env_secret_key = os.environ.get(OptionsDefine.ENV_SECRET_KEY)
    if env_secret_key:
        cred[OptionsDefine.SecretKey] = env_secret_key
    env_region = os.environ.get(OptionsDefine.ENV_REGION)
    if env_region:
        config[OptionsDefine.Region] = env_region

    # Fill any option still missing from the command line.
    for name in params.keys():
        if name == OptionsDefine.Version or params[name] is not None:
            continue
        if name in (OptionsDefine.SecretKey, OptionsDefine.SecretId):
            if name not in cred:
                raise Exception("%s is invalid" % name)
            params[name] = cred[name]
        elif name in config:
            params[name] = config[name]
        elif name == OptionsDefine.Region:
            raise Exception("%s is invalid" % OptionsDefine.Region)

    # Version/endpoint fall back to the service section of the config file.
    try:
        if params[OptionsDefine.Version] is None:
            raw_version = config["cme"][OptionsDefine.Version]
            params[OptionsDefine.Version] = "v" + raw_version.replace('-', '')
        if params[OptionsDefine.Endpoint] is None:
            params[OptionsDefine.Endpoint] = config["cme"][OptionsDefine.Endpoint]
    except Exception as err:
        raise Exception("config file:%s error, %s" % (conf_path, str(err)))

    if params[OptionsDefine.Version] not in sorted(AVAILABLE_VERSIONS.keys()):
        raise Exception("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
    return params
def show_help(action, version):
    """Print detailed help for one action of the given API version.

    Renders each parameter name plus its wrapped description through the
    shared ACTION help template.
    """
    info = AVAILABLE_VERSIONS[version]["help"][action]
    pieces = []
    for param in info["params"]:
        pieces.append("  %s\n" % ("--" + param["name"]))
        pieces.append(Utils.split_str("  ", param["desc"], 120))
    print(HelpTemplate.ACTION % {
        "name": action, "service": "cme",
        "desc": AVAILABLE_VERSIONS[version]["desc"],
        "params": "".join(pieces)})
def get_actions_info():
    """Return the action help table for the user-configured API version.

    Reads the default profile to find the preferred "cme" version; silently
    falls back to the newest available version when the profile is missing,
    malformed, or names an unknown version.
    """
    configure = Configure()
    newest = max(AVAILABLE_VERSIONS.keys())
    version = newest
    try:
        profile = configure._load_json_msg(
            os.path.join(configure.cli_path, "default.configure"))
        version = "v" + profile["cme"]["version"].replace('-', '')
    except Exception:
        # Best-effort: any problem reading the profile means "use the newest".
        pass
    if version not in AVAILABLE_VERSIONS.keys():
        version = newest
    return AVAILABLE_VERSIONS[version]["help"]
| [
"tencentcloud.common.credential.Credential",
"tccli.services.cme.v20191029.version.replace",
"json.loads",
"json.dumps",
"os.environ.get",
"os.path.join",
"tccli.nice_command.NiceCommand",
"tccli.configure.Configure",
"tccli.utils.Utils.split_str",
"tccli.format_output.output",
"tccli.utils.Util... | [((1312, 1405), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (1333, 1405), False, 'from tencentcloud.common import credential\n'), ((1634, 1698), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (1647, 1698), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((2252, 2356), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (2271, 2356), True, 'import tccli.format_output as FormatOutput\n'), ((2667, 2760), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (2688, 2760), False, 'from tencentcloud.common import credential\n'), ((2989, 3053), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (3002, 3053), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((3607, 3711), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (3626, 3711), True, 'import tccli.format_output as FormatOutput\n'), ((4292, 4385), 'tencentcloud.common.credential.Credential', 
'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (4313, 4385), False, 'from tencentcloud.common import credential\n'), ((4614, 4678), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (4627, 4678), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((5250, 5354), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (5269, 5354), True, 'import tccli.format_output as FormatOutput\n'), ((5727, 5820), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (5748, 5820), False, 'from tencentcloud.common import credential\n'), ((6049, 6113), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (6062, 6113), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((6679, 6783), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (6698, 6783), True, 'import tccli.format_output as FormatOutput\n'), ((7351, 7444), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 
'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (7372, 7444), False, 'from tencentcloud.common import credential\n'), ((7673, 7737), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (7686, 7737), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((8317, 8421), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (8336, 8421), True, 'import tccli.format_output as FormatOutput\n'), ((9293, 9386), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (9314, 9386), False, 'from tencentcloud.common import credential\n'), ((9615, 9679), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (9628, 9679), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((10235, 10339), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (10254, 10339), True, 'import tccli.format_output as FormatOutput\n'), ((10909, 11002), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], 
g_param[\n OptionsDefine.SecretKey])\n', (10930, 11002), False, 'from tencentcloud.common import credential\n'), ((11231, 11295), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (11244, 11295), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((11877, 11981), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (11896, 11981), True, 'import tccli.format_output as FormatOutput\n'), ((12395, 12488), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (12416, 12488), False, 'from tencentcloud.common import credential\n'), ((12717, 12781), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (12730, 12781), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((13343, 13447), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (13362, 13447), True, 'import tccli.format_output as FormatOutput\n'), ((13889, 13982), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (13910, 13982), False, 
'from tencentcloud.common import credential\n'), ((14211, 14275), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (14224, 14275), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((14861, 14965), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (14880, 14965), True, 'import tccli.format_output as FormatOutput\n'), ((15580, 15673), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (15601, 15673), False, 'from tencentcloud.common import credential\n'), ((15902, 15966), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (15915, 15966), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((16522, 16626), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (16541, 16626), True, 'import tccli.format_output as FormatOutput\n'), ((16930, 17023), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (16951, 17023), False, 'from tencentcloud.common import credential\n'), ((17252, 17316), 
'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (17265, 17316), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((17880, 17984), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (17899, 17984), True, 'import tccli.format_output as FormatOutput\n'), ((18352, 18445), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (18373, 18445), False, 'from tencentcloud.common import credential\n'), ((18674, 18738), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (18687, 18738), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((19286, 19390), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (19305, 19390), True, 'import tccli.format_output as FormatOutput\n'), ((19738, 19831), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (19759, 19831), False, 'from tencentcloud.common import credential\n'), ((20060, 20124), 'tencentcloud.common.profile.client_profile.ClientProfile', 
'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (20073, 20124), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((20680, 20784), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (20699, 20784), True, 'import tccli.format_output as FormatOutput\n'), ((21318, 21411), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (21339, 21411), False, 'from tencentcloud.common import credential\n'), ((21640, 21704), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (21653, 21704), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((22260, 22364), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (22279, 22364), True, 'import tccli.format_output as FormatOutput\n'), ((22696, 22789), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (22717, 22789), False, 'from tencentcloud.common import credential\n'), ((23018, 23082), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 
'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (23031, 23082), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((23630, 23734), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (23649, 23734), True, 'import tccli.format_output as FormatOutput\n'), ((24137, 24230), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (24158, 24230), False, 'from tencentcloud.common import credential\n'), ((24459, 24523), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (24472, 24523), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((25077, 25181), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (25096, 25181), True, 'import tccli.format_output as FormatOutput\n'), ((25645, 25738), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (25666, 25738), False, 'from tencentcloud.common import credential\n'), ((25967, 26031), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, 
signMethod='HmacSHA256')\n", (25980, 26031), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((26591, 26695), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (26610, 26695), True, 'import tccli.format_output as FormatOutput\n'), ((27102, 27195), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (27123, 27195), False, 'from tencentcloud.common import credential\n'), ((27424, 27488), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (27437, 27488), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((28050, 28154), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (28069, 28154), True, 'import tccli.format_output as FormatOutput\n'), ((28473, 28566), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (28494, 28566), False, 'from tencentcloud.common import credential\n'), ((28795, 28859), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (28808, 28859), False, 'from 
tencentcloud.common.profile.client_profile import ClientProfile\n'), ((29421, 29525), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (29440, 29525), True, 'import tccli.format_output as FormatOutput\n'), ((30196, 30289), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (30217, 30289), False, 'from tencentcloud.common import credential\n'), ((30518, 30582), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (30531, 30582), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((31142, 31246), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (31161, 31246), True, 'import tccli.format_output as FormatOutput\n'), ((31569, 31662), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (31590, 31662), False, 'from tencentcloud.common import credential\n'), ((31891, 31955), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (31904, 31955), False, 'from tencentcloud.common.profile.client_profile import 
ClientProfile\n'), ((32521, 32625), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (32540, 32625), True, 'import tccli.format_output as FormatOutput\n'), ((33047, 33140), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (33068, 33140), False, 'from tencentcloud.common import credential\n'), ((33369, 33433), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (33382, 33433), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((33995, 34099), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (34014, 34099), True, 'import tccli.format_output as FormatOutput\n'), ((34450, 34543), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (34471, 34543), False, 'from tencentcloud.common import credential\n'), ((34772, 34836), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (34785, 34836), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((35390, 35494), 'tccli.format_output.output', 
'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (35409, 35494), True, 'import tccli.format_output as FormatOutput\n'), ((35910, 36003), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (35931, 36003), False, 'from tencentcloud.common import credential\n'), ((36232, 36296), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (36245, 36296), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((36844, 36948), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (36863, 36948), True, 'import tccli.format_output as FormatOutput\n'), ((37387, 37480), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (37408, 37480), False, 'from tencentcloud.common import credential\n'), ((37709, 37773), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (37722, 37773), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((38327, 38431), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 
'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (38346, 38431), True, 'import tccli.format_output as FormatOutput\n'), ((38824, 38917), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (38845, 38917), False, 'from tencentcloud.common import credential\n'), ((39146, 39210), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (39159, 39210), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((39760, 39864), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (39779, 39864), True, 'import tccli.format_output as FormatOutput\n'), ((40343, 40436), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (40364, 40436), False, 'from tencentcloud.common import credential\n'), ((40665, 40729), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (40678, 40729), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((41297, 41401), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 
'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (41316, 41401), True, 'import tccli.format_output as FormatOutput\n'), ((41898, 41991), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (41919, 41991), False, 'from tencentcloud.common import credential\n'), ((42220, 42284), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (42233, 42284), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((42830, 42934), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (42849, 42934), True, 'import tccli.format_output as FormatOutput\n'), ((43371, 43464), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (43392, 43464), False, 'from tencentcloud.common import credential\n'), ((43693, 43757), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (43706, 43757), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((44311, 44415), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, 
g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (44330, 44415), True, 'import tccli.format_output as FormatOutput\n'), ((44808, 44901), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (44829, 44901), False, 'from tencentcloud.common import credential\n'), ((45130, 45194), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (45143, 45194), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((45744, 45848), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (45763, 45848), True, 'import tccli.format_output as FormatOutput\n'), ((46148, 46241), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (46169, 46241), False, 'from tencentcloud.common import credential\n'), ((46470, 46534), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (46483, 46534), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((47088, 47192), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n 
g_param[OptionsDefine.Filter])\n", (47107, 47192), True, 'import tccli.format_output as FormatOutput\n'), ((47703, 47796), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (47724, 47796), False, 'from tencentcloud.common import credential\n'), ((48025, 48089), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (48038, 48089), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((48649, 48753), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (48668, 48753), True, 'import tccli.format_output as FormatOutput\n'), ((49272, 49365), 'tencentcloud.common.credential.Credential', 'credential.Credential', (['g_param[OptionsDefine.SecretId]', 'g_param[OptionsDefine.SecretKey]'], {}), '(g_param[OptionsDefine.SecretId], g_param[\n OptionsDefine.SecretKey])\n', (49293, 49365), False, 'from tencentcloud.common import credential\n'), ((49594, 49658), 'tencentcloud.common.profile.client_profile.ClientProfile', 'ClientProfile', ([], {'httpProfile': 'http_profile', 'signMethod': '"""HmacSHA256"""'}), "(httpProfile=http_profile, signMethod='HmacSHA256')\n", (49607, 49658), False, 'from tencentcloud.common.profile.client_profile import ClientProfile\n'), ((50224, 50328), 'tccli.format_output.output', 'FormatOutput.output', (['"""action"""', 'jsonobj', 'g_param[OptionsDefine.Output]', 'g_param[OptionsDefine.Filter]'], {}), "('action', jsonobj, g_param[OptionsDefine.Output],\n g_param[OptionsDefine.Filter])\n", (50243, 50328), True, 'import 
tccli.format_output as FormatOutput\n'), ((53527, 53557), 'tccli.nice_command.NiceCommand', 'NiceCommand', (['"""cme"""', 'cme_action'], {}), "('cme', cme_action)\n", (53538, 53557), False, 'from tccli.nice_command import NiceCommand\n'), ((54450, 54461), 'tccli.configure.Configure', 'Configure', ([], {}), '()\n', (54459, 54461), False, 'from tccli.configure import Configure\n'), ((54993, 55036), 'os.environ.get', 'os.environ.get', (['OptionsDefine.ENV_SECRET_ID'], {}), '(OptionsDefine.ENV_SECRET_ID)\n', (55007, 55036), False, 'import os\n'), ((55128, 55172), 'os.environ.get', 'os.environ.get', (['OptionsDefine.ENV_SECRET_KEY'], {}), '(OptionsDefine.ENV_SECRET_KEY)\n', (55142, 55172), False, 'import os\n'), ((55266, 55306), 'os.environ.get', 'os.environ.get', (['OptionsDefine.ENV_REGION'], {}), '(OptionsDefine.ENV_REGION)\n', (55280, 55306), False, 'import os\n'), ((57101, 57112), 'tccli.configure.Configure', 'Configure', ([], {}), '()\n', (57110, 57112), False, 'from tccli.configure import Configure\n'), ((1083, 1123), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--TaskTypeSet"""'], {}), "(argv, '--TaskTypeSet')\n", (1100, 1123), False, 'from tccli.utils import Utils\n'), ((1146, 1184), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--StatusSet"""'], {}), "(argv, '--StatusSet')\n", (1163, 1184), False, 'from tccli.utils import Utils\n'), ((1204, 1239), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Offset"""'], {}), "(argv, '--Offset')\n", (1221, 1239), False, 'from tccli.utils import Utils\n'), ((1258, 1292), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Limit"""'], {}), "(argv, '--Limit')\n", (1275, 1292), False, 'from tccli.utils import Utils\n'), ((2000, 2017), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (2010, 2017), False, 'import json\n'), ((2137, 2155), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (2147, 2155), False, 'import json\n'), 
((2611, 2647), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--TeamIds"""'], {}), "(argv, '--TeamIds')\n", (2628, 2647), False, 'from tccli.utils import Utils\n'), ((3355, 3372), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (3365, 3372), False, 'import json\n'), ((3492, 3510), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (3502, 3510), False, 'import json\n'), ((4033, 4072), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Definition"""'], {}), "(argv, '--Definition')\n", (4050, 4072), False, 'from tccli.utils import Utils\n'), ((4161, 4203), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--CMEExportInfo"""'], {}), "(argv, '--CMEExportInfo')\n", (4178, 4203), False, 'from tccli.utils import Utils\n'), ((4230, 4272), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--VODExportInfo"""'], {}), "(argv, '--VODExportInfo')\n", (4247, 4272), False, 'from tccli.utils import Utils\n'), ((4989, 5006), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (4999, 5006), False, 'import json\n'), ((5135, 5153), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (5145, 5153), False, 'import json\n'), ((5624, 5663), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Authorizee"""'], {}), "(argv, '--Authorizee')\n", (5641, 5663), False, 'from tccli.utils import Utils\n'), ((6421, 6438), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (6431, 6438), False, 'import json\n'), ((6564, 6582), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (6574, 6582), False, 'import json\n'), ((7062, 7096), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Owner"""'], {}), "(argv, '--Owner')\n", (7079, 7096), False, 'from tccli.utils import Utils\n'), ((7119, 7157), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Resources"""'], {}), "(argv, '--Resources')\n", (7136, 7157), False, 'from tccli.utils 
import Utils\n'), ((7182, 7222), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Authorizees"""'], {}), "(argv, '--Authorizees')\n", (7199, 7222), False, 'from tccli.utils import Utils\n'), ((7247, 7287), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Permissions"""'], {}), "(argv, '--Permissions')\n", (7264, 7287), False, 'from tccli.utils import Utils\n'), ((8052, 8069), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (8062, 8069), False, 'import json\n'), ((8202, 8220), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (8212, 8220), False, 'import json\n'), ((8683, 8724), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--SearchScopes"""'], {}), "(argv, '--SearchScopes')\n", (8700, 8724), False, 'from tccli.utils import Utils\n'), ((8751, 8793), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--MaterialTypes"""'], {}), "(argv, '--MaterialTypes')\n", (8768, 8793), False, 'from tccli.utils import Utils\n'), ((8904, 8946), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--DurationRange"""'], {}), "(argv, '--DurationRange')\n", (8921, 8946), False, 'from tccli.utils import Utils\n'), ((8975, 9019), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--CreateTimeRange"""'], {}), "(argv, '--CreateTimeRange')\n", (8992, 9019), False, 'from tccli.utils import Utils\n'), ((9037, 9070), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Tags"""'], {}), "(argv, '--Tags')\n", (9054, 9070), False, 'from tccli.utils import Utils\n'), ((9088, 9121), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Sort"""'], {}), "(argv, '--Sort')\n", (9105, 9121), False, 'from tccli.utils import Utils\n'), ((9141, 9176), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Offset"""'], {}), "(argv, '--Offset')\n", (9158, 9176), False, 'from tccli.utils import Utils\n'), ((9195, 9229), 
'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Limit"""'], {}), "(argv, '--Limit')\n", (9212, 9229), False, 'from tccli.utils import Utils\n'), ((9982, 9999), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (9992, 9999), False, 'import json\n'), ((10120, 10138), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (10130, 10138), False, 'import json\n'), ((10620, 10654), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Owner"""'], {}), "(argv, '--Owner')\n", (10637, 10654), False, 'from tccli.utils import Utils\n'), ((10677, 10715), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Resources"""'], {}), "(argv, '--Resources')\n", (10694, 10715), False, 'from tccli.utils import Utils\n'), ((10740, 10780), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Authorizees"""'], {}), "(argv, '--Authorizees')\n", (10757, 10780), False, 'from tccli.utils import Utils\n'), ((10805, 10845), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Permissions"""'], {}), "(argv, '--Permissions')\n", (10822, 10845), False, 'from tccli.utils import Utils\n'), ((11611, 11628), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (11621, 11628), False, 'import json\n'), ((11762, 11780), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (11772, 11780), False, 'import json\n'), ((12287, 12322), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Offset"""'], {}), "(argv, '--Offset')\n", (12304, 12322), False, 'from tccli.utils import Utils\n'), ((12341, 12375), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Limit"""'], {}), "(argv, '--Limit')\n", (12358, 12375), False, 'from tccli.utils import Utils\n'), ((13087, 13104), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (13097, 13104), False, 'import json\n'), ((13228, 13246), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (13238, 13246), False, 
'import json\n'), ((13732, 13766), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Owner"""'], {}), "(argv, '--Owner')\n", (13749, 13766), False, 'from tccli.utils import Utils\n'), ((13788, 13825), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Resource"""'], {}), "(argv, '--Resource')\n", (13805, 13825), False, 'from tccli.utils import Utils\n'), ((14593, 14610), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (14603, 14610), False, 'import json\n'), ((14746, 14764), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (14756, 14764), False, 'import json\n'), ((15266, 15300), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Owner"""'], {}), "(argv, '--Owner')\n", (15283, 15300), False, 'from tccli.utils import Utils\n'), ((15400, 15433), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Tags"""'], {}), "(argv, '--Tags')\n", (15417, 15433), False, 'from tccli.utils import Utils\n'), ((15467, 15516), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--PreProcessDefinition"""'], {}), "(argv, '--PreProcessDefinition')\n", (15484, 15516), False, 'from tccli.utils import Utils\n'), ((16269, 16286), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (16279, 16286), False, 'import json\n'), ((16407, 16425), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (16417, 16425), False, 'import json\n'), ((17623, 17640), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (17633, 17640), False, 'import json\n'), ((17765, 17783), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (17775, 17783), False, 'import json\n'), ((19037, 19054), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (19047, 19054), False, 'import json\n'), ((19171, 19189), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (19181, 19189), False, 'import json\n'), ((20427, 20444), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', 
(20437, 20444), False, 'import json\n'), ((20565, 20583), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (20575, 20583), False, 'import json\n'), ((21087, 21121), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Owner"""'], {}), "(argv, '--Owner')\n", (21104, 21121), False, 'from tccli.utils import Utils\n'), ((21175, 21208), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Tags"""'], {}), "(argv, '--Tags')\n", (21192, 21208), False, 'from tccli.utils import Utils\n'), ((22007, 22024), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (22017, 22024), False, 'import json\n'), ((22145, 22163), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (22155, 22163), False, 'import json\n'), ((23381, 23398), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (23391, 23398), False, 'import json\n'), ((23515, 23533), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (23525, 23533), False, 'import json\n'), ((24033, 24073), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--TeamMembers"""'], {}), "(argv, '--TeamMembers')\n", (24050, 24073), False, 'from tccli.utils import Utils\n'), ((24825, 24842), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (24835, 24842), False, 'import json\n'), ((24962, 24980), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (24972, 24980), False, 'import json\n'), ((26336, 26353), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (26346, 26353), False, 'import json\n'), ((26476, 26494), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (26486, 26494), False, 'import json\n'), ((27000, 27038), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--MemberIds"""'], {}), "(argv, '--MemberIds')\n", (27017, 27038), False, 'from tccli.utils import Utils\n'), ((27794, 27811), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (27804, 27811), False, 'import json\n'), ((27935, 27953), 
'json.loads', 'json.loads', (['result'], {}), '(result)\n', (27945, 27953), False, 'import json\n'), ((28417, 28453), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--UserIds"""'], {}), "(argv, '--UserIds')\n", (28434, 28453), False, 'from tccli.utils import Utils\n'), ((29165, 29182), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (29175, 29182), False, 'import json\n'), ((29306, 29324), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (29316, 29324), False, 'import json\n'), ((29789, 29828), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--ProjectIds"""'], {}), "(argv, '--ProjectIds')\n", (29806, 29828), False, 'from tccli.utils import Utils\n'), ((29856, 29899), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--AspectRatioSet"""'], {}), "(argv, '--AspectRatioSet')\n", (29873, 29899), False, 'from tccli.utils import Utils\n'), ((29924, 29964), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--CategorySet"""'], {}), "(argv, '--CategorySet')\n", (29941, 29964), False, 'from tccli.utils import Utils\n'), ((29982, 30015), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Sort"""'], {}), "(argv, '--Sort')\n", (29999, 30015), False, 'from tccli.utils import Utils\n'), ((30034, 30068), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Owner"""'], {}), "(argv, '--Owner')\n", (30051, 30068), False, 'from tccli.utils import Utils\n'), ((30088, 30123), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Offset"""'], {}), "(argv, '--Offset')\n", (30105, 30123), False, 'from tccli.utils import Utils\n'), ((30142, 30176), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Limit"""'], {}), "(argv, '--Limit')\n", (30159, 30176), False, 'from tccli.utils import Utils\n'), ((30887, 30904), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (30897, 30904), False, 'import json\n'), ((31027, 31045), 
'json.loads', 'json.loads', (['result'], {}), '(result)\n', (31037, 31045), False, 'import json\n'), ((31513, 31549), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--UserIds"""'], {}), "(argv, '--UserIds')\n", (31530, 31549), False, 'from tccli.utils import Utils\n'), ((32263, 32280), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (32273, 32280), False, 'import json\n'), ((32406, 32424), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (32416, 32424), False, 'import json\n'), ((32892, 32932), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--MaterialIds"""'], {}), "(argv, '--MaterialIds')\n", (32909, 32932), False, 'from tccli.utils import Utils\n'), ((32950, 32983), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Sort"""'], {}), "(argv, '--Sort')\n", (32967, 32983), False, 'from tccli.utils import Utils\n'), ((33739, 33756), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (33749, 33756), False, 'import json\n'), ((33880, 33898), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (33890, 33898), False, 'import json\n'), ((34352, 34386), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Owner"""'], {}), "(argv, '--Owner')\n", (34369, 34386), False, 'from tccli.utils import Utils\n'), ((35138, 35155), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (35148, 35155), False, 'import json\n'), ((35275, 35293), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (35285, 35293), False, 'import json\n'), ((36595, 36612), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (36605, 36612), False, 'import json\n'), ((36729, 36747), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (36739, 36747), False, 'import json\n'), ((37333, 37367), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Owner"""'], {}), "(argv, '--Owner')\n", (37350, 37367), False, 'from tccli.utils import Utils\n'), ((38075, 38092), 
'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (38085, 38092), False, 'import json\n'), ((38212, 38230), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (38222, 38230), False, 'import json\n'), ((38680, 38714), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Owner"""'], {}), "(argv, '--Owner')\n", (38697, 38714), False, 'from tccli.utils import Utils\n'), ((39510, 39527), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (39520, 39527), False, 'import json\n'), ((39645, 39663), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (39655, 39663), False, 'import json\n'), ((40274, 40323), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--PreProcessDefinition"""'], {}), "(argv, '--PreProcessDefinition')\n", (40291, 40323), False, 'from tccli.utils import Utils\n'), ((41038, 41055), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (41048, 41055), False, 'import json\n'), ((41182, 41200), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (41192, 41200), False, 'import json\n'), ((41692, 41726), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Owner"""'], {}), "(argv, '--Owner')\n", (41709, 41726), False, 'from tccli.utils import Utils\n'), ((41746, 41781), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Offset"""'], {}), "(argv, '--Offset')\n", (41763, 41781), False, 'from tccli.utils import Utils\n'), ((41800, 41834), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Limit"""'], {}), "(argv, '--Limit')\n", (41817, 41834), False, 'from tccli.utils import Utils\n'), ((42582, 42599), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (42592, 42599), False, 'import json\n'), ((42715, 42733), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (42725, 42733), False, 'import json\n'), ((43317, 43351), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Owner"""'], {}), "(argv, 
'--Owner')\n", (43334, 43351), False, 'from tccli.utils import Utils\n'), ((44059, 44076), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (44069, 44076), False, 'import json\n'), ((44196, 44214), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (44206, 44214), False, 'import json\n'), ((44664, 44698), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Owner"""'], {}), "(argv, '--Owner')\n", (44681, 44698), False, 'from tccli.utils import Utils\n'), ((45494, 45511), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (45504, 45511), False, 'import json\n'), ((45629, 45647), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (45639, 45647), False, 'import json\n'), ((46836, 46853), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (46846, 46853), False, 'import json\n'), ((46973, 46991), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (46983, 46991), False, 'import json\n'), ((47497, 47531), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Owner"""'], {}), "(argv, '--Owner')\n", (47514, 47531), False, 'from tccli.utils import Utils\n'), ((47551, 47586), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Offset"""'], {}), "(argv, '--Offset')\n", (47568, 47586), False, 'from tccli.utils import Utils\n'), ((47605, 47639), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Limit"""'], {}), "(argv, '--Limit')\n", (47622, 47639), False, 'from tccli.utils import Utils\n'), ((48394, 48411), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (48404, 48411), False, 'import json\n'), ((48534, 48552), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (48544, 48552), False, 'import json\n'), ((49062, 49100), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--MemberIds"""'], {}), "(argv, '--MemberIds')\n", (49079, 49100), False, 'from tccli.utils import Utils\n'), ((49120, 49155), 'tccli.utils.Utils.try_to_json', 
'Utils.try_to_json', (['argv', '"""--Offset"""'], {}), "(argv, '--Offset')\n", (49137, 49155), False, 'from tccli.utils import Utils\n'), ((49174, 49208), 'tccli.utils.Utils.try_to_json', 'Utils.try_to_json', (['argv', '"""--Limit"""'], {}), "(argv, '--Limit')\n", (49191, 49208), False, 'from tccli.utils import Utils\n'), ((49966, 49983), 'json.dumps', 'json.dumps', (['param'], {}), '(param)\n', (49976, 49983), False, 'import json\n'), ((50109, 50127), 'json.loads', 'json.loads', (['result'], {}), '(result)\n', (50119, 50127), False, 'import json\n'), ((51987, 52021), 'tccli.services.cme.v20191029.version.replace', 'v20191029.version.replace', (['"""-"""', '""""""'], {}), "('-', '')\n", (52012, 52021), False, 'from tccli.services.cme import v20191029\n'), ((53760, 53797), 'tccli.nice_command.NiceCommand', 'NiceCommand', (['actionName', "action['cb']"], {}), "(actionName, action['cb'])\n", (53771, 53797), False, 'from tccli.nice_command import NiceCommand\n'), ((55077, 55120), 'os.environ.get', 'os.environ.get', (['OptionsDefine.ENV_SECRET_ID'], {}), '(OptionsDefine.ENV_SECRET_ID)\n', (55091, 55120), False, 'import os\n'), ((55214, 55258), 'os.environ.get', 'os.environ.get', (['OptionsDefine.ENV_SECRET_KEY'], {}), '(OptionsDefine.ENV_SECRET_KEY)\n', (55228, 55258), False, 'import os\n'), ((55347, 55387), 'os.environ.get', 'os.environ.get', (['OptionsDefine.ENV_REGION'], {}), '(OptionsDefine.ENV_REGION)\n', (55361, 55387), False, 'import os\n'), ((56891, 56938), 'tccli.utils.Utils.split_str', 'Utils.split_str', (['""" """', "param['desc']", '(120)'], {}), "(' ', param['desc'], 120)\n", (56906, 56938), False, 'from tccli.utils import Utils\n'), ((52731, 52777), 'tccli.utils.Utils.split_str', 'Utils.split_str', (['""" """', "info['desc']", '(120)'], {}), "(' ', info['desc'], 120)\n", (52746, 52777), False, 'from tccli.utils import Utils\n'), ((57237, 57287), 'os.path.join', 'os.path.join', (['config.cli_path', '"""default.configure"""'], {}), "(config.cli_path, 
'default.configure')\n", (57249, 57287), False, 'import os\n')] |
#!/usr/bin/env python
# Copyright 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import nested_scopes, generators, division, absolute_import, \
with_statement, print_function, unicode_literals
import sys
import time
from pyghmi.ipmi import command as ipmi_command
from pyghmi import exceptions as pyghmi_exception
from tabulate import tabulate
from lib.inventory import Inventory
from lib.ipmi_power import IpmiPower
from lib.logger import Logger
from get_dhcp_lease_info import GetDhcpLeases
class IpmiPowerPXE(object):
    """Discover BMCs from DHCP leases, power-cycle them and force a PXE boot.

    The whole workflow runs in the constructor:
      1. Match every DHCP lease against the IPMI credential sets defined
         in the inventory and keep the BMCs that answer.
      2. Power all reachable BMCs off and wait for confirmation.
      3. Set the next boot device to 'pxe' (non-persistent).
      4. Power the BMCs back on and wait for confirmation.
    Any unrecoverable failure terminates the process with exit status 1.
    """

    def __init__(self, log, inv_file, dhcp_leases_path, time_out, wait):
        """
        Args:
            log (Logger): project logger.
            inv_file (str): path to the inventory (config.yml) file.
            dhcp_leases_path (str): path to the DHCP leases file.
            time_out (int): seconds to wait for a power state transition.
            wait (int): seconds to sleep between power state polls.
        """
        self.log = log
        inv = Inventory(self.log, inv_file)
        self.ipmi_power = IpmiPower(self.log)

        bmc_list, unsuccessful_ip_list = self._discover_bmcs(
            inv, dhcp_leases_path)

        if bmc_list:
            print("-" * 47)
            print("Successful IPMI connections:")
            print("-" * 47)
            print(tabulate(bmc_list, headers="keys"))
        if len(bmc_list) < inv.get_expected_node_count():
            msg = ('\nFAIL: %d BMC(s) defined in config.yml but only %d IPMI '
                   'connection(s) found!' %
                   (inv.get_expected_node_count(), len(bmc_list)))
            self.log.error(msg)
            print(msg)
            if unsuccessful_ip_list:
                print("-" * 54)
                print("IPs with DHCP leases but IPMI connection unsuccessful:")
                print("-" * 54)
                print(tabulate(unsuccessful_ip_list, headers="keys"))
            # Bug fix: the original referenced sys.exit without calling it,
            # so the script carried on despite the failed node count check.
            sys.exit(1)

        # Power everything off, wait, then abort if any BMC is still on.
        for bmc in bmc_list:
            powered_off, _ = self.ipmi_power.is_power_off(bmc)
            if powered_off:
                self.log.debug(
                    'Already powered off - Rack: %s - IP: %s' %
                    (bmc['rack_id'], bmc['ipv4']))
            else:
                self.ipmi_power.set_power_off(bmc)
        failed = self._await_state(
            bmc_list, self._is_not_power_off, time_out, wait)
        for bmc in failed:
            self.log.error(
                'Power off unsuccessful - Rack: %s - IP: %s - State: %s' %
                (bmc['rack_id'], bmc['ipv4'], bmc['power_state']))
        if failed:
            sys.exit(1)

        self._set_bootdev_pxe(bmc_list)

        # Power everything back on, wait, then abort if any BMC stayed off.
        for bmc in bmc_list:
            powered_on, _ = self.ipmi_power.is_power_on(bmc)
            if powered_on:
                self.log.info(
                    'Already powered on - Rack: %s - IP: %s' %
                    (bmc['rack_id'], bmc['ipv4']))
            else:
                self.ipmi_power.set_power_on(bmc)
        failed = self._await_state(
            bmc_list, self._is_not_power_on, time_out, wait)
        for bmc in failed:
            self.log.error(
                'Power on unsuccessful - Rack: %s - IP: %s - State: %s' %
                (bmc['rack_id'], bmc['ipv4'], bmc['power_state']))
        if failed:
            sys.exit(1)

    def _discover_bmcs(self, inv, dhcp_leases_path):
        """Return (reachable, unreachable) lists of BMC dicts.

        Every inventory credential set is tried against each leased IP; the
        first set that yields a working IPMI connection is kept inside the
        BMC dict.  IPs with no working credentials end up in the second
        list with the credential keys removed.
        """
        dhcp_leases = GetDhcpLeases(dhcp_leases_path, self.log)
        bmc_list = []
        unsuccessful_ip_list = []
        for ipv4 in dhcp_leases.get_mac_ip().values():
            bmc = {'ipv4': ipv4, 'rack_id': 'passive'}
            for userid, password in inv.yield_ipmi_credential_sets():
                bmc['userid'] = userid
                bmc['password'] = password
                self.log.debug(
                    'Attempting IPMI connection to IP: %s userid: %s '
                    'password: %s' % (ipv4, userid, password))
                try:
                    # Any IPMI query works as a connectivity probe here.
                    _rc, _ = self.ipmi_power.is_power_off(bmc)
                except SystemExit:
                    continue
                self.log.info(
                    'Successful IPMI connection to IP: %s userid: %s '
                    'password: %s' % (ipv4, userid, password))
                bmc_list.append(bmc)
                break
            else:
                self.log.warning(
                    'Unsuccessful IPMI connection to IP: %s' % ipv4)
                # Bug fix: pop with a default so an inventory that yields no
                # credential sets at all does not raise KeyError here.
                bmc.pop('userid', None)
                bmc.pop('password', None)
                unsuccessful_ip_list.append(bmc)
        return bmc_list, unsuccessful_ip_list

    def _set_bootdev_pxe(self, bmc_list):
        """Set the next-boot device of every BMC to PXE (non-persistent)."""
        bootdev = 'pxe'
        persist = False
        for bmc in bmc_list:
            ipmi_cmd = ipmi_command.Command(
                bmc=bmc['ipv4'],
                userid=bmc['userid'],
                password=bmc['password'])
            try:
                ipmi_cmd.set_bootdev(bootdev, persist)
            except pyghmi_exception.IpmiException as error:
                self.log.error(
                    'set_bootdev failed (device=%s persist=%s) - '
                    'IP: %s, %s' %
                    (bootdev, persist, bmc['ipv4'], str(error)))
                sys.exit(1)
            self.log.info(
                'set_bootdev success (device=%s persist=%s) - '
                'IP: %s' %
                (bootdev, persist, bmc['ipv4']))

    def _await_state(self, bmc_list, pending_check, time_out, wait):
        """Poll until every BMC reaches the requested power state.

        Args:
            bmc_list (list): BMC dicts to watch.
            pending_check (callable): one of self._is_not_power_on/_off;
                returns None once the BMC is in the desired state.
            time_out (int): overall polling budget in seconds.
            wait (int): sleep between polls in seconds.

        Returns:
            list: BMCs that never reached the requested state, each
            annotated with its last observed 'power_state'.
        """
        start_time = time.time()
        attempt = 1
        pending = list(bmc_list)
        while pending:
            if time.time() > start_time + time_out:
                break
            time.sleep(wait)
            pending = [bmc for bmc in pending
                       if pending_check(bmc, attempt) is not None]
            attempt += 1
        return pending

    def _is_not_power_on(self, bmc, attempt):
        """Return None if *bmc* is powered on, else *bmc* annotated with state."""
        _rc, power_state = self.ipmi_power.is_power_on(bmc)
        if _rc:
            self.log.info(
                'Power on successful - Rack: %s - IP: %s' %
                (bmc['rack_id'], bmc['ipv4']))
            return None
        bmc['power_state'] = power_state
        self.log.debug(
            'Power on pending - Rack: %s - IP: %s - State: %s - Attempt: %s' %
            (bmc['rack_id'], bmc['ipv4'], bmc['power_state'], attempt))
        return bmc

    def _is_not_power_off(self, bmc, attempt):
        """Return None if *bmc* is powered off, else *bmc* annotated with state."""
        _rc, power_state = self.ipmi_power.is_power_off(bmc)
        if _rc:
            self.log.info(
                'Power off successful - Rack: %s - IP: %s' %
                (bmc['rack_id'], bmc['ipv4']))
            return None
        bmc['power_state'] = power_state
        self.log.debug(
            'Power off pending - Rack: %s - IP: %s - State: %s - Attempt: %s' %
            (bmc['rack_id'], bmc['ipv4'], bmc['power_state'], attempt))
        return bmc
if __name__ == '__main__':
    """
    Arg1: inventory file
    Arg2: dhcp leases file path
    Arg3: time out
    Arg4: wait time
    Arg5: log level
    """
    LOG = Logger(__file__)
    if len(sys.argv) != 6:
        # Fix: the original raised a bare Exception only to catch it on the
        # very next line; log the usage error and exit directly instead.
        LOG.error('Invalid argument count')
        sys.exit(1)
    INV_FILE = sys.argv[1]
    DHCP_LEASES_PATH = sys.argv[2]
    TIME_OUT = int(sys.argv[3])
    WAIT = int(sys.argv[4])
    LOG.set_level(sys.argv[5])
    IpmiPowerPXE(LOG, INV_FILE, DHCP_LEASES_PATH, TIME_OUT, WAIT)
| [
"tabulate.tabulate",
"sys.exit",
"pyghmi.ipmi.command.Command",
"time.sleep",
"get_dhcp_lease_info.GetDhcpLeases",
"lib.ipmi_power.IpmiPower",
"lib.inventory.Inventory",
"lib.logger.Logger",
"time.time"
] | [((7262, 7278), 'lib.logger.Logger', 'Logger', (['__file__'], {}), '(__file__)\n', (7268, 7278), False, 'from lib.logger import Logger\n'), ((1196, 1225), 'lib.inventory.Inventory', 'Inventory', (['self.log', 'inv_file'], {}), '(self.log, inv_file)\n', (1205, 1225), False, 'from lib.inventory import Inventory\n'), ((1252, 1271), 'lib.ipmi_power.IpmiPower', 'IpmiPower', (['self.log'], {}), '(self.log)\n', (1261, 1271), False, 'from lib.ipmi_power import IpmiPower\n'), ((1343, 1384), 'get_dhcp_lease_info.GetDhcpLeases', 'GetDhcpLeases', (['dhcp_leases_path', 'self.log'], {}), '(dhcp_leases_path, self.log)\n', (1356, 1384), False, 'from get_dhcp_lease_info import GetDhcpLeases\n'), ((3698, 3709), 'time.time', 'time.time', ([], {}), '()\n', (3707, 3709), False, 'import time\n'), ((5457, 5468), 'time.time', 'time.time', ([], {}), '()\n', (5466, 5468), False, 'import time\n'), ((3866, 3882), 'time.sleep', 'time.sleep', (['wait'], {}), '(wait)\n', (3876, 3882), False, 'import time\n'), ((4286, 4297), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4294, 4297), False, 'import sys\n'), ((4449, 4539), 'pyghmi.ipmi.command.Command', 'ipmi_command.Command', ([], {'bmc': "bmc['ipv4']", 'userid': "bmc['userid']", 'password': "bmc['password']"}), "(bmc=bmc['ipv4'], userid=bmc['userid'], password=bmc[\n 'password'])\n", (4469, 4539), True, 'from pyghmi.ipmi import command as ipmi_command\n'), ((5625, 5641), 'time.sleep', 'time.sleep', (['wait'], {}), '(wait)\n', (5635, 5641), False, 'import time\n'), ((6043, 6054), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6051, 6054), False, 'import sys\n'), ((2715, 2749), 'tabulate.tabulate', 'tabulate', (['bmc_list'], {'headers': '"""keys"""'}), "(bmc_list, headers='keys')\n", (2723, 2749), False, 'from tabulate import tabulate\n'), ((3795, 3806), 'time.time', 'time.time', ([], {}), '()\n', (3804, 3806), False, 'import time\n'), ((5554, 5565), 'time.time', 'time.time', ([], {}), '()\n', (5563, 5565), False, 'import time\n'), ((7436, 
7447), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7444, 7447), False, 'import sys\n'), ((3267, 3313), 'tabulate.tabulate', 'tabulate', (['unsuccessful_ip_list'], {'headers': '"""keys"""'}), "(unsuccessful_ip_list, headers='keys')\n", (3275, 3313), False, 'from tabulate import tabulate\n'), ((4926, 4937), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4934, 4937), False, 'import sys\n')] |
import logging
from silex_client.action.command_base import CommandBase
from silex_client.action.action_query import ActionQuery
class Save(CommandBase):
    """
    Save the current scene, building the target path from the action context.
    """

    # Parameter schema consumed by the silex CommandBase framework;
    # keys/labels are part of the command's public contract.
    parameters = {
        "file_path": {"label": "filename", "type": str},
    }

    @CommandBase.conform_command()
    async def __call__(
        self,
        parameters,
        action_query: ActionQuery,
        logger: logging.Logger,
    ):
        # NOTE(review): placeholder body — only prints; the actual save
        # (using parameters["file_path"]) is presumably not implemented yet.
        print("Executing save")
| [
"silex_client.action.command_base.CommandBase.conform_command"
] | [((306, 335), 'silex_client.action.command_base.CommandBase.conform_command', 'CommandBase.conform_command', ([], {}), '()\n', (333, 335), False, 'from silex_client.action.command_base import CommandBase\n')] |
from unittest import TestCase
from musicscore.musicxml.types.complextypes.attributes import Clef
from musicscore.musicxml.types.complextypes.clef import Sign
class Test(TestCase):
    """Checks XML attribute rendering of a Clef element (number,
    after-barline, size, additional), individually and combined."""

    def setUp(self) -> None:
        # Fresh F-clef for every test case.
        self.clef = Clef()
        self.clef.add_child(Sign('F'))

    def test_1(self):
        # Bare clef: no attributes rendered on the <clef> tag.
        clef = self.clef
        expected = '''<clef>
  <sign>F</sign>
</clef>
'''
        actual = clef.to_string()
        self.assertEqual(expected, actual)

    def test_2(self):
        # number attribute alone.
        clef = self.clef
        clef.number = 2
        expected = '''<clef number="2">
  <sign>F</sign>
</clef>
'''
        actual = clef.to_string()
        self.assertEqual(expected, actual)

    def test_3(self):
        # after-barline attribute alone.
        clef = self.clef
        clef.after_barline = 'yes'
        expected = '''<clef after-barline="yes">
  <sign>F</sign>
</clef>
'''
        actual = clef.to_string()
        self.assertEqual(expected, actual)

    def test_4(self):
        # size attribute alone.
        clef = self.clef
        clef.size = 'large'
        expected = '''<clef size="large">
  <sign>F</sign>
</clef>
'''
        actual = clef.to_string()
        self.assertEqual(expected, actual)

    def test_5(self):
        # additional attribute alone.
        clef = self.clef
        clef.additional = 'no'
        expected = '''<clef additional="no">
  <sign>F</sign>
</clef>
'''
        actual = clef.to_string()
        self.assertEqual(expected, actual)

    def test_6(self):
        # All four attributes together; checks the serialization order.
        clef = self.clef
        clef.number = 2
        clef.additional = 'no'
        clef.size = 'large'
        clef.after_barline = 'yes'
        expected = '''<clef number="2" additional="no" size="large" after-barline="yes">
  <sign>F</sign>
</clef>
'''
        actual = clef.to_string()
        self.assertEqual(expected, actual)

    # def test_7(self):
    #     clef = Clef(number=2)
"musicscore.musicxml.types.complextypes.clef.Sign",
"musicscore.musicxml.types.complextypes.attributes.Clef"
] | [((232, 238), 'musicscore.musicxml.types.complextypes.attributes.Clef', 'Clef', ([], {}), '()\n', (236, 238), False, 'from musicscore.musicxml.types.complextypes.attributes import Clef\n'), ((267, 276), 'musicscore.musicxml.types.complextypes.clef.Sign', 'Sign', (['"""F"""'], {}), "('F')\n", (271, 276), False, 'from musicscore.musicxml.types.complextypes.clef import Sign\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import torch
import numpy as np
from torch.cuda.amp import autocast, GradScaler
from src.opts import opt
from src.dataset import Dataset
from src.losses import CtdetLoss
from src.utils.logger import Logger
from src.utils.average_meter import AverageMeter, TimeMeter
from src.model import get_model, load_model, save_model
def train(model, train_loader, criterion, optimizer, logger, opt, epoch, scaler, time_stats):
    """Run one training epoch with mixed-precision (amp) updates.

    Args:
        model: network under training (already moved to ``opt.device``).
        train_loader: DataLoader yielding dict batches containing 'input'.
        criterion: loss module whose result dict has 'loss', 'hm_loss',
            'wh_loss' and 'off_loss' entries (each a tensor).
        optimizer: torch optimizer.
        logger: object exposing ``write(str)``.
        opt: global options namespace (device, ...).
        epoch: current epoch index, used for logging only.
        scaler: torch.cuda.amp.GradScaler driving the amp step.
        time_stats: meter tracking estimated remaining run time.
    """
    model.train()
    # Running averages of every loss component over this epoch.
    avg_loss_stats = {l: AverageMeter()
                      for l in ['loss', 'hm_loss', 'wh_loss', 'off_loss']}
    for iter_id, batch in enumerate(train_loader):
        # to cuda
        for k in batch:
            batch[k] = batch[k].to(device=opt.device, non_blocking=True)

        # amp: forward + loss under autocast, then scaled backward/step.
        with autocast():
            output = model(batch['input'])
            loss_stats = criterion(output, batch)
            loss = loss_stats['loss'].mean()
        optimizer.zero_grad()
        scaler.scale(loss).backward()
        scaler.step(optimizer)
        scaler.update()

        # else:
        #     # no amp
        #     output = model(batch['input'])
        #     loss_stats = criterion(output, batch)
        #     loss = loss_stats['loss'].mean()
        #     optimizer.zero_grad()
        #     loss.backward()
        #     optimizer.step()

        # Build the progress line every iteration (updates the averages),
        # but only emit it every 100 iterations.
        info = f'train : [{epoch}][{iter_id}/{len(train_loader)}] |'
        for l in avg_loss_stats:
            avg_loss_stats[l].update(
                loss_stats[l].mean().item(), batch['input'].size(0))
            info += f'|{l} {avg_loss_stats[l].avg:.4f} '
        time_stats.update(epoch, iter_id)
        info += f'|left_time: {time_stats.left_time:.1f} hour'

        # log
        if iter_id % 100 == 0:
            logger.write(info)
def val(model, val_loader, criterion, logger, opt, epoch):
    """Run one evaluation pass (no gradients) and log running loss averages.

    Mirrors ``train`` but without the optimizer/amp machinery.
    """
    with torch.no_grad():
        model.eval()
        torch.cuda.empty_cache()
        # Running averages of every loss component over the validation set.
        avg_loss_stats = {l: AverageMeter()
                          for l in ['loss', 'hm_loss', 'wh_loss', 'off_loss']}
        for iter_id, batch in enumerate(val_loader):
            for k in batch:
                batch[k] = batch[k].to(device=opt.device, non_blocking=True)
            output = model(batch['input'])
            loss_stats = criterion(output, batch)
            info = f'val : [{epoch}][{iter_id}/{len(val_loader)}] |'
            for l in avg_loss_stats:
                avg_loss_stats[l].update(
                    loss_stats[l].mean().item(), batch['input'].size(0))
                info += f'|{l} {avg_loss_stats[l].avg:.4f} '
            # log
            if iter_id % 100 == 0:
                logger.write(info)
def main():
    """Entry point: build model/optimizer/data loaders and run training.

    Reads all configuration from the global ``opt`` namespace, optionally
    resumes from ``opt.load_model``, validates/checkpoints every
    ``opt.val_intervals`` epochs and applies a step-decay LR schedule.
    """
    torch.manual_seed(317)
    # Bug fix: the attribute is ``benchmark`` — the original misspelled it
    # ``benckmark``, which silently created a useless attribute and never
    # enabled cudnn autotuning.
    torch.backends.cudnn.benchmark = True
    train_logger = Logger(opt, "train")
    val_logger = Logger(opt, "val")
    start_epoch = 0

    print('Creating model...')
    model = get_model(opt.arch, opt.heads).to(opt.device)
    optimizer = torch.optim.Adam(model.parameters(), opt.lr)
    criterion = CtdetLoss(opt)

    print('Loading model...')
    if opt.load_model != '':
        model, optimizer, start_epoch = load_model(
            model, opt.load_model, optimizer, opt.lr, opt.lr_step)

    model = torch.nn.DataParallel(model)
    # Gradient scaler for mixed-precision (amp) training.
    scaler = GradScaler()

    print('Setting up data...')
    train_dataset = Dataset(opt, 'train')
    val_dataset = Dataset(opt, 'val')
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=opt.batch_size,
        shuffle=True,
        num_workers=16,
        pin_memory=True,
        drop_last=True
    )
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=1,
        shuffle=False,
        num_workers=1,
        pin_memory=True
    )
    # cal left time: tracks elapsed/remaining time across the whole run.
    time_stats = TimeMeter(opt.num_epochs, len(train_loader))
    for epoch in range(start_epoch + 1, opt.num_epochs + 1):
        print('train...')
        train(model, train_loader, criterion, optimizer,
              train_logger, opt, epoch, scaler, time_stats)
        if epoch % opt.val_intervals == 0:
            print('val...')
            val(model, val_loader, criterion, val_logger, opt, epoch)
            save_model(os.path.join(opt.save_dir, f'model_{epoch}.pth'),
                       epoch, model, optimizer)
        # update learning rate: step decay at the configured epochs.
        if epoch in opt.lr_step:
            lr = opt.lr * (0.1 ** (opt.lr_step.index(epoch) + 1))
            print('Drop LR to', lr)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
    # Final checkpoint without optimizer state.
    save_model(os.path.join(opt.save_dir, 'model_final.pth'), epoch, model)
if __name__ == '__main__':
main()
| [
"torch.manual_seed",
"src.model.load_model",
"src.opts.opt.lr_step.index",
"torch.cuda.amp.GradScaler",
"src.dataset.Dataset",
"src.model.get_model",
"os.path.join",
"torch.nn.DataParallel",
"src.utils.logger.Logger",
"torch.cuda.amp.autocast",
"src.losses.CtdetLoss",
"torch.utils.data.DataLoa... | [((2877, 2899), 'torch.manual_seed', 'torch.manual_seed', (['(317)'], {}), '(317)\n', (2894, 2899), False, 'import torch\n'), ((2965, 2985), 'src.utils.logger.Logger', 'Logger', (['opt', '"""train"""'], {}), "(opt, 'train')\n", (2971, 2985), False, 'from src.utils.logger import Logger\n'), ((3004, 3022), 'src.utils.logger.Logger', 'Logger', (['opt', '"""val"""'], {}), "(opt, 'val')\n", (3010, 3022), False, 'from src.utils.logger import Logger\n'), ((3216, 3230), 'src.losses.CtdetLoss', 'CtdetLoss', (['opt'], {}), '(opt)\n', (3225, 3230), False, 'from src.losses import CtdetLoss\n'), ((3428, 3456), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (3449, 3456), False, 'import torch\n'), ((3484, 3496), 'torch.cuda.amp.GradScaler', 'GradScaler', ([], {}), '()\n', (3494, 3496), False, 'from torch.cuda.amp import autocast, GradScaler\n'), ((3553, 3574), 'src.dataset.Dataset', 'Dataset', (['opt', '"""train"""'], {}), "(opt, 'train')\n", (3560, 3574), False, 'from src.dataset import Dataset\n'), ((3594, 3613), 'src.dataset.Dataset', 'Dataset', (['opt', '"""val"""'], {}), "(opt, 'val')\n", (3601, 3613), False, 'from src.dataset import Dataset\n'), ((3636, 3772), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_dataset'], {'batch_size': 'opt.batch_size', 'shuffle': '(True)', 'num_workers': '(16)', 'pin_memory': '(True)', 'drop_last': '(True)'}), '(train_dataset, batch_size=opt.batch_size,\n shuffle=True, num_workers=16, pin_memory=True, drop_last=True)\n', (3663, 3772), False, 'import torch\n'), ((3850, 3955), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['val_dataset'], {'batch_size': '(1)', 'shuffle': '(False)', 'num_workers': '(1)', 'pin_memory': '(True)'}), '(val_dataset, batch_size=1, shuffle=False,\n num_workers=1, pin_memory=True)\n', (3877, 3955), False, 'import torch\n'), ((614, 628), 'src.utils.average_meter.AverageMeter', 'AverageMeter', ([], {}), '()\n', (626, 
628), False, 'from src.utils.average_meter import AverageMeter, TimeMeter\n'), ((2015, 2030), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2028, 2030), False, 'import torch\n'), ((2063, 2087), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (2085, 2087), False, 'import torch\n'), ((3335, 3400), 'src.model.load_model', 'load_model', (['model', 'opt.load_model', 'optimizer', 'opt.lr', 'opt.lr_step'], {}), '(model, opt.load_model, optimizer, opt.lr, opt.lr_step)\n', (3345, 3400), False, 'from src.model import get_model, load_model, save_model\n'), ((4878, 4923), 'os.path.join', 'os.path.join', (['opt.save_dir', '"""model_final.pth"""'], {}), "(opt.save_dir, 'model_final.pth')\n", (4890, 4923), False, 'import os\n'), ((906, 916), 'torch.cuda.amp.autocast', 'autocast', ([], {}), '()\n', (914, 916), False, 'from torch.cuda.amp import autocast, GradScaler\n'), ((2118, 2132), 'src.utils.average_meter.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2130, 2132), False, 'from src.utils.average_meter import AverageMeter, TimeMeter\n'), ((3091, 3121), 'src.model.get_model', 'get_model', (['opt.arch', 'opt.heads'], {}), '(opt.arch, opt.heads)\n', (3100, 3121), False, 'from src.model import get_model, load_model, save_model\n'), ((4468, 4516), 'os.path.join', 'os.path.join', (['opt.save_dir', 'f"""model_{epoch}.pth"""'], {}), "(opt.save_dir, f'model_{epoch}.pth')\n", (4480, 4516), False, 'import os\n'), ((4671, 4695), 'src.opts.opt.lr_step.index', 'opt.lr_step.index', (['epoch'], {}), '(epoch)\n', (4688, 4695), False, 'from src.opts import opt\n')] |
import fitz
import logging
from pathlib import Path
from typing import List, Dict
from .rvnstu import Rvnstu
from .ftoolbox import Ftoolbox
from .merger import Merger
logger = logging.getLogger(__name__)
class Mfitzer:
    """Helpers built on top of the fitz (PyMuPDF) module for renaming and
    merging per-student PDF files found in ``directory_path``.
    """

    def __init__(self, directory_path: str):
        self.directory_path = directory_path

    def readpage(self, file: str) -> str:
        """Return pdf first page to string

        Args:
            file (str): Fitz pdf

        Returns:
            str: text from pdf
        """
        doc = fitz.open(file)
        try:
            # First page
            return doc[0].getText()
        finally:
            # Bug fix: the document handle was opened but never closed
            # (file-descriptor leak when processing many PDFs).
            doc.close()

    def data_idstudent(self, text: str) -> str:
        """Return id student from text (relevedenote)

        Args:
            text (str): text from pdf file

        Returns:
            str: id student
        """
        return Rvnstu(text).idstudent

    def joinpdf(self, data: List, mg: Merger, ft: Ftoolbox) -> None:
        """Merge pdf with same id student in name file

        Args:
            data (List): Array of id student
            mg (Merger): Object Merger
            ft (Ftoolbox): Object Ftoolbox
        """
        for id in data:
            pages = ft.get_pdffiles(Path(self.directory_path), extension=f'*{id}.pdf')
            mg.merge(pages, Path(self.directory_path, f'{id}.pdf'), ft.remove_file)

    def mftzer_main(self, *, join=False) -> Dict:
        """Rename file if pdf text have id student.
        Return dictionnary with {id_student, associated file number}

        Args:
            join (bool, optional): Flag for use joinpdf function or not. Default to False
            Keyword-only

        Returns:
            Dict: {id_student, associated file number}
        """
        # init object
        ft = Ftoolbox()
        # init result
        data = {}
        # all pdf files
        files = ft.get_pdffiles(Path(self.directory_path))
        for file in files:
            text = self.readpage(file)
            # Bug fix: removed stray debug ``print(text)`` left over from
            # development; logging goes through ``logger`` instead.
            ne = self.data_idstudent(text)
            if ne:
                if ne not in data:
                    data[ne] = 1
                    ft.rename_file(file, ne)
                    logger.debug(f'{file} was renamed in {ne}.pdf')
                else:
                    data[ne] += 1
                    # formate name file
                    # "{int}-{id_student}"
                    newfilename = f'{data.get(ne)}-{ne}'
                    ft.rename_file(file, newfilename)
                    logger.debug(f'File already exists for id : {ne}')
                    logger.debug(f'New file name for save {newfilename}')
            else:
                # remove file who haven't student id
                ft.remove_file(file)
                logger.debug(f'File {file} was removed')
        # if value in dict > 1, return associated key in a list
        # and if flag join is equal True boolean
        if (karray := [a for a, b in data.items() if b > 1]) and (join):
            # init object
            mg = Merger()
            logger.debug('Join file with same id')
            self.joinpdf(karray, mg, ft)
        # Bug fix: the signature promises a Dict but nothing was returned.
        return data
| [
"logging.getLogger",
"pathlib.Path",
"fitz.open"
] | [((178, 205), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (195, 205), False, 'import logging\n'), ((582, 597), 'fitz.open', 'fitz.open', (['file'], {}), '(file)\n', (591, 597), False, 'import fitz\n'), ((1962, 1987), 'pathlib.Path', 'Path', (['self.directory_path'], {}), '(self.directory_path)\n', (1966, 1987), False, 'from pathlib import Path\n'), ((1270, 1295), 'pathlib.Path', 'Path', (['self.directory_path'], {}), '(self.directory_path)\n', (1274, 1295), False, 'from pathlib import Path\n'), ((1349, 1387), 'pathlib.Path', 'Path', (['self.directory_path', 'f"""{id}.pdf"""'], {}), "(self.directory_path, f'{id}.pdf')\n", (1353, 1387), False, 'from pathlib import Path\n')] |
from scvi.models import VAE, MeanVarianceVAE
from scvi.inference import UnsupervisedTrainer
from .de_model import DEModel
import numpy as np
class ScVIClassic(DEModel):
    """Differential-expression model wrapping a classic scVI VAE (or the
    mean-variance variant) with an UnsupervisedTrainer."""

    def __init__(self, dataset, reconstruction_loss, n_latent, full_cov=False,
                 do_mean_variance=False, name=''):
        super().__init__(dataset=dataset, name=name)
        self.reconstruction_loss = reconstruction_loss
        self.n_latent = n_latent
        self.full_cov = full_cov

        # Pick the VAE flavour; the class is instantiated later in full_init.
        if do_mean_variance:
            self.model_type = MeanVarianceVAE
        else:
            self.model_type = VAE

        # Deferred construction: model/trainer are built in full_init().
        self.model = None
        self.trainer = None
        self.is_fully_init = False

    # early_stopping_kwargs={'early_stopping_metric': 'll',
    #                        'save_best_state_metric': 'll',
    #                        'patience': 15, 'threshold': 3}

    def full_init(self):
        """Instantiate the VAE and its trainer (must precede train())."""
        self.model = self.model_type(n_input=self.dataset.nb_genes, n_batch=self.dataset.n_batches,
                                     reconstruction_loss=self.reconstruction_loss,
                                     n_latent=self.n_latent,
                                     full_cov=self.full_cov)
        self.trainer = UnsupervisedTrainer(model=self.model, gene_dataset=self.dataset,
                                           use_cuda=True,
                                           train_size=0.7, kl=1, frequency=1)
        self.is_fully_init = True

    def train(self, **train_params):
        """Train the underlying VAE; defaults to 150 epochs at lr=1e-3."""
        assert self.is_fully_init
        if len(train_params) == 0:
            train_params = {'n_epochs': 150, 'lr': 1e-3}
        self.trainer.train(**train_params)

    def predict_de(self, n_samples=10000, M_permutation=100000, n_for_each=10,
                   idx1=None, idx2=None, mode='rho'):
        """Score differential expression between two cell groups.

        When idx1/idx2 are not given, the first ``n_for_each`` cells of
        labels 0 and 1 are compared. Returns the Bayes-factor DataFrame
        (columns prefixed with this model's name) and stores |bayes1|
        in ``self.de_pred``.
        """
        assert mode in ['rho', 'gamma']
        full = self.trainer.create_posterior(self.model, self.dataset,
                                             indices=np.arange(len(self.dataset)))
        if idx1 is None and idx2 is None:
            # Boolean masks over all cells for the two label groups.
            cell_pos1 = np.where(self.dataset.labels.ravel() == 0)[0][:n_for_each]
            cell_pos2 = np.where(self.dataset.labels.ravel() == 1)[0][:n_for_each]
            cell_idx1 = np.isin(np.arange(len(self.dataset)), cell_pos1)
            cell_idx2 = np.isin(np.arange(len(self.dataset)), cell_pos2)
        else:
            cell_idx1 = idx1
            cell_idx2 = idx2
        de_res = full.differential_expression_score(cell_idx1, cell_idx2, n_samples=n_samples,
                                                    M_permutation=M_permutation)
        de_res_gamma = full.differential_expression_gamma(cell_idx1, cell_idx2, n_samples=n_samples,
                                                          M_permutation=M_permutation)
        de_res.loc[:, 'gamma_bayes1'] = de_res_gamma
        de_res = de_res.sort_index()
        self.de_pred = de_res.bayes1.abs()
        de_res.columns = [self.name+'_'+col for col in de_res.columns]
        return de_res
| [
"scvi.inference.UnsupervisedTrainer"
] | [((1241, 1360), 'scvi.inference.UnsupervisedTrainer', 'UnsupervisedTrainer', ([], {'model': 'self.model', 'gene_dataset': 'self.dataset', 'use_cuda': '(True)', 'train_size': '(0.7)', 'kl': '(1)', 'frequency': '(1)'}), '(model=self.model, gene_dataset=self.dataset, use_cuda=\n True, train_size=0.7, kl=1, frequency=1)\n', (1260, 1360), False, 'from scvi.inference import UnsupervisedTrainer\n')] |
from flask import Flask,render_template,request,redirect
from pymongo import MongoClient
# Module-level singletons: Flask app and MongoDB handles.
app=Flask(__name__)
# Local MongoDB instance on the default port.
client=MongoClient('mongodb://127.0.0.1:27017')
db=client['names']
collection=db.record  # NOTE(review): unused below — inserts go through db.record directly
@app.route('/',methods=['GET','POST'])
def index():
    """Serve the index page; on POST, persist the submitted name to MongoDB."""
    if request.method == 'POST':
        first = request.form['fname']
        last = request.form['lname']
        print(first)
        db.record.insert_one({'firstname': first, 'lastname': last})
    return render_template('index.html')
if __name__=='__main__':
app.run(debug=True)
| [
"flask.render_template",
"pymongo.MongoClient",
"flask.Flask"
] | [((94, 109), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (99, 109), False, 'from flask import Flask, render_template, request, redirect\n'), ((117, 157), 'pymongo.MongoClient', 'MongoClient', (['"""mongodb://127.0.0.1:27017"""'], {}), "('mongodb://127.0.0.1:27017')\n", (128, 157), False, 'from pymongo import MongoClient\n'), ((584, 613), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (599, 613), False, 'from flask import Flask, render_template, request, redirect\n')] |
import torch
import torch.nn as nn
import torch.nn.functional as F
class sub_pixel(nn.Module):
    """Upsample by ``scale`` via pixel shuffle (depth-to-space).

    NOTE: ``act`` is accepted for API compatibility but currently unused.
    """

    def __init__(self, scale, act=False):
        super(sub_pixel, self).__init__()
        # Single PixelShuffle layer, kept inside a Sequential container.
        self.body = nn.Sequential(nn.PixelShuffle(scale))

    def forward(self, x):
        return self.body(x)
class make_dense(nn.Module):
    """Dense-block unit: 'same'-padded conv + ReLU, with the result
    concatenated channel-wise onto the input."""

    def __init__(self, nChannels, growthRate, kernel_size=3):
        super(make_dense, self).__init__()
        pad = (kernel_size - 1) // 2  # preserves spatial size for odd kernels
        self.conv = nn.Conv2d(nChannels, growthRate, kernel_size=kernel_size,
                              padding=pad, bias=False)

    def forward(self, x):
        features = F.relu(self.conv(x))
        # Input passes through untouched; new features are appended.
        return torch.cat((x, features), 1)
# Residual dense block (RDB) architecture
class RDB(nn.Module):
    """Residual dense block: a stack of make_dense layers whose channels
    grow by ``growthRate`` each, fused back by a 1x1 conv, plus a skip add."""

    def __init__(self, nChannels, nDenselayer, growthRate):
        super(RDB, self).__init__()
        layers = []
        channels = nChannels
        for _ in range(nDenselayer):
            layers.append(make_dense(channels, growthRate))
            channels += growthRate
        self.dense_layers = nn.Sequential(*layers)
        # 1x1 conv squeezes the densely grown channels back to nChannels.
        self.conv_1x1 = nn.Conv2d(channels, nChannels, kernel_size=1, padding=0, bias=False)

    def forward(self, x):
        fused = self.conv_1x1(self.dense_layers(x))
        # Local residual connection.
        return fused + x
# Residual Dense Network
class RDN(nn.Module):
    """Residual Dense Network for single-image super-resolution:
    shallow features -> 3 RDBs -> global feature fusion -> pixel-shuffle
    upsampling -> reconstruction conv."""

    def __init__(self, n_channel, n_denselayer, n_feat, n_scale, growth_rate):
        super(RDN, self).__init__()
        # Shallow feature extraction (F-1 and F0).
        self.conv1 = nn.Conv2d(n_channel, n_feat, kernel_size=3, padding=1, bias=True)
        self.conv2 = nn.Conv2d(n_feat, n_feat, kernel_size=3, padding=1, bias=True)
        # Three residual dense blocks.
        self.RDB1 = RDB(n_feat, n_denselayer, growth_rate)
        self.RDB2 = RDB(n_feat, n_denselayer, growth_rate)
        self.RDB3 = RDB(n_feat, n_denselayer, growth_rate)
        # Global feature fusion (GFF): 1x1 merge of the RDB outputs, then 3x3.
        self.GFF_1x1 = nn.Conv2d(n_feat * 3, n_feat, kernel_size=1, padding=0, bias=True)
        self.GFF_3x3 = nn.Conv2d(n_feat, n_feat, kernel_size=3, padding=1, bias=True)
        # Upsampler: channel expansion followed by pixel shuffle.
        self.conv_up = nn.Conv2d(n_feat, n_feat * n_scale * n_scale, kernel_size=3, padding=1, bias=True)
        self.upsample = sub_pixel(n_scale)
        # Final reconstruction back to the input channel count.
        self.conv3 = nn.Conv2d(n_feat, n_channel, kernel_size=3, padding=1, bias=True)

    def forward(self, x):
        shallow = self.conv1(x)
        f0 = self.conv2(shallow)
        f1 = self.RDB1(f0)
        f2 = self.RDB2(f1)
        f3 = self.RDB3(f2)
        fused = self.GFF_3x3(self.GFF_1x1(torch.cat((f1, f2, f3), 1)))
        # Global residual learning over the shallow features.
        dense_features = fused + shallow
        return self.conv3(self.upsample(self.conv_up(dense_features)))
def _crop_and_merge(to_crop: torch.Tensor, to_merge_to: torch.Tensor) -> torch.Tensor:
padding = [0, 0, to_merge_to.size()[2] - to_crop.size()[2], to_merge_to.size()[3] - to_crop.size()[3]]
cropped_to_crop = F.pad(to_crop, padding)
return torch.cat((cropped_to_crop, to_merge_to), dim=1)
class UNet(nn.Module):
    """U-Net encoder/decoder with four down levels, a bottleneck, four up
    levels with skip connections, and a final 1x1 projection to 3 channels.

    NOTE(review): ``depth`` is accepted but never used — the five levels are
    hard-coded; confirm whether configurable depth was intended.
    """

    def __init__(self, depth: int = 5):
        super(UNet, self).__init__()
        # part 1 (encoder level 1: 3 -> 64)
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=True)
        self.conv2 = nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=True)
        self.pool1 = nn.MaxPool2d(2, stride=1, padding=1)
        # part 2 (encoder level 2: 64 -> 128)
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, padding=1, bias=True)
        self.conv4 = nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=True)
        self.pool2 = nn.MaxPool2d(2, stride=1, padding=1)
        # part 3 (encoder level 3: 128 -> 256)
        self.conv5 = nn.Conv2d(128, 256, kernel_size=3, padding=1, bias=True)
        self.conv6 = nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=True)
        self.pool3 = nn.MaxPool2d(2, stride=1, padding=1)
        # part 4 (encoder level 4: 256 -> 512)
        self.conv7 = nn.Conv2d(256, 512, kernel_size=3, padding=1, bias=True)
        self.conv8 = nn.Conv2d(512, 512, kernel_size=3, padding=1, bias=True)
        self.pool4 = nn.MaxPool2d(2, stride=1, padding=1)
        # part5 (bottleneck: 512 -> 1024, then transpose conv back to 512)
        self.conv9 = nn.Conv2d(512, 1024, kernel_size=3, padding=1, bias=True)
        self.conv10 = nn.Conv2d(1024, 1024, kernel_size=3, padding=1, bias=True)
        self.up_conv1 = nn.ConvTranspose2d(1024, 512, kernel_size=2, padding=1, bias=True)
        # part6 (decoder level 1: concat skip -> 1024 in, 512 out)
        self.conv11 = nn.Conv2d(1024, 512, kernel_size=3, padding=1, bias=True)
        self.conv12 = nn.Conv2d(512, 512, kernel_size=3, padding=1, bias=True)
        self.up_conv2 = nn.ConvTranspose2d(512, 256, kernel_size=2, padding=1, bias=True)
        # part7 (decoder level 2)
        self.conv13 = nn.Conv2d(512, 256, kernel_size=3, padding=1, bias=True)
        self.conv14 = nn.Conv2d(256, 256, kernel_size=3, padding=1, bias=True)
        self.up_conv3 = nn.ConvTranspose2d(256, 128, kernel_size=2, padding=1, bias=True)
        # part8 (decoder level 3)
        self.conv15 = nn.Conv2d(256, 128, kernel_size=3, padding=1, bias=True)
        self.conv16 = nn.Conv2d(128, 128, kernel_size=3, padding=1, bias=True)
        self.up_conv4 = nn.ConvTranspose2d(128, 64, kernel_size=2, padding=1, bias=True)
        # part9 (decoder level 4 and final 1x1 projection to 3 channels)
        self.conv17 = nn.Conv2d(128, 64, kernel_size=3, padding=1, bias=True)
        self.conv18 = nn.Conv2d(64, 64, kernel_size=3, padding=1, bias=True)
        self.conv19 = nn.Conv2d(64, 3, kernel_size=1, bias=True)

    def forward(self, x):
        # Encoder path: each level's output is kept for its skip connection.
        level_1_down = F.relu(self.conv2(F.relu(self.conv1(x))))
        level_2_down = F.relu(self.conv4(F.relu(self.conv3(self.pool1(level_1_down)))))
        level_3_down = F.relu(self.conv6(F.relu(self.conv5(self.pool2(level_2_down)))))
        level_4_down = F.relu(self.conv8(F.relu(self.conv7(self.pool3(level_3_down)))))
        # Bottleneck followed by the decoder path; _crop_and_merge pads the
        # skip tensor to match before concatenating on channels.
        level_5_up = self.up_conv1(F.relu(self.conv10(F.relu(self.conv9(self.pool4(level_4_down))))))
        level_6_up = self.up_conv2(F.relu(self.conv12(F.relu(self.conv11(_crop_and_merge(level_4_down, level_5_up))))))
        level_7_up = self.up_conv3(F.relu(self.conv14(F.relu(self.conv13(_crop_and_merge(level_3_down, level_6_up))))))
        level_8_up = self.up_conv4(F.relu(self.conv16(F.relu(self.conv15(_crop_and_merge(level_2_down, level_7_up))))))
        out = self.conv19(F.relu(self.conv18(F.relu(self.conv17(_crop_and_merge(level_1_down, level_8_up))))))
        return F.relu(out)
class ESPCN(nn.Module):
    """Efficient sub-pixel CNN: three convs followed by pixel-shuffle
    upscaling; output is squashed to [0, 1] by a sigmoid."""

    def __init__(self, upscale_factor: int, input_channel_size: int, output_channel_size: int):
        super(ESPCN, self).__init__()
        self.conv1 = nn.Conv2d(input_channel_size, 8, (5, 5), (1, 1), (2, 2))
        self.conv2 = nn.Conv2d(8, 4, (3, 3), (1, 1), (1, 1))
        # Emits r^2 feature maps per output channel for the shuffle step.
        self.conv3 = nn.Conv2d(4, output_channel_size * (upscale_factor ** 2), (3, 3), (1, 1), (1, 1))
        self.pixel_shuffle = nn.PixelShuffle(upscale_factor)

    def forward(self, x):
        hidden = torch.tanh(self.conv1(x))
        hidden = torch.tanh(self.conv2(hidden))
        return torch.sigmoid(self.pixel_shuffle(self.conv3(hidden)))
| [
"torch.nn.PixelShuffle",
"torch.nn.Sequential",
"torch.nn.Conv2d",
"torch.nn.MaxPool2d",
"torch.nn.functional.relu",
"torch.nn.functional.pad",
"torch.nn.ConvTranspose2d",
"torch.cat"
] | [((3198, 3221), 'torch.nn.functional.pad', 'F.pad', (['to_crop', 'padding'], {}), '(to_crop, padding)\n', (3203, 3221), True, 'import torch.nn.functional as F\n'), ((3234, 3282), 'torch.cat', 'torch.cat', (['(cropped_to_crop, to_merge_to)'], {'dim': '(1)'}), '((cropped_to_crop, to_merge_to), dim=1)\n', (3243, 3282), False, 'import torch\n'), ((269, 292), 'torch.nn.Sequential', 'nn.Sequential', (['*modules'], {}), '(*modules)\n', (282, 292), True, 'import torch.nn as nn\n'), ((518, 624), 'torch.nn.Conv2d', 'nn.Conv2d', (['nChannels', 'growthRate'], {'kernel_size': 'kernel_size', 'padding': '((kernel_size - 1) // 2)', 'bias': '(False)'}), '(nChannels, growthRate, kernel_size=kernel_size, padding=(\n kernel_size - 1) // 2, bias=False)\n', (527, 624), True, 'import torch.nn as nn\n'), ((726, 748), 'torch.cat', 'torch.cat', (['(x, out)', '(1)'], {}), '((x, out), 1)\n', (735, 748), False, 'import torch\n'), ((1147, 1170), 'torch.nn.Sequential', 'nn.Sequential', (['*modules'], {}), '(*modules)\n', (1160, 1170), True, 'import torch.nn as nn\n'), ((1195, 1265), 'torch.nn.Conv2d', 'nn.Conv2d', (['nChannels_', 'nChannels'], {'kernel_size': '(1)', 'padding': '(0)', 'bias': '(False)'}), '(nChannels_, nChannels, kernel_size=1, padding=0, bias=False)\n', (1204, 1265), True, 'import torch.nn as nn\n'), ((1746, 1809), 'torch.nn.Conv2d', 'nn.Conv2d', (['nChannel', 'nFeat'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(nChannel, nFeat, kernel_size=3, padding=1, bias=True)\n', (1755, 1809), True, 'import torch.nn as nn\n'), ((1844, 1904), 'torch.nn.Conv2d', 'nn.Conv2d', (['nFeat', 'nFeat'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(nFeat, nFeat, kernel_size=3, padding=1, bias=True)\n', (1853, 1904), True, 'import torch.nn as nn\n'), ((2151, 2215), 'torch.nn.Conv2d', 'nn.Conv2d', (['(nFeat * 3)', 'nFeat'], {'kernel_size': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(nFeat * 3, nFeat, kernel_size=1, padding=0, bias=True)\n', (2160, 2215), 
True, 'import torch.nn as nn\n'), ((2239, 2299), 'torch.nn.Conv2d', 'nn.Conv2d', (['nFeat', 'nFeat'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(nFeat, nFeat, kernel_size=3, padding=1, bias=True)\n', (2248, 2299), True, 'import torch.nn as nn\n'), ((2343, 2419), 'torch.nn.Conv2d', 'nn.Conv2d', (['nFeat', '(nFeat * scale * scale)'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(nFeat, nFeat * scale * scale, kernel_size=3, padding=1, bias=True)\n', (2352, 2419), True, 'import torch.nn as nn\n'), ((2497, 2560), 'torch.nn.Conv2d', 'nn.Conv2d', (['nFeat', 'nChannel'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(nFeat, nChannel, kernel_size=3, padding=1, bias=True)\n', (2506, 2560), True, 'import torch.nn as nn\n'), ((2744, 2773), 'torch.cat', 'torch.cat', (['(F_1, F_2, F_3)', '(1)'], {}), '((F_1, F_2, F_3), 1)\n', (2753, 2773), False, 'import torch\n'), ((3424, 3477), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(3, 64, kernel_size=3, padding=1, bias=True)\n', (3433, 3477), True, 'import torch.nn as nn\n'), ((3499, 3553), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(64, 64, kernel_size=3, padding=1, bias=True)\n', (3508, 3553), True, 'import torch.nn as nn\n'), ((3575, 3611), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {'stride': '(1)', 'padding': '(1)'}), '(2, stride=1, padding=1)\n', (3587, 3611), True, 'import torch.nn as nn\n'), ((3651, 3706), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(64, 128, kernel_size=3, padding=1, bias=True)\n', (3660, 3706), True, 'import torch.nn as nn\n'), ((3728, 3784), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(128, 128, kernel_size=3, padding=1, bias=True)\n', (3737, 3784), True, 'import torch.nn as 
nn\n'), ((3806, 3842), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {'stride': '(1)', 'padding': '(1)'}), '(2, stride=1, padding=1)\n', (3818, 3842), True, 'import torch.nn as nn\n'), ((3882, 3938), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(128, 256, kernel_size=3, padding=1, bias=True)\n', (3891, 3938), True, 'import torch.nn as nn\n'), ((3960, 4016), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(256, 256, kernel_size=3, padding=1, bias=True)\n', (3969, 4016), True, 'import torch.nn as nn\n'), ((4038, 4074), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {'stride': '(1)', 'padding': '(1)'}), '(2, stride=1, padding=1)\n', (4050, 4074), True, 'import torch.nn as nn\n'), ((4114, 4170), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(256, 512, kernel_size=3, padding=1, bias=True)\n', (4123, 4170), True, 'import torch.nn as nn\n'), ((4192, 4248), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(512, 512, kernel_size=3, padding=1, bias=True)\n', (4201, 4248), True, 'import torch.nn as nn\n'), ((4270, 4306), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)'], {'stride': '(1)', 'padding': '(1)'}), '(2, stride=1, padding=1)\n', (4282, 4306), True, 'import torch.nn as nn\n'), ((4345, 4402), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(1024)'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(512, 1024, kernel_size=3, padding=1, bias=True)\n', (4354, 4402), True, 'import torch.nn as nn\n'), ((4425, 4483), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024)', '(1024)'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(1024, 1024, kernel_size=3, padding=1, bias=True)\n', (4434, 4483), True, 'import torch.nn as nn\n'), ((4508, 4574), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', 
(['(1024)', '(512)'], {'kernel_size': '(2)', 'padding': '(1)', 'bias': '(True)'}), '(1024, 512, kernel_size=2, padding=1, bias=True)\n', (4526, 4574), True, 'import torch.nn as nn\n'), ((4614, 4671), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024)', '(512)'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(1024, 512, kernel_size=3, padding=1, bias=True)\n', (4623, 4671), True, 'import torch.nn as nn\n'), ((4694, 4750), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(512, 512, kernel_size=3, padding=1, bias=True)\n', (4703, 4750), True, 'import torch.nn as nn\n'), ((4775, 4840), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(512)', '(256)'], {'kernel_size': '(2)', 'padding': '(1)', 'bias': '(True)'}), '(512, 256, kernel_size=2, padding=1, bias=True)\n', (4793, 4840), True, 'import torch.nn as nn\n'), ((4880, 4936), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(256)'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(512, 256, kernel_size=3, padding=1, bias=True)\n', (4889, 4936), True, 'import torch.nn as nn\n'), ((4959, 5015), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(256, 256, kernel_size=3, padding=1, bias=True)\n', (4968, 5015), True, 'import torch.nn as nn\n'), ((5040, 5105), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(256)', '(128)'], {'kernel_size': '(2)', 'padding': '(1)', 'bias': '(True)'}), '(256, 128, kernel_size=2, padding=1, bias=True)\n', (5058, 5105), True, 'import torch.nn as nn\n'), ((5145, 5201), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(256, 128, kernel_size=3, padding=1, bias=True)\n', (5154, 5201), True, 'import torch.nn as nn\n'), ((5224, 5280), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(128)'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(128, 128, kernel_size=3, padding=1, 
bias=True)\n', (5233, 5280), True, 'import torch.nn as nn\n'), ((5305, 5369), 'torch.nn.ConvTranspose2d', 'nn.ConvTranspose2d', (['(128)', '(64)'], {'kernel_size': '(2)', 'padding': '(1)', 'bias': '(True)'}), '(128, 64, kernel_size=2, padding=1, bias=True)\n', (5323, 5369), True, 'import torch.nn as nn\n'), ((5409, 5464), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(64)'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(128, 64, kernel_size=3, padding=1, bias=True)\n', (5418, 5464), True, 'import torch.nn as nn\n'), ((5487, 5541), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(64)'], {'kernel_size': '(3)', 'padding': '(1)', 'bias': '(True)'}), '(64, 64, kernel_size=3, padding=1, bias=True)\n', (5496, 5541), True, 'import torch.nn as nn\n'), ((5564, 5606), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(3)'], {'kernel_size': '(1)', 'bias': '(True)'}), '(64, 3, kernel_size=1, bias=True)\n', (5573, 5606), True, 'import torch.nn as nn\n'), ((6553, 6564), 'torch.nn.functional.relu', 'F.relu', (['out'], {}), '(out)\n', (6559, 6564), True, 'import torch.nn.functional as F\n'), ((6747, 6803), 'torch.nn.Conv2d', 'nn.Conv2d', (['input_channel_size', '(8)', '(5, 5)', '(1, 1)', '(2, 2)'], {}), '(input_channel_size, 8, (5, 5), (1, 1), (2, 2))\n', (6756, 6803), True, 'import torch.nn as nn\n'), ((6825, 6864), 'torch.nn.Conv2d', 'nn.Conv2d', (['(8)', '(4)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(8, 4, (3, 3), (1, 1), (1, 1))\n', (6834, 6864), True, 'import torch.nn as nn\n'), ((6886, 6965), 'torch.nn.Conv2d', 'nn.Conv2d', (['(4)', '(output_channel_size * upscale_factor ** 2)', '(3, 3)', '(1, 1)', '(1, 1)'], {}), '(4, output_channel_size * upscale_factor ** 2, (3, 3), (1, 1), (1, 1))\n', (6895, 6965), True, 'import torch.nn as nn\n'), ((6997, 7028), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['upscale_factor'], {}), '(upscale_factor)\n', (7012, 7028), True, 'import torch.nn as nn\n'), ((225, 247), 'torch.nn.PixelShuffle', 'nn.PixelShuffle', (['scale'], {}), '(scale)\n', 
(240, 247), True, 'import torch.nn as nn\n')] |
import gym
import gym_sokoban
import torch
import numpy as np
import random
import time
from utilities.channelConverter import hwc2chw
from experts.utils import get_distance
from external_actions import get_astar_action
import warnings
warnings.simplefilter("ignore", UserWarning)
def test_the_agent(agent, data_path, USE_CUDA, eval_num, args=None, display=False, deter=False, Variable=None):
solved = []
rewards = []
#specify the environment you wanna use; v0 means sample sub-cases randomly, and v1 only sample targeted sub-cases;
#env = gym.make('Curriculum-Sokoban-v2', data_path = data_path, seed=random.randint(0,100))
env = gym.make('Curriculum-Sokoban-v2', data_path = data_path)
solved_maps = []
unsolved_maps = []
for i in range(eval_num):
episode_reward = 0
state = env.reset()
if display:
print('#### Start ####')
print(env.room_state)
print('{} steps towards the goal state'.format(get_distance(env.room_state)))
time.sleep(1)
state = hwc2chw(state, test=True)
if USE_CUDA:
state = state.cuda()
action = agent.select_action(state.unsqueeze(0), test=1, determinisitc=deter)
next_state, reward, done, _ = env.step(action.item())
episode_reward += reward
next_state = hwc2chw(next_state, test=True)
if display:
print('#### action taken ####')
print('taken action is {}, expert action is {}'.format(action.item(), get_astar_action(env.room_state)))
print(env.room_state)
print('{} steps towards the goal state'.format(get_distance(env.room_state)))
time.sleep(1)
i = 1
while not done:
state = next_state
if USE_CUDA:
state = state.cuda()
with torch.no_grad():
action = agent.select_action(state.unsqueeze(0), test=1, determinisitc=deter)
if display:
print('#### action taken ####')
print('taken action is {}, expert action is {}'.format(action.item(), get_astar_action(env.room_state)))
print(env.room_state)
print('{} steps towards the goal state'.format(get_distance(env.room_state)))
time.sleep(1)
next_state, reward, done, _ = env.step(action.item())
if get_distance(env.room_state) == -1:
if display:
print('The game is unsolvable now')
time.sleep(2)
break
episode_reward += reward
next_state = hwc2chw(next_state, test=True)
i += 1
if i < env.max_steps and get_distance(env.room_state) != -1:
solved.append(1)
solved_maps.append(env.selected_map)
else:
unsolved_maps.append(env.selected_map)
rewards.append(episode_reward)
return np.sum(solved)/eval_num
| [
"external_actions.get_astar_action",
"experts.utils.get_distance",
"time.sleep",
"numpy.sum",
"utilities.channelConverter.hwc2chw",
"warnings.simplefilter",
"torch.no_grad",
"gym.make"
] | [((239, 283), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'UserWarning'], {}), "('ignore', UserWarning)\n", (260, 283), False, 'import warnings\n'), ((660, 714), 'gym.make', 'gym.make', (['"""Curriculum-Sokoban-v2"""'], {'data_path': 'data_path'}), "('Curriculum-Sokoban-v2', data_path=data_path)\n", (668, 714), False, 'import gym\n'), ((1072, 1097), 'utilities.channelConverter.hwc2chw', 'hwc2chw', (['state'], {'test': '(True)'}), '(state, test=True)\n', (1079, 1097), False, 'from utilities.channelConverter import hwc2chw\n'), ((1354, 1384), 'utilities.channelConverter.hwc2chw', 'hwc2chw', (['next_state'], {'test': '(True)'}), '(next_state, test=True)\n', (1361, 1384), False, 'from utilities.channelConverter import hwc2chw\n'), ((2971, 2985), 'numpy.sum', 'np.sum', (['solved'], {}), '(solved)\n', (2977, 2985), True, 'import numpy as np\n'), ((1042, 1055), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1052, 1055), False, 'import time\n'), ((1702, 1715), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1712, 1715), False, 'import time\n'), ((2648, 2678), 'utilities.channelConverter.hwc2chw', 'hwc2chw', (['next_state'], {'test': '(True)'}), '(next_state, test=True)\n', (2655, 2678), False, 'from utilities.channelConverter import hwc2chw\n'), ((1866, 1881), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1879, 1881), False, 'import torch\n'), ((2318, 2331), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2328, 2331), False, 'import time\n'), ((2413, 2441), 'experts.utils.get_distance', 'get_distance', (['env.room_state'], {}), '(env.room_state)\n', (2425, 2441), False, 'from experts.utils import get_distance\n'), ((2549, 2562), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2559, 2562), False, 'import time\n'), ((2732, 2760), 'experts.utils.get_distance', 'get_distance', (['env.room_state'], {}), '(env.room_state)\n', (2744, 2760), False, 'from experts.utils import get_distance\n'), ((999, 1027), 
'experts.utils.get_distance', 'get_distance', (['env.room_state'], {}), '(env.room_state)\n', (1011, 1027), False, 'from experts.utils import get_distance\n'), ((1531, 1563), 'external_actions.get_astar_action', 'get_astar_action', (['env.room_state'], {}), '(env.room_state)\n', (1547, 1563), False, 'from external_actions import get_astar_action\n'), ((1659, 1687), 'experts.utils.get_distance', 'get_distance', (['env.room_state'], {}), '(env.room_state)\n', (1671, 1687), False, 'from experts.utils import get_distance\n'), ((2135, 2167), 'external_actions.get_astar_action', 'get_astar_action', (['env.room_state'], {}), '(env.room_state)\n', (2151, 2167), False, 'from external_actions import get_astar_action\n'), ((2271, 2299), 'experts.utils.get_distance', 'get_distance', (['env.room_state'], {}), '(env.room_state)\n', (2283, 2299), False, 'from experts.utils import get_distance\n')] |
import copy
from csv import DictReader
from sklearn.model_selection import KFold
from sklearn.pipeline import Pipeline
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.svm import SVC
from features import SentenceLength, BagOfTfIDF, WordOverlap
from features import POS, NER
from features import ToMatrix
from preprocess import TokenizedLemmas
from scorer import *
def get_dataset():
# use FNC's function for reading dataset.
training_set_dicts = load_dataset("../datasets/train_stances.csv")
# read the bodies
bodies = DictReader(open("../datasets/train_bodies.csv", encoding="utf8"))
bodies_dict = {i["Body ID"]: i["articleBody"] for i in bodies}
for instance in training_set_dicts:
instance['articleBody'] = bodies_dict[instance["Body ID"]]
instance['features'] = {}
# print(training_set_dicts[0])
return training_set_dicts
def run_classifier(test, train):
pipeline = Pipeline([('preprocess_lemmas', TokenizedLemmas()),
# ('sent_len', SentenceLength()),
# ('tfidf', BagOfTfIDF(train)),
# ('pos', POS()),
# ('ner', NER()),
('word_overlap', WordOverlap()),
('transform', ToMatrix()),
('norm', MinMaxScaler()),
('clf', SVC())])
print("Started pipeline ...")
true_labels = [instance['Stance'] for instance in train]
pipeline.fit(train, true_labels)
print("Finished training.")
# return the predicted labels of the test set
return pipeline.predict(test)
def test_cross_validation():
X = get_dataset()[:100]
# 1. Run cross-validation, using KFold class from sklearn
k_folds = KFold(n_splits=10, random_state=42)
scores = 0 # this will store the sum of scores of each split
relative_scores = 0
# 2. Iterate over the 5 splits of the data
for train_index, test_index in k_folds.split(X):
X_train = [X[i] for i in train_index]
X_test = [X[i] for i in test_index]
# 2.1. Run a classifier with the fold
predicted_labels = run_classifier(train=X_train, test=X_test)
# 2.2. Get the score of the prediction, using get_fnc_score
score, rate_null = get_fnc_score(X_test, predicted_labels)
print("Score {0:.5f}% of max and {1:.5f} relative to const unreleated".format(score, rate_null))
# print_confusion_matrix(cm)
scores += score
relative_scores += rate_null
# 2.3. Get the average score, achieved from the cross-validation
print("Average score for {0} folds is {1:.5f}% and relative imporvement {2:.5f}".format(
k_folds.n_splits, scores / k_folds.n_splits, relative_scores / k_folds.n_splits))
def get_fnc_score(X_test, predicted_labels):
"""Helper function to get FNC score"""
X_test_pred = copy.deepcopy(X_test)
for x, pred in zip(X_test_pred, predicted_labels):
x['Stance'] = pred
# return the submission score of FNC system
score, confusion_matrix = score_submission(X_test, X_test_pred)
null_score, max_score = score_defaults(X_test)
return [float(score) / (max_score + 0.0000001), float(score) / (null_score + 0.0000001)]
if __name__ == "__main__":
test_cross_validation()
| [
"features.ToMatrix",
"preprocess.TokenizedLemmas",
"copy.deepcopy",
"sklearn.model_selection.KFold",
"sklearn.preprocessing.data.MinMaxScaler",
"features.WordOverlap",
"sklearn.svm.SVC"
] | [((1867, 1902), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)', 'random_state': '(42)'}), '(n_splits=10, random_state=42)\n', (1872, 1902), False, 'from sklearn.model_selection import KFold\n'), ((3005, 3026), 'copy.deepcopy', 'copy.deepcopy', (['X_test'], {}), '(X_test)\n', (3018, 3026), False, 'import copy\n'), ((982, 999), 'preprocess.TokenizedLemmas', 'TokenizedLemmas', ([], {}), '()\n', (997, 999), False, 'from preprocess import TokenizedLemmas\n'), ((1291, 1304), 'features.WordOverlap', 'WordOverlap', ([], {}), '()\n', (1302, 1304), False, 'from features import SentenceLength, BagOfTfIDF, WordOverlap\n'), ((1355, 1365), 'features.ToMatrix', 'ToMatrix', ([], {}), '()\n', (1363, 1365), False, 'from features import ToMatrix\n'), ((1411, 1425), 'sklearn.preprocessing.data.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (1423, 1425), False, 'from sklearn.preprocessing.data import MinMaxScaler\n'), ((1470, 1475), 'sklearn.svm.SVC', 'SVC', ([], {}), '()\n', (1473, 1475), False, 'from sklearn.svm import SVC\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Constant settings for Cowbird application.
Constants defined with format ``COWBIRD_[VARIABLE_NAME]`` can be matched with corresponding
settings formatted as ``cowbird.[variable_name]`` in the ``cowbird.ini`` configuration file.
.. note::
Since the ``cowbird.ini`` file has to be loaded by the application to retrieve various configuration settings,
constant ``COWBIRD_INI_FILE_PATH`` (or any other `path variable` defined before it - see below) has to be defined
by environment variable if the default location is not desired (ie: if you want to provide your own configuration).
"""
import logging
import os
import re
from typing import TYPE_CHECKING
from pyramid.settings import asbool
from pyramid.threadlocal import get_current_registry
if TYPE_CHECKING:
# pylint: disable=W0611,unused-import
from typing import Optional
from cowbird.typedefs import AnySettingsContainer, SettingValue
# ===========================
# path variables
# ===========================
COWBIRD_MODULE_DIR = os.path.abspath(os.path.dirname(__file__))
COWBIRD_ROOT = os.path.dirname(COWBIRD_MODULE_DIR)
COWBIRD_CONFIG_DIR = os.getenv(
"COWBIRD_CONFIG_DIR", os.path.join(COWBIRD_ROOT, "config"))
COWBIRD_CONFIG_PATH = os.getenv("COWBIRD_CONFIG_PATH") # default None, require explicit specification
COWBIRD_INI_FILE_PATH = os.getenv(
"COWBIRD_INI_FILE_PATH", "{}/cowbird.ini".format(COWBIRD_CONFIG_DIR))
def _get_default_log_level():
"""
Get logging level from INI configuration file or fallback to default ``INFO`` if it cannot be retrieved.
"""
_default_log_lvl = "INFO"
try:
from cowbird.utils import get_settings_from_config_ini # pylint: disable=C0415 # avoid circular import error
_settings = get_settings_from_config_ini(COWBIRD_INI_FILE_PATH, section="logger_cowbird")
_default_log_lvl = _settings.get("level", _default_log_lvl)
# also considers 'ModuleNotFoundError' derived from 'ImportError', but not added to avoid Python <3.6 name error
except (AttributeError, ImportError): # noqa: W0703 # nosec: B110
pass
return _default_log_lvl
# ===========================
# variables from cowbird.env
# ===========================
# ---------------------------
# COWBIRD
# ---------------------------
COWBIRD_URL = os.getenv("COWBIRD_URL", None) # must be defined
COWBIRD_LOG_LEVEL = os.getenv("COWBIRD_LOG_LEVEL", _get_default_log_level()) # log level to apply to the loggers
COWBIRD_LOG_PRINT = asbool(os.getenv("COWBIRD_LOG_PRINT", False)) # log also forces print to the console
COWBIRD_LOG_REQUEST = asbool(os.getenv("COWBIRD_LOG_REQUEST", True)) # log detail of every incoming request
COWBIRD_LOG_EXCEPTION = asbool(os.getenv("COWBIRD_LOG_EXCEPTION", True)) # log detail of generated exceptions
COWBIRD_ADMIN_PERMISSION = "admin"
# ===========================
# constants
# ===========================
# ignore matches of settings and environment variables for following cases
COWBIRD_CONSTANTS = [
"COWBIRD_CONSTANTS",
"COWBIRD_MODULE_DIR",
"COWBIRD_ROOT",
"COWBIRD_ADMIN_PERMISSION",
# add more as needed
]
# ===========================
# utilities
# ===========================
_REGEX_ASCII_ONLY = re.compile(r"\W|^(?=\d)")
_SETTING_SECTION_PREFIXES = [
"cowbird",
]
_SETTINGS_REQUIRED = [
"COWBIRD_URL",
# FIXME: add others here as needed
]
def get_constant_setting_name(name):
"""
Find the equivalent setting name of the provided environment variable name.
Lower-case name and replace all non-ascii chars by `_`.
Then, convert known prefixes with their dotted name.
"""
name = re.sub(_REGEX_ASCII_ONLY, "_", name.strip().lower())
for prefix in _SETTING_SECTION_PREFIXES:
known_prefix = "{}_".format(prefix)
dotted_prefix = "{}.".format(prefix)
if name.startswith(known_prefix):
return name.replace(known_prefix, dotted_prefix, 1)
return name
def get_constant(constant_name, # type: str
settings_container=None, # type: Optional[AnySettingsContainer]
settings_name=None, # type: Optional[str]
default_value=None, # type: Optional[SettingValue]
raise_missing=True, # type: bool
print_missing=False, # type: bool
raise_not_set=True # type: bool
): # type: (...) -> SettingValue
"""
Search in order for matched value of :paramref:`constant_name`:
1. search in :py:data:`COWBIRD_CONSTANTS`
2. search in settings if specified
3. search alternative setting names (see below)
4. search in :mod:`cowbird.constants` definitions
5. search in environment variables
Parameter :paramref:`constant_name` is expected to have the format ``COWBIRD_[VARIABLE_NAME]`` although any value
can be passed to retrieve generic settings from all above mentioned search locations.
If :paramref:`settings_name` is provided as alternative name, it is used as is to search for results if
:paramref:`constant_name` was not found. Otherwise, ``cowbird.[variable_name]`` is used for additional search when
the format ``COWBIRD_[VARIABLE_NAME]`` was used for :paramref:`constant_name`
(i.e.: ``COWBIRD_ADMIN_USER`` will also search for ``cowbird.admin_user`` and so on for corresponding constants).
:param constant_name: key to search for a value
:param settings_container: WSGI application settings container (if not provided, uses found one in current thread)
:param settings_name: alternative name for `settings` if specified
:param default_value: default value to be returned if not found anywhere, and exception raises are disabled.
:param raise_missing: raise exception if key is not found anywhere
:param print_missing: print message if key is not found anywhere, return ``None``
:param raise_not_set: raise an exception if the found key is ``None``, search until last case if others are ``None``
:returns: found value or `default_value`
:raises ValueError: if resulting value is invalid based on options (by default raise missing/``None`` value)
:raises LookupError: if no appropriate value could be found from all search locations (according to options)
"""
from cowbird.utils import get_settings, print_log, raise_log # pylint: disable=C0415 # avoid circular import error
if constant_name in COWBIRD_CONSTANTS:
return globals()[constant_name]
missing = True
cowbird_value = None
if settings_container:
settings = get_settings(settings_container)
else:
# note: this will work only after include of cowbird will have triggered configurator setup
print_log("Using settings from local thread.", level=logging.DEBUG)
settings = get_settings(get_current_registry())
if settings and constant_name in settings: # pylint: disable=E1135
missing = False
cowbird_value = settings.get(constant_name)
if cowbird_value is not None:
print_log("Constant found in settings with: {}".format(constant_name), level=logging.DEBUG)
return cowbird_value
if not settings_name:
settings_name = get_constant_setting_name(constant_name)
print_log("Constant alternate search: {}".format(settings_name), level=logging.DEBUG)
if settings and settings_name and settings_name in settings: # pylint: disable=E1135
missing = False
cowbird_value = settings.get(settings_name)
if cowbird_value is not None:
print_log("Constant found in settings with: {}".format(settings_name), level=logging.DEBUG)
return cowbird_value
cowbird_globals = globals()
if constant_name in cowbird_globals:
missing = False
cowbird_value = cowbird_globals.get(constant_name)
if cowbird_value is not None:
print_log("Constant found in definitions with: {}".format(constant_name), level=logging.DEBUG)
return cowbird_value
if constant_name in os.environ:
missing = False
cowbird_value = os.environ.get(constant_name)
if cowbird_value is not None:
print_log("Constant found in environment with: {}".format(constant_name), level=logging.DEBUG)
return cowbird_value
if not missing and raise_not_set:
raise_log("Constant was found but was not set: {}".format(constant_name),
level=logging.ERROR, exception=ValueError)
if missing and raise_missing:
raise_log("Constant could not be found: {}".format(constant_name),
level=logging.ERROR, exception=LookupError)
if missing and print_missing:
print_log("Constant could not be found: {} (using default: {})"
.format(constant_name, default_value), level=logging.WARN)
return cowbird_value or default_value
def validate_required(container):
# type: (AnySettingsContainer) -> None
"""
Validates that some value is provided for every mandatory configuration setting.
:raises: when any of the requirements are missing a definition.
"""
for cfg in _SETTINGS_REQUIRED:
get_constant(cfg, settings_container=container, raise_missing=True, raise_not_set=True)
| [
"cowbird.utils.get_settings_from_config_ini",
"pyramid.threadlocal.get_current_registry",
"os.getenv",
"re.compile",
"cowbird.utils.get_settings",
"os.path.join",
"os.environ.get",
"cowbird.utils.print_log",
"os.path.dirname"
] | [((1124, 1159), 'os.path.dirname', 'os.path.dirname', (['COWBIRD_MODULE_DIR'], {}), '(COWBIRD_MODULE_DIR)\n', (1139, 1159), False, 'import os\n'), ((1278, 1310), 'os.getenv', 'os.getenv', (['"""COWBIRD_CONFIG_PATH"""'], {}), "('COWBIRD_CONFIG_PATH')\n", (1287, 1310), False, 'import os\n'), ((2355, 2385), 'os.getenv', 'os.getenv', (['"""COWBIRD_URL"""', 'None'], {}), "('COWBIRD_URL', None)\n", (2364, 2385), False, 'import os\n'), ((3316, 3342), 're.compile', 're.compile', (['"""\\\\W|^(?=\\\\d)"""'], {}), "('\\\\W|^(?=\\\\d)')\n", (3326, 3342), False, 'import re\n'), ((1082, 1107), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1097, 1107), False, 'import os\n'), ((1218, 1254), 'os.path.join', 'os.path.join', (['COWBIRD_ROOT', '"""config"""'], {}), "(COWBIRD_ROOT, 'config')\n", (1230, 1254), False, 'import os\n'), ((2552, 2589), 'os.getenv', 'os.getenv', (['"""COWBIRD_LOG_PRINT"""', '(False)'], {}), "('COWBIRD_LOG_PRINT', False)\n", (2561, 2589), False, 'import os\n'), ((2675, 2713), 'os.getenv', 'os.getenv', (['"""COWBIRD_LOG_REQUEST"""', '(True)'], {}), "('COWBIRD_LOG_REQUEST', True)\n", (2684, 2713), False, 'import os\n'), ((2798, 2838), 'os.getenv', 'os.getenv', (['"""COWBIRD_LOG_EXCEPTION"""', '(True)'], {}), "('COWBIRD_LOG_EXCEPTION', True)\n", (2807, 2838), False, 'import os\n'), ((1803, 1880), 'cowbird.utils.get_settings_from_config_ini', 'get_settings_from_config_ini', (['COWBIRD_INI_FILE_PATH'], {'section': '"""logger_cowbird"""'}), "(COWBIRD_INI_FILE_PATH, section='logger_cowbird')\n", (1831, 1880), False, 'from cowbird.utils import get_settings_from_config_ini\n'), ((6733, 6765), 'cowbird.utils.get_settings', 'get_settings', (['settings_container'], {}), '(settings_container)\n', (6745, 6765), False, 'from cowbird.utils import get_settings, print_log, raise_log\n'), ((6884, 6951), 'cowbird.utils.print_log', 'print_log', (['"""Using settings from local thread."""'], {'level': 'logging.DEBUG'}), "('Using settings from local 
thread.', level=logging.DEBUG)\n", (6893, 6951), False, 'from cowbird.utils import get_settings, print_log, raise_log\n'), ((8275, 8304), 'os.environ.get', 'os.environ.get', (['constant_name'], {}), '(constant_name)\n', (8289, 8304), False, 'import os\n'), ((6984, 7006), 'pyramid.threadlocal.get_current_registry', 'get_current_registry', ([], {}), '()\n', (7004, 7006), False, 'from pyramid.threadlocal import get_current_registry\n')] |
import logging
import os
class FifoFile:
"""
Fifo object that handles directory and file creation
"""
instance_counter = 1
"""Makes any fifo name unique"""
def __init__(self, fifo_path=None, filename_prefix = "smartbot"):
self.fifo_filename = "{}_{}.{}".format(filename_prefix, str(FifoFile.instance_counter),"fifo")
FifoFile.instance_counter += 1
self.full_path = self._make_fifo(fifo_path, self.fifo_filename)
def write(self, command):
logging.debug("Sending command '{}' to fifo '{}'".format(command,self.full_path))
with open(self.full_path, "w") as out_file:
out_file.write(command + "\n")
logging.info("Written '" + command + "' to " + self.full_path)
def _make_fifo(self, path, filename):
"""
Creates fifo file and returns path to it
"""
self._create_fifo_directory(path)
full_path = os.path.join(path, filename)
self._create_fifo_file(full_path)
return full_path
def _create_fifo_file(self, full_path):
if not os.path.exists(full_path):
logging.info("Making fifo at {}".format(full_path))
os.mkfifo(full_path)
def _create_fifo_directory(self, path):
if not os.path.isdir(path):
logging.info("Creating directory: {}".format(path))
os.makedirs(path)
def delete(self):
logging.debug("attempting to remove fifo {}".format(self.full_path))
if self.full_path and os.path.exists(self.full_path):
logging.debug("Removing {}".format(self.full_path))
os.remove(self.full_path)
def __del__(self):
self.delete()
| [
"os.path.exists",
"os.makedirs",
"os.path.join",
"os.path.isdir",
"os.mkfifo",
"logging.info",
"os.remove"
] | [((688, 750), 'logging.info', 'logging.info', (['("Written \'" + command + "\' to " + self.full_path)'], {}), '("Written \'" + command + "\' to " + self.full_path)\n', (700, 750), False, 'import logging\n'), ((929, 957), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (941, 957), False, 'import os\n'), ((1085, 1110), 'os.path.exists', 'os.path.exists', (['full_path'], {}), '(full_path)\n', (1099, 1110), False, 'import os\n'), ((1188, 1208), 'os.mkfifo', 'os.mkfifo', (['full_path'], {}), '(full_path)\n', (1197, 1208), False, 'import os\n'), ((1269, 1288), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1282, 1288), False, 'import os\n'), ((1366, 1383), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1377, 1383), False, 'import os\n'), ((1514, 1544), 'os.path.exists', 'os.path.exists', (['self.full_path'], {}), '(self.full_path)\n', (1528, 1544), False, 'import os\n'), ((1622, 1647), 'os.remove', 'os.remove', (['self.full_path'], {}), '(self.full_path)\n', (1631, 1647), False, 'import os\n')] |
# Standard imports
import unittest
import json
import logging
from datetime import datetime, timedelta
# Our imports
from emission.core.get_database import get_db, get_mode_db, get_section_db
from emission.analysis.result.precompute import precompute_results
from emission.core.wrapper.user import User
from emission.core.wrapper.client import Client
import emission.tests.common
from emission.clients.testclient import testclient
from emission.clients.data import data
import emission.tests.common as etc
class TestPrecomputeResults(unittest.TestCase):
def setUp(self):
self.testUsers = ["<EMAIL>", "<EMAIL>", "<EMAIL>",
"<EMAIL>", "<EMAIL>"]
self.serverName = 'localhost'
# Sometimes, we may have entries left behind in the database if one of the tests failed
# or threw an exception, so let us start by cleaning up all entries
emission.tests.common.dropAllCollections(get_db())
self.ModesColl = get_mode_db()
self.assertEquals(self.ModesColl.find().count(), 0)
self.SectionsColl = get_section_db()
self.assertEquals(self.SectionsColl.find().count(), 0)
emission.tests.common.loadTable(self.serverName, "Stage_Modes", "emission/tests/data/modes.json")
emission.tests.common.loadTable(self.serverName, "Stage_Sections", "emission/tests/data/testModeInferFile")
# Let's make sure that the users are registered so that they have profiles
for userEmail in self.testUsers:
User.register(userEmail)
self.now = datetime.now()
self.dayago = self.now - timedelta(days=1)
self.weekago = self.now - timedelta(weeks = 1)
for section in self.SectionsColl.find():
section['section_start_datetime'] = self.dayago
section['section_end_datetime'] = self.dayago + timedelta(hours = 1)
if (section['confirmed_mode'] == 5):
# We only cluster bus and train trips
# And our test data only has bus trips
section['section_start_point'] = {u'type': u'Point', u'coordinates': [-122.270039042, 37.8800285728]}
section['section_end_point'] = {u'type': u'Point', u'coordinates': [-122.2690412952, 37.8739578595]}
# print("Section start = %s, section end = %s" %
# (section['section_start_datetime'], section['section_end_datetime']))
# Replace the user email with the UUID
section['user_id'] = User.fromEmail(section['user_id']).uuid
self.SectionsColl.save(section)
self.pr = precompute_results.PrecomputeResults()
def testClientSpecificPrecompute(self):
for email in self.testUsers:
currUser = User.fromEmail(email)
self.assertEqual(currUser.getProfile().get("testfield1"), None)
self.assertEqual(currUser.getProfile().get("testfield2"), None)
self.assertEqual(data.getCarbonFootprint(currUser), None)
fakeEmail = "<EMAIL>"
client = Client("testclient")
client.update(createKey = False)
emission.tests.common.makeValid(client)
(resultPre, resultReg) = client.preRegister("this_is_the_super_secret_id", fakeEmail)
user = User.fromEmail(fakeEmail)
self.assertEqual(user.getFirstStudy(), 'testclient')
self.pr.precomputeResults()
self.assertEqual(user.getProfile()['testfield1'], 'value1')
self.assertEqual(user.getProfile()['testfield2'], 'value2')
for email in self.testUsers:
if email != fakeEmail:
currUser = User.fromEmail(email)
carbonFootprint = data.getCarbonFootprint(currUser)
self.assertEqual(len(carbonFootprint), 12)
if __name__ == '__main__':
etc.configLogging()
unittest.main()
| [
"emission.tests.common.configLogging",
"emission.core.get_database.get_db",
"emission.analysis.result.precompute.precompute_results.PrecomputeResults",
"emission.core.wrapper.user.User.register",
"emission.core.wrapper.client.Client",
"emission.core.wrapper.user.User.fromEmail",
"emission.core.get_datab... | [((3775, 3794), 'emission.tests.common.configLogging', 'etc.configLogging', ([], {}), '()\n', (3792, 3794), True, 'import emission.tests.common as etc\n'), ((3799, 3814), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3812, 3814), False, 'import unittest\n'), ((981, 994), 'emission.core.get_database.get_mode_db', 'get_mode_db', ([], {}), '()\n', (992, 994), False, 'from emission.core.get_database import get_db, get_mode_db, get_section_db\n'), ((1084, 1100), 'emission.core.get_database.get_section_db', 'get_section_db', ([], {}), '()\n', (1098, 1100), False, 'from emission.core.get_database import get_db, get_mode_db, get_section_db\n'), ((1567, 1581), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1579, 1581), False, 'from datetime import datetime, timedelta\n'), ((3012, 3032), 'emission.core.wrapper.client.Client', 'Client', (['"""testclient"""'], {}), "('testclient')\n", (3018, 3032), False, 'from emission.core.wrapper.client import Client\n'), ((3232, 3257), 'emission.core.wrapper.user.User.fromEmail', 'User.fromEmail', (['fakeEmail'], {}), '(fakeEmail)\n', (3246, 3257), False, 'from emission.core.wrapper.user import User\n'), ((945, 953), 'emission.core.get_database.get_db', 'get_db', ([], {}), '()\n', (951, 953), False, 'from emission.core.get_database import get_db, get_mode_db, get_section_db\n'), ((1522, 1546), 'emission.core.wrapper.user.User.register', 'User.register', (['userEmail'], {}), '(userEmail)\n', (1535, 1546), False, 'from emission.core.wrapper.user import User\n'), ((1615, 1632), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1624, 1632), False, 'from datetime import datetime, timedelta\n'), ((1667, 1685), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(1)'}), '(weeks=1)\n', (1676, 1685), False, 'from datetime import datetime, timedelta\n'), ((2575, 2613), 'emission.analysis.result.precompute.precompute_results.PrecomputeResults', 
'precompute_results.PrecomputeResults', ([], {}), '()\n', (2611, 2613), False, 'from emission.analysis.result.precompute import precompute_results\n'), ((2719, 2740), 'emission.core.wrapper.user.User.fromEmail', 'User.fromEmail', (['email'], {}), '(email)\n', (2733, 2740), False, 'from emission.core.wrapper.user import User\n'), ((1854, 1872), 'datetime.timedelta', 'timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (1863, 1872), False, 'from datetime import datetime, timedelta\n'), ((2473, 2507), 'emission.core.wrapper.user.User.fromEmail', 'User.fromEmail', (["section['user_id']"], {}), "(section['user_id'])\n", (2487, 2507), False, 'from emission.core.wrapper.user import User\n'), ((2922, 2955), 'emission.clients.data.data.getCarbonFootprint', 'data.getCarbonFootprint', (['currUser'], {}), '(currUser)\n', (2945, 2955), False, 'from emission.clients.data import data\n'), ((3593, 3614), 'emission.core.wrapper.user.User.fromEmail', 'User.fromEmail', (['email'], {}), '(email)\n', (3607, 3614), False, 'from emission.core.wrapper.user import User\n'), ((3650, 3683), 'emission.clients.data.data.getCarbonFootprint', 'data.getCarbonFootprint', (['currUser'], {}), '(currUser)\n', (3673, 3683), False, 'from emission.clients.data import data\n')] |
# coding=utf-8
# Copyright (c) 2015 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
import logging as std_logging
log = std_logging.getLogger('import')
import collections
import time
from sqlalchemy.orm import RelationshipProperty, ColumnProperty
class timed(object):
def __init__(self, logger, thing="", **kwargs):
self.logger = logger
self.enter_msg = kwargs.get('enter_msg',
"{thing}...").format(thing=thing)
self.exit_msg = kwargs.get('exit_msg',
"...{thing} took {{duration}} seconds.").format(thing=thing)
def __enter__(self):
self._t = time.time()
self.logger.info(self.enter_msg)
def __exit__(self, *args):
if self.exit_msg is not None:
self.logger.info(
self.exit_msg.format(duration=(time.time()-self._t)))
def invert_dict(d):
inv_d = {}
for k, v in d.items():
inv_d.setdefault(v, set()).add(k)
return inv_d
class DependencyError(Exception):
pass
class TranslationRegistry(object):
    """Registry mapping SQLAlchemy tables to the translation functions
    that provide them, with dependency tracking so the functions can be
    executed in topological order.

    NOTE(review): the three registries below are *class* attributes, so
    every instance shares the same state — presumably intentional
    (module-wide singleton use); confirm before instantiating twice.
    """
    # TODO sort by resource dependencies instead of pycroft model?
    _provides = {}
    _satisfies = collections.defaultdict(lambda: set())
    _requires = collections.defaultdict(lambda: set())
    def requires_function(self, *other_funcs):
        """Explicit dependence other functions"""
        def decorator(func):
            self._requires[func] = set(other_funcs)
            return func
        return decorator
    def provides(self, *metas, **kwargs):
        """Register a translation function to create given ModelMetas
        Register that the decorated function provides the `metas` and
        what foreign keys are satisfied by the parent tables (those in
        in meta.__bases__) already.
        This decorator does not change the decorated function.
        The decorated function
        :param metas: Subclasses of _ModelMeta that are provided by
            that function (:attr:`_provides[meta]` is set to
            ``func``).
        :param satisfies: An iterable of Attributes that are
            considered to be satisfied by the decorated function.
            They must have a `property` attribute of either
            :py:cls:`ColumnProperty` or
            :py:cls:`RelationshipProperty`. The ``property.columns``
            or ``property.local_columns`` are registered to
            ``_satisfies[func]``, respectively.
        """
        def decorator(func):
            for meta in metas:
                self._provides[meta.__table__] = func
                # FK constraints pointing at a parent table (joined-table
                # inheritance) are satisfied by the parent's translator.
                parent_tables = set(parent.__table__ for parent in meta.__bases__ if hasattr(parent, '__table__'))
                for fkc in meta.__table__.foreign_key_constraints:
                    if fkc.referred_table in parent_tables:
                        self._satisfies[func].update(fkc.columns)
            for instr_attr in kwargs.get('satisfies', ()):
                prop = instr_attr.property
                if isinstance(prop, ColumnProperty):
                    self._satisfies[func].update(prop.columns)
                elif isinstance(prop, RelationshipProperty):
                    self._satisfies[func].update(prop.local_columns)
                else:
                    raise NotImplementedError
            return func
        return decorator
    def _required_translations(self, func):
        """Return the set of functions *func* depends on: providers of
        every foreign-key target not provided by *func* itself and not
        already satisfied, plus any explicit `requires_function` deps.

        :raises DependencyError: if a referenced table has no provider.
        """
        translates = invert_dict(self._provides)[func]
        required = set()
        for table in translates:
            for fkc in table.foreign_key_constraints:
                if (fkc.referred_table not in translates and
                    not self._satisfies[func].issuperset(fkc.columns)):
                    try:
                        required.add(self._provides[fkc.referred_table])
                    except KeyError as e:
                        raise DependencyError(
                            "Nothing provides {reftable} "
                            "(referenced from fkey {srctable}.{fkeys}),"
                            "required by {funcname}".format(
                                reftable=fkc.referred_table.name,
                                srctable=fkc.table.name,
                                fkeys=fkc.column_keys,
                                funcname=func.__name__))
        required.update(self._requires[func])
        return required
    def requirement_graph(self):
        """Map every registered function to its set of required functions."""
        return {func: self._required_translations(func)
                for func in set(self._provides.values())}
    def sorted_functions(self):
        """Return all registered functions in dependency order
        (Kahn-style topological sort over :meth:`requirement_graph`).

        :raises DependencyError: if the graph contains a cycle.
        """
        func_dep_map = self.requirement_graph()
        sorted_funcs = []
        # dependency-free funcs
        ready_funcs = set(func for func, deps in func_dep_map.items()
                          if not deps)
        while ready_funcs:
            # pop executed off ready_funcs and insert it in node_list
            executed = ready_funcs.pop()
            func_dep_map.pop(executed)
            sorted_funcs.append(executed)
            # find funcs which depend on executed
            from_selection = [func for func, deps in func_dep_map.items()
                              if executed in deps]
            for func in from_selection :
                # remove dependency
                func_dep_map[func].remove(executed)
                # if func has all its dependencies executed,
                if not func_dep_map[func]:
                    # it can be executed
                    ready_funcs.add(func)
        if func_dep_map:
            # Leftover entries mean no node ever became dependency-free.
            raise DependencyError("Cyclic dependencies present: {}".format(
                func_dep_map))
        else:
            return sorted_funcs
| [
"logging.getLogger",
"time.time"
] | [((259, 290), 'logging.getLogger', 'std_logging.getLogger', (['"""import"""'], {}), "('import')\n", (280, 290), True, 'import logging as std_logging\n'), ((794, 805), 'time.time', 'time.time', ([], {}), '()\n', (803, 805), False, 'import time\n'), ((994, 1005), 'time.time', 'time.time', ([], {}), '()\n', (1003, 1005), False, 'import time\n')] |
'''https://practice.geeksforgeeks.org/problems/pairs-with-positive-negative-values/0'''
from collections import defaultdict
import heapq
import re
def pos_neg_pairs(A):
    """Return all matchable (-k, k) pairs from *A*, heap-ordered
    (most negative first element first).

    For every magnitude k, one pair is emitted per matched (+k, -k)
    couple, i.e. ``min(negative_count, positive_count)`` pairs.

    Bug fix: the original only emitted pairs when the two counts were
    *exactly equal*, silently dropping valid pairs for inputs such as
    ``[-1, -1, 1]``.
    """
    # magnitude -> [count of negatives, count of non-negatives]
    count = defaultdict(lambda: [0, 0])
    for n in A:
        if n < 0:
            count[abs(n)][0] += 1
        else:
            count[n][1] += 1
    pairs = []
    for k, (neg, pos) in count.items():
        pairs.extend((-k, k) for _ in range(min(neg, pos)))
    heapq.heapify(pairs)
    return [heapq.heappop(pairs) for _ in range(len(pairs))]
if __name__ == '__main__':
    # Demo run.  Bug fix: the original did ``''.join(pos_neg_pairs(A))``,
    # which raises TypeError because the result is a list of tuples, not
    # strings.  Flatten the pairs and render them as space-separated ints.
    A = [1, -3, 2, 3, 6, -1]
    pairs = pos_neg_pairs(A)
    print(' '.join(str(n) for pair in pairs for n in pair))
"heapq.heappop",
"heapq.heapify",
"collections.defaultdict"
] | [((183, 211), 'collections.defaultdict', 'defaultdict', (['(lambda : [0, 0])'], {}), '(lambda : [0, 0])\n', (194, 211), False, 'from collections import defaultdict\n'), ((485, 505), 'heapq.heapify', 'heapq.heapify', (['pairs'], {}), '(pairs)\n', (498, 505), False, 'import heapq\n'), ((519, 539), 'heapq.heappop', 'heapq.heappop', (['pairs'], {}), '(pairs)\n', (532, 539), False, 'import heapq\n')] |
from __future__ import print_function
import time
from jobby import JobbyJob
# Minimal demo: open a JobbyJob context with an empty config and print the
# current UNIX timestamp while the job is active (``job`` itself is unused).
with JobbyJob(dict()) as job:
    print(time.time())
| [
"time.time"
] | [((120, 131), 'time.time', 'time.time', ([], {}), '()\n', (129, 131), False, 'import time\n')] |
#! /usr/bin/python3
# monitoring for expressvpn and restarting the connection everytime it wigs out
from os import environ
from time import sleep
from shutil import copyfile
from sys import exit
import subprocess, pexpect
def check_location(loc):
    """Validate an ExpressVPN location, falling back to 'smart'.

    Only a small allow-list of common locations is accepted.  ``None``
    and unlisted values both fall back (with a printed notice).
    I'm just going to list the big ones.  If this is stopping you from
    doing something cool, remove it yourself!

    :param loc: requested location string, or None.
    :return: a location string guaranteed to be in the allow-list.
    """
    # Set membership is O(1) and reads as an allow-list.
    valid_locations = {'smart', 'usny', 'ussf', 'usch', 'usda', 'usla2',
                       'usmi2', 'usse', 'cato', 'cato2'}
    if loc in valid_locations:
        return loc
    if loc is None:  # identity check — ``== None`` is unidiomatic
        print('No location found, picking smart.')
        return 'smart'
    print('Invalid or unlisted location, reverting to smart.')
    return 'smart'
def conn_status():
    """Checks, parses and returns status of VPN connection as True or False"""
    output = subprocess.check_output(["expressvpn", "status"])
    if b"Connected" not in output:
        print("ExpressVPN connection was checked and is down.")
        return False
    print("ExpressVPN connection was checked and is live.")
    if b"A new version" in output:
        print("ExpressVPN reports there is a new version available.")
    return True
def conn_start():
    """Starts the expressvpn connection"""
    # Target location comes from the LOCATION env var, sanitised by
    # check_location() (falls back to 'smart' when unset or invalid).
    location = check_location(environ.get('LOCATION'))
    result = subprocess.call(["expressvpn", "connect", location])
    if result == 0:
        print("ExpressVPN connection initiated.")
    else:
        # Any non-zero exit code from the CLI is treated as fatal.
        exit('Something has gone wrong. Terminating.')
def first_start():
    """Activates VPN, checks for success then starts the connection.
    DNS gets locked during startup so we need to mess around to
    get it to behave."""
    if environ.get('ACTIVATION') == None:
        exit('No activation code set, please set and run again.')
    # Unmount the bind-mounted resolv.conf (container setup) while keeping
    # its contents, so expressvpn can rewrite DNS settings.
    copyfile('/etc/resolv.conf', '/tmp/resolv.conf')
    subprocess.call(['umount', '/etc/resolv.conf'])
    copyfile('/tmp/resolv.conf', '/etc/resolv.conf')
    result = subprocess.check_output(['service', 'expressvpn', 'restart'])
    if b'[ OK ]' in result:
        # Drive the interactive `expressvpn activate` prompt with pexpect.
        # NOTE(review): pexpect.expect() treats these patterns as regexes;
        # the literal "(y/N)?" contains regex metacharacters — confirm the
        # second pattern actually matches the CLI's prompt.
        child = pexpect.spawn('expressvpn activate')
        out = child.expect(
            ["Enter activation code:",
            "Already activated. Logout from your account (y/N)?"])
        if out == 0:
            child.sendline(environ.get('ACTIVATION'))
            child.expect("information.")
            # Decline the analytics/diagnostics question.
            child.sendline('n')
        elif out == 1:
            # Already activated: keep the existing login.
            child.sendline('n')
        else:
            # NOTE(review): expect() with two patterns only returns 0 or 1
            # (timeouts raise) — this branch looks unreachable; also note it
            # prints the activation code, which may leak into logs.
            print(environ.get('ACTIVATION'))
            exit("Unable to activate ExpressVPN.")
        child.expect(pexpect.EOF)
        conn_start()
        sleep(60)
def recovery():
    """Try up to four reconnect attempts with growing back-off; exit the
    process if the VPN cannot be brought back up."""
    print("Attempting to recover ExpressVPN.")
    max_attempts = 5
    attempt = 1
    while attempt < max_attempts:
        conn_start()
        # Back off a little longer on each successive attempt.
        sleep(5 * attempt)
        if conn_status():
            print("ExpressVPN recovered successfully.")
            return
        attempt += 1
        if attempt >= max_attempts:
            print(
                "Unable to reconnect ExpressVPN.")
            exit(
                "Terminating monitor script.")
# Main loop: activates and connects the VPN once, then every 60 seconds
# checks that the connection is still up.  On failure it runs the recovery
# loop; if recovery can't get the connection back, the script terminates,
# which (in a container) triggers a restart.
first_start()
while True:
    if conn_status():
        sleep(60)
    else:
        recovery()
| [
"subprocess.check_output",
"pexpect.spawn",
"os.environ.get",
"time.sleep",
"shutil.copyfile",
"subprocess.call",
"sys.exit"
] | [((883, 932), 'subprocess.check_output', 'subprocess.check_output', (["['expressvpn', 'status']"], {}), "(['expressvpn', 'status'])\n", (906, 932), False, 'import subprocess, pexpect\n'), ((1387, 1439), 'subprocess.call', 'subprocess.call', (["['expressvpn', 'connect', location]"], {}), "(['expressvpn', 'connect', location])\n", (1402, 1439), False, 'import subprocess, pexpect\n'), ((1874, 1922), 'shutil.copyfile', 'copyfile', (['"""/etc/resolv.conf"""', '"""/tmp/resolv.conf"""'], {}), "('/etc/resolv.conf', '/tmp/resolv.conf')\n", (1882, 1922), False, 'from shutil import copyfile\n'), ((1927, 1974), 'subprocess.call', 'subprocess.call', (["['umount', '/etc/resolv.conf']"], {}), "(['umount', '/etc/resolv.conf'])\n", (1942, 1974), False, 'import subprocess, pexpect\n'), ((1979, 2027), 'shutil.copyfile', 'copyfile', (['"""/tmp/resolv.conf"""', '"""/etc/resolv.conf"""'], {}), "('/tmp/resolv.conf', '/etc/resolv.conf')\n", (1987, 2027), False, 'from shutil import copyfile\n'), ((2041, 2102), 'subprocess.check_output', 'subprocess.check_output', (["['service', 'expressvpn', 'restart']"], {}), "(['service', 'expressvpn', 'restart'])\n", (2064, 2102), False, 'import subprocess, pexpect\n'), ((1349, 1372), 'os.environ.get', 'environ.get', (['"""LOCATION"""'], {}), "('LOCATION')\n", (1360, 1372), False, 'from os import environ\n'), ((1528, 1574), 'sys.exit', 'exit', (['"""Something has gone wrong. Terminating."""'], {}), "('Something has gone wrong. 
Terminating.')\n", (1532, 1574), False, 'from sys import exit\n'), ((1769, 1794), 'os.environ.get', 'environ.get', (['"""ACTIVATION"""'], {}), "('ACTIVATION')\n", (1780, 1794), False, 'from os import environ\n'), ((1812, 1869), 'sys.exit', 'exit', (['"""No activation code set, please set and run again."""'], {}), "('No activation code set, please set and run again.')\n", (1816, 1869), False, 'from sys import exit\n'), ((2147, 2183), 'pexpect.spawn', 'pexpect.spawn', (['"""expressvpn activate"""'], {}), "('expressvpn activate')\n", (2160, 2183), False, 'import subprocess, pexpect\n'), ((2694, 2703), 'time.sleep', 'sleep', (['(60)'], {}), '(60)\n', (2699, 2703), False, 'from time import sleep\n'), ((2873, 2891), 'time.sleep', 'sleep', (['(5 * attempt)'], {}), '(5 * attempt)\n', (2878, 2891), False, 'from time import sleep\n'), ((3531, 3540), 'time.sleep', 'sleep', (['(60)'], {}), '(60)\n', (3536, 3540), False, 'from time import sleep\n'), ((2366, 2391), 'os.environ.get', 'environ.get', (['"""ACTIVATION"""'], {}), "('ACTIVATION')\n", (2377, 2391), False, 'from os import environ\n'), ((2592, 2630), 'sys.exit', 'exit', (['"""Unable to activate ExpressVPN."""'], {}), "('Unable to activate ExpressVPN.')\n", (2596, 2630), False, 'from sys import exit\n'), ((3167, 3202), 'sys.exit', 'exit', (['"""Terminating monitor script."""'], {}), "('Terminating monitor script.')\n", (3171, 3202), False, 'from sys import exit\n'), ((2553, 2578), 'os.environ.get', 'environ.get', (['"""ACTIVATION"""'], {}), "('ACTIVATION')\n", (2564, 2578), False, 'from os import environ\n')] |
import datetime as d
import uuid
from django.utils import timezone
from draftHost import models
from json import JsonObject, JsonTime, EmailMasker
from performance import ReadOnlyCachedAttribute
import nfl, draft
class JsonFantasyRoster(JsonObject):
    """JSON view of a fantasy roster.

    ``fields``/``show_id`` are consumed by the JsonObject base class
    (not visible here) — presumably they select which db attributes are
    serialised; only 'slots' is exposed and the id is suppressed.
    """
    fields = ['slots',]
    show_id = False
class JsonFantasyDraft(JsonObject):
    """JSON view of a FantasyDraft.

    ``fields`` are copied straight from the db object; each name in
    ``functions`` appears to be backed by a matching ``get_<name>``
    method below (JsonObject base not shown — confirm the convention).
    """
    fields = ['admin', 'team_limit',]
    functions = ['time_per_pick_s',
                 'teams',
                 'roster',
                 'draft_start',
                 'current_time',
                 'selections',]
    @ReadOnlyCachedAttribute
    def teams(self):
        # Cached queryset of all teams belonging to this draft.
        return models.FantasyTeam.objects.filter(draft=self.db_object)
    def get_time_per_pick_s(self):
        return self.db_object.time_per_pick
    def get_teams(self):
        """Serialise each team of the draft, omitting the redundant draft id."""
        json = []
        for team in self.teams:
            json_player = JsonFantasyTeam(team)
            json_player.show_draft_id = False # already showing the draft...
            json.append(json_player.json_dict())
        return json
    def get_draft_start(self):
        # Attach "now" so the client can compute a relative countdown.
        time = JsonTime(self.db_object.draft_start)
        time.now = timezone.now()
        return time.json_dict()
    def get_roster(self):
        return JsonFantasyRoster(self.db_object.roster).json_dict()
    def get_current_time(self):
        # NOTE(review): uses naive datetime.now() here but timezone.now()
        # above — possible naive/aware mismatch; confirm intent.
        return JsonTime(d.datetime.now()).json_dict()
    def get_selections(self):
        """Serialise every selection made in this draft (all teams)."""
        selections_queryset = models.FantasySelection.objects.filter(
            draft_pick__fantasy_team__draft=self.db_object
        )
        selections = []
        for s in selections_queryset:
            json = JsonFantasySelection(s)
            json.show_team = True
            selections.append(json.json_dict())
        return selections
class JsonFantasyTeam(JsonObject):
    """JSON view of a FantasyTeam.

    ``fields``/``functions`` follow the JsonObject convention (base class
    not shown).  ``mask_email`` defaults to hiding the owner's address.
    """
    fields = ['name',]
    functions = ['picks', 'selection_ids', 'draft_id', 'email', 'players']
    pick_options = { 'show_team': False, }
    show_players = False
    mask_email = True
    @ReadOnlyCachedAttribute
    def builder(self):
        # Cached helper that derives picks/selections for this team.
        return draft.PickBuilder(self.db_object)
    def get_email(self):
        """Return the owner's email, masked unless mask_email is disabled."""
        email = self.db_object.email
        if self.mask_email:
            return EmailMasker(email).masked
        return email
    def get_picks(self):
        # show_team is suppressed via pick_options to avoid recursion
        # back into this team's serialisation.
        return self.builder.get_picks(is_team=True,
                                      options=self.pick_options)
    def get_selection_ids(self):
        return self.builder.get_selections(is_team=True)
    def get_draft_id(self):
        return self.db_object.draft.id
    def get_players(self):
        """Serialise the NFL players this team has selected."""
        selections = self.builder.raw_selections(is_team=True)
        players = [s.player for s in selections]
        json_players = []
        for p in players:
            json_player = nfl.JsonNflPlayer(p)
            json_player.show_team = False
            json_players.append(json_player.json_dict())
        return json_players
class FantasyTeamCreator(object):
    """Adds a FantasyTeam to a draft given a team name & email"""
    def __init__(self, team_form_data):
        # NOTE: the form dict is mutated in place by create_team().
        self.data = team_form_data
    def create_team(self):
        """Creates and returns the new team or None on error"""
        # NOTE(review): an ORM .get() with a missing pk typically raises
        # (e.g. DoesNotExist) rather than returning a falsy value, so the
        # else branch below may be unreachable — confirm against the
        # models module.
        draft = models.FantasyDraft.objects.get(pk=self.data['draft_id'])
        if draft:
            del self.data['draft_id']
            self.data['draft'] = draft
            # check the draft password
            if draft.password:
                form_pw = self.data['password']
                if form_pw != draft.password:
                    return None
            # Never persist the plaintext password on the team record.
            if 'password' in self.data:
                del self.data['password']
            self.data['auth_key'] = self.get_auth_key()
            team, created = models.FantasyTeam.objects.get_or_create(**self.data)
            return team
        else:
            return None
    def get_auth_key(self):
        """Return a fresh random auth key (UUID4 string)."""
        key = uuid.uuid4()
        return str(key)
class JsonFantasyPick(JsonObject):
    """JSON view of a single draft pick, with start/expiry timestamps
    rendered relative to an optional reference time (``now``)."""
    fields = ['pick_number',]
    functions = ['team', 'expires', 'starts', 'active']
    now = None
    def get_starts(self):
        return self.__as_json_time(self.db_object.starts)
    def get_expires(self):
        return self.__as_json_time(self.db_object.expires)
    def __as_json_time(self, when):
        # Shared helper: wrap a timestamp, propagating the reference time.
        json_time = JsonTime(when)
        if self.now is not None:
            json_time.now = self.now
        return json_time.json_dict()
    def get_team(self):
        json_team = JsonFantasyTeam(self.db_object.fantasy_team)
        json_team.show_picks = False
        return json_team.json_dict()
    def get_active(self):
        if self.now is not None:
            return self.db_object.is_active(self.now)
class JsonFantasySelection(JsonObject):
    """JSON view of a FantasySelection (a player taken at a pick).

    When ``show_team`` is set, the team is serialised at the top level
    and suppressed inside the nested pick/player to avoid duplication.
    """
    functions = ['team', 'draft_pick', 'player', 'when',]
    show_team = False
    def get_team(self):
        team = JsonFantasyTeam(self.db_object.draft_pick.fantasy_team)
        team.show_picks = False
        return team.json_dict()
    def get_draft_pick(self):
        pick = JsonFantasyPick(self.db_object.draft_pick)
        pick.show_selection_ids = False
        if self.show_team:
            # Team already shown at this level; drop it from the pick.
            pick.show_team = False
        return pick.json_dict()
    def get_player(self):
        player = nfl.JsonNflPlayer(self.db_object.player)
        player.show_team = self.show_team
        return player.json_dict()
    def get_when(self):
        return JsonTime(self.db_object.when).json_dict()
| [
"draft.PickBuilder",
"json.JsonTime",
"draftHost.models.FantasyTeam.objects.filter",
"django.utils.timezone.now",
"uuid.uuid4",
"draftHost.models.FantasyTeam.objects.get_or_create",
"datetime.datetime.now",
"nfl.JsonNflPlayer",
"draftHost.models.FantasyDraft.objects.get",
"json.EmailMasker",
"dr... | [((625, 680), 'draftHost.models.FantasyTeam.objects.filter', 'models.FantasyTeam.objects.filter', ([], {'draft': 'self.db_object'}), '(draft=self.db_object)\n', (658, 680), False, 'from draftHost import models\n'), ((1078, 1114), 'json.JsonTime', 'JsonTime', (['self.db_object.draft_start'], {}), '(self.db_object.draft_start)\n', (1086, 1114), False, 'from json import JsonObject, JsonTime, EmailMasker\n'), ((1134, 1148), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1146, 1148), False, 'from django.utils import timezone\n'), ((1424, 1515), 'draftHost.models.FantasySelection.objects.filter', 'models.FantasySelection.objects.filter', ([], {'draft_pick__fantasy_team__draft': 'self.db_object'}), '(draft_pick__fantasy_team__draft=self\n .db_object)\n', (1462, 1515), False, 'from draftHost import models\n'), ((2041, 2074), 'draft.PickBuilder', 'draft.PickBuilder', (['self.db_object'], {}), '(self.db_object)\n', (2058, 2074), False, 'import nfl, draft\n'), ((3185, 3242), 'draftHost.models.FantasyDraft.objects.get', 'models.FantasyDraft.objects.get', ([], {'pk': "self.data['draft_id']"}), "(pk=self.data['draft_id'])\n", (3216, 3242), False, 'from draftHost import models\n'), ((3859, 3871), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3869, 3871), False, 'import uuid\n'), ((4234, 4248), 'json.JsonTime', 'JsonTime', (['when'], {}), '(when)\n', (4242, 4248), False, 'from json import JsonObject, JsonTime, EmailMasker\n'), ((5158, 5198), 'nfl.JsonNflPlayer', 'nfl.JsonNflPlayer', (['self.db_object.player'], {}), '(self.db_object.player)\n', (5175, 5198), False, 'import nfl, draft\n'), ((2752, 2772), 'nfl.JsonNflPlayer', 'nfl.JsonNflPlayer', (['p'], {}), '(p)\n', (2769, 2772), False, 'import nfl, draft\n'), ((3700, 3753), 'draftHost.models.FantasyTeam.objects.get_or_create', 'models.FantasyTeam.objects.get_or_create', ([], {}), '(**self.data)\n', (3740, 3753), False, 'from draftHost import models\n'), ((2185, 2203), 'json.EmailMasker', 'EmailMasker', 
(['email'], {}), '(email)\n', (2196, 2203), False, 'from json import JsonObject, JsonTime, EmailMasker\n'), ((5315, 5344), 'json.JsonTime', 'JsonTime', (['self.db_object.when'], {}), '(self.db_object.when)\n', (5323, 5344), False, 'from json import JsonObject, JsonTime, EmailMasker\n'), ((1333, 1349), 'datetime.datetime.now', 'd.datetime.now', ([], {}), '()\n', (1347, 1349), True, 'import datetime as d\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
(c) 2017 - Copyright Red Hat Inc
Authors:
<NAME> <<EMAIL>>
Tests for flake8 compliance of the code
"""
from __future__ import unicode_literals, absolute_import
import os
import subprocess
import sys
import unittest
import six
# Absolute path to the package under test (../pagure relative to this file).
REPO_PATH = os.path.abspath(
    os.path.join(os.path.dirname(__file__), "..", "pagure")
)
# Absolute path to the directory containing this test module.
TESTS_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__)))
class TestStyle(unittest.TestCase):
    """This test class contains tests pertaining to code style."""
    def test_code_with_flake8(self):
        """Enforce PEP-8 compliance on the codebase.
        This test runs flake8 on the code, and will fail if it returns a
        non-zero exit code.
        """
        # We ignore E712, which disallows non-identity comparisons with True and False
        # We ignore W503, which disallows line break before binary operator
        flake8_command = [
            sys.executable,
            "-m",
            "flake8",
            "--ignore=E712,W503,E203,E902",
            "--max-line-length=80",
            REPO_PATH,
        ]
        # check if we have an old flake8 or not
        import flake8
        # Parse the version string into a mixed int/str tuple; numeric
        # parts become ints, non-numeric parts (e.g. 'dev1') stay strings.
        # NOTE(review): comparing a tuple containing strings against
        # (3, 0) raises TypeError on Python 3 — confirm versions like
        # '3.0.dev1' cannot occur here.
        flake8_v = flake8.__version__.split(".")
        for idx, val in enumerate(flake8_v):
            try:
                val = int(val)
            except ValueError:
                pass
            flake8_v[idx] = val
        old_flake = tuple(flake8_v) < (3, 0)
        if old_flake:
            raise unittest.SkipTest("Flake8 version too old to be useful")
        proc = subprocess.Popen(
            flake8_command, stdout=subprocess.PIPE, cwd=REPO_PATH
        )
        # communicate() also waits for the process so returncode is set.
        print(proc.communicate())
        self.assertEqual(proc.returncode, 0)
    @unittest.skipIf(
        not (six.PY3 and sys.version_info.minor >= 6),
        "Black is only available in python 3.6+",
    )
    def test_code_with_black(self):
        """Enforce black compliance on the codebase.
        This test runs black on the code, and will fail if it returns a
        non-zero exit code.
        """
        black_command = [
            sys.executable,
            "-m",
            "black",
            "-l",
            "79",
            "--check",
            "--diff",
            "--exclude",
            '"/(\.eggs|\.git|\.hg|\.mypy_cache|\.nox|\.tox|\.venv|_build|buck-out|build|dist)/"',
            REPO_PATH,
            TESTS_PATH,
        ]
        proc = subprocess.Popen(
            black_command,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=REPO_PATH,
        )
        stdout, stderr = proc.communicate()
        # Echo black's diff output so CI logs show what needs reformatting.
        print("stdout: ")
        print(stdout.decode("utf-8"))
        print("stderr: ")
        print(stderr.decode("utf-8"))
        self.assertEqual(proc.returncode, 0)
if __name__ == "__main__":
    # Allow running the style checks directly, outside a test runner.
    unittest.main(verbosity=2)
| [
"flake8.__version__.split",
"subprocess.Popen",
"unittest.skipIf",
"os.path.dirname",
"unittest.SkipTest",
"unittest.main"
] | [((1768, 1876), 'unittest.skipIf', 'unittest.skipIf', (['(not (six.PY3 and sys.version_info.minor >= 6))', '"""Black is only available in python 3.6+"""'], {}), "(not (six.PY3 and sys.version_info.minor >= 6),\n 'Black is only available in python 3.6+')\n", (1783, 1876), False, 'import unittest\n'), ((2872, 2898), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (2885, 2898), False, 'import unittest\n'), ((335, 360), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (350, 360), False, 'import os\n'), ((422, 447), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (437, 447), False, 'import os\n'), ((1222, 1251), 'flake8.__version__.split', 'flake8.__version__.split', (['"""."""'], {}), "('.')\n", (1246, 1251), False, 'import flake8\n'), ((1588, 1659), 'subprocess.Popen', 'subprocess.Popen', (['flake8_command'], {'stdout': 'subprocess.PIPE', 'cwd': 'REPO_PATH'}), '(flake8_command, stdout=subprocess.PIPE, cwd=REPO_PATH)\n', (1604, 1659), False, 'import subprocess\n'), ((2467, 2566), 'subprocess.Popen', 'subprocess.Popen', (['black_command'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'cwd': 'REPO_PATH'}), '(black_command, stdout=subprocess.PIPE, stderr=subprocess.\n PIPE, cwd=REPO_PATH)\n', (2483, 2566), False, 'import subprocess\n'), ((1515, 1571), 'unittest.SkipTest', 'unittest.SkipTest', (['"""Flake8 version too old to be useful"""'], {}), "('Flake8 version too old to be useful')\n", (1532, 1571), False, 'import unittest\n')] |
"""
Test `sinethesizer.effects.equalizer` module.
Author: <NAME>
"""
from typing import Any, Dict, List
import numpy as np
import pytest
from scipy.signal import spectrogram
from sinethesizer.effects.equalizer import apply_equalizer
from sinethesizer.synth.core import Event
from sinethesizer.oscillators import generate_mono_wave
@pytest.mark.parametrize(
"frequencies, frame_rate, kind, kwargs, spectrogram_params, expected",
[
(
# `frequencies`
[100 * x for x in range(1, 20)],
# `frame_rate`
10000,
# `kind`
'absolute',
# `kwargs`
{
'breakpoint_frequencies': [300, 700],
'gains': [0.2, 1.0],
},
# `spectrogram_params`
{'nperseg': 100},
# `expected`
# In this test case, `expected` contains summed over time power
# for frequencies 0, 100, 200, ..., 1900 respectively.
np.array(
[
0.0021011, 0.0249528, 0.0277226, 0.0387388, 0.0996291,
0.2081294, 0.3571571, 0.5181565, 0.55258, 0.557289,
0.5601418, 0.5615491, 0.5621033, 0.5622196, 0.5619461,
0.5608991, 0.5583538, 0.5535695, 0.5462548, 0.536942
]
)
),
(
# `frequencies`
[100 * x for x in range(1, 20)],
# `frame_rate`
10000,
# `kind`
'absolute',
# `kwargs`
{
'breakpoint_frequencies': [0, 500, 1200, 1900],
'gains': [0, 1.0, 0.1, 1.0],
},
# `spectrogram_params`
{'nperseg': 100},
# `expected`
# In this test case, `expected` contains summed over time power
# for frequencies 0, 100, 200, ..., 1900 respectively.
np.array(
[
0.0062764, 0.0342341, 0.0986968, 0.2045612, 0.3501325,
0.4880824, 0.4132437, 0.306272, 0.2138001, 0.1371348,
0.0776751, 0.03646, 0.0184661, 0.0364665, 0.0775099,
0.136432, 0.2119483, 0.3025262, 0.4070148, 0.5069672
]
)
),
(
# `frequencies`
[100 * x for x in range(1, 20)],
# `frame_rate`
10000,
# `kind`
'absolute',
# `kwargs`
{
'breakpoint_frequencies': [0, 500, 1200, 1900, 5000],
'gains': [0, 1.0, 0.1, 1.0, 1.0],
},
# `spectrogram_params`
{'nperseg': 100},
# `expected`
# In this test case, `expected` contains summed over time power
# for frequencies 0, 100, 200, ..., 1900 respectively.
np.array(
[
0.0062764, 0.0342341, 0.0986968, 0.2045612, 0.3501325,
0.4880824, 0.4132437, 0.306272, 0.2138001, 0.1371348,
0.0776751, 0.03646, 0.0184661, 0.0364665, 0.0775099,
0.136432, 0.2119483, 0.3025262, 0.4070148, 0.5069672
]
)
),
(
# `frequencies`
[100 * x for x in range(1, 20)],
# `frame_rate`
10000,
# `kind`
'relative',
# `kwargs`
{
'breakpoint_frequencies_ratios': [0, 5, 12, 19, 50],
'gains': [0, 1.0, 0.1, 1.0, 1.0],
},
# `spectrogram_params`
{'nperseg': 100},
# `expected`
# In this test case, `expected` contains summed over time power
# for frequencies 0, 100, 200, ..., 1900 respectively.
np.array(
[
0.0062764, 0.0342341, 0.0986968, 0.2045612, 0.3501325,
0.4880824, 0.4132437, 0.306272, 0.2138001, 0.1371348,
0.0776751, 0.03646, 0.0184661, 0.0364665, 0.0775099,
0.136432, 0.2119483, 0.3025262, 0.4070148, 0.5069672
]
)
),
]
)
def test_apply_equalizer(
        frequencies: List[float], frame_rate: int, kind: str,
        kwargs: Dict[str, Any], spectrogram_params: Dict[str, Any],
        expected: np.ndarray
) -> None:
    """Test `apply_equalizer` function."""
    # Build a 1-second test signal: a sum of unit-amplitude sine waves,
    # one per requested frequency.
    waves = [
        generate_mono_wave(
            'sine', frequency, np.ones(frame_rate), frame_rate
        )
        for frequency in frequencies
    ]
    sound = sum(waves)
    # Duplicate the mono signal into two rows (stereo-like layout).
    sound = np.vstack((sound, sound))
    event = Event(
        instrument='any_instrument',
        start_time=0,
        duration=1,
        frequency=min(frequencies),
        velocity=1,
        effects='',
        frame_rate=frame_rate
    )
    sound = apply_equalizer(sound, event, kind, **kwargs)
    # Compare per-frequency power (spectrogram summed over time) of the
    # first channel against the expected band gains.
    spc = spectrogram(sound[0], frame_rate, **spectrogram_params)[2]
    result = spc.sum(axis=1)[:len(expected)]
    np.testing.assert_almost_equal(result, expected)
| [
"sinethesizer.effects.equalizer.apply_equalizer",
"numpy.ones",
"scipy.signal.spectrogram",
"numpy.testing.assert_almost_equal",
"numpy.array",
"numpy.vstack"
] | [((4672, 4697), 'numpy.vstack', 'np.vstack', (['(sound, sound)'], {}), '((sound, sound))\n', (4681, 4697), True, 'import numpy as np\n'), ((4920, 4965), 'sinethesizer.effects.equalizer.apply_equalizer', 'apply_equalizer', (['sound', 'event', 'kind'], {}), '(sound, event, kind, **kwargs)\n', (4935, 4965), False, 'from sinethesizer.effects.equalizer import apply_equalizer\n'), ((5084, 5132), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['result', 'expected'], {}), '(result, expected)\n', (5114, 5132), True, 'import numpy as np\n'), ((4976, 5031), 'scipy.signal.spectrogram', 'spectrogram', (['sound[0]', 'frame_rate'], {}), '(sound[0], frame_rate, **spectrogram_params)\n', (4987, 5031), False, 'from scipy.signal import spectrogram\n'), ((4552, 4571), 'numpy.ones', 'np.ones', (['frame_rate'], {}), '(frame_rate)\n', (4559, 4571), True, 'import numpy as np\n'), ((1007, 1247), 'numpy.array', 'np.array', (['[0.0021011, 0.0249528, 0.0277226, 0.0387388, 0.0996291, 0.2081294, \n 0.3571571, 0.5181565, 0.55258, 0.557289, 0.5601418, 0.5615491, \n 0.5621033, 0.5622196, 0.5619461, 0.5608991, 0.5583538, 0.5535695, \n 0.5462548, 0.536942]'], {}), '([0.0021011, 0.0249528, 0.0277226, 0.0387388, 0.0996291, 0.2081294,\n 0.3571571, 0.5181565, 0.55258, 0.557289, 0.5601418, 0.5615491, \n 0.5621033, 0.5622196, 0.5619461, 0.5608991, 0.5583538, 0.5535695, \n 0.5462548, 0.536942])\n', (1015, 1247), True, 'import numpy as np\n'), ((1953, 2193), 'numpy.array', 'np.array', (['[0.0062764, 0.0342341, 0.0986968, 0.2045612, 0.3501325, 0.4880824, \n 0.4132437, 0.306272, 0.2138001, 0.1371348, 0.0776751, 0.03646, \n 0.0184661, 0.0364665, 0.0775099, 0.136432, 0.2119483, 0.3025262, \n 0.4070148, 0.5069672]'], {}), '([0.0062764, 0.0342341, 0.0986968, 0.2045612, 0.3501325, 0.4880824,\n 0.4132437, 0.306272, 0.2138001, 0.1371348, 0.0776751, 0.03646, \n 0.0184661, 0.0364665, 0.0775099, 0.136432, 0.2119483, 0.3025262, \n 0.4070148, 0.5069672])\n', (1961, 2193), True, 'import numpy 
as np\n'), ((2910, 3150), 'numpy.array', 'np.array', (['[0.0062764, 0.0342341, 0.0986968, 0.2045612, 0.3501325, 0.4880824, \n 0.4132437, 0.306272, 0.2138001, 0.1371348, 0.0776751, 0.03646, \n 0.0184661, 0.0364665, 0.0775099, 0.136432, 0.2119483, 0.3025262, \n 0.4070148, 0.5069672]'], {}), '([0.0062764, 0.0342341, 0.0986968, 0.2045612, 0.3501325, 0.4880824,\n 0.4132437, 0.306272, 0.2138001, 0.1371348, 0.0776751, 0.03646, \n 0.0184661, 0.0364665, 0.0775099, 0.136432, 0.2119483, 0.3025262, \n 0.4070148, 0.5069672])\n', (2918, 3150), True, 'import numpy as np\n'), ((3866, 4106), 'numpy.array', 'np.array', (['[0.0062764, 0.0342341, 0.0986968, 0.2045612, 0.3501325, 0.4880824, \n 0.4132437, 0.306272, 0.2138001, 0.1371348, 0.0776751, 0.03646, \n 0.0184661, 0.0364665, 0.0775099, 0.136432, 0.2119483, 0.3025262, \n 0.4070148, 0.5069672]'], {}), '([0.0062764, 0.0342341, 0.0986968, 0.2045612, 0.3501325, 0.4880824,\n 0.4132437, 0.306272, 0.2138001, 0.1371348, 0.0776751, 0.03646, \n 0.0184661, 0.0364665, 0.0775099, 0.136432, 0.2119483, 0.3025262, \n 0.4070148, 0.5069672])\n', (3874, 4106), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import os
import sys
import time
import socket
import psutil
import datetime
from collections import defaultdict
import lumbermill.utils.DictUtils as DictUtils
from lumbermill.BaseThreadedModule import BaseThreadedModule
from lumbermill.utils.Decorators import ModuleDocstringParser, setInterval
from lumbermill.utils.StatisticCollector import StatisticCollector, MultiProcessStatisticCollector
from lumbermill.utils.misc import AnsiColors, TimedFunctionManager
@ModuleDocstringParser
class SimpleStats(BaseThreadedModule):
"""
Collect and log some simple statistic data.
Use this module if you just need some simple statistics on how many events are passing through
Per default, statistics will just be send to stdout.
As a side note: This module inits MultiProcessStatisticCollector. As it uses multiprocessing.Manager().dict()
this will start another process. So if you use SimpleStats, you will see workers + 1 processes in the process
list.
For possible values for process_statistics see: https://code.google.com/archive/p/psutil/wikis/Documentation.wiki#CPU
Configuration template:
- SimpleStats:
interval: # <default: 10; type: integer; is: optional>
event_type_statistics: # <default: True; type: boolean; is: optional>
receive_rate_statistics: # <default: True; type: boolean; is: optional>
waiting_event_statistics: # <default: False; type: boolean; is: optional>
process_statistics: # <default: ['cpu_percent','memory_percent']; type: boolean||list; is: optional>
emit_as_event: # <default: False; type: boolean; is: optional>
"""
module_type = "misc"
"""Set module type"""
    def configure(self, configuration):
        """Read module configuration and initialise per-worker and shared
        (multi-process) statistic collectors.

        :param configuration: parsed module configuration dict.
        """
        # Call parent configure method
        BaseThreadedModule.configure(self, configuration)
        self.emit_as_event = self.getConfigurationValue('emit_as_event')
        self.interval = self.getConfigurationValue('interval')
        self.event_type_statistics = self.getConfigurationValue('event_type_statistics')
        self.process_statistics = self.getConfigurationValue('process_statistics')
        # All counters of this module live under a dedicated namespace.
        self.stats_namespace = "SimpleStats"
        # Per-worker collector plus a shared cross-process collector
        # (the latter spawns a multiprocessing.Manager process).
        self.stats_collector = StatisticCollector()
        self.mp_stats_collector = MultiProcessStatisticCollector()
        self.stats_collector.initCounter(self.stats_namespace)
        self.mp_stats_collector.initCounter(self.stats_namespace)
        self.module_queues = {}
        self.psutil_processes = []
        self.last_values = {'events_received': 0}
        self.methods = dir(self)
    def getRunTimedFunctionsFunc(self):
        """Return a function that, scheduled every ``interval`` seconds,
        accumulates worker-local counters into the shared collector and —
        on the master process only — logs the interval statistics."""
        @setInterval(self.interval)
        def evaluateStats():
            self.accumulateReceiveRateStats()
            self.accumulateEventTypeStats()
            # Only the master prints, so stats appear once, not per worker.
            if self.lumbermill.is_master():
                self.printIntervalStatistics()
        return evaluateStats
def accumulateEventTypeStats(self):
for event_type, count in self.stats_collector.getAllCounters(namespace=self.stats_namespace).items():
if count == 0:
continue
self.mp_stats_collector.incrementCounter(event_type, count, namespace=self.stats_namespace)
self.stats_collector.resetCounter(event_type, namespace=self.stats_namespace)
def accumulateReceiveRateStats(self):
if self.stats_collector.getCounter('events_received', namespace=self.stats_namespace) == 0:
return
self.mp_stats_collector.incrementCounter('events_received', self.stats_collector.getCounter('events_received'), namespace=self.stats_namespace)
self.stats_collector.resetCounter('events_received', namespace=self.stats_namespace)
def printIntervalStatistics(self):
self.logger.info("############# Statistics (PID: %s) #############" % os.getpid())
if self.getConfigurationValue('receive_rate_statistics'):
self.receiveRateStatistics()
if self.getConfigurationValue('event_type_statistics'):
self.eventTypeStatistics()
if self.getConfigurationValue('waiting_event_statistics'):
self.eventsInQueuesStatistics()
if self.getConfigurationValue('process_statistics'):
self.processStatistics()
    def receiveRateStatistics(self):
        """Log (and optionally emit as an event) the number of events
        received during the last interval, then roll the shared counter
        over into 'last_events_received' and reset it."""
        self.logger.info(">> Receive rate stats")
        events_received = self.mp_stats_collector.getCounter('events_received', namespace=self.stats_namespace)
        # If LumberMill is shutting down and running with multiple processes, we might end up with an empty return value.
        if not events_received:
            return
        self.logger.info("Received events in %ss: %s%s (%s/eps)%s" % (self.getConfigurationValue('interval'), AnsiColors.YELLOW, events_received, (events_received/self.interval), AnsiColors.ENDC))
        if self.emit_as_event:
            # Push the stats downstream as a synthetic 'statistic' event.
            self.sendEvent(DictUtils.getDefaultEventDict({"stats_type": "receiverate_stats", "receiverate_count": events_received, "receiverate_count_per_sec": int((events_received/self.interval)), "interval": self.interval, "timestamp": time.time()}, caller_class_name="Statistics", event_type="statistic"))
        self.mp_stats_collector.setCounter('last_events_received', events_received, namespace=self.stats_namespace)
        self.mp_stats_collector.resetCounter('events_received', namespace=self.stats_namespace)
    def eventTypeStatistics(self):
        """Log per-event-type hit counts for the last interval, optionally emit
        them as statistic events, and roll each counter over into a
        'last_<counter>' snapshot before resetting it."""
        self.logger.info(">> EventTypes Statistics")
        try:
            # Event-type counters are keyed 'event_type_<name>'; iterate all
            # counters in sorted order and skip unrelated ones.
            for event_type in sorted(self.mp_stats_collector.getAllCounters(namespace=self.stats_namespace).keys()):
                if not event_type.startswith('event_type_'):
                    continue
                count = self.mp_stats_collector.getCounter(event_type, namespace=self.stats_namespace)
                event_name = event_type.replace('event_type_', '').lower()
                self.logger.info("EventType: %s%s%s - Hits: %s%s%s" % (AnsiColors.YELLOW, event_name, AnsiColors.ENDC, AnsiColors.YELLOW, count, AnsiColors.ENDC))
                if self.emit_as_event:
                    self.sendEvent(DictUtils.getDefaultEventDict({"stats_type": "event_type_stats", "%s_count" % event_name: count, "%s_count_per_sec" % event_name:int((count/self.interval)), "interval": self.interval, "timestamp": time.time()}, caller_class_name="Statistics", event_type="statistic"))
                # Snapshot the value for getLastEventTypeCounter(), then reset.
                self.mp_stats_collector.setCounter("last_%s" % event_type, count, namespace=self.stats_namespace)
                self.mp_stats_collector.resetCounter(event_type, namespace=self.stats_namespace)
        except BrokenPipeError:
            # BrokenPipeError may be thrown when exiting via CTRL+C. Ignore it.
            pass
        except socket.error as e:
            # socket.error: [Errno 2] No such file or directory may be thrown when exiting via CTRL+C. Ignore it.
            etype, evalue, etb = sys.exc_info()
            if "No such file or directory" in str(evalue):
                pass
            else:
                raise e
def eventsInQueuesStatistics(self):
if len(self.module_queues) == 0:
return
self.logger.info(">> Queue stats")
for module_name, queue in sorted(self.module_queues.items()):
try:
self.logger.info("Events in %s queue: %s%s%s" % (module_name, AnsiColors.YELLOW, queue.qsize(), AnsiColors.ENDC))
except NotImplementedError:
self.logger.info("Getting queue size of multiprocessed queues is not implemented for this platform.")
return
if self.emit_as_event:
self.sendEvent(DictUtils.getDefaultEventDict({"stats_type": "queue_stats", "count": queue.qsize(), "interval": self.interval, "timestamp": time.time()}, caller_class_name="Statistics", event_type="statistic"))
def processStatistics(self):
stats_event = {"stats_type": "process_stats", "timestamp": time.time()}
stats_event["worker_count"] = len(self.lumbermill.child_processes) + 1
stats_event["uptime"] = int(time.time() - self.psutil_processes[0].create_time())
self.logger.info(">> Process stats")
self.logger.info("num workers: %d" % (len(self.lumbermill.child_processes)+1))
self.logger.info("started: %s" % datetime.datetime.fromtimestamp(self.psutil_processes[0].create_time()).strftime("%Y-%m-%d %H:%M:%S"))
aggregated_metrics = defaultdict(int)
for psutil_process in self.psutil_processes:
stats_event["pid"] = psutil_process.pid
for metric_name, metric_value in psutil_process.as_dict(self.process_statistics).items():
# Call metric specific method if it exists.
if "convertMetric_%s" % metric_name in self.methods:
metric_name, metric_value = getattr(self, "convertMetric_%s" % self.action)(metric_name, metric_value)
try:
aggregated_metrics[metric_name] += metric_value
except TypeError:
try:
metric_value = dict(metric_value.__dict__)
except:
pass
try:
stats_event[metric_name].append(metric_value)
except KeyError:
stats_event[metric_name] = [metric_value]
self.logger.info("%s(pid: %s): %s" % (metric_name, psutil_process.pid, metric_value))
if self.emit_as_event:
self.sendEvent(DictUtils.getDefaultEventDict(stats_event, caller_class_name="Statistics", event_type="statistic"))
for agg_metric_name, agg_metric_value in aggregated_metrics.items():
self.logger.info("%s: %s" % (agg_metric_name, agg_metric_value))
if self.emit_as_event:
self.sendEvent(DictUtils.getDefaultEventDict(aggregated_metrics, caller_class_name="Statistics", event_type="statistic"))
def getLastReceiveCount(self):
try:
received_counter = self.mp_stats_collector.getCounter('last_events_received', namespace=self.stats_namespace)
except KeyError:
received_counter = 0
return received_counter
def getLastEventTypeCounter(self):
event_type_counter = {}
for event_type in sorted(self.mp_stats_collector.getAllCounters(namespace=self.stats_namespace).keys()):
if not event_type.startswith('last_event_type_'):
continue
count = self.mp_stats_collector.getCounter(event_type, namespace=self.stats_namespace)
event_name = event_type.replace('last_event_type_', '').lower()
event_type_counter[event_name] = count
return event_type_counter
def getEventsInQueuesCounter(self):
event_queue_counter = {}
for module_name, queue in sorted(self.module_queues.items()):
try:
event_queue_counter[module_name] = queue.qsize()
except NotImplementedError:
self.logger.debug("Getting queue size of multiprocessed queues is not implemented for this platform.")
continue
return event_queue_counter
    def initAfterFork(self):
        """Late initialization that must run after forking: wire up the module
        queues, register psutil handles for the main process and all workers,
        and start the periodic statistics timer."""
        # Get all configured queues for waiting event stats.
        self.module_queues = self.lumbermill.getAllQueues()
        self.psutil_processes.append(psutil.Process(self.lumbermill.getMainProcessId()))
        for worker in self.lumbermill.child_processes:
            self.psutil_processes.append(psutil.Process(worker.pid))
        # Run the interval statistics function periodically in the background.
        TimedFunctionManager.startTimedFunction(self.getRunTimedFunctionsFunc())
        BaseThreadedModule.initAfterFork(self)
def handleEvent(self, event):
self.stats_collector.incrementCounter('events_received', namespace=self.stats_namespace)
if self.event_type_statistics:
try:
self.stats_collector.incrementCounter('event_type_%s' % event['lumbermill']['event_type'], namespace=self.stats_namespace)
except:
pass
yield event
    def shutDown(self):
        """Flush local counters to the shared collector and, on the master
        process only, print the final statistics before shutting down."""
        self.accumulateReceiveRateStats()
        self.accumulateEventTypeStats()
        # Only the master prints, to avoid duplicate output from every worker.
        if self.lumbermill.is_master():
            self.printIntervalStatistics()
        self.mp_stats_collector.shutDown()
        BaseThreadedModule.shutDown(self)
| [
"lumbermill.utils.Decorators.setInterval",
"lumbermill.utils.StatisticCollector.StatisticCollector",
"psutil.Process",
"lumbermill.BaseThreadedModule.BaseThreadedModule.shutDown",
"lumbermill.utils.StatisticCollector.MultiProcessStatisticCollector",
"sys.exc_info",
"collections.defaultdict",
"os.getpi... | [((1880, 1929), 'lumbermill.BaseThreadedModule.BaseThreadedModule.configure', 'BaseThreadedModule.configure', (['self', 'configuration'], {}), '(self, configuration)\n', (1908, 1929), False, 'from lumbermill.BaseThreadedModule import BaseThreadedModule\n'), ((2314, 2334), 'lumbermill.utils.StatisticCollector.StatisticCollector', 'StatisticCollector', ([], {}), '()\n', (2332, 2334), False, 'from lumbermill.utils.StatisticCollector import StatisticCollector, MultiProcessStatisticCollector\n'), ((2369, 2401), 'lumbermill.utils.StatisticCollector.MultiProcessStatisticCollector', 'MultiProcessStatisticCollector', ([], {}), '()\n', (2399, 2401), False, 'from lumbermill.utils.StatisticCollector import StatisticCollector, MultiProcessStatisticCollector\n'), ((2731, 2757), 'lumbermill.utils.Decorators.setInterval', 'setInterval', (['self.interval'], {}), '(self.interval)\n', (2742, 2757), False, 'from lumbermill.utils.Decorators import ModuleDocstringParser, setInterval\n'), ((8515, 8531), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (8526, 8531), False, 'from collections import defaultdict\n'), ((11743, 11781), 'lumbermill.BaseThreadedModule.BaseThreadedModule.initAfterFork', 'BaseThreadedModule.initAfterFork', (['self'], {}), '(self)\n', (11775, 11781), False, 'from lumbermill.BaseThreadedModule import BaseThreadedModule\n'), ((12411, 12444), 'lumbermill.BaseThreadedModule.BaseThreadedModule.shutDown', 'BaseThreadedModule.shutDown', (['self'], {}), '(self)\n', (12438, 12444), False, 'from lumbermill.BaseThreadedModule import BaseThreadedModule\n'), ((8028, 8039), 'time.time', 'time.time', ([], {}), '()\n', (8037, 8039), False, 'import time\n'), ((3919, 3930), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3928, 3930), False, 'import os\n'), ((6987, 7001), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (6999, 7001), False, 'import sys\n'), ((8156, 8167), 'time.time', 'time.time', ([], {}), '()\n', (8165, 8167), False, 'import 
time\n'), ((9945, 10055), 'lumbermill.utils.DictUtils.getDefaultEventDict', 'DictUtils.getDefaultEventDict', (['aggregated_metrics'], {'caller_class_name': '"""Statistics"""', 'event_type': '"""statistic"""'}), "(aggregated_metrics, caller_class_name=\n 'Statistics', event_type='statistic')\n", (9974, 10055), True, 'import lumbermill.utils.DictUtils as DictUtils\n'), ((11626, 11652), 'psutil.Process', 'psutil.Process', (['worker.pid'], {}), '(worker.pid)\n', (11640, 11652), False, 'import psutil\n'), ((9633, 9735), 'lumbermill.utils.DictUtils.getDefaultEventDict', 'DictUtils.getDefaultEventDict', (['stats_event'], {'caller_class_name': '"""Statistics"""', 'event_type': '"""statistic"""'}), "(stats_event, caller_class_name='Statistics',\n event_type='statistic')\n", (9662, 9735), True, 'import lumbermill.utils.DictUtils as DictUtils\n'), ((5190, 5201), 'time.time', 'time.time', ([], {}), '()\n', (5199, 5201), False, 'import time\n'), ((7856, 7867), 'time.time', 'time.time', ([], {}), '()\n', (7865, 7867), False, 'import time\n'), ((6394, 6405), 'time.time', 'time.time', ([], {}), '()\n', (6403, 6405), False, 'import time\n')] |
"""empty message
Revision ID: <KEY>
Revises: <KEY>
Create Date: 2018-06-12 14:27:07.207294
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Column definitions for the 'movies' table, in creation order.
    movie_columns = [
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('showname', sa.String(length=32), nullable=True),
        sa.Column('shownameen', sa.String(length=64), nullable=True),
        sa.Column('director', sa.String(length=32), nullable=True),
        sa.Column('leadingRole', sa.String(length=256), nullable=True),
        sa.Column('type', sa.String(length=32), nullable=True),
        sa.Column('country', sa.String(length=64), nullable=True),
        sa.Column('language', sa.String(length=32), nullable=True),
        sa.Column('duration', sa.Integer(), nullable=True),
        sa.Column('screeningmodel', sa.String(length=16), nullable=True),
        sa.Column('openday', sa.DateTime(), nullable=True),
        sa.Column('backgroundpicture', sa.String(length=256), nullable=True),
        sa.Column('flag', sa.Integer(), nullable=True),
        sa.Column('isdelete', sa.Boolean(), nullable=True),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('movies', *movie_columns)
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Revert the upgrade by dropping the 'movies' table entirely.
    op.drop_table('movies')
    # ### end Alembic commands ###
| [
"sqlalchemy.DateTime",
"alembic.op.drop_table",
"sqlalchemy.Boolean",
"sqlalchemy.PrimaryKeyConstraint",
"sqlalchemy.Integer",
"sqlalchemy.String"
] | [((1410, 1433), 'alembic.op.drop_table', 'op.drop_table', (['"""movies"""'], {}), "('movies')\n", (1423, 1433), False, 'from alembic import op\n'), ((1250, 1279), 'sqlalchemy.PrimaryKeyConstraint', 'sa.PrimaryKeyConstraint', (['"""id"""'], {}), "('id')\n", (1273, 1279), True, 'import sqlalchemy as sa\n'), ((402, 414), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (412, 414), True, 'import sqlalchemy as sa\n'), ((459, 479), 'sqlalchemy.String', 'sa.String', ([], {'length': '(32)'}), '(length=32)\n', (468, 479), True, 'import sqlalchemy as sa\n'), ((525, 545), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (534, 545), True, 'import sqlalchemy as sa\n'), ((589, 609), 'sqlalchemy.String', 'sa.String', ([], {'length': '(32)'}), '(length=32)\n', (598, 609), True, 'import sqlalchemy as sa\n'), ((656, 677), 'sqlalchemy.String', 'sa.String', ([], {'length': '(256)'}), '(length=256)\n', (665, 677), True, 'import sqlalchemy as sa\n'), ((717, 737), 'sqlalchemy.String', 'sa.String', ([], {'length': '(32)'}), '(length=32)\n', (726, 737), True, 'import sqlalchemy as sa\n'), ((780, 800), 'sqlalchemy.String', 'sa.String', ([], {'length': '(64)'}), '(length=64)\n', (789, 800), True, 'import sqlalchemy as sa\n'), ((844, 864), 'sqlalchemy.String', 'sa.String', ([], {'length': '(32)'}), '(length=32)\n', (853, 864), True, 'import sqlalchemy as sa\n'), ((908, 920), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (918, 920), True, 'import sqlalchemy as sa\n'), ((970, 990), 'sqlalchemy.String', 'sa.String', ([], {'length': '(16)'}), '(length=16)\n', (979, 990), True, 'import sqlalchemy as sa\n'), ((1033, 1046), 'sqlalchemy.DateTime', 'sa.DateTime', ([], {}), '()\n', (1044, 1046), True, 'import sqlalchemy as sa\n'), ((1099, 1120), 'sqlalchemy.String', 'sa.String', ([], {'length': '(256)'}), '(length=256)\n', (1108, 1120), True, 'import sqlalchemy as sa\n'), ((1160, 1172), 'sqlalchemy.Integer', 'sa.Integer', ([], {}), '()\n', (1170, 1172), 
True, 'import sqlalchemy as sa\n'), ((1216, 1228), 'sqlalchemy.Boolean', 'sa.Boolean', ([], {}), '()\n', (1226, 1228), True, 'import sqlalchemy as sa\n')] |
from __future__ import annotations
import datetime
import json
from abc import ABC, abstractmethod
from collections import defaultdict, deque, namedtuple
from typing import (
Any,
Deque,
Dict,
Iterator,
List,
Mapping,
Optional,
Set,
TextIO,
Tuple,
)
import numpy as np
import scipy.sparse
from . import labeler, ontology, timeline, utils
ColumnValue = namedtuple("ColumnValue", ["column", "value"])
"""A value for a particular column
.. py:attribute:: column
The index for the column
.. py:attribute:: value
The value for that column
"""
class FeaturizerList:
    """
    Featurizer list consists of a list of featurizers to be used to featurize data.
    It enables training, featurization, column name extraction and serialization/deserialization.
    """

    def __init__(self, featurizers: List[Featurizer]):
        """Create the FeaturizerList from a sequence of featurizers.

        Args:
            featurizers (list of :class:`Featurizer`): The featurizers to use for
                transforming the patients.
        """
        self.featurizers = featurizers

    def train_featurizers(
        self,
        timelines: timeline.TimelineReader,
        labeler: labeler.Labeler,
        end_date: Optional[datetime.date] = None,
    ) -> None:
        """
        Train a list of featurizers on the provided patients using the given labeler.

        Args:
            timelines (:class:`stride_ml.timeline.TimelineReader`): The timelines to read from.
            labeler (:class:`stride_ml.labeler.Labeler`): The labeler to train with.
            end_date (datetime.date): An optional date used to filter data off the end of the timeline.
        """
        # Skip the (potentially expensive) pass over the data entirely when no
        # featurizer requires training.
        any_needs_training = any(
            featurizer.needs_training() for featurizer in self.featurizers
        )
        if not any_needs_training:
            return
        all_patients = labeler.get_all_patient_ids()
        for patient_id in timelines.get_patient_ids():
            # None means the labeler covers every patient.
            if all_patients is not None and patient_id not in all_patients:
                continue
            patient = timelines.get_patient(patient_id, end_date=end_date)
            labels = labeler.label(patient)
            if len(labels) == 0:
                continue
            label_indices = {label.day_index for label in labels}
            for featurizer in self.featurizers:
                if featurizer.needs_training():
                    featurizer.train(patient, label_indices)
        for featurizer in self.featurizers:
            featurizer.finalize_training()

    def featurize(
        self,
        timelines: timeline.TimelineReader,
        labeler: labeler.Labeler,
        end_date: Optional[datetime.date] = None,
    ) -> Tuple[Any, Any, Any, Any]:
        """
        Apply a list of featurizers to obtain a feature matrix and label vector for the given patients.

        Args:
            timelines (:class:`stride_ml.timeline.TimelineReader`): The timelines to read from.
            labeler (:class:`stride_ml.labeler.Labeler`): The labeler to compute labels with.
            end_date (datetime.date): An optional date used to filter data off the end of the timeline.

        Returns:
            This returns a tuple (data_matrix, labels, patient_ids, patient_day_indices).
            data_matrix is a sparse matrix of all the features of all the featurizers.
            labels is a list of boolean values representing the labels for each row in the matrix.
            patient_ids is a list of the patient ids for each row.
            patient_day_indices is a list of the day indices for each row.
        """
        # CSR building blocks, filled incrementally row by row.
        data = []
        indices: List[int] = []
        indptr = []
        result_labels = []
        patient_ids = []
        patient_day_indices = []
        all_patients = labeler.get_all_patient_ids()
        for patient_id in timelines.get_patient_ids():
            if all_patients is not None and patient_id not in all_patients:
                continue
            patient = timelines.get_patient(patient_id, end_date=end_date)
            labels = labeler.label(patient)
            if len(labels) == 0:
                continue
            label_indices = set()
            for label in labels:
                if label.day_index in label_indices:
                    raise ValueError(
                        "The provided labeler is invalid as it contains multiple labels "
                        f"for patient {patient.patient_id} at day index {label.day_index}"
                    )
                label_indices.add(label.day_index)
            columns_by_featurizer = []
            for featurizer in self.featurizers:
                columns = featurizer.transform(patient, label_indices)
                # Bug fix: the second message fragment was a plain string, so
                # its braces were emitted literally; it is now an f-string
                # interpolating the actual values.
                assert len(columns) == len(label_indices), (
                    f"The featurizer {featurizer} didn't provide enough rows for {labeler}"
                    f" on patient {patient_id} ({len(columns)} != {len(label_indices)})"
                )
                columns_by_featurizer.append(columns)
            for i, label in enumerate(labels):
                indptr.append(len(indices))
                result_labels.append(label.is_positive)
                patient_ids.append(patient.patient_id)
                patient_day_indices.append(label.day_index)
                # Each featurizer owns a contiguous range of columns; offset
                # its local column indices into the global column space.
                column_offset = 0
                for j, feature_columns in enumerate(columns_by_featurizer):
                    for column, value in feature_columns[i]:
                        assert (
                            0 <= column < self.featurizers[j].num_columns()
                        ), (
                            f"The featurizer {self.featurizers[j]} provided an out of bounds column for "
                            f"{labeler} on patient {patient.patient_id} ({column} should be between 0 and "
                            f"{self.featurizers[j].num_columns()})"
                        )
                        indices.append(column_offset + column)
                        data.append(value)
                    column_offset += self.featurizers[j].num_columns()
        total_columns = sum(
            featurizer.num_columns() for featurizer in self.featurizers
        )
        # Close the final row of the CSR structure.
        indptr.append(len(indices))
        data_matrix = scipy.sparse.csr_matrix(
            (
                np.array(data, dtype=np.float32),
                np.array(indices, dtype=np.int32),
                np.array(indptr, dtype=np.int32),
            ),
            shape=(len(result_labels), total_columns),
        )
        return (
            data_matrix,
            np.array(result_labels, dtype=np.float32),
            np.array(patient_ids, dtype=np.int32),
            np.array(patient_day_indices, dtype=np.int32),
        )

    def get_column_name(self, column_index: int) -> str:
        """Return a human readable name for a global column index by delegating
        to the featurizer that owns the column's range."""
        offset = 0
        for featurizer in self.featurizers:
            if offset <= column_index < (offset + featurizer.num_columns()):
                return f"Featurizer {featurizer}, {featurizer.get_column_name(column_index - offset)}"
            offset += featurizer.num_columns()
        # Raise explicitly instead of `assert False`, which is stripped under -O.
        raise AssertionError("This should never happen")

    def save(self, fp: TextIO) -> None:
        """Serialize every featurizer's state to the given file as JSON."""
        json.dump([featurizer.to_dict() for featurizer in self.featurizers], fp)

    def load(self, fp: TextIO) -> None:
        """Restore every featurizer's state from a file written by save()."""
        for data_for_featurizer, featurizer in zip(
            json.load(fp), self.featurizers
        ):
            featurizer.from_dict(data_for_featurizer)
class Featurizer(ABC):
    """Abstract interface for featurizers.

    A featurizer converts a patient timeline into rows of (column, value)
    pairs, one row per labeled day. Stateful featurizers may additionally
    implement training and (de)serialization hooks.
    """
    def train(self, patient: timeline.Patient, label_indices: Set[int]) -> None:
        """
        Train the featurizer on the given patients and label indices.

        This should do nothing if the featurizer doesn't need training.

        Args:
            patient: A patient to train on.
            label_indices (:obj:set: of int): The set of indices for that patient.
        """
        pass

    def finalize_training(self) -> None:
        """
        Finish the featurizer at the end of training. This is not needed for every
        featurizer, but does become necessary for things like verifying counts, etc.
        """
        pass  # The default version does nothing

    @abstractmethod
    def num_columns(self) -> int:
        """
        Returns: The number of columns that this featurizer creates.
        """

    @abstractmethod
    def transform(
        self, patient: timeline.Patient, label_indices: Set[int]
    ) -> List[List[ColumnValue]]:
        """
        Transform a patient into a series of rows using the specified timepoints.

        Args:
            patient: The patient to train on.
            label_indices (:obj:set of int): The indices which will be labeled.

        Returns:
            A list of rows. Each row in turn is a list of :class:`ColumnValues<ColumnValue>` for the
            values in each column.
        """

    def to_dict(self) -> Dict[str, Any]:
        """
        Serialize the featurizer to a JSON compatible dict

        Returns:
            A JSON compatible dict.
        """
        return {}

    def from_dict(self, data: Mapping[str, Any]) -> None:
        """
        Restore the state of the featurizer from a JSON compatible dict.

        Args:
            data: A JSON compatible dict from to_dict
        """
        pass

    def get_column_name(self, column_index: int) -> str:
        """
        An optional method that enables the user to get the name of a column.

        Args:
            column_index: The index of the column
        """
        return "no name"

    def needs_training(self) -> bool:
        # Stateless featurizers keep the default and skip the training pass.
        return False
###########################################
# Useful featurizers
###########################################
class AgeFeaturizer(Featurizer):
    """Featurizes the patient age at each labeled day in a single column,
    optionally standardized with statistics gathered during training."""

    def __init__(self, normalize: bool = True):
        self.normalize = normalize
        self.age_statistics = utils.OnlineStatistics()

    def train(self, patient: timeline.Patient, label_indices: Set[int]) -> None:
        """Accumulate age statistics over the labeled days (only needed when
        normalizing)."""
        if not self.normalize:
            return
        for day_index, day in enumerate(patient.days):
            if day_index in label_indices:
                self.age_statistics.add(day.age)

    def num_columns(self) -> int:
        return 1

    def transform(
        self, patient: timeline.Patient, label_indices: Set[int]
    ) -> List[List[ColumnValue]]:
        """Return one single-column row per labeled day containing the (raw or
        standardized) age."""
        rows = []
        for day_index, day in enumerate(patient.days):
            if day_index not in label_indices:
                continue
            if self.normalize:
                age_value = (
                    day.age - self.age_statistics.mean()
                ) / self.age_statistics.standard_deviation()
            else:
                age_value = day.age
            rows.append([ColumnValue(0, age_value)])
        return rows

    def to_dict(self) -> Dict[str, Any]:
        return {"age_statistics": self.age_statistics.to_dict()}

    def from_dict(self, data: Mapping[str, Any]) -> None:
        self.age_statistics = utils.OnlineStatistics(data["age_statistics"])

    def needs_training(self) -> bool:
        # Training is only required to gather normalization statistics.
        return self.normalize
class IsIcd10Era(Featurizer):
    """Emits a single boolean column per labeled day: whether that day falls in
    the ICD-10 era (calendar year 2016 or later)."""

    def num_columns(self) -> int:
        return 1

    def transform(
        self, patient: timeline.Patient, label_indices: Set[int]
    ) -> List[List[ColumnValue]]:
        return [
            [ColumnValue(0, day.date.year >= 2016)]
            for day_index, day in enumerate(patient.days)
            if day_index in label_indices
        ]
class CountFeaturizer(Featurizer):
    """
    Produces one column per each diagnosis code, procedure code or prescription code.
    The value in each column is the count of how many times that code appears in the patient record
    up until the prediction time.

    Note: time_bins should be a list optionally ending with None
    Each integer in time_bins represents the end point for a particular bin. A final bin with None represents
    a final bin which enables codes from any point in history.
    """

    def __init__(
        self,
        timelines: timeline.TimelineReader,
        ontologies: ontology.OntologyReader,
        rollup: bool = False,
        exclusion_codes: Optional[List[int]] = None,
        time_bins: Optional[List[Optional[int]]] = None,
    ):
        """Create the featurizer.

        Args:
            timelines: Accepted for interface compatibility; not stored.
            ontologies: Ontology reader providing recorded-date codes and rollups.
            rollup: If True, expand every code into its ontology subwords.
            exclusion_codes: Codes to exclude from featurization. Bug fix:
                previously declared with a mutable default argument ([]);
                it now defaults to None (treated as empty), which is
                backward-compatible for all callers.
            time_bins: Optional time-bin boundaries in days (see class docstring).
        """
        self.patient_codes: utils.Dictionary[int] = utils.Dictionary()
        self.recorded_date_codes = set(ontologies.get_recorded_date_codes())
        self.exclusion_codes = (
            set(exclusion_codes) if exclusion_codes is not None else set()
        )
        self.time_bins = time_bins
        self.ontologies = ontologies
        self.rollup = rollup

    def get_codes(self, day: timeline.PatientDay) -> Iterator[int]:
        """Yield the (possibly rolled-up) feature codes for a single day."""
        for code in day.observations:
            if (code in self.recorded_date_codes) and (
                code not in self.exclusion_codes
            ):
                if self.rollup:
                    yield from self.ontologies.get_subwords(code)
                else:
                    yield code

    def train(self, patient: timeline.Patient, label_indices: Set[int]) -> None:
        """Register every observed code so it gets a stable column index."""
        for day in patient.days:
            for code in self.get_codes(day):
                self.patient_codes.add(code)

    def num_columns(self) -> int:
        if self.time_bins is None:
            return len(self.patient_codes)
        else:
            # One full copy of the code columns per time bin.
            return len(self.time_bins) * len(self.patient_codes)

    def transform(
        self, patient: timeline.Patient, label_indices: Set[int]
    ) -> List[List[ColumnValue]]:
        """Return, for each labeled day, the code counts accumulated up to and
        including that day (optionally split across time bins)."""
        all_columns = []
        if self.time_bins is None:
            # Simple mode: a single running count per code over all history.
            current_codes: Dict[int, int] = defaultdict(int)
            for i, day in enumerate(patient.days):
                for code in self.get_codes(day):
                    if code in self.patient_codes:
                        current_codes[self.patient_codes.transform(code)] += 1
                if i in label_indices:
                    all_columns.append(
                        [
                            ColumnValue(column, count)
                            for column, count in current_codes.items()
                        ]
                    )
        else:
            # Binned mode: codes age from bin i into bin i+1 once they are
            # older than time_bins[i] days.
            # NOTE(review): this assumes time_bins ends with None (a catch-all
            # bin); a list ending in a finite bin would index codes_per_bin
            # past the end when codes age out - confirm callers always pass a
            # trailing None.
            codes_per_bin: Dict[int, Deque[Tuple[int, datetime.date]]] = {
                i: deque() for i in range(len(self.time_bins))
            }
            code_counts_per_bin: Dict[int, Dict[int, int]] = {
                i: defaultdict(int) for i in range(len(self.time_bins))
            }
            for day_index, day in enumerate(patient.days):
                python_date = datetime.date(
                    day.date.year, day.date.month, day.date.day
                )
                # New codes always enter the most recent bin (bin 0).
                for code in self.get_codes(day):
                    if code in self.patient_codes:
                        codes_per_bin[0].append((code, python_date))
                        code_counts_per_bin[0][code] += 1
                for i, max_time in enumerate(self.time_bins):
                    if max_time is None:
                        # This means that this bin accepts everything
                        continue
                    while len(codes_per_bin[i]) > 0:
                        next_code, next_date = codes_per_bin[i][0]
                        if (python_date - next_date).days <= max_time:
                            break
                        else:
                            # Too old for this bin - move it to the next one.
                            codes_per_bin[i + 1].append(
                                codes_per_bin[i].popleft()
                            )
                            code_counts_per_bin[i][next_code] -= 1
                            if code_counts_per_bin[i][next_code] == 0:
                                del code_counts_per_bin[i][next_code]
                            code_counts_per_bin[i + 1][next_code] += 1
                if day_index in label_indices:
                    all_columns.append(
                        [
                            ColumnValue(
                                self.patient_codes.transform(code)
                                + i * len(self.patient_codes),
                                count,
                            )
                            for i in range(len(self.time_bins))
                            for code, count in code_counts_per_bin[i].items()
                        ]
                    )
        return all_columns

    def to_dict(self) -> Dict[str, Any]:
        return {"patient_codes": self.patient_codes.to_dict()}

    def from_dict(self, data: Mapping[str, Any]) -> None:
        self.patient_codes = utils.Dictionary(data["patient_codes"])

    def needs_training(self) -> bool:
        return True
class BinaryFeaturizer(CountFeaturizer):
    """
    Identical to CountFeaturizer except that each observed code contributes an
    indicator value of 1 instead of its running count.
    """

    def transform(
        self, patient: timeline.Patient, label_indices: Set[int]
    ) -> List[List[ColumnValue]]:
        rows = []
        # Insertion-ordered map of column index -> 1 for every code seen so far.
        seen_columns: Dict[int, int] = {}
        for day_index, day in enumerate(patient.days):
            for code in self.get_codes(day):
                if code in self.patient_codes:
                    seen_columns[self.patient_codes.transform(code)] = 1
            if day_index in label_indices:
                rows.append(
                    [
                        ColumnValue(column, value)
                        for column, value in seen_columns.items()
                    ]
                )
        return rows
class LabelerDerivedFeaturizer(Featurizer):
    """Exposes another labeler's per-day labels as a single feature column."""

    def __init__(self, label: labeler.Labeler):
        self.label = label

    def num_columns(self) -> int:
        return 1

    def transform(
        self, patient: timeline.Patient, label_indices: Set[int]
    ) -> List[List[ColumnValue]]:
        # Index the derived labeler's output by day; a labeled day missing
        # from this map raises KeyError, as before.
        labels_by_day = {
            derived.day_index: derived.is_positive
            for derived in self.label.label(patient)
        }
        return [
            [ColumnValue(0, labels_by_day[day_index])]
            for day_index in range(len(patient.days))
            if day_index in label_indices
        ]
class ConstantValueFeaturizer(Featurizer):
    """
    This featurizer returns a constant value for each item.
    It has only one column.
    """

    def __init__(self, value: float):
        self.value = value

    def num_columns(self) -> int:
        return 1

    def transform(
        self, patient: timeline.Patient, label_indices: Set[int]
    ) -> List[List[ColumnValue]]:
        return [
            [ColumnValue(0, self.value)]
            for day_index in range(len(patient.days))
            if day_index in label_indices
        ]
class PreprocessedFeaturizer(Featurizer):
    """Serves precomputed per-(patient, day) values from a lookup table as a
    single feature column."""

    def __init__(self, value_map: Mapping[Tuple[int, int], float]):
        self.value_map = value_map

    def num_columns(self) -> int:
        return 1

    def transform(
        self, patient: timeline.Patient, label_indices: Set[int]
    ) -> List[List[ColumnValue]]:
        return [
            [ColumnValue(0, self.value_map[(patient.patient_id, day_index)])]
            for day_index in range(len(patient.days))
            if day_index in label_indices
        ]
class NumericObservationWithValueFeaturizer(Featurizer):
    """
    This featurizer transforms numeric lab values into binned counts.

    The basic idea is that we do a pass over the training data to compute percentiles for the values and then
    we use those percentiles to create bins for each lab.
    """

    def __init__(
        self,
        timelines: timeline.TimelineReader,
        ontologies: ontology.OntologyReader,
        min_labs_per_bin: int = 1,
        num_bins: int = 10,
    ):
        # min_labs_per_bin: bins with fewer training observations than this
        # are merged into their neighbor; num_bins: target bin count per code.
        self.recorded_date_codes = set(ontologies.get_recorded_date_codes())
        self.observedNumericValues: Dict[int, List[float]] = defaultdict(list)
        self.min_labs_per_bin = min_labs_per_bin
        self.num_bins = num_bins

    def train(self, patient: timeline.Patient, label_indices: Set[int]) -> None:
        """Collect every numeric observation value per code for later binning."""
        for day in patient.days:
            for codeWithValue in day.observations_with_values:
                if codeWithValue.code in self.recorded_date_codes:
                    # Only numeric values participate; text values are ignored.
                    if not codeWithValue.is_text:
                        self.observedNumericValues[codeWithValue.code].append(
                            codeWithValue.numeric_value
                        )

    def needs_training(self) -> bool:
        return True

    def get_percentile(self, item: float, percentiles: List[float]) -> int:
        """Get the index for the given percentiles.

        Note: There is one bin for each value in percentiles that starts at that value
        """
        for i, p in enumerate(percentiles):
            if item < p:
                return i - 1
        return len(percentiles) - 1

    def finalize_training(self) -> None:
        """Compute per-code bin boundaries from the collected values, merging
        under-populated bins, and assign each surviving bin a column index."""
        self.code_numeric_dictionary = {}
        self.next_index = 0
        for code, values in self.observedNumericValues.items():
            values.sort()
            # First boundary is -inf so every value falls into some bin.
            percentiles = [-float("inf")]
            for i in range(self.num_bins - 1):
                next_value = values[
                    min(
                        round((len(values) - 1) * (i + 1) / self.num_bins),
                        len(values) - 1,
                    )
                ]
                percentiles.append(next_value)
            # Count how many training values land in each candidate bin.
            counts = [0 for _ in range(len(percentiles))]
            for item in values:
                counts[self.get_percentile(item, percentiles)] += 1
            # Merge bins with too few observations into the following bin:
            # current_low remembers the start of a run of skipped bins.
            filtered_percentiles = []
            current_low: Optional[float] = None
            for i, p in enumerate(percentiles):
                if counts[i] >= self.min_labs_per_bin:
                    if current_low is not None:
                        filtered_percentiles.append(current_low)
                        current_low = None
                    else:
                        filtered_percentiles.append(p)
                elif counts[i] < self.min_labs_per_bin:
                    # We are skipping this one as there are too few counts
                    if current_low is None:
                        current_low = p
                    if (i + 1) < len(percentiles):
                        counts[i + 1] += counts[i]
            # No bin survived the filter: drop the code entirely.
            if len(filtered_percentiles) == 0:
                continue
            indices_for_percentiles = list(
                range(
                    self.next_index, self.next_index + len(filtered_percentiles)
                )
            )
            self.next_index += len(filtered_percentiles)
            self.code_numeric_dictionary[code] = (
                filtered_percentiles,
                indices_for_percentiles,
            )

    def num_columns(self) -> int:
        return self.next_index

    def transform(
        self, patient: timeline.Patient, label_indices: Set[int]
    ) -> List[List[ColumnValue]]:
        """Return, for each labeled day, running counts of how many numeric
        observations fell into each (code, value-bin) column so far."""
        all_columns = []
        current_codes: Dict[int, int] = defaultdict(int)
        for i, day in enumerate(patient.days):
            for codeWithValue in day.observations_with_values:
                if codeWithValue.code in self.code_numeric_dictionary:
                    if not codeWithValue.is_text:
                        (
                            percentiles,
                            indices_for_percentiles,
                        ) = self.code_numeric_dictionary[codeWithValue.code]
                        offset = self.get_percentile(
                            codeWithValue.numeric_value, percentiles
                        )
                        current_codes[indices_for_percentiles[offset]] += 1
            if i in label_indices:
                all_columns.append(
                    [
                        ColumnValue(column, count)
                        for column, count in current_codes.items()
                    ]
                )
        return all_columns

    def to_dict(self) -> Dict[str, Any]:
        return {
            "next_index": self.next_index,
            "code_numeric_dictionary": list(
                self.code_numeric_dictionary.items()
            ),
        }

    def from_dict(self, data: Mapping[str, Any]) -> None:
        self.next_index = data["next_index"]
        self.code_numeric_dictionary = {
            code: values for code, values in data["code_numeric_dictionary"]
        }
| [
"collections.namedtuple",
"collections.deque",
"numpy.array",
"collections.defaultdict",
"datetime.date",
"json.load"
] | [((396, 442), 'collections.namedtuple', 'namedtuple', (['"""ColumnValue"""', "['column', 'value']"], {}), "('ColumnValue', ['column', 'value'])\n", (406, 442), False, 'from collections import defaultdict, deque, namedtuple\n'), ((6342, 6374), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.float32'}), '(data, dtype=np.float32)\n', (6350, 6374), True, 'import numpy as np\n'), ((6393, 6426), 'numpy.array', 'np.array', (['indices'], {'dtype': 'np.int32'}), '(indices, dtype=np.int32)\n', (6401, 6426), True, 'import numpy as np\n'), ((6444, 6476), 'numpy.array', 'np.array', (['indptr'], {'dtype': 'np.int32'}), '(indptr, dtype=np.int32)\n', (6452, 6476), True, 'import numpy as np\n'), ((17250, 17266), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (17261, 17266), False, 'from collections import defaultdict, deque, namedtuple\n'), ((20214, 20231), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (20225, 20231), False, 'from collections import defaultdict, deque, namedtuple\n'), ((23364, 23380), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (23375, 23380), False, 'from collections import defaultdict, deque, namedtuple\n'), ((6669, 6710), 'numpy.array', 'np.array', (['result_labels'], {'dtype': 'np.float32'}), '(result_labels, dtype=np.float32)\n', (6677, 6710), True, 'import numpy as np\n'), ((6724, 6761), 'numpy.array', 'np.array', (['patient_ids'], {'dtype': 'np.int32'}), '(patient_ids, dtype=np.int32)\n', (6732, 6761), True, 'import numpy as np\n'), ((6775, 6820), 'numpy.array', 'np.array', (['patient_day_indices'], {'dtype': 'np.int32'}), '(patient_day_indices, dtype=np.int32)\n', (6783, 6820), True, 'import numpy as np\n'), ((7459, 7472), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (7468, 7472), False, 'import json\n'), ((13923, 13939), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (13934, 13939), False, 'from collections import defaultdict, deque, namedtuple\n'), 
((14559, 14566), 'collections.deque', 'deque', ([], {}), '()\n', (14564, 14566), False, 'from collections import defaultdict, deque, namedtuple\n'), ((14700, 14716), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (14711, 14716), False, 'from collections import defaultdict, deque, namedtuple\n'), ((14857, 14915), 'datetime.date', 'datetime.date', (['day.date.year', 'day.date.month', 'day.date.day'], {}), '(day.date.year, day.date.month, day.date.day)\n', (14870, 14915), False, 'import datetime\n')] |
from datetime import datetime
import logging
import os
import sys
from typing import List
import requests
# Log with timestamps so CI output can be correlated with deploy events.
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s', level=logging.INFO)
# Environment variables that must be present before a deploy can run.
required_environment_variables: List[str] = [
    'RANCHER_BEARER_TOKEN',
    'RANCHER_CLUSTER_ID',
    'RANCHER_NAMESPACE',
    'RANCHER_PROJECT_ID',
    'RANCHER_URL',
    'RANCHER_WORKLOADS',
    'RANCHER_DOCKER_REGISTRY',
    'UPDATE_IMAGES',  # image reference to deploy, e.g. hub.docker.com/test/get:1a1d2547
]
# Collect every missing variable so they can all be reported in one message.
missing_environment_variables: List[str] = []
for required_environment_variable in required_environment_variables:
    if required_environment_variable not in os.environ:
        missing_environment_variables.append(required_environment_variable)
if len(missing_environment_variables) > 0:
    logging.error("These environment variables are required but not set: {missing_environment_variables}".format(
        missing_environment_variables=', '.join(missing_environment_variables),
    ))
    sys.exit(1)
# Bind the validated environment into module-level settings.
rancher_bearer_token = os.environ['RANCHER_BEARER_TOKEN']
rancher_cluster_id = os.environ['RANCHER_CLUSTER_ID']
rancher_namespace = os.environ['RANCHER_NAMESPACE']
rancher_project_id = os.environ['RANCHER_PROJECT_ID']
rancher_url = os.environ['RANCHER_URL']
rancher_workloads = os.environ['RANCHER_WORKLOADS']
update_image = os.environ["UPDATE_IMAGES"]
# NOTE(review): GITHUB_SHA is read here but is not in the required list above,
# so a missing value raises KeyError instead of the friendly error — confirm.
github_sha = os.environ["GITHUB_SHA"]
def send_msg_to_slack(msg):
    """Post *msg* to the Slack webhook configured in ``SLACK_API``.

    A no-op when the ``SLACK_API`` environment variable is unset or empty,
    so deploys work without Slack notifications configured.
    """
    webhook = os.environ.get("SLACK_API", "")
    if not webhook:
        return
    logging.info("slack_api: %s", webhook)
    resp = requests.post(webhook, json={"text": msg})
    # BUG FIX: the original called logging.info("slack return :", resp.text),
    # passing resp.text as a %-format argument with no placeholder, which made
    # logging raise (and swallow) a formatting error instead of logging it.
    logging.info("slack return: %s", resp.text)
# If an internal Docker registry is configured, rewrite the image reference to
# pull through that registry, tagging with the short (7-char) commit SHA.
rancher_docker_registry = os.environ.get("RANCHER_DOCKER_REGISTRY", "")
if rancher_docker_registry:
    update_image = "{}/{}:sha-{}".format(rancher_docker_registry, update_image, github_sha[:7])
logging.info("rancher要更新的镜像地址是:{}".format(update_image))
send_msg_to_slack("镜像构建完成, 即将要更新的镜像名为: %s" % update_image)
def generate_workload_url(r_workload: str) -> str:
    """Return the Rancher v3 API URL for the given deployment workload."""
    path = (
        '/v3/project/' + rancher_cluster_id + ':' + rancher_project_id +
        '/workloads/deployment:' + rancher_namespace + ':' + r_workload
    )
    return rancher_url + path
# Bearer-token authorization header attached to every Rancher API request.
headers = {
    'Authorization': 'Bearer {rancher_bearer_token}'.format(
        rancher_bearer_token=rancher_bearer_token,
    ),
}
# For each workload: fetch the current deployment spec, point every container
# at the new image, bump the redeploy timestamp annotation, and PUT it back.
for rancher_workload in rancher_workloads.split(','):
    url = generate_workload_url(rancher_workload)
    response_get = requests.get(
        headers={
            **headers
        },
        url=url,
        verify=False,
    )
    response_get.raise_for_status()
    workload = response_get.json()
    # Touching this annotation is what triggers Rancher to redeploy.
    workload['annotations']['cattle.io/timestamp'] = datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
    for index, i in enumerate(workload["containers"]):
        workload["containers"][index]["image"] = update_image
    response_put = requests.put(
        headers={
            **headers,
        },
        json=workload,
        url=url,
        verify=False,
    )
    response_put.raise_for_status()
    logging.info("Workload {rancher_workload} is successfully redeployed.".format(
        rancher_workload=rancher_workload,
    ))
    send_msg_to_slack("%s代码已提交, 更新rancher工作节点成功, 更新的镜像名为: %s" % (rancher_workload, update_image))
| [
"logging.basicConfig",
"requests.post",
"os.environ.get",
"requests.get",
"datetime.datetime.now",
"requests.put",
"sys.exit",
"logging.info"
] | [((114, 208), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s [%(levelname)s] %(message)s"""', 'level': 'logging.INFO'}), "(format='%(asctime)s [%(levelname)s] %(message)s', level\n =logging.INFO)\n", (133, 208), False, 'import logging\n'), ((1791, 1836), 'os.environ.get', 'os.environ.get', (['"""RANCHER_DOCKER_REGISTRY"""', '""""""'], {}), "('RANCHER_DOCKER_REGISTRY', '')\n", (1805, 1836), False, 'import os\n'), ((1022, 1033), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1030, 1033), False, 'import sys\n'), ((1479, 1510), 'os.environ.get', 'os.environ.get', (['"""SLACK_API"""', '""""""'], {}), "('SLACK_API', '')\n", (1493, 1510), False, 'import os\n'), ((2809, 2865), 'requests.get', 'requests.get', ([], {'headers': '{**headers}', 'url': 'url', 'verify': '(False)'}), '(headers={**headers}, url=url, verify=False)\n', (2821, 2865), False, 'import requests\n'), ((3241, 3312), 'requests.put', 'requests.put', ([], {'headers': '{**headers}', 'json': 'workload', 'url': 'url', 'verify': '(False)'}), '(headers={**headers}, json=workload, url=url, verify=False)\n', (3253, 3312), False, 'import requests\n'), ((1599, 1657), 'requests.post', 'requests.post', (["os.environ['SLACK_API']"], {'json': "{'text': msg}"}), "(os.environ['SLACK_API'], json={'text': msg})\n", (1612, 1657), False, 'import requests\n'), ((1667, 1708), 'logging.info', 'logging.info', (['"""slack return :"""', 'resp.text'], {}), "('slack return :', resp.text)\n", (1679, 1708), False, 'import logging\n'), ((3056, 3070), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3068, 3070), False, 'from datetime import datetime\n')] |
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QWidget, QLabel
from PyQt5.QtCore import Qt
from src.resources.base_containers import BaseVContainer
from src.backend.styling import load_stylesheet
from src.backend.config import ConfigFunctions
class SnowReport(BaseVContainer):
    """Vertical container showing a "Today's Weather" header above report text."""
    label: QLabel = None   # header label
    report: QLabel = None  # word-wrapped report body
    def __init__(self, parent: QWidget = None, report: str = None):
        super(SnowReport, self).__init__(parent)
        self.initUI(report=report)
        self._add_stylesheet_info()
    def initUI(self, report: str):
        # NOTE(review): self.config is never assigned in this class —
        # presumably provided by BaseVContainer; confirm against the base.
        self.label = QLabel("Today's Weather")
        self.label.setFixedHeight(int(self.config.get_height() * 0.05))
        self.report = QLabel(report)
        self.report.setWordWrap(True)
        self.report.setFixedHeight(int(self.config.get_height() * 0.20))
        self.layout.addWidget(self.label, alignment=Qt.AlignTop)
        self.layout.addWidget(self.report, alignment=Qt.AlignBottom)
    def _add_stylesheet_info(self):
        # Object names hook the widgets into the selectors in weather_table.qss.
        self.label.setObjectName('todayWeatherReportLabel')
        self.report.setObjectName('todayWeatherReportText')
        self.setStyleSheet(load_stylesheet('weather_table.qss'))
class CurrentIcon(QWidget):
    """Widget that displays a square weather icon loaded from a file path."""
    pic_map: QPixmap = None
    icon: QLabel = None
    icon_path: str = None
    def __init__(self, parent: QWidget = None, weather_icon_path: str = None):
        super(CurrentIcon, self).__init__(parent)
        self.icon_path = weather_icon_path
        self.initUI()
    def initUI(self):
        # Bare label; the pixmap is attached later by setIcon().
        self.icon = QLabel(self)
    def setIcon(self):
        # Scale to a square using the widget's width for both dimensions.
        side = self.width()
        self.pic_map = QPixmap(self.icon_path).scaled(side, side)
        self.icon.setPixmap(self.pic_map)
class CurrentTemp(QLabel):
    """Label showing the current temperature, styled via weather_table.qss."""
    config: ConfigFunctions = None
    def __init__(self, parent=None, cur_temp=''):
        super(CurrentTemp, self).__init__(parent)
        self.config = ConfigFunctions()
        self.initUI(cur_temp=cur_temp)
    def initUI(self, cur_temp):
        self.setText(cur_temp)
        self.setObjectName('curWeatherTemp')
        # Fixed height is 22% of the configured window height.
        fixed_height = int(self.config.get_height() * 0.22)
        self.setFixedHeight(fixed_height)
        stylesheet = load_stylesheet('weather_table.qss')
        self.setStyleSheet(stylesheet)
| [
"PyQt5.QtGui.QPixmap",
"PyQt5.QtWidgets.QLabel",
"src.backend.config.ConfigFunctions",
"src.backend.styling.load_stylesheet"
] | [((589, 614), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['"""Today\'s Weather"""'], {}), '("Today\'s Weather")\n', (595, 614), False, 'from PyQt5.QtWidgets import QWidget, QLabel\n'), ((709, 723), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['report'], {}), '(report)\n', (715, 723), False, 'from PyQt5.QtWidgets import QWidget, QLabel\n'), ((1537, 1549), 'PyQt5.QtWidgets.QLabel', 'QLabel', (['self'], {}), '(self)\n', (1543, 1549), False, 'from PyQt5.QtWidgets import QWidget, QLabel\n'), ((1919, 1936), 'src.backend.config.ConfigFunctions', 'ConfigFunctions', ([], {}), '()\n', (1934, 1936), False, 'from src.backend.config import ConfigFunctions\n'), ((1153, 1189), 'src.backend.styling.load_stylesheet', 'load_stylesheet', (['"""weather_table.qss"""'], {}), "('weather_table.qss')\n", (1168, 1189), False, 'from src.backend.styling import load_stylesheet\n'), ((2178, 2214), 'src.backend.styling.load_stylesheet', 'load_stylesheet', (['"""weather_table.qss"""'], {}), "('weather_table.qss')\n", (2193, 2214), False, 'from src.backend.styling import load_stylesheet\n'), ((1597, 1620), 'PyQt5.QtGui.QPixmap', 'QPixmap', (['self.icon_path'], {}), '(self.icon_path)\n', (1604, 1620), False, 'from PyQt5.QtGui import QPixmap\n')] |
# Generated by Django 3.0.2 on 2020-01-12 13:11
from django.db import migrations
# Pinned CDN asset versions seeded by this data migration.
bootstrap_version = "4.4.1"
jquery_version = "3.3.1"
popover_version = "1.14.3"
fontawesome_version = "4.7.0"
# One dict per IncludeBootstrap row. "url" is the concrete pinned URL,
# "url_pattern" the template used to rebuild it for other versions, and
# "active" marks the variant served by default.
# NOTE: the "integrity" hashes were redacted to "<KEY>" upstream.
urls_settings = [
    {
        "library": 4,
        "version": bootstrap_version,
        "url": f"https://stackpath.bootstrapcdn.com/bootstrap/{bootstrap_version}/css/bootstrap.min.css",
        "url_pattern": "https://stackpath.bootstrapcdn.com/bootstrap/{version}/css/bootstrap.min.css",
        "integrity": "<KEY>",
        "active": True
    },
    {
        "library": 1,
        "version": bootstrap_version,
        "url": f"https://stackpath.bootstrapcdn.com/bootstrap/{bootstrap_version}/js/bootstrap.min.js",
        "url_pattern": "https://stackpath.bootstrapcdn.com/bootstrap/{version}/js/bootstrap.min.js",
        "integrity": "<KEY>",
        "active": True
    },
    {
        "library": 1,
        "version": bootstrap_version,
        "url": f"https://stackpath.bootstrapcdn.com/bootstrap/{bootstrap_version}/js/bootstrap.bundle.min.js",
        "url_pattern": "https://stackpath.bootstrapcdn.com/bootstrap/{version}/js/bootstrap.bundle.min.js",
        "integrity": "<KEY>",
        "active": False
    },
    {
        "library": 2,
        "version": jquery_version,
        "url": f"https://code.jquery.com/jquery-{jquery_version}.min.js",
        "url_pattern": "https://code.jquery.com/jquery-{version}.min.js",
        "integrity": "<KEY>",
        "active": True
    },
    {
        "library": 2,
        "version": jquery_version,
        "url": f"https://code.jquery.com//jquery-{jquery_version}.slim.min.js",
        "url_pattern": "https://code.jquery.com//jquery-{version}.slim.min.js",
        "integrity": "<KEY>",
        "active": False
    },
    {
        "library": 3,
        "version": popover_version,
        "url": f"https://cdnjs.cloudflare.com/ajax/libs/popper.js/{popover_version}/umd/popper.min.js",
        "url_pattern": "https://cdnjs.cloudflare.com/ajax/libs/popper.js/{version}/umd/popper.min.js",
        "integrity": "<KEY>",
        "active": True
    },
    {
        "library": 5,
        "version": fontawesome_version,
        "url": f"https://stackpath.bootstrapcdn.com/font-awesome/{fontawesome_version}/css/font-awesome.min.css",
        "url_pattern": "https://stackpath.bootstrapcdn.com/font-awesome/{version}/css/font-awesome.min.css",
        "integrity": "<KEY>",
        "active": True
    }
]
def forwards(apps, schema_editor):
    """Seed IncludeBootstrap rows from the ``urls_settings`` fixture data."""
    IncludeBootstrap = apps.get_model('django_include_bootstrap', 'IncludeBootstrap')
    IncludeBootstrap.objects.bulk_create(
        [IncludeBootstrap(**settings) for settings in urls_settings]
    )
def backwards(apps, schema_editor):
    """Remove every IncludeBootstrap row created by ``forwards``."""
    model = apps.get_model('django_include_bootstrap', 'IncludeBootstrap')
    model.objects.all().delete()
class Migration(migrations.Migration):
    """Data migration that seeds (and can un-seed) the default CDN settings."""
    dependencies = [
        ('django_include_bootstrap', '0001_initial'),
    ]
    operations = [
        migrations.RunPython(forwards, backwards)
    ]
| [
"django.db.migrations.RunPython"
] | [((3042, 3083), 'django.db.migrations.RunPython', 'migrations.RunPython', (['forwards', 'backwards'], {}), '(forwards, backwards)\n', (3062, 3083), False, 'from django.db import migrations\n')] |
#! /usr/bin/python3
#
# Copyright (c) 2019 <NAME> <<EMAIL>>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from usb_2001TC import *
import time
import sys
import fcntl
import os
def toContinue():
    """Prompt the user and return True only for a 'y' or 'Y' answer."""
    answer = input('Continue [yY]? ')
    return answer in ('y', 'Y')
def main():
    """Interactive test menu for the MCC USB-2001-TC thermocouple device."""
    # Initialize the device wrapper; bail out gracefully when no hardware
    # is attached (any failure here is treated as "device not found").
    try:
        usb2001tc = usb_2001TC()
        print("got a device\n")
    except:
        print('No USB-2001TC device found.')
        return
    # Menu loop: each iteration prints the options and dispatches one command.
    # NOTE(review): the menu advertises 'T' (write temperature to file) but no
    # 'T' branch exists below — unhandled keys simply redisplay the menu.
    while True:
        print("\nUSB-2001-TC Testing")
        print("----------------")
        print("Hit 'a' to read AIn.")
        print("Hit 'b' to blink LED.")
        print("Hit 'c' to get calibration slope and offset")
        print("Hit 'C' to get calibration date")
        print("Hit 'd' to set device")
        print("Hit 'i' to get CJC and Analog Input readings")
        print("Hit 'I' to get information about the device")
        print("Hit 'F' to get the CJC reading in degree F")
        print("Hit 'K' to get the CJC reading in degree Kelvin")
        print("Hit 'G' to call get_all")
        print("Hit 'r' to get reset device")
        print("Hit 's' to get serial number")
        print("Hit 'S' to get status")
        print("Hit 't' to get the temperature")
        print("Hit 'T' to write temperature to file")
        print("Hit 'v' to get firmware version")
        print("Hit 'e' to exit.")
        ch = input('\n')
        if ch == 'b':
            count = int(input('Enter number of times to blink: '))
            usb2001tc.Blink(count)
        elif ch == 'a':
            print("AIn = ", usb2001tc.AIn())
        elif ch == 'c':
            print("Calibration data: Slope = ", usb2001tc.getSlope(), " Offset = ", usb2001tc.getOffset())
        elif ch == 'C':
            print('MFG Calibration date: ', usb2001tc.getMFGCAL())
        elif ch == 'd':
            thermo = input("Input Thermocouple type [J,K,R,S,T,N,E,B]: ")
            usb2001tc.sendSensorType(thermo)
            thermo = usb2001tc.getSensorType()
            print("Sensor Type = ", thermo)
        elif ch == 'e':
            # Close the underlying USB handle before exiting the process.
            usb2001tc.udev.close()
            exit(0)
        elif ch == 'i':
            print("CJC = ", usb2001tc.getCJCDegC(), " degree C")
        elif ch == 'F':
            print("CJC = ", usb2001tc.getCJCDegF(), " degree F")
        elif ch == 'K':
            print("CJC = ", usb2001tc.getCJCDegKelvin(), " degree K")
        elif ch == 'G':
            usb2001tc.GetAll()
        elif ch == 'r':
            usb2001tc.Reset()
        elif ch == 's':
            print("Serial No: %s" % usb2001tc.getSerialNumber())
        elif ch == 'S':
            print("Status = %s" % usb2001tc.getStatus())
        elif ch == 'I':
            print("Manufacturer: %s" % usb2001tc.getManufacturer())
            print("Product: %s" % usb2001tc.getProduct())
            print("Serial No: %s" % usb2001tc.getSerialNumber())
        elif ch == 'v':
            print("Firmware version: %s" % usb2001tc.getFirmwareVersion())
        elif ch == 't':
            # put the board in the correct voltage range +/- 73.125mV
            usb2001tc.setVoltageRange(4)
            ch = input("Input Thermocouple type [J,K,R,S,T,N,E,B]: ")
            tc_type = ch
            # Sample the thermocouple once per second for ten seconds.
            for i in range(10):
                temperature = usb2001tc.tc_temperature(tc_type)
                print("Thermocouple type: ", ch, "Temperature = ", temperature, "C ", temperature*9./5. + 32., "F ")
                time.sleep(1)
if __name__ == "__main__":
main()
| [
"time.sleep"
] | [((3827, 3840), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3837, 3840), False, 'import time\n')] |
import itertools
import sys
import tkinter
import tkursed
class ColorCycleExample(tkursed.SimpleTkursedWindow):
    """Demo window that cycles the canvas background through red/green/blue."""

    def __init__(self) -> None:
        super().__init__()
        palette = [
            (255, 0, 0),
            (0, 255, 0),
            (0, 0, 255),
        ]
        self.color_cycle = itertools.cycle(palette)
        self.last = 0

    def handle_tick(self, event: tkinter.Event) -> None:
        # Advance the color on the very first tick and then every 16 ticks.
        tick = self.tkursed.tick
        if tick == 1 or tick - self.last > 16:
            self.last = tick
            self.tkursed.tkursed_state.canvas.background_color = next(self.color_cycle)
            self.tkursed.is_dirty = True
def main() -> int:
    """Run the demo window's main loop; return a process exit status."""
    window = ColorCycleExample()
    window.mainloop()
    return 0
if __name__ == "__main__":
try:
sys.exit(main())
except KeyboardInterrupt:
print("Caught SIGINT", file=sys.stderr)
sys.exit(1)
| [
"itertools.cycle",
"sys.exit"
] | [((201, 257), 'itertools.cycle', 'itertools.cycle', (['[(255, 0, 0), (0, 255, 0), (0, 0, 255)]'], {}), '([(255, 0, 0), (0, 255, 0), (0, 0, 255)])\n', (216, 257), False, 'import itertools\n'), ((885, 896), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (893, 896), False, 'import sys\n')] |
import timm
import torch.nn as nn
import torch
from pytorch_lightning import LightningModule
import torchmetrics
from typing import Dict, Any
class EfficientNetB1(nn.Module):
    """Pretrained EfficientNet-B1 backbone (timm) with a single-logit head."""

    def __init__(self):
        super().__init__()
        # num_classes=0 strips timm's classifier, so the backbone emits
        # pooled feature vectors instead of class scores.
        self.backbone = timm.create_model(
            'efficientnet_b1', pretrained=True, num_classes=0, in_chans=3
        )
        self.head = nn.Linear(self.backbone.num_features, 1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        features = self.backbone(x)
        return self.head(features)
class Model(LightningModule):
    """Lightning module wrapping EfficientNetB1 for binary classification.

    Trains with BCE-with-logits loss and tracks an F1 score per epoch for
    both the training and validation splits.
    """

    def __init__(self):
        super().__init__()
        self._build_model()
        self._build_criterion()
        self._build_metric()

    def _build_model(self):
        self.model = EfficientNetB1()

    def _build_criterion(self):
        # BCEWithLogitsLoss applies the sigmoid internally, so it must be
        # fed raw logits (see _share_step).
        self.criterion = torch.nn.BCEWithLogitsLoss()

    def _build_optimizer(self):
        self.optimizer = torch.optim.Adam(self.parameters(), lr=1e-5)

    def _build_scheduler(self):
        self.scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
            self.optimizer, T_0=20, eta_min=1e-4
        )

    def _build_metric(self):
        self.metric = torchmetrics.F1Score(num_classes=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.model(x)

    def training_step(self, batch, batch_idx):
        loss, pred, labels = self._share_step(batch)
        return {'loss': loss, 'pred': pred.detach(), 'labels': labels.detach()}

    def validation_step(self, batch, batch_idx):
        loss, pred, labels = self._share_step(batch)
        return {'loss': loss, 'pred': pred.detach(), 'labels': labels.detach()}

    def training_epoch_end(self, outputs):
        self._share_epoch_end(outputs, 'train')

    def validation_epoch_end(self, outputs):
        self._share_epoch_end(outputs, 'val')

    def _share_step(self, batch):
        """Run one forward pass; return (loss, sigmoid probabilities, labels)."""
        images, labels = batch
        logits = self.forward(images.float())
        # BUG FIX: the original computed pred = logits.sigmoid() and then fed
        # pred into BCEWithLogitsLoss, applying the sigmoid twice and skewing
        # the loss. The criterion now receives raw logits; the metric still
        # receives probabilities, preserving its semantics.
        loss = self.criterion(logits, labels.float())
        pred = logits.sigmoid()
        return loss, pred, labels

    def _share_epoch_end(self, outputs, mode):
        """Aggregate per-step predictions and log the epoch-level F1 score."""
        preds = torch.cat([out['pred'] for out in outputs])
        labels = torch.cat([out['labels'] for out in outputs])
        metric = self.metric(preds, labels)
        # NOTE(review): this logs the F1 value under the '<mode>_loss' key;
        # the key name looks misnamed but is kept for dashboard compatibility.
        self.logger.experiment.log_metric(f'{mode}_loss', metric)

    def configure_optimizers(self) -> Dict[Any, Any]:
        self._build_optimizer()
        self._build_scheduler()
        return {"optimizer": self.optimizer, "lr_scheduler": self.scheduler}
| [
"timm.create_model",
"torch.nn.Linear",
"torch.nn.BCEWithLogitsLoss",
"torchmetrics.F1Score",
"torch.optim.lr_scheduler.CosineAnnealingWarmRestarts",
"torch.cat"
] | [((253, 338), 'timm.create_model', 'timm.create_model', (['"""efficientnet_b1"""'], {'pretrained': '(True)', 'num_classes': '(0)', 'in_chans': '(3)'}), "('efficientnet_b1', pretrained=True, num_classes=0, in_chans=3\n )\n", (270, 338), False, 'import timm\n'), ((426, 452), 'torch.nn.Linear', 'nn.Linear', (['num_features', '(1)'], {}), '(num_features, 1)\n', (435, 452), True, 'import torch.nn as nn\n'), ((894, 922), 'torch.nn.BCEWithLogitsLoss', 'torch.nn.BCEWithLogitsLoss', ([], {}), '()\n', (920, 922), False, 'import torch\n'), ((1093, 1189), 'torch.optim.lr_scheduler.CosineAnnealingWarmRestarts', 'torch.optim.lr_scheduler.CosineAnnealingWarmRestarts', (['self.optimizer'], {'T_0': '(20)', 'eta_min': '(0.0001)'}), '(self.optimizer, T_0=20,\n eta_min=0.0001)\n', (1145, 1189), False, 'import torch\n'), ((1262, 1297), 'torchmetrics.F1Score', 'torchmetrics.F1Score', ([], {'num_classes': '(1)'}), '(num_classes=1)\n', (1282, 1297), False, 'import torchmetrics\n'), ((2441, 2457), 'torch.cat', 'torch.cat', (['preds'], {}), '(preds)\n', (2450, 2457), False, 'import torch\n'), ((2475, 2492), 'torch.cat', 'torch.cat', (['labels'], {}), '(labels)\n', (2484, 2492), False, 'import torch\n')] |
import unittest
import subprocess
import requests
PORT=8080
class TestHW1(unittest.TestCase):
    """Black-box HTTP tests for the homework server running on localhost."""

    def test1(self):
        url = 'http://localhost:{}/check'.format(PORT)
        res = requests.get(url)
        self.assertEqual(res.text, 'This is a GET request',
                         msg='Incorrect response to GET request to /check endpoint')
        self.assertEqual(res.status_code, 200,
                         msg='Did not return status 200 to GET request to /check endpoint')

    def test2(self):
        url = 'http://localhost:{}/check'.format(PORT)
        res = requests.post(url)
        self.assertEqual(res.text, 'This is a POST request',
                         msg='Incorrect response to POST request to /check endpoint')
        self.assertEqual(res.status_code, 200,
                         msg='Did not return status 200 to POST request to /check endpoint')

    def test3(self):
        url = 'http://localhost:{}/check'.format(PORT)
        res = requests.put(url)
        self.assertEqual(res.status_code, 405,
                         msg='Did not return status 405 to PUT request to /check endpoint')

    def test4(self):
        url = 'http://localhost:{}/hello?name=Peter'.format(PORT)
        res = requests.get(url)
        self.assertEqual(res.text, 'Hello Peter!',
                         msg='Incorrect response to /hello?name=Peter endpoint')

    def test5(self):
        url = 'http://localhost:{}/hello'.format(PORT)
        res = requests.get(url)
        self.assertEqual(res.text, 'Hello user!',
                         msg='Incorrect response to /hello endpoint')
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| [
"unittest.main"
] | [((1360, 1375), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1373, 1375), False, 'import unittest\n')] |
import lxml
import lxml.html
from collections import defaultdict
import voeventparse as vp
import datetime
import iso8601
from astropy.coordinates import SkyCoord
import astropy.units as u
from fourpisky.voevent import (
create_skeleton_4pisky_voevent,
asassn_alert_substream,
get_stream_ivorn_prefix,
)
from fourpisky.feeds.feedbase import FeedBase
import logging
logger = logging.getLogger(__name__)
# Rows with these ASASSN ids are excised entirely during parsing.
ASSASN_BAD_IDS = [
    'ASASSN-15uh',  # Datestamp has been replaced with junk
    'ASASSN-15co',  # Datestamp has been replaced with junk
    'Comet ASASSN1',  # Moving object
]
# Manual id overrides for rows whose other-id column is malformed,
# keyed by the row's raw timestamp string.
ASASSN_TIMESTAMP_ID_MAP = {
    '2013-09-14.53': 'iPTF13dge',  # Malformed href in other id col.
}
# Rows with timestamps at or before this date are ignored when re-parsing.
ASASSN_EARLIEST_REPARSE_DATE=iso8601.parse_date("2017-10-18")
class AsassnFeed(FeedBase):
    """Feed parser for the ASAS-SN transients listing page.

    Scrapes the HTML table, derives a timestamp-prefixed id for each row,
    and converts rows into 4PiSky VOEvent packets.
    """
    name = "ASASSN webpage"
    url = "http://www.astronomy.ohio-state.edu/asassn/transients.html"
    substream = asassn_alert_substream
    stream_ivorn_prefix = get_stream_ivorn_prefix(substream)
    # Only the first 10kB of the page are hashed for change detection.
    hash_byte_range = (0, 10000)
    hash_cache_path = None
    # VOEvent details:
    text_params_groupname = 'asassn_params'
    url_params_groupname = 'asassn_urls'
    def __init__(self, hash_cache_path=None):
        super(AsassnFeed, self).__init__(hash_cache_path)
    def generate_voevent(self, feed_id):
        """Build a VOEvent packet for the previously-parsed row *feed_id*."""
        rowdict = self.event_id_data_map[feed_id]
        params = rowdict['param']
        urls = rowdict['url']
        stream_id = self.feed_id_to_stream_id(feed_id)
        v = create_skeleton_4pisky_voevent(substream=self.substream,
                                           stream_id=stream_id,
                                           role=vp.definitions.roles.observation,
                                           date=datetime.datetime.utcnow()
                                           )
        vp.add_how(v, references=vp.Reference(uri=self.url))
        v.How.Description = "Parsed from ASASSN listings page by 4PiSky-Bot."
        timestamp_dt = asassn_timestamp_str_to_datetime(
            params[AsassnKeys.detection_timestamp])
        posn_sc = SkyCoord(params['ra'], params['dec'],
                           unit=(u.hourangle, u.deg))
        # Couldn't find a formal analysis of positional accuracy, but
        # http://dx.doi.org/10.1088/0004-637X/788/1/48
        # states the angular resolution as 16 arcseconds, so we'll go with that.
        err_radius_estimate = 16 * u.arcsec
        posn_simple = vp.Position2D(ra=posn_sc.ra.deg,
                                      dec=posn_sc.dec.deg,
                                      err=err_radius_estimate.to(u.deg).value,
                                      units=vp.definitions.units.degrees,
                                      system=vp.definitions.sky_coord_system.utc_icrs_geo,
                                      )
        vp.add_where_when(
            v,
            coords=posn_simple,
            obs_time=timestamp_dt,
            observatory_location=vp.definitions.observatory_location.geosurface)
        # Copy across whichever of the text-valued columns are present.
        asassn_params = [vp.Param(key, params[key]) for key in
                         (AsassnKeys.id_asassn,
                          AsassnKeys.id_other,
                          AsassnKeys.detection_timestamp,
                          AsassnKeys.ra,
                          AsassnKeys.dec,
                          AsassnKeys.spec_class,
                          AsassnKeys.comment,
                          )
                         if key in params
                         ]
        if AsassnKeys.mag_v in params:
            asassn_params.append(
                vp.Param(AsassnKeys.mag_v, params[AsassnKeys.mag_v],
                         unit='mag', ucd="phot.mag",
                         )
            )
        if AsassnKeys.id_other in urls:
            # urls holds (link_text, href) pairs; use the first link's text.
            asassn_params.append(
                vp.Param(AsassnKeys.id_other,
                         urls[AsassnKeys.id_other][0][0])
            )
        asassn_urls = [vp.Param(key, urls[key][0][1]) for key in urls]
        v.What.append(vp.Group(params=asassn_params,
                               name=self.text_params_groupname))
        v.What.append(vp.Group(params=asassn_urls,
                               name=self.url_params_groupname))
        return v
    def event_data_to_event_id(self, event_data):
        """
        Derive a feed-specific identifier for a given event.
        Args:
            event_data: Feed specific datastructure, typically just a dictionary.
        NB feed id should contain timestamp prefix followed by underscore,
        we use this for deduplication.
        (Even if the event details are updated the timestamp should remain the
        same.)
        """
        # OK. Fiddly date-string formatting. Aim here is to get a uniform
        # date-time format so that anything ordered by IVORN will also
        # be date-time ordered. Users can always check the XML content
        # for the original ASSASSN timestamp-string.
        # We parse-and-reformat the date-string to zero-pad the day digit as
        # needed.
        # Finally, we regenerate the 'decimal-days' suffix,
        # fixed at 2 decimal places.
        # (since some earlier events don't have this suffix at all we can't
        # just tokenize it).
        external_id = extract_asassn_id(event_data)
        timestamp_input_string = event_data['param'][
            AsassnKeys.detection_timestamp]
        timestamp_dt = asassn_timestamp_str_to_datetime(
            timestamp_input_string).replace(tzinfo=None)
        uniform_date_str = timestamp_dt.strftime('%Y-%m-%d')
        start_of_day = datetime.datetime(timestamp_dt.year,
                                          timestamp_dt.month,
                                          timestamp_dt.day
                                          )
        # Friday afternoon kludge:
        day_fraction_float = (
            (timestamp_dt - start_of_day).total_seconds() / 3600. / 24.
        )
        # Keep only ".dd" — drop the leading zero of e.g. "0.53".
        day_fraction_str = f"{day_fraction_float:.2f}"[1:]
        feed_id = ''.join((uniform_date_str, day_fraction_str,
                           '_', external_id))
        return feed_id
    def get_ivorn_prefixes_for_duplicate(self, feed_id):
        """
        Determines what a possible duplicate ivorn might be prefixed by.
        For ASASSN - assumes timestamp unchanging even if the
        event gets renamed. We match on the substream + timestamp
        (i.e. everything up to the first underscore in the stream_id).
        """
        stream_id = self.feed_id_to_stream_id(feed_id)
        return [
            self.stream_ivorn_prefix + stream_id.split('_', 1)[0],
        ]
    def parse_content_to_event_data_list(self):
        """Parse the fetched HTML page into a list of per-row event dicts."""
        tree = lxml.html.fromstring(self.content)
        events = transform_pagetree(tree)
        return events
# ==========================================================================
def extract_asassn_id(rowdict):
    """Extract an external identifier (ASASSN or alternate) from a parsed row.

    Falls back through: manual overrides keyed by timestamp, the ASASSN id
    column, the alternate-id text, then the alternate-id link text.

    Raises:
        ValueError: if no identifier can be recovered, or the ASASSN id has
            an unrecognised format.
    """
    params = rowdict['param']
    urls = rowdict['url']
    # Check for known-bad rows, manually resolved:
    timestamp = params[AsassnKeys.detection_timestamp]
    if timestamp in ASASSN_TIMESTAMP_ID_MAP:
        return ASASSN_TIMESTAMP_ID_MAP[timestamp]
    # Now try to parse any vaguely reasonable data
    asassn_id = params.get(AsassnKeys.id_asassn)
    if asassn_id is not None:
        # Accept both the 'ASASSN' and (misspelt) 'ASASN' prefixes seen on page.
        if asassn_id.startswith('ASASSN') or asassn_id.startswith('ASASN'):
            external_id = asassn_id
        else:
            raise ValueError(
                f'Could not extract Id for row- unrecognised id format: {asassn_id}')
    else:
        # Ensure ASASSN ID is not something weird
        assert asassn_id is None
        # Then, look for alt-id
        alt_id_text = params.get(AsassnKeys.id_other)
        alt_id_url = urls.get(AsassnKeys.id_other)
        # Otherwise, check for alt_id text:
        if alt_id_text:
            external_id = alt_id_text.strip()
        elif alt_id_url:
            # alt_id_url is a list of (link_text, href) pairs; take the text.
            first_url_text_href_pair = alt_id_url[0]
            external_id = first_url_text_href_pair[0]
        else:
            cell = rowdict['raw'][asassn_headers_2018.index('ATEL')]
            raise ValueError('Could not extract Id for this row, '
                             'no id found')
    return external_id
def asassn_timestamp_str_to_datetime(timestamp_str):
    """Convert an ASASSN 'YYYY-MM-DD.dd' string into a datetime.

    The optional fractional suffix is interpreted as a fraction of a day.
    """
    date_part, dot, fraction = timestamp_str.partition('.')
    day_fraction = float('0.' + fraction) if dot else 0.0
    return iso8601.parse_date(date_part) + datetime.timedelta(days=day_fraction)
# =======================================================================
# Column headers of the ASAS-SN transients table as of 2018; parsing asserts
# that the live page still matches this layout exactly.
asassn_headers_2018 = (
    'ASAS-SN',
    'Other',
    'ATEL',
    'RA',
    'Dec',
    'Discovery',
    'V/g',
    'SDSS',
    'DSS',
    'Vizier',
    'Spectroscopic Class',
    'Comments'
)
asassn_ncols = len(asassn_headers_2018)
class AsassnKeys():
    """Internal key names used in parsed row dictionaries."""
    # NOTE(review): the value spelling 'id_assasn' looks like a typo but is
    # kept byte-identical — stored data may already use it.
    id_asassn = 'id_assasn'
    id_other = 'id_other'
    atel_url = 'atel_url'
    ra = 'ra'
    dec = 'dec'
    detection_timestamp = 'detection_timestamp'
    mag_v = 'mag_v'
    sdss_url = 'sdss_url'
    dss_url = 'dss_url'
    vizier_url = 'vizier_url'
    spec_class = 'spec_class'
    comment = 'comment'
# Maps the 2018 page headers onto our internal row-dict keys.
asassn_hdr_to_internal_key_map = {
    'ASAS-SN': AsassnKeys.id_asassn,
    'Other': AsassnKeys.id_other,
    'ATEL': AsassnKeys.atel_url,
    'RA': AsassnKeys.ra,
    'Dec': AsassnKeys.dec,
    'Discovery': AsassnKeys.detection_timestamp,
    'V/g': AsassnKeys.mag_v,
    'SDSS': AsassnKeys.sdss_url,
    'DSS': AsassnKeys.dss_url,
    'Vizier': AsassnKeys.vizier_url,
    'Spectroscopic Class': AsassnKeys.spec_class,
    'Comments': AsassnKeys.comment,
}
# The map must cover the headers exactly, in page order.
assert tuple(asassn_hdr_to_internal_key_map.keys()) == asassn_headers_2018
# Columns whose content is only ever a hyperlink (no useful cell text).
asassn_url_only_keys = (
    AsassnKeys.atel_url,
    AsassnKeys.sdss_url,
    AsassnKeys.dss_url,
    AsassnKeys.vizier_url,
)
def extract_etree_cells(tree):
    """Pull the flat list of data cells out of the transients table.

    The page layout is two header ``<tr>`` rows followed by bare cells (the
    data rows are malformed), so everything after the first two children of
    the table element is treated as data.
    """
    table = tree.xpath('//table')[0]
    rows = table.getchildren()
    assert rows[0].tag == 'tr'
    assert rows[1].tag == 'tr'
    assert rows[2].tag != 'tr'
    cells = rows[2:]
    # Sanity checks: column count and header titles must match 2018 layout.
    assert len(cells) % asassn_ncols == 0
    header_titles = tuple(c.text for c in rows[0].getchildren())
    assert header_titles == asassn_headers_2018
    return cells
def asassn_htmlrow_to_dict(cellrow):
    """Convert one table row (a list of cell elements) into a dict.

    Returns ``{'param': {...}, 'url': {...}, 'raw': cellrow}`` where 'param'
    maps internal keys to cell text and 'url' maps internal keys to lists of
    ``(link_text, href)`` pairs.
    """
    param_dict = {}
    url_dict = defaultdict(list)
    for idx, col_hdr in enumerate(asassn_headers_2018):
        param_key = asassn_hdr_to_internal_key_map[col_hdr]
        elt = cellrow[idx]
        # BUG FIX: the original tested ``col_hdr in asassn_url_only_keys``,
        # but that tuple holds *internal* keys (e.g. 'atel_url') while
        # col_hdr is the page header (e.g. 'ATEL'), so the guard never
        # matched and text was stored even for URL-only columns.
        if elt.text and param_key not in asassn_url_only_keys:
            param_dict[param_key] = elt.text.strip()
        for child in elt.getchildren():
            if 'href' in child.attrib:
                url_dict[param_key].append(
                    (child.text, child.attrib['href'])
                )
    # Delete any entries which are merely placeholders, e.g. '-----'.
    trimmed_params = {}
    for k, v in param_dict.items():
        if not len(v.strip().replace('-', '')):
            continue  # Skip this one if it's a '------' style placeholder
        trimmed_params[k] = v
    return {'param': trimmed_params,
            'url': url_dict,
            'raw': cellrow
            }
def transform_pagetree(tree):
    """
    Restructure an array of cells into a list of dictionaries.

    Since parsing to this stage is robust, we also perform bad-row excision
    here: known-bad ids are dropped, as are rows older than the earliest
    re-parse date.
    """
    cells = extract_etree_cells(tree)
    cellrows = []
    # Stride through cells at rowlength inferred by ncols
    for row_idx, _ in enumerate(cells[::asassn_ncols]):
        # Select all cells in current stride, create list representing row
        row = [c for c in cells[
               asassn_ncols * row_idx:asassn_ncols * row_idx + asassn_ncols]]
        cellrows.append(row)
    events = []
    for cr in cellrows:
        event_dict = asassn_htmlrow_to_dict(cr)
        row_id = event_dict['param'].get(AsassnKeys.id_asassn)
        if row_id in ASSASN_BAD_IDS:
            # Lazy %-style args: formatting is deferred until the record is emitted.
            logger.warning('Removed bad ASASSN row with ID %s', row_id)
            continue
        try:
            row_timestamp = asassn_timestamp_str_to_datetime(
                event_dict['param'].get(AsassnKeys.detection_timestamp)
            )
            if not row_timestamp > ASASSN_EARLIEST_REPARSE_DATE:
                continue
            events.append(event_dict)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # are not routed through the parse-error log before re-raising.
            logger.exception('Error parsing rowdict: %s', event_dict)
            raise
    return events
| [
"logging.getLogger",
"datetime.datetime",
"datetime.datetime.utcnow",
"lxml.html.fromstring",
"iso8601.parse_date",
"astropy.coordinates.SkyCoord",
"voeventparse.Group",
"fourpisky.voevent.get_stream_ivorn_prefix",
"voeventparse.add_where_when",
"voeventparse.Reference",
"voeventparse.Param",
... | [((388, 415), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (405, 415), False, 'import logging\n'), ((726, 758), 'iso8601.parse_date', 'iso8601.parse_date', (['"""2017-10-18"""'], {}), "('2017-10-18')\n", (744, 758), False, 'import iso8601\n'), ((952, 986), 'fourpisky.voevent.get_stream_ivorn_prefix', 'get_stream_ivorn_prefix', (['substream'], {}), '(substream)\n', (975, 986), False, 'from fourpisky.voevent import create_skeleton_4pisky_voevent, asassn_alert_substream, get_stream_ivorn_prefix\n'), ((10797, 10814), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (10808, 10814), False, 'from collections import defaultdict\n'), ((2076, 2140), 'astropy.coordinates.SkyCoord', 'SkyCoord', (["params['ra']", "params['dec']"], {'unit': '(u.hourangle, u.deg)'}), "(params['ra'], params['dec'], unit=(u.hourangle, u.deg))\n", (2084, 2140), False, 'from astropy.coordinates import SkyCoord\n'), ((2817, 2953), 'voeventparse.add_where_when', 'vp.add_where_when', (['v'], {'coords': 'posn_simple', 'obs_time': 'timestamp_dt', 'observatory_location': 'vp.definitions.observatory_location.geosurface'}), '(v, coords=posn_simple, obs_time=timestamp_dt,\n observatory_location=vp.definitions.observatory_location.geosurface)\n', (2834, 2953), True, 'import voeventparse as vp\n'), ((5600, 5674), 'datetime.datetime', 'datetime.datetime', (['timestamp_dt.year', 'timestamp_dt.month', 'timestamp_dt.day'], {}), '(timestamp_dt.year, timestamp_dt.month, timestamp_dt.day)\n', (5617, 5674), False, 'import datetime\n'), ((6710, 6744), 'lxml.html.fromstring', 'lxml.html.fromstring', (['self.content'], {}), '(self.content)\n', (6730, 6744), False, 'import lxml\n'), ((8750, 8778), 'iso8601.parse_date', 'iso8601.parse_date', (['date_str'], {}), '(date_str)\n', (8768, 8778), False, 'import iso8601\n'), ((3024, 3050), 'voeventparse.Param', 'vp.Param', (['key', 'params[key]'], {}), '(key, params[key])\n', (3032, 3050), True, 'import voeventparse as 
vp\n'), ((3942, 3972), 'voeventparse.Param', 'vp.Param', (['key', 'urls[key][0][1]'], {}), '(key, urls[key][0][1])\n', (3950, 3972), True, 'import voeventparse as vp\n'), ((4013, 4076), 'voeventparse.Group', 'vp.Group', ([], {'params': 'asassn_params', 'name': 'self.text_params_groupname'}), '(params=asassn_params, name=self.text_params_groupname)\n', (4021, 4076), True, 'import voeventparse as vp\n'), ((4131, 4191), 'voeventparse.Group', 'vp.Group', ([], {'params': 'asassn_urls', 'name': 'self.url_params_groupname'}), '(params=asassn_urls, name=self.url_params_groupname)\n', (4139, 4191), True, 'import voeventparse as vp\n'), ((1735, 1761), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ([], {}), '()\n', (1759, 1761), False, 'import datetime\n'), ((1841, 1867), 'voeventparse.Reference', 'vp.Reference', ([], {'uri': 'self.url'}), '(uri=self.url)\n', (1853, 1867), True, 'import voeventparse as vp\n'), ((3580, 3665), 'voeventparse.Param', 'vp.Param', (['AsassnKeys.mag_v', 'params[AsassnKeys.mag_v]'], {'unit': '"""mag"""', 'ucd': '"""phot.mag"""'}), "(AsassnKeys.mag_v, params[AsassnKeys.mag_v], unit='mag', ucd='phot.mag'\n )\n", (3588, 3665), True, 'import voeventparse as vp\n'), ((3817, 3879), 'voeventparse.Param', 'vp.Param', (['AsassnKeys.id_other', 'urls[AsassnKeys.id_other][0][0]'], {}), '(AsassnKeys.id_other, urls[AsassnKeys.id_other][0][0])\n', (3825, 3879), True, 'import voeventparse as vp\n')] |
"""
Copyright (c) 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import argparse
def config_args_parse(version, argv=None):
    """
    Build the CLI parser and parse the program arguments.

    Args:
        version (str): Program version string reported by the ``-v`` flag.
        argv (list of str, optional): Argument list to parse. When None
            (the default) argparse falls back to ``sys.argv[1:]``, which
            preserves the original behaviour for existing callers and makes
            the function testable.

    Returns:
        argparse.Namespace: The parsed arguments.

    Raises:
        ValueError: If ``version`` is empty/falsy.
        FileNotFoundError: If the path supplied with ``-P`` does not exist.
    """
    # check if was provided the program version
    if not version:
        raise ValueError('It was not provided a valid version')
    # configure the ArgumentParser
    parser = argparse.ArgumentParser(
        description='Process parquet file data', epilog='Enjoy the program!')
    # argparse reads this attribute when an argument uses action='version'
    parser.version = version
    # configure supported arguments
    parser.add_argument('-P',
                        metavar='--path',
                        type=str,
                        action='store',
                        help='the path of parquet file')
    parser.add_argument('-H',
                        metavar='--head',
                        type=int,
                        required=False,
                        choices=range(1, 200),
                        action='store',
                        help='the numbers of the first rows to be returned. The default value is 5 and the maximum accepted is 199 and this is the default operation selected by program if no one is provided.')
    parser.add_argument('-T',
                        metavar='--tail',
                        type=int,
                        required=False,
                        choices=range(1, 200),
                        action='store',
                        help='the numbers of the last rows to be returned. The maximum number accepted is 199')
    parser.add_argument('-D',
                        metavar='--drop',
                        type=int,
                        required=False,
                        choices=range(1, 200),
                        action='store',
                        help='the numbers of the first rows to be dropped. The maximum number accepted is 199')
    parser.add_argument('-SC',
                        type=str,
                        nargs="+",
                        action="store",
                        help=' List of selected columns to be returned separated by space',
                        required=False)
    parser.add_argument('-C',
                        help='Get total rows',
                        action='store_true')
    parser.add_argument('-verbose',
                        help='enable verbose mode',
                        action='store_true')
    parser.add_argument('-v',
                        action='version',
                        help='shows the app version')
    # parse received arguments (argv=None -> argparse uses sys.argv[1:])
    args = parser.parse_args(argv)
    parquet_file_path = args.P
    # NOTE(review): args.P is None when -P is omitted, and
    # os.path.exists(None) raises TypeError -- confirm whether -P should be
    # declared required=True.
    # check if the provided path exists
    if not is_valid_file(parquet_file_path):
        raise FileNotFoundError('the provided file ' +
                                parquet_file_path + ' does not exists')
    return args


def is_valid_file(path):
    """
    Check if the provided path exists.

    Args:
        path (str): Path of the file.

    Returns:
        bool: True if the file exists.
    """
    return os.path.exists(path)
| [
"os.path.exists",
"argparse.ArgumentParser"
] | [((906, 1004), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process parquet file data"""', 'epilog': '"""Enjoy the program!"""'}), "(description='Process parquet file data', epilog=\n 'Enjoy the program!')\n", (929, 1004), False, 'import argparse\n'), ((3578, 3598), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (3592, 3598), False, 'import os\n')] |
#!/usr/bin/env python
"""Render a five-axis spider (radar) plot of random taste scores with VTK."""
import vtk
import random

numTuples = 12


def _random_scores(low, high):
    """Return a vtkFloatArray of numTuples uniform random ints in [low, high]."""
    scores = vtk.vtkFloatArray()
    scores.SetNumberOfTuples(numTuples)
    for ix in range(numTuples):
        scores.SetTuple1(ix, random.randint(low, high))
    return scores


# One array per taste attribute, each drawn from its own value range.
bitter = _random_scores(1, 10)
crispy = _random_scores(-1, 1)
crunchy = _random_scores(1, 100)
salty = _random_scores(0, 10)
oily = _random_scores(5, 25)

dobj = vtk.vtkDataObject()
for scores in (bitter, crispy, crunchy, salty, oily):
    dobj.GetFieldData().AddArray(scores)

actor = vtk.vtkSpiderPlotActor()
actor.SetInputData(dobj)
actor.SetTitle("spider plot")
actor.SetIndependentVariablesToColumns()
actor.GetPositionCoordinate().SetValue(0.05, 0.1, 0.0)
actor.GetPosition2Coordinate().SetValue(0.95, 0.85, 0.0)
actor.GetProperty().SetColor(1, 0, 0)

# Axis labels and expected value ranges, in the same order as the arrays.
for axis, (label, lo, hi) in enumerate([("Bitter", 1, 10),
                                        ("Crispy", -1, 1),
                                        ("Crunchy", 1, 100),
                                        ("Salty", 0, 10),
                                        ("Oily", 5, 25)]):
    actor.SetAxisLabel(axis, label)
    actor.SetAxisRange(axis, lo, hi)

actor.GetLegendActor().SetNumberOfEntries(numTuples)
for ix in range(numTuples):
    actor.SetPlotColor(ix, random.random(), random.random(), random.random())
actor.LegendVisibilityOn()

# Text colours intentionally left at their defaults (same as actor for
# backward compat with the original test):
# actor.GetTitleTextProperty().SetColor(1, 1, 0)
# actor.GetLabelTextProperty().SetColor(1, 0, 0)

ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.AddActor(actor)
ren1.SetBackground(0, 0, 0)
renWin.SetSize(500, 500)
iren.Initialize()
renWin.Render()
iren.Start()
| [
"vtk.vtkRenderWindowInteractor",
"vtk.vtkRenderWindow",
"vtk.vtkRenderer",
"vtk.vtkSpiderPlotActor",
"vtk.vtkFloatArray",
"random.random",
"random.randint",
"vtk.vtkDataObject"
] | [((74, 93), 'vtk.vtkFloatArray', 'vtk.vtkFloatArray', ([], {}), '()\n', (91, 93), False, 'import vtk\n'), ((140, 159), 'vtk.vtkFloatArray', 'vtk.vtkFloatArray', ([], {}), '()\n', (157, 159), False, 'import vtk\n'), ((207, 226), 'vtk.vtkFloatArray', 'vtk.vtkFloatArray', ([], {}), '()\n', (224, 226), False, 'import vtk\n'), ((273, 292), 'vtk.vtkFloatArray', 'vtk.vtkFloatArray', ([], {}), '()\n', (290, 292), False, 'import vtk\n'), ((336, 355), 'vtk.vtkFloatArray', 'vtk.vtkFloatArray', ([], {}), '()\n', (353, 355), False, 'import vtk\n'), ((660, 679), 'vtk.vtkDataObject', 'vtk.vtkDataObject', ([], {}), '()\n', (677, 679), False, 'import vtk\n'), ((872, 896), 'vtk.vtkSpiderPlotActor', 'vtk.vtkSpiderPlotActor', ([], {}), '()\n', (894, 896), False, 'import vtk\n'), ((1815, 1832), 'vtk.vtkRenderer', 'vtk.vtkRenderer', ([], {}), '()\n', (1830, 1832), False, 'import vtk\n'), ((1842, 1863), 'vtk.vtkRenderWindow', 'vtk.vtkRenderWindow', ([], {}), '()\n', (1861, 1863), False, 'import vtk\n'), ((1896, 1927), 'vtk.vtkRenderWindowInteractor', 'vtk.vtkRenderWindowInteractor', ([], {}), '()\n', (1925, 1927), False, 'import vtk\n'), ((442, 463), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (456, 463), False, 'import random\n'), ((489, 510), 'random.randint', 'random.randint', (['(-1)', '(1)'], {}), '(-1, 1)\n', (503, 510), False, 'import random\n'), ((537, 559), 'random.randint', 'random.randint', (['(1)', '(100)'], {}), '(1, 100)\n', (551, 559), False, 'import random\n'), ((584, 605), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (598, 605), False, 'import random\n'), ((629, 650), 'random.randint', 'random.randint', (['(5)', '(25)'], {}), '(5, 25)\n', (643, 650), False, 'import random\n'), ((1559, 1574), 'random.random', 'random.random', ([], {}), '()\n', (1572, 1574), False, 'import random\n'), ((1576, 1591), 'random.random', 'random.random', ([], {}), '()\n', (1589, 1591), False, 'import random\n'), ((1593, 1608), 
'random.random', 'random.random', ([], {}), '()\n', (1606, 1608), False, 'import random\n')] |
class MyModel:
    """Factory for a small convolutional classifier over 3 output classes."""

    def get_model(self, input_h):
        """Build and compile a CNN for square RGB images of side ``input_h``.

        Args:
            input_h: Height/width in pixels of the (square, 3-channel) input.

        Returns:
            A compiled Keras ``Sequential`` model (categorical crossentropy,
            Adam optimizer, accuracy metric); the summary is printed as a
            side effect.
        """
        from keras.models import Sequential
        from keras.layers import Conv2D, MaxPooling2D
        from keras.layers import Activation, Dropout, Flatten, Dense

        # Same layer stack as before, expressed as a single list literal.
        model = Sequential([
            Conv2D(30, 5, padding='same', activation='relu',
                   input_shape=(input_h, input_h, 3)),
            MaxPooling2D(pool_size=(2, 2)),
            Conv2D(40, 5, activation='relu', padding='same'),
            MaxPooling2D(pool_size=(4, 4)),
            Conv2D(60, 3, activation='relu', padding='same'),
            MaxPooling2D(pool_size=(2, 2)),
            Flatten(),
            Dense(60, activation='relu'),
            Dropout(0.5),
            Dense(3),
            Activation('softmax'),
        ])
        model.compile(loss='categorical_crossentropy',
                      optimizer='adam',
                      metrics=['accuracy'])
        model.summary()
        return model
| [
"keras.layers.Conv2D",
"keras.layers.Flatten",
"keras.layers.MaxPooling2D",
"keras.models.Sequential",
"keras.layers.Activation",
"keras.layers.Dense",
"keras.layers.Dropout"
] | [((232, 244), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (242, 244), False, 'from keras.models import Sequential\n'), ((263, 350), 'keras.layers.Conv2D', 'Conv2D', (['(30)', '(5)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'input_shape': '(input_h, input_h, 3)'}), "(30, 5, padding='same', activation='relu', input_shape=(input_h,\n input_h, 3))\n", (269, 350), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((365, 395), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (377, 395), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((415, 463), 'keras.layers.Conv2D', 'Conv2D', (['(40)', '(5)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(40, 5, activation='relu', padding='same')\n", (421, 463), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((481, 511), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(4, 4)'}), '(pool_size=(4, 4))\n', (493, 511), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((531, 579), 'keras.layers.Conv2D', 'Conv2D', (['(60)', '(3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(60, 3, activation='relu', padding='same')\n", (537, 579), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((597, 627), 'keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (609, 627), False, 'from keras.layers import Conv2D, MaxPooling2D\n'), ((648, 657), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (655, 657), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((677, 705), 'keras.layers.Dense', 'Dense', (['(60)'], {'activation': '"""relu"""'}), "(60, activation='relu')\n", (682, 705), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((724, 736), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (731, 736), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), 
((756, 764), 'keras.layers.Dense', 'Dense', (['(3)'], {}), '(3)\n', (761, 764), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n'), ((784, 805), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (794, 805), False, 'from keras.layers import Activation, Dropout, Flatten, Dense\n')] |
from django.urls import reverse
from django_webtest import WebTest
from webtest import Text
from openpersonen.api.tests.factory_models import PersoonFactory, UserFactory
from openpersonen.contrib.demo.models import Kind, Ouder, Persoon
class TestPersoonAdmin(WebTest):
    """Admin integration tests for inline relations on a Persoon.

    Saving a new inline child (Kind) or parent (Ouder) through the Persoon
    admin form should also create the matching Persoon record and the
    inverse relation on the original Persoon.
    """

    def setUp(self):
        super().setUp()
        self.persoon = PersoonFactory.create(
            burgerservicenummer_persoon=123456789,
            voornamen_persoon="persoon_voornamen",
            geslachtsnaam_persoon="geslachtsnaam_persoon",
            geboortedatum_persoon="20200929",
            geslachtsaanduiding="M",
        )
        self.url = reverse(
            "admin:demo_persoon_change", kwargs={"object_id": self.persoon.pk}
        )
        self.user = UserFactory(is_superuser=True, is_staff=True, is_active=True)
        self.app.set_user(self.user)

    def _submit_inline(self, prefix, bsn_field, bsn_value):
        """Fill the persoon admin form, add one inline row, and submit.

        Args:
            prefix: Formset prefix, e.g. ``"kind_set"`` or ``"ouder_set"``.
            bsn_field: Name of the BSN field on the inline model.
            bsn_value: BSN to enter on the new inline row.

        Returns:
            The response produced by submitting the form.
        """
        response = self.app.get(self.url)
        form = response.forms["persoon_form"]
        # Formset management data for exactly one new inline row.
        form[prefix + "-TOTAL_FORMS"] = 1
        form[prefix + "-INITIAL_FORMS"] = 0
        form[prefix + "-MIN_NUM_FORMS"] = 0
        form[prefix + "-MAX_NUM_FORMS"] = 1000
        form["burgerservicenummer_persoon"] = self.persoon.burgerservicenummer_persoon
        form["voornamen_persoon"] = self.persoon.voornamen_persoon
        form["geslachtsnaam_persoon"] = self.persoon.geslachtsnaam_persoon
        form["geboortedatum_persoon"] = self.persoon.geboortedatum_persoon
        form["geslachtsaanduiding"] = self.persoon.geslachtsaanduiding
        # The inline inputs are not part of the parsed form, so inject them
        # by hand before submitting (same trick as the original tests).
        for name, value in (
            (prefix + "-0-persoon", self.persoon.id),
            (prefix + "-0-" + bsn_field, bsn_value),
        ):
            text = Text(form, "input", name, 1, value=value, id=name)
            form.field_order.append((name, text))
            form.fields[name] = text
        return form.submit()

    def test_adding_kind(self):
        kind_burgerservicenummer = 987654321
        response = self._submit_inline(
            "kind_set", "burgerservicenummer_kind", kind_burgerservicenummer
        )
        # NOTE(review): assertTrue(x, y) treats y as a message, so this does
        # not compare against 200; assertEqual(response.status_code, 302) is
        # probably what was intended for an admin save -- confirm before
        # tightening.
        self.assertTrue(response.status_code, 200)
        self.assertTrue(
            Kind.objects.filter(
                burgerservicenummer_kind=kind_burgerservicenummer
            ).exists()
        )
        self.assertTrue(
            Persoon.objects.filter(
                burgerservicenummer_persoon=kind_burgerservicenummer
            ).exists()
        )
        self.assertTrue(
            Ouder.objects.filter(
                burgerservicenummer_ouder=self.persoon.burgerservicenummer_persoon
            ).exists()
        )

    def test_adding_ouder(self):
        ouder_burgerservicenummer = 987654321
        response = self._submit_inline(
            "ouder_set", "burgerservicenummer_ouder", ouder_burgerservicenummer
        )
        self.assertTrue(response.status_code, 200)
        self.assertTrue(
            Ouder.objects.filter(
                burgerservicenummer_ouder=ouder_burgerservicenummer
            ).exists()
        )
        self.assertTrue(
            Persoon.objects.filter(
                burgerservicenummer_persoon=ouder_burgerservicenummer
            ).exists()
        )
        self.assertTrue(
            Kind.objects.filter(
                burgerservicenummer_kind=self.persoon.burgerservicenummer_persoon
            ).exists()
        )
| [
"webtest.Text",
"openpersonen.contrib.demo.models.Kind.objects.filter",
"openpersonen.contrib.demo.models.Ouder.objects.filter",
"openpersonen.contrib.demo.models.Persoon.objects.filter",
"openpersonen.api.tests.factory_models.UserFactory",
"django.urls.reverse",
"openpersonen.api.tests.factory_models.P... | [((341, 559), 'openpersonen.api.tests.factory_models.PersoonFactory.create', 'PersoonFactory.create', ([], {'burgerservicenummer_persoon': '(123456789)', 'voornamen_persoon': '"""persoon_voornamen"""', 'geslachtsnaam_persoon': '"""geslachtsnaam_persoon"""', 'geboortedatum_persoon': '"""20200929"""', 'geslachtsaanduiding': '"""M"""'}), "(burgerservicenummer_persoon=123456789,\n voornamen_persoon='persoon_voornamen', geslachtsnaam_persoon=\n 'geslachtsnaam_persoon', geboortedatum_persoon='20200929',\n geslachtsaanduiding='M')\n", (362, 559), False, 'from openpersonen.api.tests.factory_models import PersoonFactory, UserFactory\n'), ((637, 712), 'django.urls.reverse', 'reverse', (['"""admin:demo_persoon_change"""'], {'kwargs': "{'object_id': self.persoon.pk}"}), "('admin:demo_persoon_change', kwargs={'object_id': self.persoon.pk})\n", (644, 712), False, 'from django.urls import reverse\n'), ((755, 816), 'openpersonen.api.tests.factory_models.UserFactory', 'UserFactory', ([], {'is_superuser': '(True)', 'is_staff': '(True)', 'is_active': '(True)'}), '(is_superuser=True, is_staff=True, is_active=True)\n', (766, 816), False, 'from openpersonen.api.tests.factory_models import PersoonFactory, UserFactory\n'), ((1587, 1684), 'webtest.Text', 'Text', (['form', '"""input"""', '"""kind_set-0-persoon"""', '(1)'], {'value': 'self.persoon.id', 'id': '"""kind_set-0-persoon"""'}), "(form, 'input', 'kind_set-0-persoon', 1, value=self.persoon.id, id=\n 'kind_set-0-persoon')\n", (1591, 1684), False, 'from webtest import Text\n'), ((1890, 2030), 'webtest.Text', 'Text', (['form', '"""input"""', '"""kind_set-0-burgerservicenummer_kind"""', '(1)'], {'value': 'kind_burgerservicenummer', 'id': '"""kind_set-0-burgerservicenummer_kind"""'}), "(form, 'input', 'kind_set-0-burgerservicenummer_kind', 1, value=\n kind_burgerservicenummer, id='kind_set-0-burgerservicenummer_kind')\n", (1894, 2030), False, 'from webtest import Text\n'), ((3574, 3673), 
'webtest.Text', 'Text', (['form', '"""input"""', '"""ouder_set-0-persoon"""', '(1)'], {'value': 'self.persoon.id', 'id': '"""ouder_set-0-persoon"""'}), "(form, 'input', 'ouder_set-0-persoon', 1, value=self.persoon.id, id=\n 'ouder_set-0-persoon')\n", (3578, 3673), False, 'from webtest import Text\n'), ((3881, 4026), 'webtest.Text', 'Text', (['form', '"""input"""', '"""ouder_set-0-burgerservicenummer_ouder"""', '(1)'], {'value': 'ouder_burgerservicenummer', 'id': '"""ouder_set-0-burgerservicenummer_ouder"""'}), "(form, 'input', 'ouder_set-0-burgerservicenummer_ouder', 1, value=\n ouder_burgerservicenummer, id='ouder_set-0-burgerservicenummer_ouder')\n", (3885, 4026), False, 'from webtest import Text\n'), ((2377, 2447), 'openpersonen.contrib.demo.models.Kind.objects.filter', 'Kind.objects.filter', ([], {'burgerservicenummer_kind': 'kind_burgerservicenummer'}), '(burgerservicenummer_kind=kind_burgerservicenummer)\n', (2396, 2447), False, 'from openpersonen.contrib.demo.models import Kind, Ouder, Persoon\n'), ((2534, 2610), 'openpersonen.contrib.demo.models.Persoon.objects.filter', 'Persoon.objects.filter', ([], {'burgerservicenummer_persoon': 'kind_burgerservicenummer'}), '(burgerservicenummer_persoon=kind_burgerservicenummer)\n', (2556, 2610), False, 'from openpersonen.contrib.demo.models import Kind, Ouder, Persoon\n'), ((2697, 2790), 'openpersonen.contrib.demo.models.Ouder.objects.filter', 'Ouder.objects.filter', ([], {'burgerservicenummer_ouder': 'self.persoon.burgerservicenummer_persoon'}), '(burgerservicenummer_ouder=self.persoon.\n burgerservicenummer_persoon)\n', (2717, 2790), False, 'from openpersonen.contrib.demo.models import Kind, Ouder, Persoon\n'), ((4377, 4450), 'openpersonen.contrib.demo.models.Ouder.objects.filter', 'Ouder.objects.filter', ([], {'burgerservicenummer_ouder': 'ouder_burgerservicenummer'}), '(burgerservicenummer_ouder=ouder_burgerservicenummer)\n', (4397, 4450), False, 'from openpersonen.contrib.demo.models import Kind, Ouder, 
Persoon\n'), ((4537, 4614), 'openpersonen.contrib.demo.models.Persoon.objects.filter', 'Persoon.objects.filter', ([], {'burgerservicenummer_persoon': 'ouder_burgerservicenummer'}), '(burgerservicenummer_persoon=ouder_burgerservicenummer)\n', (4559, 4614), False, 'from openpersonen.contrib.demo.models import Kind, Ouder, Persoon\n'), ((4701, 4792), 'openpersonen.contrib.demo.models.Kind.objects.filter', 'Kind.objects.filter', ([], {'burgerservicenummer_kind': 'self.persoon.burgerservicenummer_persoon'}), '(burgerservicenummer_kind=self.persoon.\n burgerservicenummer_persoon)\n', (4720, 4792), False, 'from openpersonen.contrib.demo.models import Kind, Ouder, Persoon\n')] |
import os
import itertools
import re
from typing import List, Optional, Tuple, Dict, Callable, Any, NamedTuple
from string import Template
from typing import List
from tokenizers import Tokenizer, Encoding
dirname = os.path.dirname(__file__)
css_filename = os.path.join(dirname, "visualizer-styles.css")
# Load the stylesheet once at import time; HTMLBody embeds it (via its
# css_styles default) into every rendered document.
with open(css_filename) as f:
    css = f.read()
class Annotation:
    """A labelled character span ``[start, end)`` inside a text."""

    start: int
    end: int
    # Fixed: was annotated ``int``, but the constructor and every use in
    # this module (e.g. label color mapping) treat the label as a string.
    label: str

    def __init__(self, start: int, end: int, label: str):
        self.start = start
        self.end = end
        self.label = label
# Alias: the list of Annotation objects accepted by the visualizer API.
AnnotationList = List[Annotation]
# Alias: one Optional[int] per character of the text -- the index of the
# annotation covering that character, or None when uncovered.
PartialIntList = List[Optional[int]]
class CharStateKey(NamedTuple):
    """Grouping key: chars with equal keys are rendered in the same HTML span."""

    token_ix: Optional[int]  # index of the (first) token covering the char, or None
    anno_ix: Optional[int]  # index of the annotation covering the char, or None
class CharState:
char_ix: Optional[int]
def __init__(self, char_ix):
self.char_ix = char_ix
self.anno_ix: Optional[int] = None
self.tokens: List[int] = []
@property
def token_ix(self):
return self.tokens[0] if len(self.tokens) > 0 else None
@property
def is_multitoken(self):
"""
BPE tokenizers can output more than one token for a char
"""
return len(self.tokens) > 1
def partition_key(self) -> CharStateKey:
return CharStateKey(
token_ix=self.token_ix,
anno_ix=self.anno_ix,
)
class Aligned:
    # NOTE(review): empty and unreferenced anywhere in this module --
    # presumably a leftover placeholder; candidate for removal once it is
    # confirmed nothing external imports it.
    pass
class EncodingVisualizer:
    """
    Build an EncodingVisualizer

    Args:

        tokenizer (:class:`~tokenizers.Tokenizer`):
            A tokenizer instance

        default_to_notebook (:obj:`bool`):
            Whether to render html output in a notebook by default

        annotation_converter (:obj:`Callable`, `optional`):
            An optional (lambda) function that takes an annotation in any format and returns
            an Annotation object
    """

    # Raw string so that \b is a regex word boundary; in the previous
    # non-raw string it was the backspace character (\x08), which made the
    # optional context groups around unk/oov unmatchable in normal text.
    unk_token_regex = re.compile(r"(.{1}\b)?(unk|oov)(\b.{1})?", flags=re.IGNORECASE)

    def __init__(
        self,
        tokenizer: Tokenizer,
        default_to_notebook: bool = True,
        annotation_converter: Optional[Callable[[Any], Annotation]] = None,
    ):
        if default_to_notebook:
            try:
                from IPython.core.display import display, HTML
            except ImportError as e:
                raise Exception(
                    """We couldn't import IPython utils for html display.
                        Are you running in a notebook?
                        You can also pass `default_to_notebook=False` to get back raw HTML
                    """
                )

        self.tokenizer = tokenizer
        self.default_to_notebook = default_to_notebook
        # Attribute name keeps the historical misspelling ("coverter") for
        # backward compatibility with any code that already reads it.
        self.annotation_coverter = annotation_converter

    def __call__(
        self,
        text: str,
        annotations: Optional[AnnotationList] = None,
        default_to_notebook: Optional[bool] = None,
    ) -> Optional[str]:
        """
        Build a visualization of the given text

        Args:
            text (:obj:`str`):
                The text to tokenize

            annotations (:obj:`List[Annotation]`, `optional`):
                An optional list of annotations of the text. The can either be an annotation class
                or anything else if you instantiated the visualizer with a converter function.
                ``None`` (the default) means no annotations.

            default_to_notebook (:obj:`bool`, `optional`, defaults to `False`):
                If True, will render the html in a notebook. Otherwise returns an html string.

        Returns:
            The HTML string if default_to_notebook is False, otherwise (default) returns None and
            renders the HTML in the notebook

        """
        # None sentinel instead of a shared mutable `[]` default argument.
        if annotations is None:
            annotations = []
        final_default_to_notebook = self.default_to_notebook
        if default_to_notebook is not None:
            final_default_to_notebook = default_to_notebook
        if final_default_to_notebook:
            try:
                from IPython.core.display import display, HTML
            except ImportError as e:
                raise Exception(
                    """We couldn't import IPython utils for html display.
                    Are you running in a notebook?"""
                )
        if self.annotation_coverter is not None:
            annotations = list(map(self.annotation_coverter, annotations))
        encoding = self.tokenizer.encode(text)
        html = EncodingVisualizer.__make_html(text, encoding, annotations)
        if final_default_to_notebook:
            display(HTML(html))
        else:
            return html

    @staticmethod
    def calculate_label_colors(annotations: AnnotationList) -> Dict[str, str]:
        """
        Generates a color palette for all the labels in a given set of annotations

        Args:
          annotations (:obj:`Annotation`):
            A list of annotations

        Returns:
            :obj:`dict`: A dictionary mapping labels to colors in HSL format
        """
        if len(annotations) == 0:
            return {}
        labels = set(map(lambda x: x.label, annotations))
        num_labels = len(labels)
        h_step = int(255 / num_labels)
        if h_step < 20:
            h_step = 20
        sat = 32
        light = 64
        hue = 10
        colors = {}

        for label in sorted(
            labels
        ):  # sort so we always get the same colors for a given set of labels
            # Fixed: close the hsl(...) functional notation -- the previous
            # string was missing the trailing ")" and produced invalid CSS.
            colors[label] = f"hsl({hue},{sat}%,{light}%)"
            hue += h_step
        return colors

    @staticmethod
    def consecutive_chars_to_html(
        consecutive_chars_list: List[CharState],
        text: str,
        encoding: Encoding,
    ):
        """
        Converts a list of "consecutive chars" into a single HTML element.
        Chars are consecutive if they fall under the same word, token and annotation.
        The CharState class is a named tuple with a "partition_key" method that makes it easy to
        compare if two chars are consecutive.

        Args:
            consecutive_chars_list (:obj:`List[CharState]`):
                A list of CharStates that have been grouped together

            text (:obj:`str`):
                The original text being processed

            encoding (:class:`~tokenizers.Encoding`):
                The encoding returned from the tokenizer

        Returns:
            :obj:`str`: The HTML span for a set of consecutive chars
        """
        first = consecutive_chars_list[0]
        if first.char_ix is None:
            # its a special token
            stoken = encoding.tokens[first.token_ix]
            # special tokens are represented as empty spans. We use the data attribute and css
            # magic to display it
            return f'<span class="special-token" data-stoken={stoken}></span>'
        # We're not in a special token so this group has a start and end.
        last = consecutive_chars_list[-1]
        start = first.char_ix
        end = last.char_ix + 1
        span_text = text[start:end]
        css_classes = []  # What css classes will we apply on the resulting span
        data_items = {}  # What data attributes will we apply on the result span
        if first.token_ix is not None:
            # We can either be in a token or not (e.g. in white space)
            css_classes.append("token")
            if first.is_multitoken:
                css_classes.append("multi-token")
            if first.token_ix % 2:
                # We use this to color alternating tokens.
                # A token might be split by an annotation that ends in the middle of it, so this
                # lets us visually indicate a consecutive token despite its possible splitting in
                # the html markup
                css_classes.append("odd-token")
            else:
                # Like above, but a different color so we can see the tokens alternate
                css_classes.append("even-token")
            if (
                EncodingVisualizer.unk_token_regex.search(encoding.tokens[first.token_ix])
                is not None
            ):
                # This is a special token that is in the text. probably UNK
                css_classes.append("special-token")
                # TODO is this the right name for the data attribute ?
                data_items["stok"] = encoding.tokens[first.token_ix]
        else:
            # In this case we are looking at a group/single char that is not tokenized.
            # e.g. white space
            css_classes.append("non-token")
        css = f'''class="{' '.join(css_classes)}"'''
        data = ""
        for key, val in data_items.items():
            data += f' data-{key}="{val}"'
        return f"<span {css} {data} >{span_text}</span>"

    @staticmethod
    def __make_html(text: str, encoding: Encoding, annotations: AnnotationList) -> str:
        char_states = EncodingVisualizer.__make_char_states(text, encoding, annotations)
        current_consecutive_chars = [char_states[0]]
        prev_anno_ix = char_states[0].anno_ix
        spans = []
        label_colors_dict = EncodingVisualizer.calculate_label_colors(annotations)
        cur_anno_ix = char_states[0].anno_ix
        if cur_anno_ix is not None:
            # If we started in an annotation make a span for it
            anno = annotations[cur_anno_ix]
            label = anno.label
            color = label_colors_dict[label]
            spans.append(f'<span class="annotation" style="color:{color}" data-label="{label}">')

        for cs in char_states[1:]:
            cur_anno_ix = cs.anno_ix
            if cur_anno_ix != prev_anno_ix:
                # If we've transitioned in or out of an annotation
                spans.append(
                    # Create a span from the current consecutive characters
                    EncodingVisualizer.consecutive_chars_to_html(
                        current_consecutive_chars,
                        text=text,
                        encoding=encoding,
                    )
                )
                current_consecutive_chars = [cs]

                if prev_anno_ix is not None:
                    # if we transitioned out of an annotation close it's span
                    spans.append("</span>")
                if cur_anno_ix is not None:
                    # If we entered a new annotation make a span for it
                    anno = annotations[cur_anno_ix]
                    label = anno.label
                    color = label_colors_dict[label]
                    spans.append(
                        f'<span class="annotation" style="color:{color}" data-label="{label}">'
                    )
            prev_anno_ix = cur_anno_ix

            if cs.partition_key() == current_consecutive_chars[0].partition_key():
                # If the current charchter is in the same "group" as the previous one
                current_consecutive_chars.append(cs)
            else:
                # Otherwise we make a span for the previous group
                spans.append(
                    EncodingVisualizer.consecutive_chars_to_html(
                        current_consecutive_chars,
                        text=text,
                        encoding=encoding,
                    )
                )
                # An reset the consecutive_char_list to form a new group
                current_consecutive_chars = [cs]
        # All that's left is to fill out the final span
        # TODO I think there is an edge case here where an annotation's span might not close
        spans.append(
            EncodingVisualizer.consecutive_chars_to_html(
                current_consecutive_chars,
                text=text,
                encoding=encoding,
            )
        )
        res = HTMLBody(spans)  # Send the list of spans to the body of our html
        return res

    @staticmethod
    def __make_anno_map(text: str, annotations: AnnotationList) -> PartialIntList:
        """
        Args:
            text (:obj:`str`):
                The raw text we want to align to

            annotations (:obj:`AnnotationList`):
                A (possibly empty) list of annotations

        Returns:
            A list of length len(text) whose entry at index i is None if there is no annotation on
            charachter i or k, the index of the annotation that covers index i where k is with
            respect to the list of annotations
        """
        annotation_map = [None] * len(text)
        for anno_ix, a in enumerate(annotations):
            for i in range(a.start, a.end):
                annotation_map[i] = anno_ix
        return annotation_map

    @staticmethod
    def __make_char_states(
        text: str, encoding: Encoding, annotations: AnnotationList
    ) -> List[CharState]:
        """
        For each character in the original text, we emit a tuple representing it's "state":

            * which token_ix it corresponds to
            * which word_ix it corresponds to
            * which annotation_ix it corresponds to

        Args:
            text (:obj:`str`):
                The raw text we want to align to

            annotations (:obj:`List[Annotation]`):
                A (possibly empty) list of annotations

            encoding: (:class:`~tokenizers.Encoding`):
                The encoding returned from the tokenizer

        Returns:
            :obj:`List[CharState]`: A list of CharStates, indicating for each char in the text what
            it's state is
        """
        annotation_map = EncodingVisualizer.__make_anno_map(text, annotations)
        # Todo make this a dataclass or named tuple
        char_states: List[CharState] = [CharState(char_ix) for char_ix in range(len(text))]
        for token_ix, token in enumerate(encoding.tokens):
            offsets = encoding.token_to_chars(token_ix)
            if offsets is not None:
                start, end = offsets
                for i in range(start, end):
                    char_states[i].tokens.append(token_ix)
        for char_ix, anno_ix in enumerate(annotation_map):
            char_states[char_ix].anno_ix = anno_ix

        return char_states
def HTMLBody(children: List[str], css_styles=css) -> str:
    """
    Generates the full html with css from a list of html spans

    Args:
        children (:obj:`List[str]`):
            A list of strings, assumed to be html elements

        css_styles (:obj:`str`, `optional`):
            Optional alternative implementation of the css

    Returns:
        :obj:`str`: An HTML string with style markup
    """
    # Concatenate all spans once, then splice them into the page template.
    joined_children = "".join(children)
    return f"""
    <html>
        <head>
            <style>
                {css_styles}
            </style>
        </head>
        <body>
            <div class="tokenized-text" dir=auto>
            {joined_children}
            </div>
        </body>
    </html>
    """
| [
"os.path.dirname",
"os.path.join",
"IPython.core.display.HTML",
"re.compile"
] | [((218, 243), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (233, 243), False, 'import os\n'), ((259, 305), 'os.path.join', 'os.path.join', (['dirname', '"""visualizer-styles.css"""'], {}), "(dirname, 'visualizer-styles.css')\n", (271, 305), False, 'import os\n'), ((1857, 1923), 're.compile', 're.compile', (['"""(.{1}\x08)?(unk|oov)(\x08.{1})?"""'], {'flags': 're.IGNORECASE'}), "('(.{1}\\x08)?(unk|oov)(\\x08.{1})?', flags=re.IGNORECASE)\n", (1867, 1923), False, 'import re\n'), ((4435, 4445), 'IPython.core.display.HTML', 'HTML', (['html'], {}), '(html)\n', (4439, 4445), False, 'from IPython.core.display import display, HTML\n')] |
from django.contrib import admin
from .models import (
ChatGroup,
GroupMembership,
GroupMessage,
GroupMessageInfo,
)
# Register every group-chat model with the default admin site, in the same
# order as before.
for _model in (ChatGroup, GroupMembership, GroupMessage, GroupMessageInfo):
    admin.site.register(_model)
| [
"django.contrib.admin.site.register"
] | [((135, 165), 'django.contrib.admin.site.register', 'admin.site.register', (['ChatGroup'], {}), '(ChatGroup)\n', (154, 165), False, 'from django.contrib import admin\n'), ((166, 202), 'django.contrib.admin.site.register', 'admin.site.register', (['GroupMembership'], {}), '(GroupMembership)\n', (185, 202), False, 'from django.contrib import admin\n'), ((203, 236), 'django.contrib.admin.site.register', 'admin.site.register', (['GroupMessage'], {}), '(GroupMessage)\n', (222, 236), False, 'from django.contrib import admin\n'), ((237, 274), 'django.contrib.admin.site.register', 'admin.site.register', (['GroupMessageInfo'], {}), '(GroupMessageInfo)\n', (256, 274), False, 'from django.contrib import admin\n')] |
from successor.skaters.scalarskaters.scalarskaterfactory import scaler_skater_factory
def suc_tsa_p2_d0_q1(y,s,k,a=None,t=None,e=None,r=None):
    """Skater wrapper: ARMA model with p=2, d=0, q=1 via the scalar skater factory.

    Follows the common skater call signature; ``a``, ``t``, ``e`` and ``r``
    are accepted for interface compatibility but are not forwarded to the
    factory (presumably intentional -- confirm against the skater protocol).

    :param y: incoming observation(s)
    :param s: skater state
    :param k: number of steps ahead
    :returns: whatever ``scaler_skater_factory`` returns for this skater
        (typically a prediction/std/state triple -- TODO confirm)
    """
    return scaler_skater_factory(y=y,s=s,k=k,skater_name='tsa_p2_d0_q1',n_input=160)

# Registry of the scalar TSA skaters defined in this module.
SCALAR_TSA_SKATERS = [suc_tsa_p2_d0_q1]
| [
"successor.skaters.scalarskaters.scalarskaterfactory.scaler_skater_factory"
] | [((156, 233), 'successor.skaters.scalarskaters.scalarskaterfactory.scaler_skater_factory', 'scaler_skater_factory', ([], {'y': 'y', 's': 's', 'k': 'k', 'skater_name': '"""tsa_p2_d0_q1"""', 'n_input': '(160)'}), "(y=y, s=s, k=k, skater_name='tsa_p2_d0_q1', n_input=160)\n", (177, 233), False, 'from successor.skaters.scalarskaters.scalarskaterfactory import scaler_skater_factory\n')] |
#!/usr/bin/env python2.7
"Run a regresion test the library cells for DRC"
import unittest
from testutils import header
import sys,os
sys.path.append(os.path.join(sys.path[0],"../.."))
sys.path.append(os.path.join(sys.path[0],".."))
import globals
import debug
import calibre
OPTS = globals.OPTS
class big_test(unittest.TestCase):
"""
Simplest two pin route test with no blockages using the pin locations instead of labels.
"""
def runTest(self):
globals.init_openram("config_{0}".format(OPTS.tech_name))
import design
import router
class gdscell(design.design):
"""
A generic GDS design that we can route on.
"""
def __init__(self, name):
#design.design.__init__(self, name)
debug.info(2, "Create {0} object".format(name))
self.name = name
self.gds_file = "{0}/{1}.gds".format(os.path.dirname(os.path.realpath(__file__)),name)
self.sp_file = "{0}/{1}.sp".format(os.path.dirname(os.path.realpath(__file__)),name)
design.hierarchy_layout.layout.__init__(self, name)
design.hierarchy_spice.spice.__init__(self, name)
class routing(design.design,unittest.TestCase):
"""
A generic GDS design that we can route on.
"""
def __init__(self, name):
design.design.__init__(self, name)
debug.info(2, "Create {0} object".format(name))
cell = gdscell(name)
self.add_inst(name=name,
mod=cell,
offset=[0,0])
self.connect_inst([])
self.gdsname = "{0}/{1}.gds".format(os.path.dirname(os.path.realpath(__file__)),name)
r=router.router(self.gdsname)
layer_stack =("metal3","via2","metal2")
connections=[('out_0_2', 'a_0_0'),
('out_0_3', 'b_0_0'),
('out_0_0', 'a_0_1'),
('out_1_2', 'a_1_0'),
('out_1_3', 'b_1_0'),
('out_1_0', 'a_1_1'),
('out_2_1', 'a_2_0'),
('out_2_2', 'b_2_0'),
('out_3_1', 'a_3_0'),
('out_3_2', 'b_3_0'),
('out_4_6', 'a_4_0'),
('out_4_7', 'b_4_0'),
('out_4_8', 'a_4_2'),
('out_4_9', 'b_4_2'),
('out_4_10', 'a_4_4'),
('out_4_11', 'b_4_4'),
('out_4_0', 'a_4_1'),
('out_4_2', 'b_4_1'),
('out_4_4', 'a_4_5'),
('out_4_1', 'a_4_3'),
('out_4_5', 'b_4_3')]
for (src,tgt) in connections:
self.assertTrue(r.route(self,layer_stack,src=src,dest=tgt))
# This test only runs on scn3me_subm tech
if OPTS.tech_name=="scn3me_subm":
r = routing("07_big_test_{0}".format(OPTS.tech_name))
self.local_check(r)
else:
debug.warning("This test does not support technology {0}".format(OPTS.tech_name))
# fails if there are any DRC errors on any cells
globals.end_openram()
def local_check(self, r):
tempgds = OPTS.openram_temp + "temp.gds"
r.gds_write(tempgds)
self.assertFalse(calibre.run_drc(r.name, tempgds))
os.remove(tempgds)
# instantiate a copy of the class to actually run the test
if __name__ == "__main__":
(OPTS, args) = globals.parse_args()
del sys.argv[1:]
header(__file__, OPTS.tech_name)
unittest.main()
| [
"globals.end_openram",
"design.hierarchy_spice.spice.__init__",
"os.path.join",
"router.router",
"calibre.run_drc",
"os.path.realpath",
"testutils.header",
"globals.parse_args",
"unittest.main",
"design.hierarchy_layout.layout.__init__",
"design.design.__init__",
"os.remove"
] | [((150, 184), 'os.path.join', 'os.path.join', (['sys.path[0]', '"""../.."""'], {}), "(sys.path[0], '../..')\n", (162, 184), False, 'import sys, os\n'), ((201, 232), 'os.path.join', 'os.path.join', (['sys.path[0]', '""".."""'], {}), "(sys.path[0], '..')\n", (213, 232), False, 'import sys, os\n'), ((3896, 3916), 'globals.parse_args', 'globals.parse_args', ([], {}), '()\n', (3914, 3916), False, 'import globals\n'), ((3942, 3974), 'testutils.header', 'header', (['__file__', 'OPTS.tech_name'], {}), '(__file__, OPTS.tech_name)\n', (3948, 3974), False, 'from testutils import header\n'), ((3979, 3994), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3992, 3994), False, 'import unittest\n'), ((3539, 3560), 'globals.end_openram', 'globals.end_openram', ([], {}), '()\n', (3558, 3560), False, 'import globals\n'), ((3738, 3756), 'os.remove', 'os.remove', (['tempgds'], {}), '(tempgds)\n', (3747, 3756), False, 'import sys, os\n'), ((3696, 3728), 'calibre.run_drc', 'calibre.run_drc', (['r.name', 'tempgds'], {}), '(r.name, tempgds)\n', (3711, 3728), False, 'import calibre\n'), ((1110, 1161), 'design.hierarchy_layout.layout.__init__', 'design.hierarchy_layout.layout.__init__', (['self', 'name'], {}), '(self, name)\n', (1149, 1161), False, 'import design\n'), ((1178, 1227), 'design.hierarchy_spice.spice.__init__', 'design.hierarchy_spice.spice.__init__', (['self', 'name'], {}), '(self, name)\n', (1215, 1227), False, 'import design\n'), ((1438, 1472), 'design.design.__init__', 'design.design.__init__', (['self', 'name'], {}), '(self, name)\n', (1460, 1472), False, 'import design\n'), ((1875, 1902), 'router.router', 'router.router', (['self.gdsname'], {}), '(self.gdsname)\n', (1888, 1902), False, 'import router\n'), ((959, 985), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (975, 985), False, 'import sys, os\n'), ((1060, 1086), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1076, 1086), False, 'import sys, os\n'), 
((1823, 1849), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1839, 1849), False, 'import sys, os\n')] |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The exponentially scaled modified Bessel function of the first kind."""
import numpy as np
import scipy.special
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops.custom_gradient import custom_gradient
@custom_gradient
def ive(v, z):
"""Exponentially scaled modified Bessel function of the first kind."""
output = array_ops.reshape(script_ops.py_func(
lambda v, z: np.select(condlist=[v == 0, v == 1],
choicelist=[scipy.special.i0e(z, dtype=z.dtype),
scipy.special.i1e(z, dtype=z.dtype)],
default=scipy.special.ive(v, z, dtype=z.dtype)), [v, z], z.dtype),
ops.convert_to_tensor(array_ops.shape(z), dtype=dtypes.int32))
def grad(dy):
return None, dy * (ive(v - 1, z) - ive(v, z) * (v + z) / z)
return output, grad
| [
"tensorflow.python.ops.array_ops.shape"
] | [((1559, 1577), 'tensorflow.python.ops.array_ops.shape', 'array_ops.shape', (['z'], {}), '(z)\n', (1574, 1577), False, 'from tensorflow.python.ops import array_ops\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
alpha = np.random.rand(7)
alpha /= np.linalg.norm(alpha, 1)
n = 40
def index_to_position(index):
p = 0
a, b, c, d, e, f = index
index = [a, d, b, e, c, f]
for i in index:
p = p * n + i
return p
if __name__ == "__main__":
with open("fdm.tsv", "w") as f:
for i in range(n):
for j in range(n):
for k in range(n):
alpha = np.random.rand(7)
alpha /= np.linalg.norm(alpha, 1)
p = index_to_position([i, j, k, i, j, k])
print("{}\t{}".format(p, alpha[0]), file=f)
if i - 1 >= 0:
p = index_to_position([i, j, k, i - 1, j, k])
print("{}\t{}".format(p, alpha[1]), file=f)
if i + 1 < n:
p = index_to_position([i, j, k, i + 1, j, k])
print("{}\t{}".format(p, alpha[2]), file=f)
if j - 1 >= 0:
p = index_to_position([i, j, k, i, j - 1, k])
print("{}\t{}".format(p, alpha[3]), file=f)
if j + 1 < n:
p = index_to_position([i, j, k, i, j + 1, k])
print("{}\t{}".format(p, alpha[4]), file=f)
if k - 1 >= 0:
p = index_to_position([i, j, k, i, j, k - 1])
print("{}\t{}".format(p, alpha[5]), file=f)
if k + 1 < n:
p = index_to_position([i, j, k, i, j, k + 1])
print("{}\t{}".format(p, alpha[6]), file=f)
| [
"numpy.random.rand",
"numpy.linalg.norm"
] | [((76, 93), 'numpy.random.rand', 'np.random.rand', (['(7)'], {}), '(7)\n', (90, 93), True, 'import numpy as np\n'), ((103, 127), 'numpy.linalg.norm', 'np.linalg.norm', (['alpha', '(1)'], {}), '(alpha, 1)\n', (117, 127), True, 'import numpy as np\n'), ((476, 493), 'numpy.random.rand', 'np.random.rand', (['(7)'], {}), '(7)\n', (490, 493), True, 'import numpy as np\n'), ((523, 547), 'numpy.linalg.norm', 'np.linalg.norm', (['alpha', '(1)'], {}), '(alpha, 1)\n', (537, 547), True, 'import numpy as np\n')] |
# Generated by Django 3.1 on 2021-02-24 22:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("measures", "0004_rename_condition_component_measurement"),
]
operations = [
migrations.AlterModelOptions(
name="measurecondition",
options={"ordering": ["component_sequence_number"]},
),
]
| [
"django.db.migrations.AlterModelOptions"
] | [((246, 356), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""measurecondition"""', 'options': "{'ordering': ['component_sequence_number']}"}), "(name='measurecondition', options={'ordering':\n ['component_sequence_number']})\n", (274, 356), False, 'from django.db import migrations\n')] |
# -----------------------------------------------------------------------------
# WSDM Cup 2017 Classification and Evaluation
#
# Copyright (c) 2017 <NAME>, <NAME>, <NAME>, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
import logging
import itertools
import pandas as pd
import config
from src import evaluationutils
from src import storage
from src.classifiers import multipleinstance
from src.pipeline import optimization
_logger = logging.getLogger()
_metrics = {} # dictionary of pd.DataFrames, the key is the time label
DFOREST = 'DFOREST'
OFOREST = 'OFOREST'
BAGGING = 'BAGGING'
def default_random_forest(
training, validation, group=None, print_results=True):
_logger.info("Default random forest...")
clf = optimization.get_default_random_forest()
clf.set_params(n_jobs=config.CLASSIFICATION_N_JOBS)
index = _get_index(validation, 'Default random forest', group)
_, _, metrics = evaluationutils.fit_predict_evaluate(
index, clf, training, validation)
if print_results:
_logger.info("Writing model to file ...")
clf_name = evaluationutils.index_to_str(index)
storage.dump_clf(clf, clf_name)
_print_metrics(metrics)
_logger.info("Default random forest... done.")
return metrics
def optimized_random_forest(
training, validation, group=None, print_results=True, sample_weight=None):
_logger.info("Optimized random forest...")
clf = optimization.get_optimal_random_forest(validation.get_system_name())
clf.set_params(n_jobs=config.CLASSIFICATION_N_JOBS)
index = _get_index(validation, 'Optimized random forest', group)
_, _, metrics = evaluationutils.fit_predict_evaluate(
index, clf, training, validation, sample_weight=sample_weight)
if print_results:
_logger.info("Writing model to file ...")
clf.set_params(n_jobs=1)
clf_name = evaluationutils.index_to_str(index)
storage.dump_clf(clf, clf_name)
_print_metrics(metrics)
_logger.info("Optimized random forest... done.")
return metrics
def bagging_and_multiple_instance(
training, validation, group=None, print_results=True):
_logger.info("Bagging and multiple-instance...")
result = pd.DataFrame()
# Bagging
clf = optimization.get_optimal_bagging_classifier(validation.get_system_name())
clf.set_params(n_jobs=config.CLASSIFICATION_N_JOBS)
index = _get_index(validation, 'Bagging', group)
_, prob, metrics = evaluationutils.fit_predict_evaluate(
index, clf, training, validation)
result = result.append(metrics)
if print_results:
clf_name = evaluationutils.index_to_str(index)
storage.dump_clf(clf, clf_name)
_print_metrics(metrics)
# Single-instance learning (SIL)
clf = multipleinstance.SingleInstanceClassifier(
base_estimator=None, agg_func='cummean', window=config.BACKPRESSURE_WINDOW)
clf.set_proba(prob) # shortcut to save some computational time
index = _get_index(validation, 'SIL MI', group)
sil_pred, sil_prob = evaluationutils.predict(clf, validation, index)
metrics = evaluationutils.evaluate(index, sil_pred, sil_prob, validation)
result = result.append(metrics)
if print_results:
_print_metrics(metrics)
# Simple multiple-instance (SMI)
clf = optimization.get_optimal_bagging_classifier(validation.get_system_name())
clf.set_params(n_jobs=config.CLASSIFICATION_N_JOBS_SIMPLE_MI)
clf = multipleinstance.SimpleMultipleInstanceClassifier(
base_estimator=clf, trans_func='cummin_cummax', window=config.BACKPRESSURE_WINDOW)
index = _get_index(validation, 'Simple MI', group)
_, smi_prob, metrics = evaluationutils.fit_predict_evaluate(
index, clf, training, validation)
result = result.append(metrics)
if print_results:
_print_metrics(metrics)
# Combination of SIL and SMI
clf = multipleinstance.CombinedMultipleInstanceClassifier(
base_estimator=None)
# shortcut to save some computational time
clf.set_proba(sil_prob, smi_prob)
index = _get_index(validation, 'Combined MI', group)
combined_pred, combined_prob = evaluationutils.predict(clf, validation, index)
metrics = evaluationutils.evaluate(
index, combined_pred, combined_prob, validation)
result = result.append(metrics)
if print_results:
_print_metrics(metrics)
_logger.info("Bagging and multiple-instance... done.")
return result
def _get_index(dataset, clf_name, group_name=None):
values = (dataset.get_time_label(), dataset.get_system_name(), clf_name)
names = ['Dataset', 'System', 'Classifier']
if group_name is not None:
values = values + (group_name,)
names.append('Group')
result = pd.MultiIndex.from_tuples([values], names=names)
return result
def compute_metrics_for_classifiers_and_groups(training, validation):
local_metrics = pd.DataFrame()
for clf_name in [DFOREST, OFOREST, BAGGING]:
for group in ['ALL'] + validation.get_groups():
training2 = training.select_group(group)
validation2 = validation.select_group(group)
result = _compute_metrics_for_classifier(
training2, validation2, clf_name, group)
local_metrics = local_metrics.append(result)
local_metrics.to_csv(config.OUTPUT_PREFIX + '_' +
validation.get_time_label() +
'_classifiers_groups.csv')
screen_output = evaluationutils.remove_plots(local_metrics)
_logger.info("Metrics:\n" + str(screen_output))
def _compute_metrics_for_classifier(training, validation, clf_name, group):
if clf_name == DFOREST:
result = default_random_forest(
training, validation, group=group, print_results=False)
elif clf_name == OFOREST:
result = optimized_random_forest(
training, validation, group=group, print_results=False)
elif clf_name == BAGGING:
result = bagging_and_multiple_instance(
training, validation, group=group, print_results=False)
return result
def _print_metrics(metrics):
"""Print one metrics row and save it."""
time_label = metrics.index.get_level_values('Dataset')[0]
global _metrics
if time_label not in _metrics:
_metrics[time_label] = pd.DataFrame()
_metrics[time_label] = _metrics[time_label].append(metrics)
local_metrics = _metrics[time_label].copy()
local_metrics = _reverse_order_within_system_groups(local_metrics)
local_metrics = local_metrics[evaluationutils.COLUMNS]
evaluationutils.print_metrics(
local_metrics, time_label + '_results', append_global=False)
def _reverse_order_within_system_groups(metrics):
# returns a list of tuples based on the data frame's multiIndex
old_order = metrics.index.values
new_order = []
# group by first and second index column (Dataset and System)
for _, group in itertools.groupby(old_order, lambda x: x[0:2]):
new_order = new_order + list(group)[::-1]
result = metrics.reindex(new_order)
return result
| [
"logging.getLogger",
"src.storage.dump_clf",
"itertools.groupby",
"src.evaluationutils.evaluate",
"src.evaluationutils.predict",
"src.classifiers.multipleinstance.SingleInstanceClassifier",
"src.classifiers.multipleinstance.SimpleMultipleInstanceClassifier",
"src.pipeline.optimization.get_default_rand... | [((1536, 1555), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1553, 1555), False, 'import logging\n'), ((1836, 1876), 'src.pipeline.optimization.get_default_random_forest', 'optimization.get_default_random_forest', ([], {}), '()\n', (1874, 1876), False, 'from src.pipeline import optimization\n'), ((2022, 2092), 'src.evaluationutils.fit_predict_evaluate', 'evaluationutils.fit_predict_evaluate', (['index', 'clf', 'training', 'validation'], {}), '(index, clf, training, validation)\n', (2058, 2092), False, 'from src import evaluationutils\n'), ((2760, 2863), 'src.evaluationutils.fit_predict_evaluate', 'evaluationutils.fit_predict_evaluate', (['index', 'clf', 'training', 'validation'], {'sample_weight': 'sample_weight'}), '(index, clf, training, validation,\n sample_weight=sample_weight)\n', (2796, 2863), False, 'from src import evaluationutils\n'), ((3342, 3356), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3354, 3356), True, 'import pandas as pd\n'), ((3588, 3658), 'src.evaluationutils.fit_predict_evaluate', 'evaluationutils.fit_predict_evaluate', (['index', 'clf', 'training', 'validation'], {}), '(index, clf, training, validation)\n', (3624, 3658), False, 'from src import evaluationutils\n'), ((3901, 4023), 'src.classifiers.multipleinstance.SingleInstanceClassifier', 'multipleinstance.SingleInstanceClassifier', ([], {'base_estimator': 'None', 'agg_func': '"""cummean"""', 'window': 'config.BACKPRESSURE_WINDOW'}), "(base_estimator=None, agg_func=\n 'cummean', window=config.BACKPRESSURE_WINDOW)\n", (3942, 4023), False, 'from src.classifiers import multipleinstance\n'), ((4173, 4220), 'src.evaluationutils.predict', 'evaluationutils.predict', (['clf', 'validation', 'index'], {}), '(clf, validation, index)\n', (4196, 4220), False, 'from src import evaluationutils\n'), ((4235, 4298), 'src.evaluationutils.evaluate', 'evaluationutils.evaluate', (['index', 'sil_pred', 'sil_prob', 'validation'], {}), 
'(index, sil_pred, sil_prob, validation)\n', (4259, 4298), False, 'from src import evaluationutils\n'), ((4587, 4723), 'src.classifiers.multipleinstance.SimpleMultipleInstanceClassifier', 'multipleinstance.SimpleMultipleInstanceClassifier', ([], {'base_estimator': 'clf', 'trans_func': '"""cummin_cummax"""', 'window': 'config.BACKPRESSURE_WINDOW'}), "(base_estimator=clf,\n trans_func='cummin_cummax', window=config.BACKPRESSURE_WINDOW)\n", (4636, 4723), False, 'from src.classifiers import multipleinstance\n'), ((4811, 4881), 'src.evaluationutils.fit_predict_evaluate', 'evaluationutils.fit_predict_evaluate', (['index', 'clf', 'training', 'validation'], {}), '(index, clf, training, validation)\n', (4847, 4881), False, 'from src import evaluationutils\n'), ((5025, 5097), 'src.classifiers.multipleinstance.CombinedMultipleInstanceClassifier', 'multipleinstance.CombinedMultipleInstanceClassifier', ([], {'base_estimator': 'None'}), '(base_estimator=None)\n', (5076, 5097), False, 'from src.classifiers import multipleinstance\n'), ((5284, 5331), 'src.evaluationutils.predict', 'evaluationutils.predict', (['clf', 'validation', 'index'], {}), '(clf, validation, index)\n', (5307, 5331), False, 'from src import evaluationutils\n'), ((5346, 5419), 'src.evaluationutils.evaluate', 'evaluationutils.evaluate', (['index', 'combined_pred', 'combined_prob', 'validation'], {}), '(index, combined_pred, combined_prob, validation)\n', (5370, 5419), False, 'from src import evaluationutils\n'), ((5894, 5942), 'pandas.MultiIndex.from_tuples', 'pd.MultiIndex.from_tuples', (['[values]'], {'names': 'names'}), '([values], names=names)\n', (5919, 5942), True, 'import pandas as pd\n'), ((6054, 6068), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6066, 6068), True, 'import pandas as pd\n'), ((7782, 7876), 'src.evaluationutils.print_metrics', 'evaluationutils.print_metrics', (['local_metrics', "(time_label + '_results')"], {'append_global': '(False)'}), "(local_metrics, time_label + 
'_results',\n append_global=False)\n", (7811, 7876), False, 'from src import evaluationutils\n'), ((8145, 8191), 'itertools.groupby', 'itertools.groupby', (['old_order', '(lambda x: x[0:2])'], {}), '(old_order, lambda x: x[0:2])\n', (8162, 8191), False, 'import itertools\n'), ((2194, 2229), 'src.evaluationutils.index_to_str', 'evaluationutils.index_to_str', (['index'], {}), '(index)\n', (2222, 2229), False, 'from src import evaluationutils\n'), ((2238, 2269), 'src.storage.dump_clf', 'storage.dump_clf', (['clf', 'clf_name'], {}), '(clf, clf_name)\n', (2254, 2269), False, 'from src import storage\n'), ((2994, 3029), 'src.evaluationutils.index_to_str', 'evaluationutils.index_to_str', (['index'], {}), '(index)\n', (3022, 3029), False, 'from src import evaluationutils\n'), ((3038, 3069), 'src.storage.dump_clf', 'storage.dump_clf', (['clf', 'clf_name'], {}), '(clf, clf_name)\n', (3054, 3069), False, 'from src import storage\n'), ((3745, 3780), 'src.evaluationutils.index_to_str', 'evaluationutils.index_to_str', (['index'], {}), '(index)\n', (3773, 3780), False, 'from src import evaluationutils\n'), ((3789, 3820), 'src.storage.dump_clf', 'storage.dump_clf', (['clf', 'clf_name'], {}), '(clf, clf_name)\n', (3805, 3820), False, 'from src import storage\n'), ((7516, 7530), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7528, 7530), True, 'import pandas as pd\n'), ((6668, 6711), 'src.evaluationutils.remove_plots', 'evaluationutils.remove_plots', (['local_metrics'], {}), '(local_metrics)\n', (6696, 6711), False, 'from src import evaluationutils\n')] |
import os
import re
import pyblish.api
from avalon import aftereffects
class CollectExtensionVersion(pyblish.api.ContextPlugin):
""" Pulls and compares version of installed extension.
It is recommended to use same extension as in provided Openpype code.
Please use Anastasiy’s Extension Manager or ZXPInstaller to update
extension in case of an error.
You can locate extension.zxp in your installed Openpype code in
`repos/avalon-core/avalon/aftereffects`
"""
# This technically should be a validator, but other collectors might be
# impacted with usage of obsolete extension, so collector that runs first
# was chosen
order = pyblish.api.CollectorOrder - 0.5
label = "Collect extension version"
hosts = ["aftereffects"]
optional = True
active = True
def process(self, context):
installed_version = aftereffects.stub().get_extension_version()
if not installed_version:
raise ValueError("Unknown version, probably old extension")
manifest_url = os.path.join(os.path.dirname(aftereffects.__file__),
"extension", "CSXS", "manifest.xml")
if not os.path.exists(manifest_url):
self.log.debug("Unable to locate extension manifest, not checking")
return
expected_version = None
with open(manifest_url) as fp:
content = fp.read()
found = re.findall(r'(ExtensionBundleVersion=")([0-9\.]+)(")',
content)
if found:
expected_version = found[0][1]
if expected_version != installed_version:
msg = (
"Expected version '{}' found '{}'\n Please update"
" your installed extension, it might not work properly."
).format(expected_version, installed_version)
raise ValueError(msg)
| [
"os.path.dirname",
"re.findall",
"os.path.exists",
"avalon.aftereffects.stub"
] | [((1087, 1125), 'os.path.dirname', 'os.path.dirname', (['aftereffects.__file__'], {}), '(aftereffects.__file__)\n', (1102, 1125), False, 'import os\n'), ((1216, 1244), 'os.path.exists', 'os.path.exists', (['manifest_url'], {}), '(manifest_url)\n', (1230, 1244), False, 'import os\n'), ((1469, 1532), 're.findall', 're.findall', (['"""(ExtensionBundleVersion=")([0-9\\\\.]+)(")"""', 'content'], {}), '(\'(ExtensionBundleVersion=")([0-9\\\\.]+)(")\', content)\n', (1479, 1532), False, 'import re\n'), ((899, 918), 'avalon.aftereffects.stub', 'aftereffects.stub', ([], {}), '()\n', (916, 918), False, 'from avalon import aftereffects\n')] |
"""
Authors: <NAME>, <NAME>
Contact: <EMAIL>
Unit tests for the gbdxtools.Idaho class
"""
import os
from gbdxtools import Interface
from gbdxtools.idaho import Idaho
from auth_mock import get_mock_gbdx_session
import vcr
import tempfile
import unittest
def force(r1, r2):
return True
my_vcr = vcr.VCR()
my_vcr.register_matcher('force', force)
my_vcr.match_on = ['force']
class IdahoTest(unittest.TestCase):
_temp_path = None
@classmethod
def setUpClass(cls):
mock_gbdx_session = get_mock_gbdx_session(token='<PASSWORD>token')
cls.gbdx = Interface(gbdx_connection=mock_gbdx_session)
cls._temp_path = tempfile.mkdtemp()
print("Created: {}".format(cls._temp_path))
def test_init(self):
c = Idaho()
self.assertTrue(isinstance(c, Idaho))
@my_vcr.use_cassette('tests/unit/cassettes/test_idaho_get_images_by_catid_and_aoi.yaml',
filter_headers=['authorization'])
def test_idaho_get_images_by_catid_and_aoi(self):
i = Idaho()
catid = '10400100203F1300'
aoi_wkt = "POLYGON ((-105.0207996368408345 39.7338828628182839, -105.0207996368408345 39.7365972921260067, -105.0158751010894775 39.7365972921260067, -105.0158751010894775 39.7338828628182839, -105.0207996368408345 39.7338828628182839))"
results = i.get_images_by_catid_and_aoi(catid=catid, aoi_wkt=aoi_wkt)
assert len(results['results']) == 2
@my_vcr.use_cassette('tests/unit/cassettes/test_idaho_get_images_by_catid.yaml', filter_headers=['authorization'])
def test_idaho_get_images_by_catid(self):
i = Idaho()
catid = '10400100203F1300'
results = i.get_images_by_catid(catid=catid)
assert len(results['results']) == 12
@my_vcr.use_cassette('tests/unit/cassettes/test_idaho_describe_images.yaml', filter_headers=['authorization'])
def test_idaho_describe_images(self):
i = Idaho()
catid = '10400100203F1300'
description = i.describe_images(i.get_images_by_catid(catid=catid))
assert description['10400100203F1300']['parts'][1]['PAN']['id'] == 'b1f6448b-aecd-4d9b-99ec-9cad8d079043'
@my_vcr.use_cassette('tests/unit/cassettes/test_idaho_get_chip.yaml', filter_headers=['authorization'])
def test_idaho_get_chip(self):
i = Idaho()
catid = '10400100203F1300'
filename = os.path.join(self._temp_path, 'chip.tif')
result = i.get_chip([-105.00032901763916, 39.91207173503864, -104.99874114990234, 39.91310862390189], catid, filename=filename)
assert result
@my_vcr.use_cassette('tests/unit/cassettes/test_idaho_get_chip2.yaml', filter_headers=['authorization'])
def test_idaho_get_chip2(self):
i = Idaho()
catid = '10400100384B1B00'
filename = os.path.join(self._temp_path, 'chip2.tif')
result = i.get_chip([120.45363429504926, 30.247785383721883, 120.45511487442548, 30.249008773017273], catid, filename=filename)
assert result | [
"auth_mock.get_mock_gbdx_session",
"gbdxtools.Interface",
"os.path.join",
"gbdxtools.idaho.Idaho",
"vcr.VCR",
"tempfile.mkdtemp"
] | [((303, 312), 'vcr.VCR', 'vcr.VCR', ([], {}), '()\n', (310, 312), False, 'import vcr\n'), ((511, 557), 'auth_mock.get_mock_gbdx_session', 'get_mock_gbdx_session', ([], {'token': '"""<PASSWORD>token"""'}), "(token='<PASSWORD>token')\n", (532, 557), False, 'from auth_mock import get_mock_gbdx_session\n'), ((577, 621), 'gbdxtools.Interface', 'Interface', ([], {'gbdx_connection': 'mock_gbdx_session'}), '(gbdx_connection=mock_gbdx_session)\n', (586, 621), False, 'from gbdxtools import Interface\n'), ((647, 665), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (663, 665), False, 'import tempfile\n'), ((756, 763), 'gbdxtools.idaho.Idaho', 'Idaho', ([], {}), '()\n', (761, 763), False, 'from gbdxtools.idaho import Idaho\n'), ((1026, 1033), 'gbdxtools.idaho.Idaho', 'Idaho', ([], {}), '()\n', (1031, 1033), False, 'from gbdxtools.idaho import Idaho\n'), ((1615, 1622), 'gbdxtools.idaho.Idaho', 'Idaho', ([], {}), '()\n', (1620, 1622), False, 'from gbdxtools.idaho import Idaho\n'), ((1926, 1933), 'gbdxtools.idaho.Idaho', 'Idaho', ([], {}), '()\n', (1931, 1933), False, 'from gbdxtools.idaho import Idaho\n'), ((2315, 2322), 'gbdxtools.idaho.Idaho', 'Idaho', ([], {}), '()\n', (2320, 2322), False, 'from gbdxtools.idaho import Idaho\n'), ((2377, 2418), 'os.path.join', 'os.path.join', (['self._temp_path', '"""chip.tif"""'], {}), "(self._temp_path, 'chip.tif')\n", (2389, 2418), False, 'import os\n'), ((2735, 2742), 'gbdxtools.idaho.Idaho', 'Idaho', ([], {}), '()\n', (2740, 2742), False, 'from gbdxtools.idaho import Idaho\n'), ((2797, 2839), 'os.path.join', 'os.path.join', (['self._temp_path', '"""chip2.tif"""'], {}), "(self._temp_path, 'chip2.tif')\n", (2809, 2839), False, 'import os\n')] |
import unittest
from main import Polski
class PolskiTest(unittest.TestCase):
def setUp(self):
self.polski = Polski()
| [
"main.Polski"
] | [((123, 131), 'main.Polski', 'Polski', ([], {}), '()\n', (129, 131), False, 'from main import Polski\n')] |
# Generated by Django 2.0.9 on 2018-12-13 09:36
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('branch', '0016_hoststudent_communication_log'),
]
operations = [
migrations.CreateModel(
name='BgeResource',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True)),
('cateogry', models.CharField(blank=True, max_length=80, null=True)),
('sub_category', models.CharField(blank=True, max_length=80, null=True)),
('title', models.CharField(blank=True, max_length=140, null=True)),
('file', models.FileField(blank=True, null=True, upload_to='resources/')),
('writer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
]
| [
"django.db.models.ForeignKey",
"django.db.models.FileField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((227, 284), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (258, 284), False, 'from django.db import migrations, models\n'), ((478, 571), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (494, 571), False, 'from django.db import migrations, models\n'), ((601, 651), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'null': '(True)'}), '(auto_now_add=True, null=True)\n', (621, 651), False, 'from django.db import migrations, models\n'), ((685, 731), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'null': '(True)'}), '(auto_now=True, null=True)\n', (705, 731), False, 'from django.db import migrations, models\n'), ((763, 817), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(80)', 'null': '(True)'}), '(blank=True, max_length=80, null=True)\n', (779, 817), False, 'from django.db import migrations, models\n'), ((853, 907), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(80)', 'null': '(True)'}), '(blank=True, max_length=80, null=True)\n', (869, 907), False, 'from django.db import migrations, models\n'), ((936, 991), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(140)', 'null': '(True)'}), '(blank=True, max_length=140, null=True)\n', (952, 991), False, 'from django.db import migrations, models\n'), ((1019, 1082), 'django.db.models.FileField', 'models.FileField', ([], {'blank': '(True)', 'null': '(True)', 'upload_to': '"""resources/"""'}), "(blank=True, null=True, upload_to='resources/')\n", (1035, 1082), False, 'from django.db import migrations, models\n'), ((1112, 1219), 
'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'to': 'settings.AUTH_USER_MODEL'}), '(null=True, on_delete=django.db.models.deletion.SET_NULL,\n to=settings.AUTH_USER_MODEL)\n', (1129, 1219), False, 'from django.db import migrations, models\n')] |
from random import randint
# Remove o ponto e hífen do CPF.
def removerFormatacaoCpf(cpf):
    """Strip the dot and hyphen formatting from a CPF string."""
    return cpf.replace(".", "").replace("-", "")
# Adiciona o ponto e hífen no CPF.
def adicionarFormatacaoCpf(cpf):
    """Format a bare 11-digit CPF as XXX.XXX.XXX-XX."""
    return '{}.{}.{}-{}'.format(cpf[:3], cpf[3:6], cpf[6:9], cpf[9:])
# Versão 2, mais otimizada e menos verbosa. Trabalha com CPFs
# sem os 2 dígitos.
def calcularDigitosCpf(cpf):
    """Compute the two CPF check digits for the nine base digits in `cpf`.

    Returns a list of two single-character strings.
    """
    digitos = []
    for tamanho in (9, 10):
        # Weights run from tamanho+1 down to 2 over the digits seen so far.
        pesos = range(tamanho + 1, 1, -1)
        soma = sum(int(d) * p for d, p in zip(cpf, pesos))
        resto = 11 - (soma % 11)
        # A remainder of 10 or 11 maps to the check digit 0.
        digito = str(0) if resto > 9 else str(resto)
        digitos.append(digito)
        cpf += digito  # first check digit participates in the second pass
    return digitos
def validarCpf(cpf):
    """Return True when the last two digits of `cpf` match its computed check digits.

    `cpf` is expected as an unformatted 11-digit string.
    """
    # Recompute the check digits from everything but the last two digits.
    temp = cpf[:-2]
    digitos = calcularDigitosCpf(temp)
    # Directly return the comparison instead of the redundant if/else branches.
    return temp + ''.join(digitos) == cpf
def gerarCpf():
    """Generate a random, valid, formatted CPF string."""
    base = ''.join(str(randint(0, 9)) for _ in range(9))
    completo = base + ''.join(calcularDigitosCpf(base))
    return adicionarFormatacaoCpf(completo)
| [
"random.randint"
] | [((1311, 1324), 'random.randint', 'randint', (['(0)', '(9)'], {}), '(0, 9)\n', (1318, 1324), False, 'from random import randint\n')] |
from typing import List
from .controls import ACTION_TYPE, ExecutionControl, Logger
from .processor import Processor
import curses
import datetime
FRAME_WIDTH = 100  # total width (chars) of each framed console line, borders included
APP_NAME = 'RhProcessor'
proc_map = None  # reserved; a commented-out line in terminal_logger would fill it
class Slot():
    """Renders one execution-tree node into a dict of display fields for the terminal UI."""
    def __init__(self, node_dict: dict, logger: Logger, execution_control: ExecutionControl, ids: List[List[int]], current_id_string: str) -> None:
        self._node = node_dict
        self._logger = logger
        self._execution_control = execution_control
        self._ids = ids
        self._slot = {}  # accumulated display fields, filled by _digest()
        self._current_id_string = current_id_string
    def __call__(self) -> dict:
        # Calling a Slot produces the rendered field dict (see _digest).
        return self._digest()
    def _digest(self):
        # Header: "NODETYPE: name", plus a compact listing of the node's tracks.
        self._slot['main_header'] = self._node.get('node_type', '').upper() + ': ' + self._node.get('name', '')
        self._slot['sub_header'] = self._fns_to_string(self._node.get('tracks', {}))
        # NOTE(review): the '' default would raise inside fromtimestamp — callers
        # appear to always provide a numeric 'start'; confirm.
        _strt = datetime.datetime.fromtimestamp(self._node.get('start', ''))
        self._slot['start'] = 'Start: ' + _strt.strftime('%d/%m/%Y, %H:%M:%S') + ' (' + str(datetime.datetime.now() - _strt) + ')'
        # Flux nodes report completion: executed entries of the last track vs. the
        # declared child count ('n_chld'), as an integer percentage.
        if self._node.get('node_type') in ['FluxMode', 'ParallelFluxMode']:
            _total = self._node.get('n_chld', None)
            if _total != None:
                n_fns = len(self._node.get('tracks'))
                if n_fns > 0:
                    _exec = len(self._node.get('tracks').get(n_fns - 1))
                    self._slot['info'] = int(_exec/_total * 100)
        # Block/Flux nodes also surface the newest log line of the id whose
        # trimmed key string (one or two trailing levels dropped) matches ours.
        if self._node.get('node_type') in ['BlockMode', 'FluxMode']:
            _cids = self._current_id_string
            _id_use = None
            _up = -1 if self._node.get('node_type') == 'BlockMode' else - 2
            for id in self._ids:
                _id = self._execution_control._get_key_string(id[:_up])
                if _id == _cids:
                    _id_use = id
                    break
            if _id_use:
                logs = self._logger.get_log_obj(_id_use)
                if isinstance(logs, list) and len(logs) > 0:
                    self._slot['info_2'] = logs[-1].txt
        return self._slot
    def _fns_to_string(self, fns: dict):
        # Collect node dicts from the tracks: either direct child-node dicts or
        # the first element (key 0) of a nested track dict.
        _fns = []
        for k, v in fns.items():
            if not isinstance(v, dict):
                continue
            is_node = v.get('node_type')
            if is_node:
                _fns.append(v)
            elif v.get(0) and isinstance(v.get(0), dict) and v.get(0).get('node_type'):
                _fns.append(v[0])
        _ret = []
        for i, f in enumerate(_fns):
            if isinstance(f, dict):
                n = f.get('name', '')
                n = f'{i}. {n}'
                _ret.append(n)
        # The most recent entry is highlighted with brackets.
        if len(_ret) > 0: _ret[-1] = f'[{_ret[-1]}]'
        return ' '.join(_ret)
'''
return {
'main_header': '',
'sub_header': '',
'status': '',
'duration': '',
'flux_progress': '',
'log': '',
'log_1': ''
}
'''
#_node = tracks.getNode(_id[:2]).to_dict()
#log = logger.get_log_obj(_id)
def display_progress(progress: int):
    """Render a fixed-width ASCII progress bar, e.g. '[####...    ] 10%'.

    Returns '' when `progress` is None (no progress information available).
    """
    if progress is None:  # identity check; the original used `== None`
        return ''
    SIZE = 40  # bar width in characters
    fill = round(SIZE * progress / 100)
    blank = SIZE - fill
    return '[' + ('#' * fill) + (' ' * blank) + ']' + ' ' + str(progress) + '%'
console = curses.initscr()
def line_in_frame(txt = '', mask = ' ', start='|', end='|\n', align = 'left', padding = 0) -> str:
    """Build one framed console line of FRAME_WIDTH characters.

    `txt` is padded with `mask` characters according to `align`
    ('left', 'right' or 'center'); `padding` shifts the content right
    while shrinking the fill area by the same amount.
    """
    fill = FRAME_WIDTH - 2 - padding - len(txt)  # chars available for mask
    if align == 'right':
        body = (mask * fill) + txt
    elif align == 'center':
        if fill % 2 == 0:
            half = mask * (fill // 2)
            body = half + txt + half
        else:
            # Odd fill: one extra mask char on the left.
            left = (fill + 1) // 2
            body = (mask * left) + txt + (mask * (left - 1))
    else:
        body = txt + (mask * fill)
    return start + (' ' * padding) + body + end
def _draw_header(name: str):
    """Draw the fixed app header (processor name + app name) at the top of the screen."""
    console.addstr(line_in_frame(mask='-'))
    console.addstr(line_in_frame(f' {name} ', mask='#', align='center'))
    console.addstr(line_in_frame(APP_NAME, align='center'))
    console.addstr(line_in_frame(mask ='-'))
    console.addstr(line_in_frame())
    console.refresh()
# Re-entrancy guard: a redraw request arriving mid-redraw is dropped.
is_printing = False
def print_slots(_slots):
    """Render each Slot (6 rows apiece) below the header; returns False if a redraw is already running."""
    global is_printing
    if is_printing:
        return False
    is_printing = True
    console.clrtobot()
    default_padding = 5
    header_height = 5  # rows consumed by _draw_header
    def _build_core(ce, lj = 0):
        # Draw one slot entry `ce` at vertical slot index `lj`.
        main_header = ce.get('main_header', '')
        sub_header = ce.get('sub_header', '')
        start = ce.get('start', '')
        flux_progress = ce.get('info', '')
        log = ce.get('info_2', '')
        log_1 = ce.get('info_3', '')
        console.addstr(header_height + ( 6 * lj), 0, line_in_frame(txt=main_header))
        console.addstr((header_height + 1 + (6 * lj)), 0, line_in_frame(sub_header, padding=default_padding))
        console.addstr((header_height + 2 + (6 * lj)), 0, line_in_frame(start, padding=default_padding))
        console.addstr((header_height + 3 + (6 * lj)), 0, line_in_frame(display_progress(flux_progress) if flux_progress else '', padding=default_padding))
        console.addstr((header_height + 4 + (6 * lj)), 0, line_in_frame(txt='logs | ' + log, padding=default_padding))
        console.addstr((header_height + 5 + (6 * lj)), 0, line_in_frame(txt='     | ' + log_1, padding=default_padding))
    for i, s in enumerate(_slots):
        _build_core(s(), i)
    console.refresh()
    is_printing = False
def terminal_logger(processor: Processor):
    """Run `processor` while mirroring its execution state to the curses terminal."""
    _draw_header(processor.name)
    def _terminal_logger(execution_control: ExecutionControl, logger: Logger, action_type: ACTION_TYPE):
        # On every change: build one Slot per currently-active node and redraw.
        ids = [v for k, v in execution_control.current_nodes_id.items()]
        slots = [Slot(v.to_dict(), logger, execution_control, ids, k) for k, v in execution_control.current_node.items()]
        print_slots(slots)
    processor.on_change(_terminal_logger)
    #proc_map = processor.to_dict()
    processor()
curses.endwin() | [
"datetime.datetime.now",
"curses.endwin",
"curses.initscr"
] | [((3339, 3355), 'curses.initscr', 'curses.initscr', ([], {}), '()\n', (3353, 3355), False, 'import curses\n'), ((6187, 6202), 'curses.endwin', 'curses.endwin', ([], {}), '()\n', (6200, 6202), False, 'import curses\n'), ((1034, 1057), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1055, 1057), False, 'import datetime\n')] |
import torch
import torch.optim as optim
from torch import autograd
import numpy as np
from tqdm import trange
import trimesh
from skimage import measure
import warnings
import time
from pipelines.utils.point_utils import sample_points_from_ray, np_get_occupied_idx, occupancy_sparse_to_dense
from pipelines.utils.postprocess_utils import remove_backface
class Generator3D(object):
    ''' Generator class for Local implicit grid Networks.
    It provides functions to generate the final mesh as well refining options.
    Args:
        model (nn.Module): trained Local implicit grid model
        optimizer (object): optimization utility class for optimizing latent grid
        part_size (float): size of a part
        num_optim_samples (int): number of points to sample at each optimization step
        res_per_part (int): how many parts we split a grid into
        overlap (bool): whether we use overlapping grids
        device (device): pytorch device
        points_batch (int): number of points we evaluate sdf values each time
        conservative (bool): whether we evaluate a grid when all of its 8 neighbors contain points
        postprocess (bool): whether to use post process to remove back faces
    '''

    def __init__(self,
                 model,
                 optimizer,
                 part_size=0.25,
                 num_optim_samples=2048,
                 res_per_part=0,
                 overlap=True,
                 device=None,
                 points_batch=20000,
                 conservative=False,
                 postprocess=True):
        self.model = model.to(device)
        self.optimizer = optimizer
        self.part_size = part_size
        self.num_optim_samples = num_optim_samples
        if res_per_part == 0:
            # Default output resolution scales with the part size.
            self.res_per_part = int(64 * self.part_size)
        else:
            self.res_per_part = res_per_part
        self.overlap = overlap
        self.device = device
        self.points_batch = points_batch
        self.conservative = conservative
        self.postprocess = postprocess

    def generate_mesh(self, data, return_stats=True):
        ''' Generates the output mesh from inputs loaded from dataset.
        Args:
            data (tensor): data tensor
            return_stats (bool): whether stats should be returned
        '''
        # NOTE: stats are not currently collected; `return_stats` is accepted
        # for interface compatibility but has no effect.
        stats_dict = {}
        v = data.get('inputs', torch.empty(1, 0)).squeeze(0).cpu().numpy()
        n = data.get('inputs.normals', torch.empty(1, 0)).squeeze(0).cpu().numpy()
        mesh = self.generate_single_obj_mesh(v, n)
        return mesh

    def generate_single_obj_mesh(self, v, n):
        ''' Generates the output mesh of user specified single object.
        Args:
            v (numpy array): [#v, 3], input point cloud.
            n (numpy array): [#v, 3], normals of the input point cloud.
        Returns:
            mesh (trimesh.Trimesh obj): output mesh object.
        '''
        device = self.device
        surface_points = np.concatenate([v, n], axis=1)
        xmin = np.min(v, axis=0)
        xmax = np.max(v, axis=0)
        # check if part size is too large
        min_bb = np.min(xmax - xmin)
        if self.part_size > 0.25 * min_bb:
            warnings.warn(
                'WARNING: part_size seems too large. Recommend using a part_size < '
                '{:.2f} for this shape.'.format(0.25 * min_bb), UserWarning)
        # add some extra slack to xmin and xmax
        xmin -= self.part_size
        xmax += self.part_size
        #########################################################################
        # generate sdf samples from pc
        point_samples, sdf_values = sample_points_from_ray(v, n, sample_factor=10, std=0.01)
        # shuffle
        shuffle_index = np.random.permutation(point_samples.shape[0])
        point_samples = point_samples[shuffle_index]
        sdf_values = sdf_values[shuffle_index]
        #########################################################################
        ################### only evaluated at sparse grid location ##############
        #########################################################################
        # get valid girds (we only evaluate on sparse locations)
        # _.shape==(total_ncrops, ntarget, v.shape[1]) points within voxel
        # occ_idx.shape==(total_ncrops, 3) index of each voxel
        # grid_shape == (rr[0], rr[1], rr[2])
        _, occ_idx, grid_shape = np_get_occupied_idx(
            point_samples[:100000, :3],
            # point_samples[:, :3],
            xmin=xmin - 0.5 * self.part_size,
            xmax=xmax + 0.5 * self.part_size,
            crop_size=self.part_size,
            ntarget=1,  # we do not require `point_crops` (i.e. `_` in returns), so we set it to 1
            overlap=self.overlap,
            normalize_crops=False,
            return_shape=True)
        print('LIG shape: {}'.format(grid_shape))
        #########################################################################
        # treat as one batch
        point_samples = torch.from_numpy(point_samples).to(device)
        sdf_values = torch.from_numpy(sdf_values).to(device)
        occ_idx_tensor = torch.from_numpy(occ_idx).to(device)
        point_samples = point_samples.unsqueeze(0)  # shape==(1, npoints, 3)
        sdf_values = sdf_values.unsqueeze(0)  # shape==(1, npoints, 1)
        occ_idx_tensor = occ_idx_tensor.unsqueeze(0)  # shape==(1, total_ncrops, 3)
        # set range for computation
        true_shape = ((np.array(grid_shape) - 1) / (2.0 if self.overlap else 1.0)).astype(np.int32)
        self.model.set_xrange(xmin=xmin, xmax=xmin + true_shape * self.part_size)
        # Clip the point position
        xmin_ = self.model.grid_interp_layer.xmin
        xmax_ = self.model.grid_interp_layer.xmax
        x = point_samples[:, :, 0].clamp(xmin_[0], xmax_[0])
        y = point_samples[:, :, 1].clamp(xmin_[1], xmax_[1])
        z = point_samples[:, :, 2].clamp(xmin_[2], xmax_[2])
        point_samples = torch.stack([x, y, z], dim=2)
        # get label (inside==-1, outside==+1)
        point_values = torch.sign(sdf_values)
        #########################################################################
        ###################### Build/Optimize latent grid #######################
        #########################################################################
        # optimize latent grids, shape==(1, *grid_shape, code_len)
        print('Optimizing latent codes in LIG...')
        latent_grid = self.optimizer.optimize_latent_code(point_samples, point_values, occ_idx_tensor, grid_shape)
        #########################################################################
        ##################### Evaluation (Marching Cubes) #######################
        #########################################################################
        # sparse occ index to dense occ grids
        # (total_ncrops, 3) --> (*grid_shape, ) bool
        occ_mask = occupancy_sparse_to_dense(occ_idx, grid_shape)
        # points shape to be evaluated
        output_grid_shape = list(self.res_per_part * true_shape)
        # output_grid is ones, shape==(?, )
        # xyz is points to be evaluated (dense, shape==(?, 3))
        output_grid, xyz = self.get_eval_grid(xmin=xmin,
                                              xmax=xmin + true_shape * self.part_size,
                                              output_grid_shape=output_grid_shape)
        # we only evaluate eval_points
        # out_mask is for xyz, i.e. eval_points = xyz[occ_mask]
        eval_points, out_mask = self.get_eval_inputs(xyz, xmin, occ_mask)
        eval_points = torch.from_numpy(eval_points).to(device)
        # evaluate dense grid for marching cubes (on sparse grids)
        output_grid = self.generate_occ_grid(latent_grid, eval_points, output_grid, out_mask)
        output_grid = output_grid.reshape(*output_grid_shape)
        # NOTE: marching_cubes_lewiner was removed in scikit-image >= 0.19;
        # pin skimage < 0.19 or migrate to measure.marching_cubes.
        v, f, _, _ = measure.marching_cubes_lewiner(output_grid, 0)  # logits==0
        v *= (self.part_size / float(self.res_per_part) * (np.array(output_grid.shape, dtype=np.float32) /
                                                           (np.array(output_grid.shape, dtype=np.float32) - 1)))
        v += xmin
        # Create mesh
        mesh = trimesh.Trimesh(v, f)
        # Post-process the generated mesh to prevent artifacts
        if self.postprocess:
            print('Postprocessing generated mesh...')
            mesh = remove_backface(mesh, surface_points)
        return mesh

    def get_eval_grid(self, xmin, xmax, output_grid_shape):
        """Initialize the eval output grid and its corresponding grid points.
        Args:
            xmin (numpy array): [3], minimum xyz values of the entire space.
            xmax (numpy array): [3], maximum xyz values of the entire space.
            output_grid_shape (list): [3], latent grid shape.
        Returns:
            output_grid (numpy array): [d*h*w] output grid sdf values.
            xyz (numpy array): [d*h*w, 3] grid point xyz coordinates.
        """
        # setup grid
        eps = 1e-6
        l = [np.linspace(xmin[i] + eps, xmax[i] - eps, output_grid_shape[i]) for i in range(3)]
        xyz = np.stack(np.meshgrid(l[0], l[1], l[2], indexing='ij'), axis=-1).astype(np.float32)
        output_grid = np.ones(output_grid_shape, dtype=np.float32)
        xyz = xyz.reshape(-1, 3)
        output_grid = output_grid.reshape(-1)
        return output_grid, xyz

    def get_eval_inputs(self, xyz, xmin, occ_mask):
        """Gathers the points within the grids that any/all of its 8 neighbors
        contains points.
        If self.conservative is True, gathers the points within the grids that any of its 8 neighbors
        contains points.
        If self.conservative is False, gathers the points within the grids that all of its 8 neighbors
        contains points.
        Returns the points need to be evaluate and the mask of the points and the output grid.
        Args:
            xyz (numpy array): [h*w*d, 3]
            xmin (numpy array): [3] minimum value of the entire space.
            occ_mask (numpy array): latent grid occupancy mask.
        Returns:
            eval_points (numpy array): [neval, 3], points to be evaluated.
            out_mask (numpy array): [h*w*d], 0 1 value eval mask of the final sdf grid.
        """
        # `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the
        # builtin `bool` is the documented replacement and is equivalent here.
        mask = occ_mask.astype(bool)
        if self.overlap:
            # Stack the occupancy of the 8 corner-neighboring cells of each grid.
            mask = np.stack([
                mask[:-1, :-1, :-1], mask[:-1, :-1, 1:], mask[:-1, 1:, :-1], mask[:-1, 1:, 1:], mask[1:, :-1, :-1],
                mask[1:, :-1, 1:], mask[1:, 1:, :-1], mask[1:, 1:, 1:]
            ],
                            axis=-1)
            if self.conservative:
                mask = np.any(mask, axis=-1)
            else:
                mask = np.all(mask, axis=-1)
        g = np.stack(np.meshgrid(np.arange(mask.shape[0]),
                                 np.arange(mask.shape[1]),
                                 np.arange(mask.shape[2]),
                                 indexing='ij'),
                     axis=-1).reshape(-1, 3)
        g = g[:, 0] * (mask.shape[1] * mask.shape[2]) + g[:, 1] * mask.shape[2] + g[:, 2]
        g_valid = g[mask.ravel()]  # valid grid index
        if self.overlap:
            ijk = np.floor((xyz - xmin) / self.part_size * 2).astype(np.int32)
        else:
            ijk = np.floor((xyz - xmin + 0.5 * self.part_size) / self.part_size).astype(np.int32)
        ijk_idx = (ijk[:, 0] * (mask.shape[1] * mask.shape[2]) + ijk[:, 1] * mask.shape[2] + ijk[:, 2])
        out_mask = np.isin(ijk_idx, g_valid)
        eval_points = xyz[out_mask]
        return eval_points, out_mask

    def generate_occ_grid(self, latent_grid, eval_points, output_grid, out_mask):
        """Gets the final output occ grid.
        Args:
            latent_grid (tensor): [1, *grid_shape, latent_size], optimized latent grid.
            eval_points (tensor): [neval, 3], points to be evaluated.
            output_grid (numpy array): [d*h*w], final output occ grid.
            out_mask (numpy array): [d*h*w], mask indicating the grids evaluated.
        Returns:
            output_grid (numpy array): [d*h*w], final output occ grid flattened.
        """
        # Temporarily force interpolation mode on the model, restore afterwards.
        interp_old = self.model.interp
        self.model.interp = True
        split = int(np.ceil(eval_points.shape[0] / self.points_batch))
        occ_val_list = []
        self.model.eval()
        with torch.no_grad():
            # Decode in batches of `points_batch` points to bound memory use.
            for s in range(split):
                sid = s * self.points_batch
                eid = min((s + 1) * self.points_batch, eval_points.shape[0])
                eval_points_slice = eval_points[sid:eid, :]
                occ_vals = self.model.decode(latent_grid, eval_points_slice.unsqueeze(0))
                occ_vals = occ_vals.squeeze(0).squeeze(1).cpu().numpy()
                occ_val_list.append(occ_vals)
        occ_vals = np.concatenate(occ_val_list, axis=0)
        output_grid[out_mask] = occ_vals
        self.model.interp = interp_old
        return output_grid
| [
"skimage.measure.marching_cubes_lewiner",
"numpy.isin",
"torch.from_numpy",
"numpy.array",
"numpy.arange",
"pipelines.utils.point_utils.sample_points_from_ray",
"pipelines.utils.postprocess_utils.remove_backface",
"numpy.max",
"numpy.stack",
"numpy.linspace",
"numpy.concatenate",
"numpy.min",
... | [((2985, 3015), 'numpy.concatenate', 'np.concatenate', (['[v, n]'], {'axis': '(1)'}), '([v, n], axis=1)\n', (2999, 3015), True, 'import numpy as np\n'), ((3032, 3049), 'numpy.min', 'np.min', (['v'], {'axis': '(0)'}), '(v, axis=0)\n', (3038, 3049), True, 'import numpy as np\n'), ((3065, 3082), 'numpy.max', 'np.max', (['v'], {'axis': '(0)'}), '(v, axis=0)\n', (3071, 3082), True, 'import numpy as np\n'), ((3143, 3162), 'numpy.min', 'np.min', (['(xmax - xmin)'], {}), '(xmax - xmin)\n', (3149, 3162), True, 'import numpy as np\n'), ((3664, 3720), 'pipelines.utils.point_utils.sample_points_from_ray', 'sample_points_from_ray', (['v', 'n'], {'sample_factor': '(10)', 'std': '(0.01)'}), '(v, n, sample_factor=10, std=0.01)\n', (3686, 3720), False, 'from pipelines.utils.point_utils import sample_points_from_ray, np_get_occupied_idx, occupancy_sparse_to_dense\n'), ((3764, 3809), 'numpy.random.permutation', 'np.random.permutation', (['point_samples.shape[0]'], {}), '(point_samples.shape[0])\n', (3785, 3809), True, 'import numpy as np\n'), ((4459, 4684), 'pipelines.utils.point_utils.np_get_occupied_idx', 'np_get_occupied_idx', (['point_samples[:100000, :3]'], {'xmin': '(xmin - 0.5 * self.part_size)', 'xmax': '(xmax + 0.5 * self.part_size)', 'crop_size': 'self.part_size', 'ntarget': '(1)', 'overlap': 'self.overlap', 'normalize_crops': '(False)', 'return_shape': '(True)'}), '(point_samples[:100000, :3], xmin=xmin - 0.5 * self.\n part_size, xmax=xmax + 0.5 * self.part_size, crop_size=self.part_size,\n ntarget=1, overlap=self.overlap, normalize_crops=False, return_shape=True)\n', (4478, 4684), False, 'from pipelines.utils.point_utils import sample_points_from_ray, np_get_occupied_idx, occupancy_sparse_to_dense\n'), ((6031, 6060), 'torch.stack', 'torch.stack', (['[x, y, z]'], {'dim': '(2)'}), '([x, y, z], dim=2)\n', (6042, 6060), False, 'import torch\n'), ((6131, 6153), 'torch.sign', 'torch.sign', (['sdf_values'], {}), '(sdf_values)\n', (6141, 6153), False, 'import torch\n'), 
((7000, 7046), 'pipelines.utils.point_utils.occupancy_sparse_to_dense', 'occupancy_sparse_to_dense', (['occ_idx', 'grid_shape'], {}), '(occ_idx, grid_shape)\n', (7025, 7046), False, 'from pipelines.utils.point_utils import sample_points_from_ray, np_get_occupied_idx, occupancy_sparse_to_dense\n'), ((7973, 8019), 'skimage.measure.marching_cubes_lewiner', 'measure.marching_cubes_lewiner', (['output_grid', '(0)'], {}), '(output_grid, 0)\n', (8003, 8019), False, 'from skimage import measure\n'), ((8309, 8330), 'trimesh.Trimesh', 'trimesh.Trimesh', (['v', 'f'], {}), '(v, f)\n', (8324, 8330), False, 'import trimesh\n'), ((9354, 9398), 'numpy.ones', 'np.ones', (['output_grid_shape'], {'dtype': 'np.float32'}), '(output_grid_shape, dtype=np.float32)\n', (9361, 9398), True, 'import numpy as np\n'), ((11635, 11660), 'numpy.isin', 'np.isin', (['ijk_idx', 'g_valid'], {}), '(ijk_idx, g_valid)\n', (11642, 11660), True, 'import numpy as np\n'), ((12965, 13001), 'numpy.concatenate', 'np.concatenate', (['occ_val_list'], {'axis': '(0)'}), '(occ_val_list, axis=0)\n', (12979, 13001), True, 'import numpy as np\n'), ((8497, 8534), 'pipelines.utils.postprocess_utils.remove_backface', 'remove_backface', (['mesh', 'surface_points'], {}), '(mesh, surface_points)\n', (8512, 8534), False, 'from pipelines.utils.postprocess_utils import remove_backface\n'), ((9151, 9214), 'numpy.linspace', 'np.linspace', (['(xmin[i] + eps)', '(xmax[i] - eps)', 'output_grid_shape[i]'], {}), '(xmin[i] + eps, xmax[i] - eps, output_grid_shape[i])\n', (9162, 9214), True, 'import numpy as np\n'), ((10487, 10671), 'numpy.stack', 'np.stack', (['[mask[:-1, :-1, :-1], mask[:-1, :-1, 1:], mask[:-1, 1:, :-1], mask[:-1, 1:,\n 1:], mask[1:, :-1, :-1], mask[1:, :-1, 1:], mask[1:, 1:, :-1], mask[1:,\n 1:, 1:]]'], {'axis': '(-1)'}), '([mask[:-1, :-1, :-1], mask[:-1, :-1, 1:], mask[:-1, 1:, :-1], mask\n [:-1, 1:, 1:], mask[1:, :-1, :-1], mask[1:, :-1, 1:], mask[1:, 1:, :-1],\n mask[1:, 1:, 1:]], axis=-1)\n', (10495, 10671), True, 
'import numpy as np\n'), ((12389, 12438), 'numpy.ceil', 'np.ceil', (['(eval_points.shape[0] / self.points_batch)'], {}), '(eval_points.shape[0] / self.points_batch)\n', (12396, 12438), True, 'import numpy as np\n'), ((12505, 12520), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12518, 12520), False, 'import torch\n'), ((5072, 5103), 'torch.from_numpy', 'torch.from_numpy', (['point_samples'], {}), '(point_samples)\n', (5088, 5103), False, 'import torch\n'), ((5136, 5164), 'torch.from_numpy', 'torch.from_numpy', (['sdf_values'], {}), '(sdf_values)\n', (5152, 5164), False, 'import torch\n'), ((5201, 5226), 'torch.from_numpy', 'torch.from_numpy', (['occ_idx'], {}), '(occ_idx)\n', (5217, 5226), False, 'import torch\n'), ((7686, 7715), 'torch.from_numpy', 'torch.from_numpy', (['eval_points'], {}), '(eval_points)\n', (7702, 7715), False, 'import torch\n'), ((8092, 8137), 'numpy.array', 'np.array', (['output_grid.shape'], {'dtype': 'np.float32'}), '(output_grid.shape, dtype=np.float32)\n', (8100, 8137), True, 'import numpy as np\n'), ((10794, 10815), 'numpy.any', 'np.any', (['mask'], {'axis': '(-1)'}), '(mask, axis=-1)\n', (10800, 10815), True, 'import numpy as np\n'), ((10857, 10878), 'numpy.all', 'np.all', (['mask'], {'axis': '(-1)'}), '(mask, axis=-1)\n', (10863, 10878), True, 'import numpy as np\n'), ((8200, 8245), 'numpy.array', 'np.array', (['output_grid.shape'], {'dtype': 'np.float32'}), '(output_grid.shape, dtype=np.float32)\n', (8208, 8245), True, 'import numpy as np\n'), ((9257, 9301), 'numpy.meshgrid', 'np.meshgrid', (['l[0]', 'l[1]', 'l[2]'], {'indexing': '"""ij"""'}), "(l[0], l[1], l[2], indexing='ij')\n", (9268, 9301), True, 'import numpy as np\n'), ((11339, 11382), 'numpy.floor', 'np.floor', (['((xyz - xmin) / self.part_size * 2)'], {}), '((xyz - xmin) / self.part_size * 2)\n', (11347, 11382), True, 'import numpy as np\n'), ((11432, 11494), 'numpy.floor', 'np.floor', (['((xyz - xmin + 0.5 * self.part_size) / self.part_size)'], {}), '((xyz - xmin + 0.5 
* self.part_size) / self.part_size)\n', (11440, 11494), True, 'import numpy as np\n'), ((5530, 5550), 'numpy.array', 'np.array', (['grid_shape'], {}), '(grid_shape)\n', (5538, 5550), True, 'import numpy as np\n'), ((10913, 10937), 'numpy.arange', 'np.arange', (['mask.shape[0]'], {}), '(mask.shape[0])\n', (10922, 10937), True, 'import numpy as np\n'), ((10972, 10996), 'numpy.arange', 'np.arange', (['mask.shape[1]'], {}), '(mask.shape[1])\n', (10981, 10996), True, 'import numpy as np\n'), ((11031, 11055), 'numpy.arange', 'np.arange', (['mask.shape[2]'], {}), '(mask.shape[2])\n', (11040, 11055), True, 'import numpy as np\n'), ((2381, 2398), 'torch.empty', 'torch.empty', (['(1)', '(0)'], {}), '(1, 0)\n', (2392, 2398), False, 'import torch\n'), ((2464, 2481), 'torch.empty', 'torch.empty', (['(1)', '(0)'], {}), '(1, 0)\n', (2475, 2481), False, 'import torch\n')] |
import sys
import time
import json
import traceback
from six.moves import cStringIO
from pysmt.shortcuts import Solver
from pysmt.smtlib.parser import SmtLibParser
def process_data(data):
    """Extract the SMT-LIB script text and the solve time from a raw input string.

    `data` is either a JSON document holding "time" and an "smt_script"/"script"
    key, or a bare SMT-LIB script.  The script is trimmed to the span from the
    first "declare-fun" line through the last "check-sat" line; a trailing
    "time:" annotation inside the script overrides the JSON time.

    Returns:
        (script_text, atime): the trimmed script and the time as a float
        (0 when no time could be determined).
    """
    try:
        # Parse into a local so `data` keeps the original text if anything
        # below fails (the old code rebound `data` to the dict first, which
        # crashed later at data.split when "script" was missing).
        payload = json.loads(data)
        atime = float(payload["time"])
        if 'smt_script' in payload:
            data = payload["smt_script"]
        else:
            data = payload["script"]
    except (ValueError, KeyError, TypeError):
        # Not JSON, malformed, or missing keys: treat input as a bare script.
        atime = 0
    lines = data.split('\n')
    start = 0
    end = len(lines)
    for i, line in enumerate(lines):
        if "declare-fun" in line:
            start = i
            break
    for i, line in enumerate(lines):
        if "check-sat" in line:
            end = i
        if "time:" in line:
            try:
                atime = float(line.split(":")[-1])
            except ValueError:
                pass
    data = '\n'.join(lines[start:end + 1])
    return data, atime
if __name__ == '__main__':
    # Usage: script.py <input-file> <solver-name>
    filename = sys.argv[1]
    solver_name = sys.argv[2]
    with open(filename, "r") as f:
        data = f.read()
    data, _ = process_data(data)
    # you may need to change the solver logic according to your SMT scripts reasoning theory here
    solver = Solver(name=solver_name, logic="BVt")
    parser = SmtLibParser()
    error = False
    s = time.time()
    try:
        # Normalize internal "_i" operator aliases to standard SMT-LIB names.
        data = data.replace("bvurem_i", "bvurem")
        data = data.replace("bvsrem_i", "bvsrem")
        data = data.replace("bvudiv_i", "bvudiv")
        data = data.replace("bvsdiv_i", "bvsdiv")
        script = parser.get_script(cStringIO(data))
        s = time.time()  # restart the clock: measure evaluation only, not parsing
        log = script.evaluate(solver)
        e = time.time()
    except Exception as a:
        traceback.print_exc()
        e = time.time()
        error = True
        log = []
    """
    print json.dumps({
        'time' : e - s,
        'log' : log,
        'error' : error
    })
    """
    if error:
        res = "error"
    else:
        try:
            # Result of the last evaluated command (e.g. sat/unsat of check-sat).
            res = log[-1][1]
        except IndexError:
            res = "error"
    print(res, str(e - s))
    sys.stdout.flush()
| [
"json.loads",
"pysmt.smtlib.parser.SmtLibParser",
"pysmt.shortcuts.Solver",
"six.moves.cStringIO",
"sys.stdout.flush",
"traceback.print_exc",
"time.time"
] | [((1176, 1213), 'pysmt.shortcuts.Solver', 'Solver', ([], {'name': 'solver_name', 'logic': '"""BVt"""'}), "(name=solver_name, logic='BVt')\n", (1182, 1213), False, 'from pysmt.shortcuts import Solver\n'), ((1227, 1241), 'pysmt.smtlib.parser.SmtLibParser', 'SmtLibParser', ([], {}), '()\n', (1239, 1241), False, 'from pysmt.smtlib.parser import SmtLibParser\n'), ((1269, 1280), 'time.time', 'time.time', ([], {}), '()\n', (1278, 1280), False, 'import time\n'), ((2047, 2065), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2063, 2065), False, 'import sys\n'), ((214, 230), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (224, 230), False, 'import json\n'), ((1554, 1565), 'time.time', 'time.time', ([], {}), '()\n', (1563, 1565), False, 'import time\n'), ((1616, 1627), 'time.time', 'time.time', ([], {}), '()\n', (1625, 1627), False, 'import time\n'), ((1525, 1540), 'six.moves.cStringIO', 'cStringIO', (['data'], {}), '(data)\n', (1534, 1540), False, 'from six.moves import cStringIO\n'), ((1663, 1684), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1682, 1684), False, 'import traceback\n'), ((1697, 1708), 'time.time', 'time.time', ([], {}), '()\n', (1706, 1708), False, 'import time\n')] |
#!/usr/bin/env python3
import time
class timer (object):
    """Context manager that prints wall-clock start/stop times and the elapsed duration."""

    def __enter__(self):
        self.start = time.time()
        print('Timer starts at: %s' % self.start)
        return self

    def __exit__(self, type, value, traceback):
        self.stop = time.time()
        print('Timer stops at: %s' % self.stop)
        print('Elapsed: %s' % (self.stop - self.start))
        # Return None (falsy) so exceptions raised inside the `with` block
        # propagate.  The original returned `self`, which is truthy and
        # therefore silently suppressed *every* exception (see the data-model
        # docs for object.__exit__).
| [
"time.time"
] | [((111, 122), 'time.time', 'time.time', ([], {}), '()\n', (120, 122), False, 'import time\n'), ((267, 278), 'time.time', 'time.time', ([], {}), '()\n', (276, 278), False, 'import time\n')] |
from __future__ import absolute_import, unicode_literals
import logging
from django import forms
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext_lazy as _
from acls.models import AccessEntry
from permissions.models import Permission
from .models import Folder
from .permissions import PERMISSION_FOLDER_VIEW
logger = logging.getLogger(__name__)
class FolderForm(forms.ModelForm):
    """ModelForm for creating or renaming a Folder; only the title is editable."""
    class Meta:
        model = Folder
        fields = ('title',)
class FolderListForm(forms.Form):
    """Form with a folder choice field restricted to folders the given user may view."""
    def __init__(self, *args, **kwargs):
        user = kwargs.pop('user', None)
        logger.debug('user: %s', user)
        super(FolderListForm, self).__init__(*args, **kwargs)
        queryset = Folder.objects.all()
        try:
            # Users holding the global view permission see every folder.
            Permission.objects.check_permissions(user, [PERMISSION_FOLDER_VIEW])
        except PermissionDenied:
            # Otherwise restrict the queryset via per-object ACL filtering.
            queryset = AccessEntry.objects.filter_objects_by_access(PERMISSION_FOLDER_VIEW, user, queryset)
        self.fields['folder'] = forms.ModelChoiceField(
            queryset=queryset,
            label=_('Folder'))
| [
"logging.getLogger",
"permissions.models.Permission.objects.check_permissions",
"django.utils.translation.ugettext_lazy",
"acls.models.AccessEntry.objects.filter_objects_by_access"
] | [((372, 399), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (389, 399), False, 'import logging\n'), ((788, 856), 'permissions.models.Permission.objects.check_permissions', 'Permission.objects.check_permissions', (['user', '[PERMISSION_FOLDER_VIEW]'], {}), '(user, [PERMISSION_FOLDER_VIEW])\n', (824, 856), False, 'from permissions.models import Permission\n'), ((913, 1001), 'acls.models.AccessEntry.objects.filter_objects_by_access', 'AccessEntry.objects.filter_objects_by_access', (['PERMISSION_FOLDER_VIEW', 'user', 'queryset'], {}), '(PERMISSION_FOLDER_VIEW, user,\n queryset)\n', (957, 1001), False, 'from acls.models import AccessEntry\n'), ((1104, 1115), 'django.utils.translation.ugettext_lazy', '_', (['"""Folder"""'], {}), "('Folder')\n", (1105, 1115), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
import pybullet as sim
import time
# Connect to the PyBullet GUI; connect() returns the physics client id.
pc = sim.connect(sim.GUI)
sim.setGravity(0, 0, -9.8)
path = "path to your URDF file"
armID = sim.loadURDF(path, basePosition=[0, 0, 0], useFixedBase=True)
# NOTE(review): `pc` is a client id (0 for the first client), not a connection
# flag — `pc != 0` looks like it intends "while connected"; verify the intent.
while pc != 0:
    # Advance the simulation one step at ~100 Hz.
    sim.stepSimulation()
    time.sleep(0.01)
| [
"pybullet.connect",
"time.sleep",
"pybullet.setGravity",
"pybullet.stepSimulation",
"pybullet.loadURDF"
] | [((42, 62), 'pybullet.connect', 'sim.connect', (['sim.GUI'], {}), '(sim.GUI)\n', (53, 62), True, 'import pybullet as sim\n'), ((63, 89), 'pybullet.setGravity', 'sim.setGravity', (['(0)', '(0)', '(-9.8)'], {}), '(0, 0, -9.8)\n', (77, 89), True, 'import pybullet as sim\n'), ((132, 193), 'pybullet.loadURDF', 'sim.loadURDF', (['path'], {'basePosition': '[0, 0, 0]', 'useFixedBase': '(True)'}), '(path, basePosition=[0, 0, 0], useFixedBase=True)\n', (144, 193), True, 'import pybullet as sim\n'), ((212, 232), 'pybullet.stepSimulation', 'sim.stepSimulation', ([], {}), '()\n', (230, 232), True, 'import pybullet as sim\n'), ((235, 251), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (245, 251), False, 'import time\n')] |
import requests
# Post one sample comment to the local comments API and print the response body.
url = 'http://127.0.0.1:9000/api/comments'
resp = requests.post(
    url,
    data={
        "name": "wnn",
        "email": "<EMAIL>",
        "comments": "comment",
        "page_id":"2"
    }
)
print(resp.text)
| [
"requests.post"
] | [((66, 169), 'requests.post', 'requests.post', (['url'], {'data': "{'name': 'wnn', 'email': '<EMAIL>', 'comments': 'comment', 'page_id': '2'}"}), "(url, data={'name': 'wnn', 'email': '<EMAIL>', 'comments':\n 'comment', 'page_id': '2'})\n", (79, 169), False, 'import requests\n')] |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField
from wtforms.validators import DataRequired, Length
class LoginForm(FlaskForm):
    """Sign-in form: username/password (5-20 chars) plus a remember-me flag."""
    username = StringField('Username', validators=[DataRequired(), Length(5, 20)])
    password = PasswordField('Password', validators=[DataRequired(), Length(5, 20)])
    remember = BooleanField('Remember me')
    submit = SubmitField('Log in')
class RegisterForm(FlaskForm):
username = StringField('Username', validators=[DataRequired(), Length(5, 20)])
password = PasswordField('Password', validators=[DataRequired(), Length(5, 20)])
submit = SubmitField('Register')
class CreateForm(FlaskForm):
title = StringField('Title', validators=[DataRequired(), Length(5, 70)])
body = TextAreaField('Body', validators=[DataRequired()])
submit = SubmitField('Create')
class UpdateForm(FlaskForm):
title = StringField('Title', validators=[DataRequired(), Length(5, 70)])
body = TextAreaField('Body', validators=[DataRequired()])
submit = SubmitField('Update')
class DeleteForm(FlaskForm):
submit = SubmitField('Delete')
| [
"wtforms.validators.Length",
"wtforms.BooleanField",
"wtforms.validators.DataRequired",
"wtforms.SubmitField"
] | [((385, 412), 'wtforms.BooleanField', 'BooleanField', (['"""Remember me"""'], {}), "('Remember me')\n", (397, 412), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField\n'), ((426, 447), 'wtforms.SubmitField', 'SubmitField', (['"""Log in"""'], {}), "('Log in')\n", (437, 447), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField\n'), ((662, 685), 'wtforms.SubmitField', 'SubmitField', (['"""Register"""'], {}), "('Register')\n", (673, 685), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField\n'), ((869, 890), 'wtforms.SubmitField', 'SubmitField', (['"""Create"""'], {}), "('Create')\n", (880, 890), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField\n'), ((1074, 1095), 'wtforms.SubmitField', 'SubmitField', (['"""Update"""'], {}), "('Update')\n", (1085, 1095), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField\n'), ((1140, 1161), 'wtforms.SubmitField', 'SubmitField', (['"""Delete"""'], {}), "('Delete')\n", (1151, 1161), False, 'from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField\n'), ((253, 267), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (265, 267), False, 'from wtforms.validators import DataRequired, Length\n'), ((269, 282), 'wtforms.validators.Length', 'Length', (['(5)', '(20)'], {}), '(5, 20)\n', (275, 282), False, 'from wtforms.validators import DataRequired, Length\n'), ((338, 352), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (350, 352), False, 'from wtforms.validators import DataRequired, Length\n'), ((354, 367), 'wtforms.validators.Length', 'Length', (['(5)', '(20)'], {}), '(5, 20)\n', (360, 367), False, 'from wtforms.validators import DataRequired, Length\n'), ((532, 546), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (544, 
546), False, 'from wtforms.validators import DataRequired, Length\n'), ((548, 561), 'wtforms.validators.Length', 'Length', (['(5)', '(20)'], {}), '(5, 20)\n', (554, 561), False, 'from wtforms.validators import DataRequired, Length\n'), ((617, 631), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (629, 631), False, 'from wtforms.validators import DataRequired, Length\n'), ((633, 646), 'wtforms.validators.Length', 'Length', (['(5)', '(20)'], {}), '(5, 20)\n', (639, 646), False, 'from wtforms.validators import DataRequired, Length\n'), ((762, 776), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (774, 776), False, 'from wtforms.validators import DataRequired, Length\n'), ((778, 791), 'wtforms.validators.Length', 'Length', (['(5)', '(70)'], {}), '(5, 70)\n', (784, 791), False, 'from wtforms.validators import DataRequired, Length\n'), ((839, 853), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (851, 853), False, 'from wtforms.validators import DataRequired, Length\n'), ((967, 981), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (979, 981), False, 'from wtforms.validators import DataRequired, Length\n'), ((983, 996), 'wtforms.validators.Length', 'Length', (['(5)', '(70)'], {}), '(5, 70)\n', (989, 996), False, 'from wtforms.validators import DataRequired, Length\n'), ((1044, 1058), 'wtforms.validators.DataRequired', 'DataRequired', ([], {}), '()\n', (1056, 1058), False, 'from wtforms.validators import DataRequired, Length\n')] |
import numpy as np
from time import time
from typing import List, Tuple
from tsp_heuristics.heuristics.utils import get_tour_distance
def nn_algo(
dist_matrix: np.array,
start: int = 0
) -> Tuple[List, float]:
"""
From a start city index, get an Tour according to the Nearest Neighbor
algorithm from the collection of the cities indexes.
Args:
dist_matrix (np.array)
start (int, optional): The first city that we will begin the Tour and
eventually return. Defaults to 0.
Returns:
np.array: Array of indexes representing the city Tour.
float: Time to complete the algorithm.
"""
t0 = time()
Tour = [start]
dist_matrix = dist_matrix.astype(float)
# Making the distance to go to the same
# city impossible.
for i in range(dist_matrix.shape[0]):
dist_matrix[i][i] = np.Inf
for _ in range(dist_matrix.shape[0] - 1):
# Finding the best next city.
min_index = np.argmin(dist_matrix[Tour[-1]])
# Making sure that we won't revisit
# the same city.
for t in Tour:
dist_matrix[min_index][t] = np.Inf
dist_matrix[t][min_index] = np.Inf
Tour.append(min_index)
return Tour, get_tour_distance(Tour, dist_matrix), (time() - t0)
| [
"numpy.argmin",
"time.time",
"tsp_heuristics.heuristics.utils.get_tour_distance"
] | [((666, 672), 'time.time', 'time', ([], {}), '()\n', (670, 672), False, 'from time import time\n'), ((994, 1026), 'numpy.argmin', 'np.argmin', (['dist_matrix[Tour[-1]]'], {}), '(dist_matrix[Tour[-1]])\n', (1003, 1026), True, 'import numpy as np\n'), ((1266, 1302), 'tsp_heuristics.heuristics.utils.get_tour_distance', 'get_tour_distance', (['Tour', 'dist_matrix'], {}), '(Tour, dist_matrix)\n', (1283, 1302), False, 'from tsp_heuristics.heuristics.utils import get_tour_distance\n'), ((1305, 1311), 'time.time', 'time', ([], {}), '()\n', (1309, 1311), False, 'from time import time\n')] |
#!/usr/bin/env python
import json
import time
import random
from sys import argv, exit
from elasticsearch import Elasticsearch, helpers
es = Elasticsearch([{'host': 'localhost', 'port': 9200}])
service_index = "masscan_services"
host_index = "nweb_hosts"
if len(argv)!=2:
exit("usage: nweb_upload.py <filename>")
filename = argv[1] # pull in import source
# ensure that the indexes exist
service_settings = {
"mappings": {
"properties": {
"ip": {
"type": "ip"
},
"port": {
"type": "integer"
},
"timestamp": {
"type": "date"
}
}
}
}
host_settings = {
"mappings": {
"properties": {
"ip": {
"type": "ip"
},
"timestamp": {
"type": "date"
}
}
}
}
# create indexes, DGAF about errors
es.indices.create(index=service_index, ignore=400, body=service_settings)
es.indices.create(index=host_index, ignore=400, body=host_settings)
f=open(filename)
count = 0
service_actions=[]
host_actions=[]
for line in f:
try:
linedata = line.rstrip().split(' ')
# make sure the line is somewhat what we expect
# should be masscan --readfile <in.bin> -oL out.txt
if len(linedata)!=5:
continue
service_actions.append({"port":linedata[2],"ip":linedata[3],"timestamp":linedata[4]})
host_actions.append({"_id":linedata[3],"ip":linedata[3],"timestamp":linedata[4]})
# check the first five lines for duplicate data
count = count+1
if count < 5:
result = es.search(index=service_index, body={ "query": {"query_string": { 'query':"port:"+linedata[2]+" ip:"+linedata[3]+" timestamp:"+linedata[4], "default_operator":"AND" }}})
if int(result['hits']['total']['value']) > 0:
exit("we've already seen this data")
if len(service_actions)>=10000:
helpers.bulk(es, service_actions, index=service_index, doc_type='_doc')
helpers.bulk(es, host_actions, index=host_index, doc_type='_doc')
service_actions=[]
host_actions=[]
print("uploaded "+str(count)+" results from "+filename+" ..")
except Exception as e:
print(e)
print("something went wrong, waiting 5-15 mins ..")
time.sleep(random.randint(300,1200))
continue
# don't forget to upload that last part!
helpers.bulk(es, service_actions, index=service_index, doc_type='_doc')
helpers.bulk(es, host_actions, index=host_index, doc_type='_doc')
print("uploaded "+str(count)+" results from "+filename+" ..")
| [
"elasticsearch.helpers.bulk",
"elasticsearch.Elasticsearch",
"random.randint",
"sys.exit"
] | [((141, 193), 'elasticsearch.Elasticsearch', 'Elasticsearch', (["[{'host': 'localhost', 'port': 9200}]"], {}), "([{'host': 'localhost', 'port': 9200}])\n", (154, 193), False, 'from elasticsearch import Elasticsearch, helpers\n'), ((2272, 2343), 'elasticsearch.helpers.bulk', 'helpers.bulk', (['es', 'service_actions'], {'index': 'service_index', 'doc_type': '"""_doc"""'}), "(es, service_actions, index=service_index, doc_type='_doc')\n", (2284, 2343), False, 'from elasticsearch import Elasticsearch, helpers\n'), ((2344, 2409), 'elasticsearch.helpers.bulk', 'helpers.bulk', (['es', 'host_actions'], {'index': 'host_index', 'doc_type': '"""_doc"""'}), "(es, host_actions, index=host_index, doc_type='_doc')\n", (2356, 2409), False, 'from elasticsearch import Elasticsearch, helpers\n'), ((276, 316), 'sys.exit', 'exit', (['"""usage: nweb_upload.py <filename>"""'], {}), "('usage: nweb_upload.py <filename>')\n", (280, 316), False, 'from sys import argv, exit\n'), ((1822, 1893), 'elasticsearch.helpers.bulk', 'helpers.bulk', (['es', 'service_actions'], {'index': 'service_index', 'doc_type': '"""_doc"""'}), "(es, service_actions, index=service_index, doc_type='_doc')\n", (1834, 1893), False, 'from elasticsearch import Elasticsearch, helpers\n'), ((1900, 1965), 'elasticsearch.helpers.bulk', 'helpers.bulk', (['es', 'host_actions'], {'index': 'host_index', 'doc_type': '"""_doc"""'}), "(es, host_actions, index=host_index, doc_type='_doc')\n", (1912, 1965), False, 'from elasticsearch import Elasticsearch, helpers\n'), ((1742, 1778), 'sys.exit', 'exit', (['"""we\'ve already seen this data"""'], {}), '("we\'ve already seen this data")\n', (1746, 1778), False, 'from sys import argv, exit\n'), ((2191, 2216), 'random.randint', 'random.randint', (['(300)', '(1200)'], {}), '(300, 1200)\n', (2205, 2216), False, 'import random\n')] |