149013
|
class ResponseFunctionInterface(object):
"""
This response function interface provides a unique interface for all possible ways
to calculate the value and gradient of a response.
The interface is designed to be used in e.g. optimization, where the value and gradient
of a response is required, however the exact method of gradient calculation is of
secondary importance.
This might be done using e.g. adjoint sensitivity analysis capabilities of Kratos,
or even a simple finite differencing method.
(Do not confuse this class with the kratos/response_functions/adjoint_response_function.h,
which is an implementation detail for the adjoint sensitivity analysis in Kratos)
"""
def RunCalculation(self, calculate_gradient):
self.Initialize()
self.InitializeSolutionStep()
self.CalculateValue()
if calculate_gradient:
self.CalculateGradient()
self.FinalizeSolutionStep()
self.Finalize()
def Initialize(self):
pass
def UpdateDesign(self, updated_model_part, variable):
pass
def InitializeSolutionStep(self):
pass
def CalculateValue(self):
raise NotImplementedError("CalculateValue needs to be implemented by the derived class")
def CalculateGradient(self):
raise NotImplementedError("CalculateGradient needs to be implemented by the derived class")
def FinalizeSolutionStep(self):
pass
def Finalize(self):
pass
def GetValue(self):
raise NotImplementedError("GetValue needs to be implemented by the derived class")
def GetNodalGradient(self, variable):
raise NotImplementedError("GetNodalGradient needs to be implemented by the derived class")
def GetElementalGradient(self, variable):
raise NotImplementedError("GetElementalGradient needs to be implemented by the derived class")
def GetConditionalGradient(self, variable):
raise NotImplementedError("GetConditionalGradient needs to be implemented by the derived class")
def IsEvaluatedInFolder(self):
return False
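# A minimal sketch of a derived response function, assuming the response can
# be evaluated as a plain Python callable over a list of design variables and
# its gradient approximated by forward finite differencing. The callable and
# design vector are hypothetical stand-ins for a real Kratos response; they
# only illustrate the calling protocol of RunCalculation().
class FiniteDifferenceResponseFunction(ResponseFunctionInterface):
    def __init__(self, func, design, step_size=1e-6):
        self.func = func          # objective: list of floats -> float
        self.design = design      # current design variables
        self.step_size = step_size
        self.value = None
        self.gradient = None
    def CalculateValue(self):
        self.value = self.func(self.design)
    def CalculateGradient(self):
        # Perturb one design variable at a time and difference against the
        # unperturbed value computed in CalculateValue().
        self.gradient = []
        for i in range(len(self.design)):
            perturbed = list(self.design)
            perturbed[i] += self.step_size
            self.gradient.append(
                (self.func(perturbed) - self.value) / self.step_size)
    def GetValue(self):
        return self.value
    def GetNodalGradient(self, variable):
        return self.gradient
# Usage: FiniteDifferenceResponseFunction(sum, [1.0, 2.0]).RunCalculation(True)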
|
149063
|
import argparse
import json
import os
from pprint import pprint
import numpy as np
from sklearn.metrics import precision_recall_fscore_support
from a2t.topic_classification.mlm import MLMTopicClassifier
from a2t.topic_classification.mnli import (
NLITopicClassifier,
NLITopicClassifierWithMappingHead,
)
from a2t.topic_classification.nsp import NSPTopicClassifier
CLASSIFIERS = {
"mnli": NLITopicClassifier,
"nsp": NSPTopicClassifier,
"mlm": MLMTopicClassifier,
"mnli-mapping": NLITopicClassifierWithMappingHead,
}
def top_k_accuracy(output, labels, k=5):
preds = np.argsort(output)[:, ::-1][:, :k]
return sum(l in p for l, p in zip(labels, preds)) / len(labels)
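# Quick sanity check of top_k_accuracy (hypothetical scores):
# >>> top_k_accuracy(np.array([[0.1, 0.7, 0.2]]), np.array([1]), k=1)
# 1.0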
parser = argparse.ArgumentParser(
prog="run_evaluation",
description="Run a evaluation for each configuration.",
)
parser.add_argument("dataset", type=str, help="Dataset file.")
parser.add_argument("topics", type=str, help="Topics or classes file.")
parser.add_argument(
"--config",
type=str,
dest="config",
help="Configuration file for the experiment.",
)
args = parser.parse_args()
with open(args.topics, "rt") as f:
topics = [topic.rstrip().replace("_", " ") for topic in f]
topic2id = {topic: i for i, topic in enumerate(topics)}
with open(args.dataset, "rt") as f:
contexts, labels = [], []
for line in f:
_, label, context = line.strip().split("\t")
contexts.append(context)
labels.append(topic2id[label])
labels = np.array(labels)
with open(args.config, "rt") as f:
config = json.load(f)
for configuration in config:
os.makedirs(f"experiments/{configuration['name']}", exist_ok=True)
classifier = CLASSIFIERS[configuration["classification_model"]](labels=topics, **configuration)
output = classifier(contexts, batch_size=configuration["batch_size"])
np.save(f"experiments/{configuration['name']}/output.npy", output)
np.save(f"experiments/{configuration['name']}/labels.npy", labels)
pre, rec, f1, _ = precision_recall_fscore_support(labels, np.argmax(output, -1), average="weighted")
configuration["precision"] = pre
configuration["recall"] = rec
configuration["f1-score"] = f1
configuration["top-1"] = top_k_accuracy(output, labels, k=1)
configuration["top-3"] = top_k_accuracy(output, labels, k=3)
configuration["top-5"] = top_k_accuracy(output, labels, k=5)
configuration["topk-curve"] = [top_k_accuracy(output, labels, k=i) for i in range(len(topics))]
pprint(configuration)
with open(args.config, "wt") as f:
json.dump(config, f, indent=4)
|
149077
|
from IPython.core.display import Image as image
from PIL import Image
def save_and_display(arr, fname):
pilimg = Image.fromarray(arr)
pilimg.save(fname)
return image(filename=fname, width=600)
|
149091
|
from dlcliche.utils import *
from lib_fat2019 import *
def to_cls_idxs(one_labels, classes, delimiter=','):
one_cls_idxs = [classes.index(l) for l in one_labels.split(delimiter)]
return one_cls_idxs
def decompose_class_indexes(all_labels, classes, delimiter=','):
cls_idxs = [to_cls_idxs(labels, classes, delimiter=delimiter) for labels in all_labels]
return cls_idxs
def freq_envelopes(X):
    return np.mean([np.mean(x[:, :, 0], axis=1) for x in X], axis=0)
def to_cls_freq_envs(X, labels, classes, delimiter):
cls_idxs = decompose_class_indexes(labels, classes, delimiter=delimiter)
    freq_envs = [freq_envelopes([X[i] for i in idxs])
                 for idxs in cls_idxs]
return np.array(freq_envs)
def to_cls_envelope_map(srcs, dests, clip_min_src=255/10, clip_max_map=2.):
env_map = []
for src, dest in zip(srcs, dests):
src = np.clip(src, clip_min_src, np.max(src))
cls_env_map = np.clip(dest / src, 0, clip_max_map)
env_map.append(cls_env_map)
return np.array(env_map)
def mix_envelope_map(maps, cls_idxs):
return np.mean([maps[ci] for ci in cls_idxs], axis=0)
def apply_envelope_map_one(x_one_plane, _map):
x1 = x_one_plane * _map
#x1 = (x1 / x1.max() * 255.)
return x1.astype(np.uint8)
def apply_envelope_map(x, _map):
_xA = apply_envelope_map_one(x[..., 0], _map)
_xP = x[..., 1]
_xD = apply_envelope_map_one(x[..., 2], _map)
return np.stack([_xA, _xP, _xD], axis=-1)
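# Note: each sample is assumed to be a (freq_bins, time, 3) array; channel 0
# is the amplitude plane the frequency envelopes are computed from, channel 1
# is passed through untouched, and channel 2 is remapped like channel 0.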
class DomainFreqTransfer:
def __init__(self, classes, clip_min_src=255/10, clip_max_map=2.):
self.classes = classes
self.clip_min_src = clip_min_src
self.clip_max_map = clip_max_map
def fit(self, dest_domain_X, dest_domain_ml_y, delimiter=',',
src_domain_X=None, src_domain_ml_y=None):
self.delimiter = delimiter
# Class mean freq envelope
self.dest_freq_envs = to_cls_freq_envs(dest_domain_X, dest_domain_ml_y,
self.classes, delimiter=delimiter)
if src_domain_X is not None:
self.src_freq_envs = to_cls_freq_envs(src_domain_X, src_domain_ml_y,
self.classes, delimiter=delimiter)
# Freq envelope map from src to dest
#self.maps = to_cls_envelope_map(self.src_freq_envs, self.dest_freq_envs,
# clip_min_src=self.clip_min_src, clip_max_map=self.clip_max_map)
#def map_by_labels(self, one_y):
# cls_idxs = to_cls_idxs(one_y, self.classes, delimiter=self.delimiter)
# return mix_envelope_map(self.maps, cls_idxs).reshape((self.maps[0].shape[0], 1))
def __call__(self, X, y, show_samples=20):
"""Domain transfer samples in X inplace."""
assert len(X[0].shape) == 3
for i, (_x, _y) in enumerate(zip(X, y)):
# Old design: single envelope mapping per class
#_map = self.map_by_labels(_y)
# New design: sample-wise envelope mapping
            src = freq_envelopes([_x])
dest = self.dest_freq_envs[self.classes.index(_y)]
_map = (dest/np.maximum(src, 1)).reshape((_x.shape[0], 1))
X[i] = apply_envelope_map(_x, _map)
if i < show_samples:
                mapped = freq_envelopes([X[i]])
plt.plot(src, label='src')
plt.plot(dest, label='dest')
plt.plot(mapped, label='mapped')
plt.title(_y)
plt.legend()
plt.show()
|
149107
|
import FWCore.ParameterSet.Config as cms
from SimGeneral.MixingModule.mixObjects_cfi import *
process = cms.Process("PRODVAL1")
process.load("DQM.SiStripCommon.DaqMonitorROOTBackEnd_cfi")
process.RandomNumberGeneratorService = cms.Service("RandomNumberGeneratorService",
moduleSeeds = cms.PSet(
mix = cms.untracked.uint32(56789)
)
)
process.MessageLogger = cms.Service("MessageLogger",
cerr = cms.untracked.PSet(
enable = cms.untracked.bool(False)
),
cout = cms.untracked.PSet(
default = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
enable = cms.untracked.bool(True)
)
)
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/relval/CMSSW_3_1_0_pre4/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_30X_v1/0003/3AA6EEA4-3B16-DE11-B35F-001617C3B654.root')
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.mix = cms.EDProducer("MixingModule",
LabelPlayback = cms.string(''),
maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
),
input = cms.SecSource("EmbeddedRootSource",
fileNames = cms.untracked.vstring(
'/store/relval/CMSSW_3_1_0_pre4/RelValTTbar/GEN-SIM-DIGI-RAW-HLTDEBUG/IDEAL_30X_v1/0003/3AA6EEA4-3B16-DE11-B35F-001617C3B654.root'),
seed = cms.int32(1234567),
type = cms.string('fixed'),
nbPileupEvents = cms.PSet(
averageNumber = cms.double(1.0)
),
maxEventsToSkip = cms.untracked.uint32(0),
),
maxBunch = cms.int32(12345),
minBunch = cms.int32(12345),
bunchspace = cms.int32(25),
checktof = cms.bool(False),
Label = cms.string(''),
mixObjects = cms.PSet(
mixCH = cms.PSet(
mixCaloHits
),
mixTracks = cms.PSet(
mixSimTracks
),
mixVertices = cms.PSet(
mixSimVertices
),
mixSH = cms.PSet(
mixSimHits
),
mixHepMC = cms.PSet(
mixHepMCProducts
)
)
)
process.test = cms.EDAnalyzer("TestSuite",
maxBunch = cms.int32(34567),
BunchNr = cms.int32(12345),
minBunch = cms.int32(23456),
fileName = cms.string('histos.root')
)
process.p = cms.Path(process.mix+process.test)
#process.outpath = cms.EndPath(process.out)
|
149109
|
import argparse
import os
import time
import datetime
import yaml
import tensorflow as tf
import numpy as np
import src.core as core
from src.retina_net import config_utils
from src.core import constants
from src.retina_net.builders import dataset_handler_builder
from src.retina_net.models.retinanet_model import RetinaNetModel
keras = tf.keras
def train_model(config):
"""
Training function.
:param config: config file
"""
# Get training config
training_config = config['training_config']
# Create dataset class
dataset_config = config['dataset_config']
dataset_handler = dataset_handler_builder.build_dataset(
dataset_config, 'train')
# Set keras training phase
keras.backend.set_learning_phase(1)
print("Keras Learning Phase Set to: " +
str(keras.backend.learning_phase()))
# Create Model
with tf.name_scope("retinanet_model"):
model = RetinaNetModel(config['model_config'])
# Instantiate an optimizer.
minibatch_size = training_config['minibatch_size']
epoch_size = int(dataset_handler.epoch_size / minibatch_size)
initial_learning_rate = training_config['initial_learning_rate']
decay_factor = training_config['decay_factor']
decay_boundaries = [
boundary *
epoch_size for boundary in training_config['decay_boundaries']]
decay_factors = [decay_factor**i for i in range(0, len(decay_boundaries)+1)]
    learning_rate_values = [
        np.round(initial_learning_rate * factor, 8)
        for factor in decay_factors]
lr_schedule = tf.keras.optimizers.schedules.PiecewiseConstantDecay(
decay_boundaries, learning_rate_values)
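    # Worked example (hypothetical config values): with initial_learning_rate
    # 1e-3, decay_factor 0.1 and decay_boundaries [8, 11] (in epochs), the
    # schedule steps through [1e-3, 1e-4, 1e-5] at iterations
    # [8 * epoch_size, 11 * epoch_size].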
optimizer = keras.optimizers.Adam(learning_rate=lr_schedule, epsilon=1e-2)
# Create summary writer
log_file = config['logs_dir'] + '/training/' + str(datetime.datetime.now())
summary_writer = tf.summary.create_file_writer(log_file)
# Load checkpoint weights if training folder exists
ckpt = tf.train.Checkpoint(
step=tf.Variable(0),
optimizer=optimizer,
net=model)
manager = tf.train.CheckpointManager(
ckpt,
config['checkpoint_path'],
max_to_keep=training_config['max_checkpoints_to_keep'])
ckpt.restore(manager.latest_checkpoint)
    # If no checkpoints exist, initialize either from ImageNet or from scratch
if manager.latest_checkpoint:
print("Restored from {}".format(manager.latest_checkpoint))
ckpt.step.assign_add(1)
elif config['model_config']['feature_extractor']['pretrained_initialization']:
# Load resnet-50 imagenet pretrained weights if set in config file.
# Dummy input required to define graph.
input_shape = (224, 224, 3)
dummy_input = keras.layers.Input(shape=input_shape)
model.feature_extractor(dummy_input)
weights_path = keras.utils.get_file(
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
('https://github.com/fchollet/deep-learning-models/'
'releases/download/v0.2/'
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'),
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
model.feature_extractor.load_weights(weights_path, by_name=True)
# Tensorflow 2.0 bug with loading weights in nested models. Might get
# fixed later.
        for block_name in ['conv_block_2a', 'conv_block_3a', 'conv_block_4a',
                           'conv_block_5a', 'identity_block_2b',
                           'identity_block_2c', 'identity_block_3b',
                           'identity_block_3c', 'identity_block_3d',
                           'identity_block_4b', 'identity_block_4c',
                           'identity_block_4d', 'identity_block_4e',
                           'identity_block_4f', 'identity_block_5b',
                           'identity_block_5c']:
            getattr(model.feature_extractor, block_name).load_weights(
                weights_path, by_name=True)
print("Initializing from ImageNet weights.")
else:
print("Initializing from scratch.")
# Create Dataset
# Skip already passed elements in dataset, in case of resuming training.
dataset = dataset_handler.create_dataset().repeat(
training_config['max_epochs'])
    # Batch the dataset using the configured minibatch size.
batched_dataset = dataset.batch(minibatch_size)
batched_dataset = batched_dataset.take(tf.data.experimental.cardinality(
batched_dataset) - tf.cast(ckpt.step + 1, tf.int64))
print("Remaining iterations:" +
str(tf.data.experimental.cardinality(batched_dataset).numpy()))
    # `prefetch` lets the dataset fetch batches in the background while the model is training.
batched_dataset = batched_dataset.prefetch(
buffer_size=tf.data.experimental.AUTOTUNE)
last_time = time.time()
for sample_dict in batched_dataset:
with summary_writer.as_default():
            # Graph and profiler tracing for debugging the graph in
            # tensorboard (both disabled here; set to True when debugging).
            tf.summary.trace_on(graph=False, profiler=False)
total_loss, loss_dict = train_single_step(
model, optimizer, sample_dict)
tf.summary.trace_export(
name="training_trace",
step=0,
profiler_outdir=log_file)
with tf.name_scope('losses'):
for loss_name in loss_dict.keys():
tf.summary.scalar(loss_name,
loss_dict[loss_name],
step=int(ckpt.step))
with tf.name_scope('optimizer'):
tf.summary.scalar('learning_rate',
lr_schedule(int(ckpt.step)),
step=int(ckpt.step))
tf.summary.scalar(
'Total Loss',
total_loss,
step=int(
ckpt.step))
summary_writer.flush()
# Write summary
if int(ckpt.step) % training_config['summary_interval'] == 0:
current_time = time.time()
time_elapsed = current_time - last_time
last_time = time.time()
print(
'Step {}, Total Loss {:0.3f}, Time Elapsed {:0.3f} s'.format(
int(ckpt.step), total_loss.numpy(), time_elapsed))
# Saving checkpoint
if int(ckpt.step) % int(
epoch_size * training_config['checkpoint_interval']) == 0:
save_path = manager.save(checkpoint_number=ckpt.save_counter)
print("Saved checkpoint for step {}: {}".format(
int(ckpt.step), save_path))
print("loss {:1.2f}".format(total_loss.numpy()))
ckpt.step.assign_add(1)
else:
ckpt.step.assign_add(1)
@tf.function
def train_single_step(
model,
optimizer,
sample_dict):
"""
:param model: keras retinanet model
:param optimizer: keras optimizer
:param sample_dict: input dictionary generated from dataset.
If element sizes in this dictionary are variable, remove tf.function decorator.
:return total_loss: Sum of all losses.
:return cls_loss: classification loss.
:return reg_loss: regression loss.
:return regularization_loss: regularization_loss
:return prediction_dict: Dictionary containing neural network predictions
"""
with tf.GradientTape() as tape:
prediction_dict = model(sample_dict[constants.IMAGE_NORMALIZED_KEY],
train_val_test='training')
total_loss, loss_dict = model.get_loss(sample_dict, prediction_dict)
# Get any regularization loss in the model and add it to total loss
regularization_loss = tf.reduce_sum(
tf.concat([layer.losses for layer in model.layers], axis=0))
loss_dict.update(
{constants.REGULARIZATION_LOSS_KEY: regularization_loss})
total_loss += regularization_loss
    # Compute the gradients with respect to the loss
with tf.name_scope("grad_ops"):
gradients = tape.gradient(total_loss, model.trainable_variables)
clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
optimizer.apply_gradients(
zip(clipped_gradients, model.trainable_variables))
return total_loss, loss_dict
def main():
"""Object Detection Model Trainer
"""
# Defaults
default_gpu_device = '1'
default_config_path = core.model_dir(
'retina_net') + '/configs/retinanet_bdd.yaml'
# Allowed data splits are 'train','train_mini', 'val', 'val_half',
# 'val_mini'
default_data_split = 'train'
# Parse input
parser = argparse.ArgumentParser() # Define argparser object
parser.add_argument('--gpu_device',
type=str,
dest='gpu_device',
default=default_gpu_device)
parser.add_argument('--yaml_path',
type=str,
dest='yaml_path',
default=default_config_path)
parser.add_argument('--data_split',
type=str,
dest='data_split',
default=default_data_split)
args = parser.parse_args()
# Set CUDA device id
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_device
# Allow GPU memory growth
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
# Load in configuration file as python dictionary
with open(args.yaml_path, 'r') as yaml_file:
config = yaml.load(yaml_file, Loader=yaml.FullLoader)
# Make necessary directories, update config with checkpoint path and data
# split
config = config_utils.setup(config, args)
# Go to training function
train_model(config)
if __name__ == '__main__':
main()
|
149202
|
import sys
[_, fai, window] = sys.argv
window = int(window)
with open(fai, 'r') as f:
for line in f:
dat = line.split("\t")
CHR = dat[0]
END = int(dat[1])
        i = 1
        while i <= END:
            # Clip the last window to the sequence end (.fai column 2 is the length).
            print(CHR + '\t' + str(i) + '\t' + str(min(i + window - 1, END)))
            i += window
|
149206
|
import imghdr
from typing import Optional
from mitmproxy.contentviews import base
from mitmproxy.coretypes import multidict
from . import image_parser
def test_ico(h, f):
if h.startswith(b"\x00\x00\x01\x00"):
return "ico"
imghdr.tests.append(test_ico)
class ViewImage(base.View):
name = "Image"
def __call__(self, data, **metadata):
image_type = imghdr.what('', h=data)
if image_type == 'png':
image_metadata = image_parser.parse_png(data)
elif image_type == 'gif':
image_metadata = image_parser.parse_gif(data)
elif image_type == 'jpeg':
image_metadata = image_parser.parse_jpeg(data)
elif image_type == 'ico':
image_metadata = image_parser.parse_ico(data)
else:
image_metadata = [
("Image Format", image_type or "unknown")
]
if image_type:
view_name = f"{image_type.upper()} Image"
else:
view_name = "Unknown Image"
return view_name, base.format_dict(multidict.MultiDict(image_metadata))
def render_priority(self, data: bytes, *, content_type: Optional[str] = None, **metadata) -> float:
return float(bool(
content_type
and content_type.startswith("image/")
and content_type != "image/svg+xml"
))
|
149215
|
import os
from flask import Blueprint, request
from flask_cors import CORS
from service.astra_service import astra_service
credentials_controller = Blueprint('credentials_controller', __name__)
CORS(credentials_controller)
# This controller handles the functionality for connecting to the database
#
# Here we define the REST API endpoints and call our Astra Service
# to send the request to the underlying Data Access Objects
@credentials_controller.route("/api/credentials", methods=['GET', 'POST'])
def connect():
if request.method == 'POST':
        temp_zip_path = os.path.abspath('temp_bundle.zip')
        with open(temp_zip_path, 'wb') as f:
            f.write(request.get_data())
try:
astra_service.save_credentials(request.args['username'], request.args['password'],
request.args['keyspace'], temp_zip_path)
astra_service.connect()
finally:
os.remove(temp_zip_path)
return {'connected': True}, 200
if request.method == 'GET':
resp = astra_service.check_connection()
if resp is True:
status_code = 200
else:
status_code = 401
return str(resp), status_code
@credentials_controller.route("/api/credentials/test", methods=['POST'])
def test_credentials():
    temp_zip_path = os.path.abspath('temp_bundle.zip')
    with open(temp_zip_path, 'wb') as f:
        f.write(request.get_data())
resp = {'success': False}
status_code = 400
try:
test_connection = astra_service.test_credentials(request.args['username'], request.args['password'],
request.args['keyspace'], temp_zip_path)
resp = {'success': test_connection}
if resp['success'] is True:
status_code = 200
else:
status_code = 401
finally:
os.remove(temp_zip_path)
return resp, status_code
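# Example request against the connect endpoint (hypothetical host and values);
# the secure connect bundle is sent as the raw request body:
#   curl -X POST "http://localhost:5000/api/credentials?username=u&password=p&keyspace=ks" \
#        --data-binary @secure-connect-bundle.zip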
|
149222
|
from django.db import models
from django.db.models.functions import Cast
class AssessmentQuerySet(models.QuerySet):
def assessment_grade(self, user, assessment):
all_answers = self.filter(
user=user,
question__assessment=assessment,
)
correct_answers = all_answers.filter(correct_answer=True).count()
total_answers = all_answers.count()
return correct_answers / total_answers
def unanswered_questions(self, user, assessment):
"""
Retrieves if the test has already been initiated
and if the user needs to answer any other question
"""
        all_answers = self.filter(
            user=user,
            question__assessment=assessment,
        ).count()
if assessment.question_count == all_answers:
return True, True
if (all_answers > 0) and (all_answers < assessment.question_count):
return True, False
return False, False
def ranking(self, assessment, n=10):
all_candidates_answers = (
self.filter(
question__assessment=assessment,
)
.values("user__username")
.annotate(
points=Cast(
models.Sum(
models.Case(
models.When(correct_answer=True, then=1),
default=models.Value(0),
)
),
models.FloatField(),
)
/ Cast(models.Count("correct_answer"), models.FloatField())
* 100
)
.order_by("-points")[:n]
)
return all_candidates_answers.all()
|
149234
|
import functools
import os
import pathlib
import random
import unittest
import uuid
from typing import Any
from typing import Callable
from typing import Type
from typing import TypeVar
from typing import cast
import pytest
from django.conf import settings as django_settings
from django.utils.module_loading import import_string
from typing_extensions import Final
from collectfast import settings
live_test = pytest.mark.skipif(
os.environ.get("SKIP_LIVE_TESTS") == "true", reason="not running live tests"
)
static_dir: Final = pathlib.Path(django_settings.STATICFILES_DIRS[0])
F = TypeVar("F", bound=Callable[..., Any])
def make_test(func: F) -> Type[unittest.TestCase]:
"""
Creates a class that inherits from `unittest.TestCase` with the decorated
function as a method. Create tests like this:
    >>> fn = lambda: 1337
>>> @make_test
... def test_fn(case):
... case.assertEqual(fn(), 1337)
"""
case = type(func.__name__, (unittest.TestCase,), {func.__name__: func})
case.__module__ = func.__module__
return case
def test_many(**mutations: Callable[[F], F]) -> Callable[[F], Type[unittest.TestCase]]:
def test(func: F) -> Type[unittest.TestCase]:
"""
Creates a class that inherits from `unittest.TestCase` with the decorated
function as a method. Create tests like this:
>>> fn = lambda x: 1337
>>> @make_test
... def test_fn(case):
... case.assertEqual(fn(), 1337)
"""
case_dict = {
"test_%s" % mutation_name: mutation(func)
for mutation_name, mutation in mutations.items()
}
case = type(func.__name__, (unittest.TestCase,), case_dict)
case.__module__ = func.__module__
return case
return test
def create_static_file() -> pathlib.Path:
"""Write random characters to a file in the static directory."""
path = static_dir / f"{uuid.uuid4().hex}.txt"
path.write_text("".join(chr(random.randint(0, 64)) for _ in range(500)))
return path
def clean_static_dir() -> None:
for filename in os.listdir(static_dir.as_posix()):
file = static_dir / filename
if file.is_file():
file.unlink()
def override_setting(name: str, value: Any) -> Callable[[F], F]:
def decorator(fn: F) -> F:
@functools.wraps(fn)
def wrapper(*args, **kwargs):
original = getattr(settings, name)
setattr(settings, name, value)
try:
return fn(*args, **kwargs)
finally:
setattr(settings, name, original)
return cast(F, wrapper)
return decorator
def override_storage_attr(name: str, value: Any) -> Callable[[F], F]:
def decorator(fn: F) -> F:
@functools.wraps(fn)
def wrapper(*args, **kwargs):
storage = import_string(django_settings.STATICFILES_STORAGE)
original = getattr(storage, name)
setattr(storage, name, value)
try:
return fn(*args, **kwargs)
finally:
setattr(storage, name, original)
return cast(F, wrapper)
return decorator
|
149236
|
import time
import random
import datetime
import os
class Python:
@staticmethod
def print(*lines):
if not lines:
print('')
for line in lines:
print(line)
@staticmethod
def input(prompt):
return input(prompt)
@staticmethod
def sleep(secs):
time.sleep(secs)
@staticmethod
def random_choice(items):
return random.choice(items)
@staticmethod
def time_now():
return time.time()
@staticmethod
def date_now():
return datetime.datetime.now()
def open_file(self, rel_path):
return open(os.path.join(self.get_base_dir(), rel_path), 'rb')
@staticmethod
def get_base_dir():
return os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
|
149285
|
from torch.utils.cpp_extension import load
def load_fwi(path):
fwi = load(name="fwi",
               sources=[path + '/Torch_Fwi.cpp',
                        path + '/Parameter.cpp',
                        path + '/libCUFD.cu',
                        path + '/el_stress.cu',
                        path + '/el_velocity.cu',
                        path + '/el_stress_adj.cu',
                        path + '/el_velocity_adj.cu',
                        path + '/Model.cu',
                        path + '/Cpml.cu',
                        path + '/utilities.cu',
                        path + '/Src_Rec.cu',
                        path + '/Boundary.cu'],
extra_cflags=[
'-O3'
],
extra_include_paths=['/usr/local/cuda/include', path+'/rapidjson'],
extra_ldflags=['-L/usr/local/cuda/lib64 -lnvrtc -lcuda -lcudart -lcufft'],
verbose=False)
return fwi
|
149291
|
import os
import sys
import time
import traceback
import torch
from queue import Queue
import requests
from typing import List
from functools import partial
from ctools.utils import read_file, save_file, get_rank, get_world_size, get_data_decompressor, remove_file, broadcast
from .base_comm_learner import BaseCommLearner
from ..learner_hook import LearnerHook
class FlaskFileSystemLearner(BaseCommLearner):
"""
Overview:
An implementation of CommLearner, using flask as the file system.
Interfaces:
__init__, register_learner, send_agent, get_data, send_train_info, start_heartbeats_thread
init_service, close_service,
Property:
hooks4call
"""
def __init__(self, cfg: 'EasyDict') -> None: # noqa
"""
Overview:
Initialize file path(url, path of traj & agent), comm frequency, dist learner info according to cfg.
Arguments:
- cfg (:obj:`EasyDict`): config dict
"""
super(FlaskFileSystemLearner, self).__init__(cfg)
self._url_prefix = 'http://{}:{}/'.format(cfg.upstream_ip, cfg.upstream_port)
self._path_traj = cfg.path_traj
self._path_agent = cfg.path_agent
# thread: _heartbeats_freq; hook: _send_agent_freq, _send_train_info_freq
self._heartbeats_freq = cfg.heartbeats_freq
self._send_agent_freq = cfg.send_agent_freq
self._send_train_info_freq = cfg.send_train_info_freq
self._rank = get_rank()
self._world_size = get_world_size()
if 'learner_ip' not in cfg.keys() or cfg.learner_ip == 'auto':
self._learner_ip = os.environ.get('SLURMD_NODENAME', '')
else:
self._learner_ip = cfg.learner_ip
self._learner_port = cfg.learner_port - self._rank
self._restore = cfg.restore
self._iter = 0
# override
def register_learner(self) -> None: # todo: 1 learner -> many agent?
"""
Overview:
Register learner's info in coordinator, called by ``self.init_service``.
            Will set property ``_agent_name`` from the returned response info. Registration repeats until it succeeds.
"""
d = {
'learner_uid': self._learner_uid,
'learner_ip': self._learner_ip,
'learner_port': self._learner_port,
'world_size': self._world_size,
'restore': self._restore
}
while True: # only after registration succeeds, can ``_active_flag`` be set to True
result = self._flask_send(d, 'coordinator/register_learner')
if result is not None and result['code'] == 0:
self._agent_name = result['info']['player_name']
self._model_path = result['info']['model_path']
return
else:
time.sleep(10)
# override
def send_agent(self, state_dict: dict) -> None:
"""
Overview:
Save learner's agent in corresponding path, called by ``SendAgentHook``.
Arguments:
- state_dict (:obj:`dict`): state dict of the runtime agent
"""
new_path = self._agent_name + '_' + str(self._iter) + '_ckpt.pth'
state_dict['model'] = {k: v for k, v in state_dict['model'].items() if 'value_networks' not in k}
path = os.path.join(self._path_agent, new_path)
save_file(path, state_dict)
d = {'learner_uid': self._learner_uid, 'model_path': new_path}
while self._active_flag:
result = self._flask_send(d, 'coordinator/model_path_update')
if result is not None and result['code'] == 0: # remove last model
self._logger.info('save model at: {} for actor update'.format(new_path))
if os.path.exists(os.path.join(self._path_agent, self._agent_name + '_' + str(self._iter - 5) +'_ckpt.pth')):
os.remove(os.path.join(self._path_agent, self._agent_name + '_' + str(self._iter - 5) + '_ckpt.pth'))
self._iter += 1
return
else:
time.sleep(1)
@staticmethod
def load_data_fn(path_traj, traj_id, decompressor):
file_path = os.path.join(path_traj, traj_id)
s = read_file(file_path, fs_type='normal')
remove_file(file_path)
#s = decompressor(s)
return s
# override
def get_data(self, batch_size: int) -> list: # todo: doc not finished
"""
Overview:
Get batched data from coordinator.
Arguments:
- batch_size (:obj:`int`): size of one batch
Returns:
- stepdata (:obj:`list`): a list of train data, each element is one traj
"""
d = {'learner_uid': self._learner_uid, 'batch_size': batch_size}
sleep_count = 1
while self._active_flag:
result = self._flask_send(d, 'coordinator/ask_for_metadata')
if result is not None and result['code'] == 0:
metadata = result['info']
if metadata is not None:
assert isinstance(metadata, list)
decompressor = get_data_decompressor(metadata[0].get('compressor', 'none'))
data = [
partial(
FlaskFileSystemLearner.load_data_fn,
self._path_traj,
m['traj_id'],
decompressor=decompressor,
) for m in metadata
]
return data
time.sleep(sleep_count)
sleep_count += 1
# override
def send_train_info(self, train_info: dict) -> None:
"""
Overview:
Send train info to coordinator, called by ``SendTrainInfoHook``.
Sending will repeat until succeeds or ``_active_flag`` is set to False.
Arguments:
            - train_info (:obj:`dict`): train info in `dict` type, \
including keys `train_info`(last iter), `learner_uid`
"""
d = {'train_info': train_info, 'learner_uid': self._learner_uid}
while self._active_flag:
result = self._flask_send(d, 'coordinator/send_train_info')
if result is not None and result['code'] == 0:
return result['info']
else:
time.sleep(1)
# override
def _send_learner_heartbeats(self) -> None:
"""
Overview:
Send learner's heartbeats to coordinator, will start as a thread in ``self.start_heartbeats_thread``.
Sending will take place every ``_heartbeats_freq`` seconds until ``_active_flag`` is set to False.
"""
d = {'learner_uid': self._learner_uid}
while self._active_flag:
self._flask_send(d, 'coordinator/get_heartbeats')
for _ in range(self._heartbeats_freq):
if not self._active_flag:
break
time.sleep(1)
def _flask_send(self, data: dict, api: str) -> dict:
"""
Overview:
Send info via flask and return the response.
Log corresponding info/error when succeeds, fails or raises an exception.
Arguments:
- data (:obj:`dict`): the data to send via ``requests`` api
- api (:obj:`str`): the specific api which the data will be sent to, \
should add prefix ([ip]:[port]) before when using.
Returns:
- response (:obj:`dict`): if no exception raises, return the json response
"""
response = None
t = time.time()
try:
response = requests.post(self._url_prefix + api, json=data).json()
if hasattr(self, '_agent_name'):
name = self._agent_name.split('_')[0]
else:
name = 'none'
if response['code'] == 0:
self._logger.info("{} succeed sending result: {}, cost time: {:.4f}".format(api, name, time.time() - t))
else:
self._logger.error("{} failed to send result: {}, cost time: {:.4f}".format(api, name, time.time() - t))
except Exception as e:
self._logger.error(''.join(traceback.format_tb(e.__traceback__)))
self._logger.error("[error] api({}): {}".format(api, sys.exc_info()))
return response
@property
def hooks4call(self) -> List[LearnerHook]:
"""
Overview:
Initialize the hooks and return them.
Returns:
- hooks (:obj:`list`): the hooks which comm learner have, will be registered in learner as well.
"""
return [
SendAgentHook('send_agent', 100, position='before_run', ext_args={}),
SendAgentHook(
'send_agent', 100, position='after_iter', ext_args={'send_agent_freq': self._send_agent_freq}
),
SendTrainInfoHook(
'send_train_info',
100,
position='after_iter',
ext_args={'send_train_info_freq': self._send_train_info_freq}
),
]
def model_path(self):
return os.path.join(self._path_agent, self._model_path)
#return '/mnt/cache/zhouhang2/repo/distar/distar/entry/as_rl_baseline/experiments/final12/ckpt/iteration_86600.pth.tar'
class SendAgentHook(LearnerHook):
"""
Overview:
Hook to send agent
Interfaces:
__init__, __call__
Property:
name, priority, position
"""
def __init__(self, *args, ext_args: dict = {}, **kwargs) -> None:
"""
Overview:
init SendAgentHook
Arguments:
- ext_args (:obj:`dict`): extended_args, use ext_args.freq to set send_agent_freq
"""
super().__init__(*args, **kwargs)
if 'send_agent_freq' in ext_args:
self._freq = ext_args['send_agent_freq']
else:
self._freq = 1
def __call__(self, engine: 'BaseLearner') -> None: # noqa
"""
Overview:
Save learner's agent in corresponding path at interval iterations, including model_state_dict, last_iter
Arguments:
- engine (:obj:`BaseLearner`): the BaseLearner
"""
last_iter = engine.last_iter.val
if engine.rank == 0 and last_iter % self._freq == 0:
state_dict = {'model': engine.agent.model.state_dict(), 'iter': last_iter}
engine.send_agent(state_dict)
engine.info('{} save iter{} agent'.format(engine.name, last_iter))
class SendTrainInfoHook(LearnerHook):
"""
Overview:
Hook to send train info
Interfaces:
__init__, __call__
Property:
name, priority, position
"""
def __init__(self, *args, ext_args: dict, **kwargs) -> None:
"""
Overview:
init SendTrainInfoHook
Arguments:
- ext_args (:obj:`dict`): extended_args, use ext_args.freq to set send_train_info_freq
"""
super().__init__(*args, **kwargs)
self._freq = ext_args['send_train_info_freq']
def __call__(self, engine: 'BaseLearner') -> None: # noqa
"""
Overview:
Send train info including last_iter at interval iterations, learner_uid (added in ``send_train_info``)
Arguments:
- engine (:obj:`BaseLearner`): the BaseLearner
"""
flag = torch.tensor([0])
if engine.rank == 0:
last_iter = engine.last_iter.val
frames = int(self._freq * engine._world_size * engine._cfg.learner.data.batch_size * engine._cfg.learner.unroll_len)
if last_iter % self._freq == 0 and hasattr(engine, 'last_ckpt_path'):
state_dict = {'iter': frames, 'ckpt_path': os.path.abspath(engine.last_ckpt_path)}
checkpoint_path = engine.send_train_info(state_dict)
engine.info('{} save iter{} train_info'.format(engine.name, last_iter))
if checkpoint_path != 'none':
flag = torch.tensor([1])
engine.checkpoint_manager.load(
os.path.join(engine._path_agent, checkpoint_path),
model=engine.agent.model,
logger_prefix='({})'.format(engine.name),
strict=True,
info_print=engine.rank == 0,
)
engine.info('{} reset ckpt in {}!!!!!!!!!!!!!!!!!'.format(engine.name, checkpoint_path))
state_dict = {'model': engine.agent.model.state_dict(), 'iter': last_iter}
engine.send_agent(state_dict)
engine.info('{} save iter{} agent'.format(engine.name, last_iter))
broadcast(flag, 0)
if flag:
engine._setup_optimizer()
engine._agent.model.broadcast_params()
|
149374
|
import os
broker_url = os.environ.get('CELERY_BROKER_URL', 'redis://localhost:6379/0')
result_backend = os.environ.get('CELERY_RESULT_BACKEND', 'redis://localhost:6379/0')
accept_content = ['pickle']
task_serializer = 'pickle'
result_serializer = 'pickle'
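# Minimal usage sketch, assuming this module is importable as `celeryconfig`:
#   from celery import Celery
#   app = Celery("worker")
#   app.config_from_object("celeryconfig")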
|
149451
|
from django.test import TestCase
from curious.graph import traverse
from curious_tests.models import Blog, Entry, Author
from curious_tests import assertQueryResultsEqual
class TestFunc(TestCase):
def setUp(self):
blog = Blog(name='Databases')
blog.save()
self.blogs = [blog]
authors = ('<NAME>', '<NAME>', '<NAME>')
headlines = ('MySQL is a relational DB',
'Postgres is a really good relational DB',
'Neo4J is a graph DB')
self.entries = [Entry(headline=headline, blog=blog) for headline in headlines]
for entry in self.entries:
entry.save()
self.authors = [Author(name=name) for name in authors]
for author in self.authors:
author.save()
for i, entry in enumerate(self.entries):
entry.authors.add(self.authors[i])
entry.authors.add(self.authors[(i+1)%len(self.authors)])
def test_can_traverse_via_function_and_returns_traversed_pair(self):
authors = traverse(self.blogs, Blog.authors)
assertQueryResultsEqual(self, authors, Blog.authors(self.blogs))
def test_can_traverse_via_function_with_filter(self):
f = dict(method='filter', kwargs=dict(name__icontains='Smith'))
authors = traverse(self.blogs, Blog.authors, filters=[f])
assertQueryResultsEqual(self, authors, [x for x in Blog.authors(self.blogs) if 'Smith' in x[0].name])
def test_can_traverse_via_function_with_exclusions(self):
f = dict(method='exclude', kwargs=dict(name__icontains='Smith'))
authors = traverse(self.blogs, Blog.authors, filters=[f])
assertQueryResultsEqual(self, authors, [x for x in Blog.authors(self.blogs) if 'Smith' not in x[0].name])
|
149469
|
r"""
Utility functions for snakemake files
"""
# pylint: disable=missing-function-docstring, redefined-outer-name
from functools import reduce
from operator import add
from pathlib import Path
def conf_expand_pattern(conf, placeholder="null"):
expand_pattern = "-".join(f"{key}:{{{key}}}" for key in conf)
return expand_pattern if expand_pattern else placeholder
def expand(pattern, **wildcards):
from snakemake.io import expand
has_default_choices = False
for val in wildcards.values(): # Sanity check
if isinstance(val, dict):
if "default" not in val or "choices" not in val:
print(val)
raise ValueError("Invalid default choices!")
has_default_choices = True
if not has_default_choices:
return expand(pattern, **wildcards)
expand_set = set()
for key, val in wildcards.items():
if isinstance(val, dict):
wildcards_use = {key: val["choices"]}
for other_key, other_val in wildcards.items():
if other_key == key:
continue
if isinstance(other_val, dict):
wildcards_use[other_key] = other_val["default"]
else:
wildcards_use[other_key] = other_val
expand_set = expand_set.union(expand(pattern, **wildcards_use))
return list(expand_set)
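# Example for the custom expand above (hypothetical wildcards): a dict-valued
# wildcard expands over its "choices" while any other dict-valued wildcards
# stay pinned to their "default"; plain values expand as usual:
#   expand("{m}-{s}", m={"default": "pca", "choices": ["pca", "nmf"]}, s=[0, 1])
#   -> ["pca-0", "pca-1", "nmf-0", "nmf-1"]   (unordered; built from a set)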
def seed2range(config):
for key, val in config.items():
if isinstance(val, dict):
seed2range(val)
elif key.endswith("seed") and val != 0:
config[key] = range(val)
def target_directories(config):
seed2range(config)
dataset = config["dataset"].keys()
subsample_conf = config["subsample"] or {}
subsample_conf = expand(
conf_expand_pattern(subsample_conf, placeholder="original"),
**subsample_conf
)
def per_method(method):
prior_conf = config["prior"] or {}
prior_conf = {} if method in ("UnionCom", "iNMF_FiG", "LIGER_FiG") else prior_conf # Methods that do not use prior feature matching
prior_conf = expand(
conf_expand_pattern(prior_conf, placeholder="null"),
**prior_conf
)
hyperparam_conf = config["method"][method] or {}
hyperparam_conf = expand(
conf_expand_pattern(hyperparam_conf, placeholder="default"),
**hyperparam_conf
)
seed = 0 if method in ("bindSC", ) else config["seed"] # Methods that are deterministic
return expand(
"results/raw/{dataset}/{subsample_conf}/{prior_conf}/{method}/{hyperparam_conf}/seed:{seed}",
dataset=dataset,
subsample_conf=subsample_conf,
prior_conf=prior_conf,
method=method,
hyperparam_conf=hyperparam_conf,
seed=seed
)
return reduce(add, map(per_method, config["method"]))
def target_files(directories):
def per_directory(directory):
directory = Path(directory)
if (directory / ".blacklist").exists():
return []
return [
directory / "metrics.yaml",
directory / "cell_type.pdf",
directory / "domain.pdf"
]
return reduce(add, map(per_directory, directories))
|
149493
|
from rest_framework import serializers
from ..models import Condition
class ConditionExportSerializer(serializers.ModelSerializer):
source = serializers.CharField(source='source.uri', default=None, read_only=True)
target_option = serializers.CharField(source='target_option.uri', default=None, read_only=True)
class Meta:
model = Condition
fields = (
'uri',
'uri_prefix',
'key',
'comment',
'source',
'relation',
'target_text',
'target_option'
)
|
149501
|
import torch
import torch.nn as nn
_input_size = 4
_seq_len = 2
_batch = 3
_hidden_size = 3
# model
class net_RNN(nn.Module):
def __init__(self):
super().__init__()
self.op = nn.RNN(_input_size, _hidden_size, 1)
def forward(self, input):
return self.op(input)
_model_ = net_RNN()
# dummy input for onnx generation
_dummy_ = torch.randn(_seq_len, _batch, _input_size)
|
149509
|
import time
from collections import Counter
from pprint import pprint
from tabulate import tabulate
from tinydb import Query, TinyDB
db = TinyDB("db.json")
def started(task):
return task["start"] is not None
def pending(task):
return not completed(task)
def completed(task):
return task["addr_payload"] is not None
def get_started(tasks):
return [task for task in tasks if started(task)]
def get_pending(tasks):
return [task for task in tasks if pending(task)]
def get_completed(tasks):
return [task for task in tasks if completed(task)]
def one_and_done(task):
return completed(task) and len(task["errors"]) == 0
def one_and_done_percentage(tasks):
started = get_started(tasks)
one_and_dones = [task for task in tasks if one_and_done(task)]
number = len(one_and_dones) / len(started)
return percentage(number)
def get_start_time(tasks):
started = [task for task in tasks if task["start"] is not None]
if not started:
return -1 # FIXME
return min([task["start"] for task in started])
def get_end_time(tasks):
started = [task for task in tasks if task["start"] is not None]
if not started:
return -1 # FIXME
return max([task["start"] for task in started])
def completed_per_second(tasks, interval):
start_time, end_time = interval
elapsed = end_time - start_time
num_completed = len(get_completed(tasks))
return num_completed / elapsed
def tasks_completed(tasks):
completed = get_completed(tasks)
return len(completed)
def get_batch(tasks):
return max([task["batch"] for task in tasks])
def percentage(value):
return f"{value:.2%}"
### Warning: don't implement any of the below code if it wouldn't be useful with sqlite too ...
###############
### history ###
###############
query = Query()
all_tasks = db.search(query["type"] == "task")
highest_batch = get_batch(all_tasks)
tasks_by_batch = {
batch_: [task for task in all_tasks if task["batch"] == batch_]
for batch_ in range(highest_batch)
}
intervals_by_batch = {
batch: (get_start_time(tasks), get_end_time(tasks))
for batch, tasks in tasks_by_batch.items()
}
headers = ["Batch", "Start Time", "# Completed", "One-And-Done %", "Completed / Second"]
rows = [
[
get_batch(tasks),
get_start_time(tasks),
len(get_completed(tasks)),
one_and_done_percentage(tasks),
completed_per_second(tasks, intervals_by_batch[batch]),
]
for batch, tasks in tasks_by_batch.items()
if len(tasks) > 1 # FIXME
]
print(tabulate(rows, headers))
###############
### current ###
###############
# first line is ^^ for just this task
# length-of-queue |
# percentage distribution of different errors
# version number breakdown
# services breakdown
|
149512
|
from discord.ext import commands
from carberretta import Config
class CustomCheckFailure(commands.CheckAnyFailure):
def __init__(self, message):
self.msg = message
class CanNotVerifyQt(CustomCheckFailure):
def __init__(self):
super().__init__("You can not verify QTs.")
def can_verify_qts():
async def predicate(ctx):
if ctx.message.author.id != Config.QT_ID:
raise CanNotVerifyQt()
return True
return commands.check(predicate)
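# Usage sketch (hypothetical command; standard discord.py decorator stacking):
#   @commands.command(name="verifyqt")
#   @can_verify_qts()
#   async def verifyqt(self, ctx):
#       await ctx.send("QT verified.")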
|
149526
|
import argparse
from gym.spaces import Box, Discrete
import os
from ray.rllib.examples.env.action_mask_env import ActionMaskEnv
from ray.rllib.examples.models.action_mask_model import \
ActionMaskModel, TorchActionMaskModel
parser = argparse.ArgumentParser()
parser.add_argument(
"--run",
type=str,
default="APPO",
help="The RLlib-registered algorithm to use.")
parser.add_argument("--num-cpus", type=int, default=0)
parser.add_argument(
"--framework",
choices=["tf", "tf2", "tfe", "torch"],
default="tf",
help="The DL framework specifier.")
parser.add_argument("--eager-tracing", action="store_true")
parser.add_argument(
"--stop-iters",
type=int,
default=200,
help="Number of iterations to train.")
parser.add_argument(
"--stop-timesteps",
type=int,
default=100000,
help="Number of timesteps to train.")
parser.add_argument(
"--stop-reward",
type=float,
default=80.0,
help="Reward at which we stop training.")
parser.add_argument(
"--local-mode",
action="store_true",
help="Init Ray in local mode for easier debugging.")
if __name__ == "__main__":
import ray
from ray import tune
args = parser.parse_args()
ray.init(num_cpus=args.num_cpus or None, local_mode=args.local_mode)
config = {
"env": ActionMaskEnv,
"env_config": {
"action_space": Discrete(100),
"observation_space": Box(-1.0, 1.0, (5, )),
},
"model": {
"custom_model": ActionMaskModel
if args.framework != "torch" else TorchActionMaskModel,
},
# Use GPUs iff `RLLIB_NUM_GPUS` env var set to > 0.
"num_gpus": int(os.environ.get("RLLIB_NUM_GPUS", "0")),
"framework": args.framework,
# Run with tracing enabled for tfe/tf2?
"eager_tracing": args.eager_tracing,
}
stop = {
"training_iteration": args.stop_iters,
"timesteps_total": args.stop_timesteps,
"episode_reward_mean": args.stop_reward,
}
results = tune.run(args.run, config=config, stop=stop, verbose=2)
ray.shutdown()
|
149575
|
from typing import Union, Sequence, Tuple, Callable, Dict, List
from .data.data_context import DataGetter, StaticGetter, CompositeGetter
from .input import MessageHandlerFunc, BaseInput, MessageInput
from .kbd import Keyboard, Group
from .media import Media
from .text import Multi, Format, Text
from .widget_event import WidgetEventProcessor
from ..exceptions import InvalidWidgetType, InvalidWidget
WidgetSrc = Union[str, Text, Keyboard, MessageHandlerFunc, Media, BaseInput]
SingleGetterBase = Union[DataGetter, Dict]
GetterVariant = Union[
None, SingleGetterBase,
List[SingleGetterBase], Tuple[SingleGetterBase, ...],
]
def ensure_text(widget: Union[str, Text, Sequence[Text]]) -> Text:
if isinstance(widget, str):
return Format(widget)
if isinstance(widget, Sequence):
if len(widget) == 1:
return widget[0]
return Multi(*widget)
return widget
def ensure_keyboard(widget: Union[Keyboard, Sequence[Keyboard]]) -> Keyboard:
if isinstance(widget, Sequence):
if len(widget) == 1:
return widget[0]
return Group(*widget)
return widget
def ensure_input(
widget: Union[
MessageHandlerFunc, WidgetEventProcessor, BaseInput,
Sequence[BaseInput]
]
) -> BaseInput:
if isinstance(widget, BaseInput):
return widget
elif isinstance(widget, Sequence):
if len(widget) == 0:
return MessageInput(None)
elif len(widget) == 1:
return widget[0]
else:
raise InvalidWidget(f"Only 1 input supported, got {len(widget)}")
else:
return MessageInput(widget)
def ensure_media(widget: Union[Media, Sequence[Media]]) -> Media:
if isinstance(widget, Media):
return widget
if len(widget) > 1: # TODO case selection of media
raise ValueError("Only one media widget is supported")
if len(widget) == 1:
return widget[0]
return Media()
def ensure_widgets(
widgets: Sequence[WidgetSrc]
) -> Tuple[Text, Keyboard, BaseInput, Media]:
texts = []
keyboards = []
inputs = []
media = []
for w in widgets:
if isinstance(w, (str, Text)):
texts.append(ensure_text(w))
elif isinstance(w, Keyboard):
keyboards.append(ensure_keyboard(w))
elif isinstance(w, (BaseInput, Callable)):
inputs.append(ensure_input(w))
elif isinstance(w, Media):
media.append(ensure_media(w))
else:
raise InvalidWidgetType(
f"Cannot add widget of type {type(w)}. "
f"Only str, Text, Keyboard, BaseInput and Callable are supported"
)
return (
ensure_text(texts),
ensure_keyboard(keyboards),
ensure_input(inputs),
ensure_media(media),
)
def ensure_data_getter(getter: GetterVariant) -> DataGetter:
if isinstance(getter, Callable):
return getter
elif isinstance(getter, dict):
return StaticGetter(getter)
elif isinstance(getter, (list, tuple)):
return CompositeGetter(*map(ensure_data_getter, getter))
elif getter is None:
return StaticGetter({})
else:
raise InvalidWidgetType(
f"Cannot add data getter of type {type(getter)}. "
f"Only Dict, Callable or List of Callables are supported"
)
|
149583
|
import pytest
import os
@pytest.mark.skipif(
"TRAVIS" in os.environ and os.environ["TRAVIS"] == "true",
reason="doesn't work with Travis",
)
def test_album_infos(network):
url_album = (
"https://rateyourmusic.com/release/comp/malicorne/legende___deuxieme_epoque/"
)
album_timeline = network.get_album_timeline(url_album)
if not album_timeline:
raise AssertionError()
if len(album_timeline) < 14:
raise AssertionError()
if not album_timeline[-1]["Date"] == "13 Aug 2004":
raise AssertionError()
|
149587
|
import pandas as pd
import os
from PIL import Image
import numpy as np
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau, TensorBoard
from keras import optimizers, losses, activations, models
from keras.layers import Convolution2D, Dense, Input, Flatten, Dropout, MaxPooling2D, BatchNormalization, \
GlobalMaxPool2D, Concatenate, GlobalMaxPooling2D, GlobalAveragePooling2D, Lambda
from keras.applications.resnet50 import ResNet50
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from keras import backend as K
from tqdm import tqdm
from collections import Counter
def read_and_resize(filepath, input_shape=(256, 256)):
im = Image.open((filepath)).convert('RGB')
im = im.resize(input_shape)
im_array = np.array(im, dtype="uint8")#[..., ::-1]
return np.array(im_array / (np.max(im_array)+ 0.001), dtype="float32")
datagen = ImageDataGenerator(
rotation_range=6,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True,
zoom_range=0.1)
def augment(im_array):
im_array = datagen.random_transform(im_array)
return im_array
def gen(df, batch_size=32, aug=False):
df = df.sample(frac=1)
dict_age = {'(0, 2)' : 0,
'(4, 6)' : 1,
'(8, 12)' : 2,
'(15, 20)' : 3,
'(25, 32)' : 4,
'(38, 43)' : 5,
'(48, 53)' : 6,
'(60, 100)' : 7}
while True:
for i, batch in enumerate([df[i:i+batch_size] for i in range(0,df.shape[0],batch_size)]):
if aug:
images = np.array([augment(read_and_resize(file_path)) for file_path in batch.path.values])
else:
images = np.array([read_and_resize(file_path) for file_path in batch.path.values])
labels = np.array([dict_age[g] for g in batch.age.values])
labels = labels[..., np.newaxis]
yield images, labels
def get_model(n_classes=1):
base_model = ResNet50(weights='imagenet', include_top=False)
#for layer in base_model.layers:
# layer.trainable = False
x = base_model.output
x = GlobalMaxPooling2D()(x)
x = Dropout(0.5)(x)
x = Dense(100, activation="relu")(x)
x = Dropout(0.5)(x)
if n_classes == 1:
x = Dense(n_classes, activation="sigmoid")(x)
else:
x = Dense(n_classes, activation="softmax")(x)
base_model = Model(base_model.input, x, name="base_model")
if n_classes == 1:
base_model.compile(loss="binary_crossentropy", metrics=['acc'], optimizer="adam")
else:
base_model.compile(loss="sparse_categorical_crossentropy", metrics=['acc'], optimizer="adam")
return base_model
def create_path(df, base_path):
df['path'] = df.apply(lambda x: base_path+"aligned/"+x['user_id']+"/landmark_aligned_face.%s.%s"
%(x['face_id'], x['original_image']), axis=1)
return df
def filter_df(df):
dict_age = {'(0, 2)' : 0,
'(4, 6)' : 1,
'(8, 12)' : 2,
'(15, 20)' : 3,
'(25, 32)' : 4,
'(38, 43)' : 5,
'(48, 53)' : 6,
'(60, 100)' : 7}
df['f'] = df.age.apply(lambda x: int(x in dict_age))
df = df[df.f == 1]
return df
if __name__ == "__main__":
base_path = "/media/ml/data_ml/face_age_gender/"
dict_age = {'(0, 2)' : 0,
'(4, 6)' : 1,
'(8, 12)' : 2,
'(15, 20)' : 3,
'(25, 32)' : 4,
'(38, 43)' : 5,
'(48, 53)' : 6,
'(60, 100)' : 7}
bag = 3
all_indexes = list(range(5))
accuracies = []
for test_id in tqdm(all_indexes):
train_id = [j for j in all_indexes if j!=test_id]
print(train_id, test_id)
train_df = pd.concat([pd.read_csv(base_path+"fold_%s_data.txt"%i, sep="\t") for i in train_id])
test_df = pd.read_csv(base_path+"fold_%s_data.txt"%test_id, sep="\t")
train_df = filter_df(train_df)
test_df = filter_df(test_df)
print(train_df.shape, test_df.shape)
train_df = create_path(train_df, base_path=base_path)
test_df = create_path(test_df, base_path=base_path)
cnt_ave = 0
predictions = 0
test_images = np.array([read_and_resize(file_path) for file_path in test_df.path.values])
test_labels = np.array([dict_age[a] for a in test_df.age.values])
for k in tqdm(range(bag)):
tr_tr, tr_val = train_test_split(train_df, test_size=0.1)
file_path = "baseline_age.h5"
checkpoint = ModelCheckpoint(file_path, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
early = EarlyStopping(monitor="val_acc", mode="max", patience=10)
reduce_on_plateau = ReduceLROnPlateau(monitor="val_acc", mode="max", factor=0.1, patience=3)
callbacks_list = [checkpoint, early, reduce_on_plateau] # early
model = get_model(n_classes=len(dict_age))
model.fit_generator(gen(tr_tr, aug=True), validation_data=gen(tr_val), epochs=200, verbose=2, workers=4,
callbacks=callbacks_list, steps_per_epoch=50, validation_steps=30)
model.load_weights(file_path)
predictions += model.predict(test_images)
cnt_ave += 1
test_images = test_images[:, :, ::-1, :]
predictions += model.predict(test_images)
cnt_ave += 1
K.clear_session()
predictions = predictions/cnt_ave
predictions = predictions.argmax(axis=-1)
acc = accuracy_score(test_labels, predictions)
print("accuracy : %s " %acc)
accuracies.append(acc)
print("mean acc : %s (%s) " % (np.mean(accuracies), np.std(accuracies)))
|
149589
|
from asyncio import Event, get_event_loop
class TaskGroup:
def __init__(self, loop=None):
self._loop = loop or get_event_loop()
self._tasks = set()
self.done = Event()
def add(self, coro):
task = self._loop.create_task(coro)
self._tasks.add(task)
self.done.clear()
task.add_done_callback(self._remove)
return task
def _remove(self, task):
self._tasks.remove(task)
        if not self._tasks:
            self.done.set()
def cancel(self):
for task in self._tasks:
task.cancel()
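# Minimal usage sketch (hypothetical coroutines, requires `import asyncio`):
# `done` is set once every task added to the group has finished, so it can be
# awaited as a barrier.
#   async def main():
#       group = TaskGroup()
#       group.add(asyncio.sleep(0.1))
#       group.add(asyncio.sleep(0.2))
#       await group.done.wait()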
|
149591
|
import zipfile
import tkinter
from tkinter import filedialog
import tkinter.messagebox
from pathlib import Path
def un_zip(file_name):
i = 0
try:
        path_use = file_name.get()  # read the chosen path from the StringVar passed in
with zipfile.ZipFile(path_use, "r") as f:
for fn in f.namelist():
i += 1
extracted_path = Path(f.extract(fn))
try:
extracted_path.rename(fn.encode("cp437").decode("gbk"))
except Exception as e:
print(e)
tkinter.messagebox.showwarning("wrong!", "wrong zip!")
tkinter.messagebox.showinfo(
"success!", "finish zip " + str(i) + "!")
except Exception as e:
print(e)
tkinter.messagebox.showwarning("wrong!", "wrong zip!")
def choose_file():
path_ = filedialog.askopenfilename(filetypes=[("zip files", "*.zip")])
path.set(path_)
def zip_main():
    global path
    top = tkinter.Tk()
    path = tkinter.StringVar()
    tkinter.Button(
        top, text="begin", command=lambda: un_zip(file_name=path)
    ).pack()
    tkinter.Button(top, text="choose zip file", command=choose_file).pack()
    # Note: .pack() returns None, so the Entry must not be kept from the
    # chained call; un_zip reads the path from the StringVar instead.
    tkinter.Entry(top, textvariable=path, width=50).pack()
top.mainloop()
if __name__ == "__main__":
zip_main()
|
149603
|
import random
import pandas as pd
shots = 90
ot_shots = 10
##########
# Team 1 #
##########
team1 = {
'2pt rate': .80,
'3pt rate': .20,
'2pt%': .50,
'3pt%': .33333,
'orbd': .225,
'foul3': .015,
'foul2': .057,
'ft%': .77
}
##########
# Team 2 #
##########
team2 = {
'2pt rate': .50,
'3pt rate': .50,
'2pt%': .50,
'3pt%': .33333,
'orbd': .225,
'foul3': .015,
'foul2': .057,
'ft%': .77
}
def shoot_ft(team, ft_attempts):
    """Simulate ft_attempts free throws and return the points made."""
    # Note: the original `while i <= ft_attempts` loop shot one extra
    # free throw per trip to the line.
    ft_points = 0
    for _ in range(ft_attempts):
        if random.random() < team['ft%']:
            ft_points += 1
    return ft_points
def points(team):
roll_shot_type = random.random()
roll_make = random.random()
roll_foul = random.random()
if roll_shot_type <= team['2pt rate']:
if roll_foul <= team['foul2']:
if roll_make <= team['2pt%']:
return 2 + shoot_ft(team, 1)
else:
return shoot_ft(team, 2)
elif roll_make <= team['2pt%']:
return 2
else:
if roll_foul <= team['foul3']:
if roll_make <= team['3pt%']:
return 3 + shoot_ft(team, 1)
else:
return shoot_ft(team, 3)
elif roll_make <= team['3pt%']:
return 3
    # Missed shot with no foul: an offensive rebound restarts the possession.
    roll_orbd = random.random()
    if roll_orbd <= team['orbd']:
        return points(team)
    return 0
def play_game(shots_to_take):
t1_points_in_game = 0
t2_points_in_game = 0
for shot in range(shots_to_take):
t1_points_in_game += points(team1)
t2_points_in_game += points(team2)
return t1_points_in_game, t2_points_in_game
results = []
for game in range(1000000):
t1_points, t2_points = play_game(shots)
while t1_points == t2_points:
t1_new, t2_new = play_game(ot_shots)
t1_points += t1_new
t2_points += t2_new
result = {
'team1': t1_points,
'team2': t2_points,
'game': game,
'team1_win': t1_points > t2_points,
'team2_win': t2_points > t1_points,
}
results.append(result)
frame = pd.DataFrame(results)
team1_wins = frame['team1_win'].sum() / frame.shape[0]
team2_wins = frame['team2_win'].sum() / frame.shape[0]
print('Team 1 wins {0:.2f}% of the time'.format(team1_wins * 100))
print('Team 2 wins {0:.2f}% of the time'.format(team2_wins * 100))
|
149627
|
MX_ROBOT_MAX_NB_ACCELEROMETERS = 1
MX_DEFAULT_ROBOT_IP = "192.168.0.100"
MX_ROBOT_TCP_PORT_CONTROL = 10000
MX_ROBOT_TCP_PORT_FEED = 10001
MX_ROBOT_UDP_PORT_TRACE = 10002
MX_ROBOT_UDP_PORT_RT_CTRL = 10003
MX_CHECKPOINT_ID_MIN = 1
MX_CHECKPOINT_ID_MAX = 8000
MX_ACCELEROMETER_UNIT_PER_G = 16000
MX_GRAVITY_MPS2 = 9.8067
MX_ACCELEROMETER_JOINT_M500 = 5
MX_EXT_TOOL_MPM500_NB_VALVES = 2
MX_EXT_TOOL_VBOX_MAX_VALVES = 6
MX_EIP_MAJOR_VERSION = 2
MX_EIP_MINOR_VERSION = 1
MX_NB_DYNAMIC_PDOS = 4
MX_ROBOT_MODEL_UNKNOWN = 0
MX_ROBOT_MODEL_M500_R1 = 1
MX_ROBOT_MODEL_M500_R2 = 2
MX_ROBOT_MODEL_M500_R3 = 3
MX_ROBOT_MODEL_M1000_R1 = 10
MX_ROBOT_MODEL_SCARA_R1 = 20
MX_EXT_TOOL_NONE = 0
MX_EXT_TOOL_MEGP25_SHORT = 1
MX_EXT_TOOL_MEGP25_LONG = 2
MX_EXT_TOOL_VBOX_2VALVES = 3
MX_EXT_TOOL_TYPE_INVALID = 0xFFFFFFFF
MX_EXT_TOOL_COMPLEMENTARY = 0
MX_EXT_TOOL_INDEPENDENT = 1
MX_EXT_TOOL_POSITION = 2
MX_EXT_TOOL_MODE_INVALID = 0xFFFFFFFF
MX_VALVE_STATE_STAY = -1
MX_VALVE_STATE_CLOSE = 0
MX_VALVE_STATE_OPEN = 1
MX_EVENT_SEVERITY_SILENT = 0
MX_EVENT_SEVERITY_WARNING = 1
MX_EVENT_SEVERITY_PAUSE_MOTION = 2
MX_EVENT_SEVERITY_CLEAR_MOTION = 3
MX_EVENT_SEVERITY_ERROR = 4
MX_EVENT_SEVERITY_INVALID = 0xFFFFFFFF
MX_TORQUE_LIMITS_DETECT_ALL = 0
MX_TORQUE_LIMITS_DETECT_SKIP_ACCEL = 1
MX_TORQUE_LIMITS_INVALID = 0xFFFFFFFF
MX_MOTION_CMD_TYPE_NO_MOVE = 0
MX_MOTION_CMD_TYPE_MOVEJOINTS = 1
MX_MOTION_CMD_TYPE_MOVEPOSE = 2
MX_MOTION_CMD_TYPE_MOVELIN = 3
MX_MOTION_CMD_TYPE_MOVELINRELTRF = 4
MX_MOTION_CMD_TYPE_MOVELINRELWRF = 5
MX_MOTION_CMD_TYPE_DELAY = 6
MX_MOTION_CMD_TYPE_SETBLENDING = 7
MX_MOTION_CMD_TYPE_SETJOINTVEL = 8
MX_MOTION_CMD_TYPE_SETJOINTACC = 9
MX_MOTION_CMD_TYPE_SETCARTANGVEL = 10
MX_MOTION_CMD_TYPE_SETCARTLINVEL = 11
MX_MOTION_CMD_TYPE_SETCARTACC = 12
MX_MOTION_CMD_TYPE_SETTRF = 13
MX_MOTION_CMD_TYPE_SETWRF = 14
MX_MOTION_CMD_TYPE_SETCONF = 15
MX_MOTION_CMD_TYPE_SETAUTOCONF = 16
MX_MOTION_CMD_TYPE_SETCHECKPOINT = 17
MX_MOTION_CMD_TYPE_GRIPPER = 18
MX_MOTION_CMD_TYPE_GRIPPERVEL = 19
MX_MOTION_CMD_TYPE_GRIPPERFORCE = 20
MX_MOTION_CMD_TYPE_MOVEJOINTSVEL = 21
MX_MOTION_CMD_TYPE_MOVELINVELWRF = 22
MX_MOTION_CMD_TYPE_MOVELINVELTRF = 23
MX_MOTION_CMD_TYPE_VELCTRLTIMEOUT = 24
MX_MOTION_CMD_TYPE_SETCONFTURN = 25
MX_MOTION_CMD_TYPE_SETAUTOCONFTURN = 26
MX_MOTION_CMD_TYPE_SETTORQUELIMITS = 27
MX_MOTION_CMD_TYPE_SETTORQUELIMITSCFG = 28
MX_MOTION_CMD_TYPE_MOVEJOINTSREL = 29
MX_MOTION_CMD_TYPE_SETVALVESTATE = 30
MX_MOTION_CMD_TYPE_START_OFFLINE_PROGRAM = 100
MX_MOTION_CMD_TYPE_SETDBG = 1000
MX_EIP_DYNAMIC_AUTO = 0
MX_EIP_DYNAMIC_CFG_FW_VERSION = 1
MX_EIP_DYNAMIC_CFG_PRODUCT_TYPE = 2
MX_EIP_DYNAMIC_CFG_ROBOT_SERIAL = 3
MX_EIP_DYNAMIC_CFG_JOINT_OFFSET = 4
MX_EIP_DYNAMIC_CFG_ROBOT_DH_MODEL_1 = 5
MX_EIP_DYNAMIC_CFG_ROBOT_DH_MODEL_2 = 6
MX_EIP_DYNAMIC_CFG_ROBOT_DH_MODEL_3 = 7
MX_EIP_DYNAMIC_CFG_ROBOT_DH_MODEL_4 = 8
MX_EIP_DYNAMIC_CFG_ROBOT_DH_MODEL_5 = 9
MX_EIP_DYNAMIC_CFG_ROBOT_DH_MODEL_6 = 10
MX_EIP_DYNAMIC_CFG_JOINT_LIMITS_CFG = 11
MX_EIP_DYNAMIC_CFG_MODEL_JOINT_LIMITS_1_2_3 = 12
MX_EIP_DYNAMIC_CFG_MODEL_JOINT_LIMITS_4_5_6 = 13
MX_EIP_DYNAMIC_CFG_JOINT_LIMITS_1_2_3 = 14
MX_EIP_DYNAMIC_CFG_JOINT_LIMITS_4_5_6 = 15
MX_EIP_DYNAMIC_MQ_CONF = 20
MX_EIP_DYNAMIC_MQ_PARAMS = 21
MX_EIP_DYNAMIC_MQ_VEL_ACCEL = 22
MX_EIP_DYNAMIC_MQ_GRIPPER_CFG = 23
MX_EIP_DYNAMIC_MQ_TORQUE_LIMITS_CFG = 24
MX_EIP_DYNAMIC_MQ_TORQUE_LIMITS = 25
MX_EIP_DYNAMIC_RT_TARGET_JOINT_POS = 30
MX_EIP_DYNAMIC_RT_TARGET_CART_POS = 31
MX_EIP_DYNAMIC_RT_TARGET_JOINT_VEL = 32
MX_EIP_DYNAMIC_RT_TARGET_JOINT_TORQ = 33
MX_EIP_DYNAMIC_RT_TARGET_CART_VEL = 34
MX_EIP_DYNAMIC_RT_TARGET_CONF = 35
MX_EIP_DYNAMIC_RT_JOINT_POS = 40
MX_EIP_DYNAMIC_RT_CART_POS = 41
MX_EIP_DYNAMIC_RT_JOINT_VEL = 42
MX_EIP_DYNAMIC_RT_JOINT_TORQ = 43
MX_EIP_DYNAMIC_RT_CART_VEL = 44
MX_EIP_DYNAMIC_RT_CONF = 45
MX_EIP_DYNAMIC_RT_ACCELEROMETER_5 = 46
MX_EIP_DYNAMIC_RT_WRF = 50
MX_EIP_DYNAMIC_RT_TRF = 51
MX_EIP_DYNAMIC_RT_EXTTOOL_STATUS = 52
MX_EIP_DYNAMIC_RT_GRIPPER_VALVE_STATE = 53
MX_EIP_DYNAMIC_FORCE_32_BITS = 0xFFFFFFFF
MX_ST_BUFFER_FULL = 1000
MX_ST_UNKNOWN_CMD = 1001
MX_ST_SYNTAX_ERR = 1002
MX_ST_ARG_ERR = 1003
MX_ST_NOT_ACTIVATED = 1005
MX_ST_NOT_HOMED = 1006
MX_ST_JOINT_OVER_LIMIT = 1007
MX_ST_VEL_OVER_LIMIT = 1008
MX_ST_ACCEL_OVER_LIMIT = 1009
MX_ST_BLOCKED_BY_180_DEG_PROT = 1010
MX_ST_ALREADY_ERR = 1011
MX_ST_SINGULARITY_ERR = 1012
MX_ST_ACTIVATION_ERR = 1013
MX_ST_HOMING_ERR = 1014
MX_ST_MASTER_ERR = 1015
MX_ST_OUT_OF_REACH = 1016
MX_ST_COMM_ERR = 1017
MX_ST_EOS_MISSING = 1018
MX_ST_ROBOT_NOT_LEVELED = 1019
MX_ST_BRAKES_ERR = 1020
MX_ST_DEACTIVATION_ERR = 1021
MX_ST_OFFLINE_SAVE_ERR = 1022
MX_ST_IGNORE_CMD_OFFLINE = 1023
MX_ST_MASTERING_NEEDED = 1024
MX_ST_IMPOSSIBLE_RESET_ERR = 1025
MX_ST_MUST_BE_DEACTIVATED = 1026
MX_ST_SIM_MUST_DEACTIVATED = 1027
MX_ST_NETWORK_ERR = 1028
MX_ST_OFFLINE_FULL = 1029
MX_ST_ALREADY_SAVING = 1030
MX_ST_ILLEGAL_WHILE_SAVING = 1031
MX_ST_GRIPPER_FORCE_OVER_LIMIT = 1035
MX_ST_GRIPPER_VEL_OVER_LIMIT = 1036
MX_ST_GRIPPER_RANGE_OVER_LIMIT = 1037
MX_ST_NO_GRIPPER = 1038
MX_ST_GRIPPER_TEMP_OVER_LIMIT = 1039
MX_ST_CMD_FAILED = 1040
MX_ST_NO_VBOX = 1041
MX_ST_ACTIVATED = 2000
MX_ST_ALREADY_ACTIVATED = 2001
MX_ST_HOME_DONE = 2002
MX_ST_HOME_ALREADY = 2003
MX_ST_DEACTIVATED = 2004
MX_ST_ERROR_RESET = 2005
MX_ST_NO_ERROR_RESET = 2006
MX_ST_GET_STATUS_ROBOT = 2007
MX_ST_BRAKES_OFF = 2008
MX_ST_MASTER_DONE = 2009
MX_ST_BRAKES_ON = 2010
MX_ST_GET_WRF = 2013
MX_ST_GET_TRF = 2014
MX_ST_SET_CART_VEL = 2020
MX_ST_SET_CART_ACC = 2021
MX_ST_SET_JOINT_VEL = 2022
MX_ST_SET_JOINT_ACC = 2023
MX_ST_SET_TOOL_DEF = 2024
MX_ST_SET_WRF = 2025
MX_ST_GET_JOINTS = 2026
MX_ST_GET_POSE = 2027
MX_ST_GET_AUTO_CONF = 2028
MX_ST_GET_CONF = 2029
MX_ST_GET_PHYS_CONF = 2030
MX_ST_GET_AUTO_CONF_TURN = 2031
MX_ST_SET_CORNERING = 2032
MX_ST_CLR_CORNERING = 2033
MX_ST_AUTOCONF_ON = 2034
MX_ST_AUTOCONF_OFF = 2035
MX_ST_GET_CONF_TURN = 2036
MX_ST_ACT_POS_FEED = 2038
MX_ST_DEACT_POS_FEED = 2039
MX_ST_ACT_JOINTS_FEED = 2040
MX_ST_DEACT_JOINTS_FEED = 2041
MX_ST_PAUSE_MOTION = 2042
MX_ST_RESUME_MOTION = 2043
MX_ST_CLEAR_MOTION = 2044
MX_ST_SIM_ON = 2045
MX_ST_SIM_OFF = 2046
MX_ST_EXTTOOL_SIM = 2047
MX_ST_EXTTOOL_SIM_OFF = 2048
MX_ST_RECOVERY_MODE_ON = 2049
MX_ST_RECOVERY_MODE_OFF = 2050
MX_ST_RECOVERY_VEL_CAP = 2051
MX_ST_EOM_ON = 2052
MX_ST_EOM_OFF = 2053
MX_ST_EOB_ON = 2054
MX_ST_EOB_OFF = 2055
MX_ST_START_SAVING = 2060
MX_ST_N_CMD_SAVED = 2061
MX_ST_OFFLINE_ALREADY_SAVING = 2062
MX_ST_OFFLINE_START = 2063
MX_ST_OFFLINE_LOOP_ON = 2064
MX_ST_OFFLINE_LOOP_OFF = 2065
MX_ST_START_PROGRAM_ARDY = 2066
MX_ST_SET_CART_DELTAREF_WRF = 2067
MX_ST_SET_CART_DELTAREF_TRF = 2068
MX_ST_ACTIVATION_IN_PROGRESS = 2070
MX_ST_HOMING_IN_PROGRESS = 2071
MX_ST_MASTER_IN_PROGRESS = 2072
MX_ST_GRIP_HOME = 2075
MX_ST_GRIP_ARD_HOME = 2076
MX_ST_SET_GRIP_FORCE = 2077
MX_ST_SET_GRIP_VEL = 2078
MX_ST_GET_STATUS_GRIPPER = 2079
MX_ST_GET_CMD_PENDING_COUNT = 2080
MX_ST_GET_FW_VERSION = 2081
MX_ST_GET_FW_VERSION_FULL = 2082
MX_ST_GET_ROBOT_SERIAL = 2083
MX_ST_GET_PRODUCT_TYPE = 2084
MX_ST_CMD_SUCCESSFUL = 2085
MX_ST_GET_JOINT_LIMITS = 2090
MX_ST_SET_JOINT_LIMITS = 2092
MX_ST_SET_JOINT_LIMITS_CFG = 2093
MX_ST_GET_JOINT_LIMITS_CFG = 2094
MX_ST_GET_ROBOT_NAME = 2095
MX_ST_SET_CTRL_PORT_MONIT = 2096
MX_ST_SYNC_CMD_QUEUE = 2097
MX_ST_JOINT_TORQUE = 2100
MX_ST_JOINT_SPEED = 2101
MX_ST_JOINT_POS = 2102
MX_ST_CART_POSE = 2103
MX_ST_TEMPERATURE = 2104
MX_ST_GET_ROBOT_KIN_MODEL = 2110
MX_ST_GET_ROBOT_DH_MODEL = 2111
MX_ST_GET_JOINT_OFFSET = 2112
MX_ST_GET_MODEL_JOINT_LIMITS = 2113
MX_ST_GET_MOTION_OPTIONS = 2115
MX_ST_GET_MONITORING_INTERVAL = 2116
MX_ST_GET_REAL_TIME_MONITORING = 2117
MX_ST_GET_STATUS_EVENTS = 2118
MX_ST_GET_NETWORK_OPTIONS = 2119
MX_ST_GET_RTC = 2140
MX_ST_GET_BLENDING = 2150
MX_ST_GET_VEL_TIMEOUT = 2151
MX_ST_GET_JOINT_VEL = 2152
MX_ST_GET_JOINT_ACC = 2153
MX_ST_GET_CART_LIN_VEL = 2154
MX_ST_GET_CART_ANG_VEL = 2155
MX_ST_GET_CART_ACC = 2156
MX_ST_GET_CHECKPOINT = 2157
MX_ST_GET_GRIPPER_FORCE = 2158
MX_ST_GET_GRIPPER_VEL = 2159
MX_ST_GET_TORQUE_LIMITS_CFG = 2160
MX_ST_GET_TORQUE_LIMITS = 2161
MX_ST_RT_TARGET_JOINT_POS = 2200
MX_ST_RT_TARGET_CART_POS = 2201
MX_ST_RT_TARGET_JOINT_VEL = 2202
MX_ST_RT_TARGET_JOINT_TORQ = 2203
MX_ST_RT_TARGET_CART_VEL = 2204
MX_ST_RT_TARGET_CONF = 2208
MX_ST_RT_TARGET_CONF_TURN = 2209
MX_ST_RT_JOINT_POS = 2210
MX_ST_RT_CART_POS = 2211
MX_ST_RT_JOINT_VEL = 2212
MX_ST_RT_JOINT_TORQ = 2213
MX_ST_RT_CART_VEL = 2214
MX_ST_RT_CONF = 2218
MX_ST_RT_CONF_TURN = 2219
MX_ST_RT_ACCELEROMETER = 2220
MX_ST_RT_CHECKPOINT = 2227
MX_ST_RT_WRF = 2228
MX_ST_RT_TRF = 2229
MX_ST_RT_CYCLE_END = 2230
MX_ST_RT_EXTTOOL_STATUS = 2300
MX_ST_RT_VALVE_STATE = 2310
MX_ST_RT_GRIPPER_STATE = 2320
MX_ST_RT_GRIPPER_FORCE = 2321
MX_ST_RT_GRIPPER_POS = 2322
MX_ST_CONNECTED = 3000
MX_ST_USER_ALREADY = 3001
MX_ST_UPGRADE_IN_PROGRESS = 3002
MX_ST_CMD_TOO_LONG = 3003
MX_ST_EOM = 3004
MX_ST_ERROR_MOTION = 3005
MX_ST_SEND_JOINT_RT = 3007
MX_ST_COLLISION = 3008
MX_ST_INIT_FAILED = 3009
MX_ST_SEND_POS_RT = 3010
MX_ST_CANNOT_MOVE = 3011
MX_ST_EOB = 3012
MX_ST_END_OFFLINE = 3013
MX_ST_CANT_SAVE_OFFLINE = 3014
MX_ST_OFFLINE_TIMEOUT = 3015
MX_ST_IGNORING_CMD = 3016
MX_ST_NO_OFFLINE_SAVED = 3017
MX_ST_OFFLINE_LOOP = 3018
MX_ST_JOGGING_STOPPED = 3019
MX_ST_ERROR_GRIPPER = 3025
MX_ST_MAINTENANCE_CHECK = 3026
MX_ST_INTERNAL_ERROR = 3027
MX_ST_EXCESSIVE_TRQ = 3028
MX_ST_CHECKPOINT_REACHED = 3030
MX_ST_TEXT_API_ERROR = 3031
MX_ST_PSTOP = 3032
MX_ST_NO_VALID_CFG = 3033
MX_ST_TRACE_LVL_CHANGED = 3034
MX_ST_TCP_DUMP_STARTED = 3035
MX_ST_TCP_DUMP_DONE = 3036
MX_ST_ERROR_VBOX = 3037
MX_ST_INVALID = 0xFFFFFFFF
class RobotStatusCodeInfo:
    def __init__(self, code, name, is_error):
        """This class contains information about one of the robot status codes above (ex: MX_ST_BUFFER_FULL)
        Parameters
        ----------
        code : integer
            The integer value (ex: 1001)
        name : string
            The code name (ex: "MX_ST_BUFFER_FULL")
        is_error : bool
            True if this is an error code
        """
        self.code = code
        self.name = name
        self.is_error = is_error
robot_status_code_info = {
MX_ST_BUFFER_FULL:
RobotStatusCodeInfo(MX_ST_BUFFER_FULL, "MX_ST_BUFFER_FULL", is_error=True),
MX_ST_UNKNOWN_CMD:
RobotStatusCodeInfo(MX_ST_UNKNOWN_CMD, "MX_ST_UNKNOWN_CMD", is_error=True),
MX_ST_SYNTAX_ERR:
RobotStatusCodeInfo(MX_ST_SYNTAX_ERR, "MX_ST_SYNTAX_ERR", is_error=True),
MX_ST_ARG_ERR:
RobotStatusCodeInfo(MX_ST_ARG_ERR, "MX_ST_ARG_ERR", is_error=True),
MX_ST_NOT_ACTIVATED:
RobotStatusCodeInfo(MX_ST_NOT_ACTIVATED, "MX_ST_NOT_ACTIVATED", is_error=True),
MX_ST_NOT_HOMED:
RobotStatusCodeInfo(MX_ST_NOT_HOMED, "MX_ST_NOT_HOMED", is_error=True),
MX_ST_JOINT_OVER_LIMIT:
RobotStatusCodeInfo(MX_ST_JOINT_OVER_LIMIT, "MX_ST_JOINT_OVER_LIMIT", is_error=True),
MX_ST_BLOCKED_BY_180_DEG_PROT:
RobotStatusCodeInfo(MX_ST_BLOCKED_BY_180_DEG_PROT, "MX_ST_BLOCKED_BY_180_DEG_PROT", is_error=True),
MX_ST_ALREADY_ERR:
RobotStatusCodeInfo(MX_ST_ALREADY_ERR, "MX_ST_ALREADY_ERR", is_error=True),
MX_ST_SINGULARITY_ERR:
RobotStatusCodeInfo(MX_ST_SINGULARITY_ERR, "MX_ST_SINGULARITY_ERR", is_error=True),
MX_ST_ACTIVATION_ERR:
RobotStatusCodeInfo(MX_ST_ACTIVATION_ERR, "MX_ST_ACTIVATION_ERR", is_error=True),
MX_ST_HOMING_ERR:
RobotStatusCodeInfo(MX_ST_HOMING_ERR, "MX_ST_HOMING_ERR", is_error=True),
MX_ST_MASTER_ERR:
RobotStatusCodeInfo(MX_ST_MASTER_ERR, "MX_ST_MASTER_ERR", is_error=True),
MX_ST_OUT_OF_REACH:
RobotStatusCodeInfo(MX_ST_OUT_OF_REACH, "MX_ST_OUT_OF_REACH", is_error=True),
MX_ST_OFFLINE_SAVE_ERR:
RobotStatusCodeInfo(MX_ST_OFFLINE_SAVE_ERR, "MX_ST_OFFLINE_SAVE_ERR", is_error=True),
MX_ST_IGNORE_CMD_OFFLINE:
RobotStatusCodeInfo(MX_ST_IGNORE_CMD_OFFLINE, "MX_ST_IGNORE_CMD_OFFLINE", is_error=True),
MX_ST_MASTERING_NEEDED:
RobotStatusCodeInfo(MX_ST_MASTERING_NEEDED, "MX_ST_MASTERING_NEEDED", is_error=True),
MX_ST_IMPOSSIBLE_RESET_ERR:
RobotStatusCodeInfo(MX_ST_IMPOSSIBLE_RESET_ERR, "MX_ST_IMPOSSIBLE_RESET_ERR", is_error=True),
MX_ST_MUST_BE_DEACTIVATED:
RobotStatusCodeInfo(MX_ST_MUST_BE_DEACTIVATED, "MX_ST_MUST_BE_DEACTIVATED", is_error=True),
MX_ST_SIM_MUST_DEACTIVATED:
RobotStatusCodeInfo(MX_ST_SIM_MUST_DEACTIVATED, "MX_ST_SIM_MUST_DEACTIVATED", is_error=True),
MX_ST_OFFLINE_FULL:
RobotStatusCodeInfo(MX_ST_OFFLINE_FULL, "MX_ST_OFFLINE_FULL", is_error=True),
MX_ST_ALREADY_SAVING:
RobotStatusCodeInfo(MX_ST_ALREADY_SAVING, "MX_ST_ALREADY_SAVING", is_error=True),
MX_ST_ILLEGAL_WHILE_SAVING:
RobotStatusCodeInfo(MX_ST_ILLEGAL_WHILE_SAVING, "MX_ST_ILLEGAL_WHILE_SAVING", is_error=True),
MX_ST_NO_GRIPPER:
RobotStatusCodeInfo(MX_ST_NO_GRIPPER, "MX_ST_NO_GRIPPER", is_error=True),
MX_ST_NO_VBOX:
RobotStatusCodeInfo(MX_ST_NO_VBOX, "MX_ST_NO_VBOX", is_error=True),
MX_ST_CMD_FAILED:
RobotStatusCodeInfo(MX_ST_CMD_FAILED, "MX_ST_CMD_FAILED", is_error=True),
MX_ST_ACTIVATED:
RobotStatusCodeInfo(MX_ST_ACTIVATED, "MX_ST_ACTIVATED", is_error=False),
MX_ST_ALREADY_ACTIVATED:
RobotStatusCodeInfo(MX_ST_ALREADY_ACTIVATED, "MX_ST_ALREADY_ACTIVATED", is_error=False),
MX_ST_HOME_DONE:
RobotStatusCodeInfo(MX_ST_HOME_DONE, "MX_ST_HOME_DONE", is_error=False),
MX_ST_HOME_ALREADY:
RobotStatusCodeInfo(MX_ST_HOME_ALREADY, "MX_ST_HOME_ALREADY", is_error=False),
MX_ST_DEACTIVATED:
RobotStatusCodeInfo(MX_ST_DEACTIVATED, "MX_ST_DEACTIVATED", is_error=False),
MX_ST_ERROR_RESET:
RobotStatusCodeInfo(MX_ST_ERROR_RESET, "MX_ST_ERROR_RESET", is_error=False),
MX_ST_NO_ERROR_RESET:
RobotStatusCodeInfo(MX_ST_NO_ERROR_RESET, "MX_ST_NO_ERROR_RESET", is_error=False),
MX_ST_GET_STATUS_ROBOT:
RobotStatusCodeInfo(MX_ST_GET_STATUS_ROBOT, "MX_ST_GET_STATUS_ROBOT", is_error=False),
MX_ST_BRAKES_OFF:
RobotStatusCodeInfo(MX_ST_BRAKES_OFF, "MX_ST_BRAKES_OFF", is_error=False),
MX_ST_MASTER_DONE:
RobotStatusCodeInfo(MX_ST_MASTER_DONE, "MX_ST_MASTER_DONE", is_error=False),
MX_ST_BRAKES_ON:
RobotStatusCodeInfo(MX_ST_BRAKES_ON, "MX_ST_BRAKES_ON", is_error=False),
MX_ST_GET_WRF:
RobotStatusCodeInfo(MX_ST_GET_WRF, "MX_ST_GET_WRF", is_error=False),
MX_ST_GET_TRF:
RobotStatusCodeInfo(MX_ST_GET_TRF, "MX_ST_GET_TRF", is_error=False),
MX_ST_GET_JOINTS:
RobotStatusCodeInfo(MX_ST_GET_JOINTS, "MX_ST_GET_JOINTS", is_error=False),
MX_ST_GET_POSE:
RobotStatusCodeInfo(MX_ST_GET_POSE, "MX_ST_GET_POSE", is_error=False),
MX_ST_GET_AUTO_CONF:
RobotStatusCodeInfo(MX_ST_GET_AUTO_CONF, "MX_ST_GET_AUTO_CONF", is_error=False),
MX_ST_GET_CONF:
RobotStatusCodeInfo(MX_ST_GET_CONF, "MX_ST_GET_CONF", is_error=False),
MX_ST_GET_AUTO_CONF_TURN:
RobotStatusCodeInfo(MX_ST_GET_AUTO_CONF_TURN, "MX_ST_GET_AUTO_CONF_TURN", is_error=False),
MX_ST_GET_CONF_TURN:
RobotStatusCodeInfo(MX_ST_GET_CONF_TURN, "MX_ST_GET_CONF_TURN", is_error=False),
MX_ST_PAUSE_MOTION:
RobotStatusCodeInfo(MX_ST_PAUSE_MOTION, "MX_ST_PAUSE_MOTION", is_error=False),
MX_ST_RESUME_MOTION:
RobotStatusCodeInfo(MX_ST_RESUME_MOTION, "MX_ST_RESUME_MOTION", is_error=False),
MX_ST_CLEAR_MOTION:
RobotStatusCodeInfo(MX_ST_CLEAR_MOTION, "MX_ST_CLEAR_MOTION", is_error=False),
MX_ST_SIM_ON:
RobotStatusCodeInfo(MX_ST_SIM_ON, "MX_ST_SIM_ON", is_error=False),
MX_ST_SIM_OFF:
RobotStatusCodeInfo(MX_ST_SIM_OFF, "MX_ST_SIM_OFF", is_error=False),
MX_ST_EXTTOOL_SIM:
RobotStatusCodeInfo(MX_ST_EXTTOOL_SIM, "MX_ST_EXTTOOL_SIM", is_error=False),
MX_ST_EOM_ON:
RobotStatusCodeInfo(MX_ST_EOM_ON, "MX_ST_EOM_ON", is_error=False),
MX_ST_EOM_OFF:
RobotStatusCodeInfo(MX_ST_EOM_OFF, "MX_ST_EOM_OFF", is_error=False),
MX_ST_EOB_ON:
RobotStatusCodeInfo(MX_ST_EOB_ON, "MX_ST_EOB_ON", is_error=False),
MX_ST_EOB_OFF:
RobotStatusCodeInfo(MX_ST_EOB_OFF, "MX_ST_EOB_OFF", is_error=False),
MX_ST_START_SAVING:
RobotStatusCodeInfo(MX_ST_START_SAVING, "MX_ST_START_SAVING", is_error=False),
MX_ST_N_CMD_SAVED:
RobotStatusCodeInfo(MX_ST_N_CMD_SAVED, "MX_ST_N_CMD_SAVED", is_error=False),
MX_ST_OFFLINE_START:
RobotStatusCodeInfo(MX_ST_OFFLINE_START, "MX_ST_OFFLINE_START", is_error=False),
MX_ST_OFFLINE_LOOP_ON:
RobotStatusCodeInfo(MX_ST_OFFLINE_LOOP_ON, "MX_ST_OFFLINE_LOOP_ON", is_error=False),
MX_ST_OFFLINE_LOOP_OFF:
RobotStatusCodeInfo(MX_ST_OFFLINE_LOOP_OFF, "MX_ST_OFFLINE_LOOP_OFF", is_error=False),
MX_ST_GET_STATUS_GRIPPER:
RobotStatusCodeInfo(MX_ST_GET_STATUS_GRIPPER, "MX_ST_GET_STATUS_GRIPPER", is_error=False),
MX_ST_GET_CMD_PENDING_COUNT:
RobotStatusCodeInfo(MX_ST_GET_CMD_PENDING_COUNT, "MX_ST_GET_CMD_PENDING_COUNT", is_error=False),
MX_ST_GET_FW_VERSION:
RobotStatusCodeInfo(MX_ST_GET_FW_VERSION, "MX_ST_GET_FW_VERSION", is_error=False),
MX_ST_GET_FW_VERSION_FULL:
RobotStatusCodeInfo(MX_ST_GET_FW_VERSION_FULL, "MX_ST_GET_FW_VERSION_FULL", is_error=False),
MX_ST_GET_ROBOT_SERIAL:
RobotStatusCodeInfo(MX_ST_GET_ROBOT_SERIAL, "MX_ST_GET_ROBOT_SERIAL", is_error=False),
MX_ST_GET_PRODUCT_TYPE:
RobotStatusCodeInfo(MX_ST_GET_PRODUCT_TYPE, "MX_ST_GET_PRODUCT_TYPE", is_error=False),
MX_ST_CMD_SUCCESSFUL:
RobotStatusCodeInfo(MX_ST_CMD_SUCCESSFUL, "MX_ST_CMD_SUCCESSFUL", is_error=False),
MX_ST_SET_CTRL_PORT_MONIT:
RobotStatusCodeInfo(MX_ST_SET_CTRL_PORT_MONIT, "MX_ST_SET_CTRL_PORT_MONIT", is_error=False),
MX_ST_SYNC_CMD_QUEUE:
RobotStatusCodeInfo(MX_ST_SYNC_CMD_QUEUE, "MX_ST_SYNC_CMD_QUEUE", is_error=False),
MX_ST_GET_JOINT_LIMITS:
RobotStatusCodeInfo(MX_ST_GET_JOINT_LIMITS, "MX_ST_GET_JOINT_LIMITS", is_error=False),
MX_ST_SET_JOINT_LIMITS:
RobotStatusCodeInfo(MX_ST_SET_JOINT_LIMITS, "MX_ST_SET_JOINT_LIMITS", is_error=False),
MX_ST_SET_JOINT_LIMITS_CFG:
RobotStatusCodeInfo(MX_ST_SET_JOINT_LIMITS_CFG, "MX_ST_SET_JOINT_LIMITS_CFG", is_error=False),
MX_ST_GET_JOINT_LIMITS_CFG:
RobotStatusCodeInfo(MX_ST_GET_JOINT_LIMITS_CFG, "MX_ST_GET_JOINT_LIMITS_CFG", is_error=False),
MX_ST_GET_ROBOT_NAME:
RobotStatusCodeInfo(MX_ST_GET_ROBOT_NAME, "MX_ST_GET_ROBOT_NAME", is_error=False),
MX_ST_GET_ROBOT_KIN_MODEL:
RobotStatusCodeInfo(MX_ST_GET_ROBOT_KIN_MODEL, "MX_ST_GET_ROBOT_KIN_MODEL", is_error=False),
MX_ST_GET_ROBOT_DH_MODEL:
RobotStatusCodeInfo(MX_ST_GET_ROBOT_DH_MODEL, "MX_ST_GET_ROBOT_DH_MODEL", is_error=False),
MX_ST_GET_JOINT_OFFSET:
RobotStatusCodeInfo(MX_ST_GET_JOINT_OFFSET, "MX_ST_GET_JOINT_OFFSET", is_error=False),
MX_ST_GET_MODEL_JOINT_LIMITS:
RobotStatusCodeInfo(MX_ST_GET_MODEL_JOINT_LIMITS, "MX_ST_GET_MODEL_JOINT_LIMITS", is_error=False),
MX_ST_GET_MOTION_OPTIONS:
RobotStatusCodeInfo(MX_ST_GET_MOTION_OPTIONS, "MX_ST_GET_MOTION_OPTIONS", is_error=False),
MX_ST_GET_MONITORING_INTERVAL:
RobotStatusCodeInfo(MX_ST_GET_MONITORING_INTERVAL, "MX_ST_GET_MONITORING_INTERVAL", is_error=False),
MX_ST_GET_REAL_TIME_MONITORING:
RobotStatusCodeInfo(MX_ST_GET_REAL_TIME_MONITORING, "MX_ST_GET_REAL_TIME_MONITORING", is_error=False),
MX_ST_GET_STATUS_EVENTS:
RobotStatusCodeInfo(MX_ST_GET_STATUS_EVENTS, "MX_ST_GET_STATUS_EVENTS", is_error=False),
MX_ST_GET_NETWORK_OPTIONS:
RobotStatusCodeInfo(MX_ST_GET_NETWORK_OPTIONS, "MX_ST_GET_NETWORK_OPTIONS", is_error=False),
MX_ST_GET_RTC:
RobotStatusCodeInfo(MX_ST_GET_RTC, "MX_ST_GET_RTC", is_error=False),
MX_ST_GET_BLENDING:
RobotStatusCodeInfo(MX_ST_GET_BLENDING, "MX_ST_GET_BLENDING", is_error=False),
MX_ST_GET_VEL_TIMEOUT:
RobotStatusCodeInfo(MX_ST_GET_VEL_TIMEOUT, "MX_ST_GET_VEL_TIMEOUT", is_error=False),
MX_ST_GET_JOINT_VEL:
RobotStatusCodeInfo(MX_ST_GET_JOINT_VEL, "MX_ST_GET_JOINT_VEL", is_error=False),
MX_ST_GET_JOINT_ACC:
RobotStatusCodeInfo(MX_ST_GET_JOINT_ACC, "MX_ST_GET_JOINT_ACC", is_error=False),
MX_ST_GET_CART_LIN_VEL:
RobotStatusCodeInfo(MX_ST_GET_CART_LIN_VEL, "MX_ST_GET_CART_LIN_VEL", is_error=False),
MX_ST_GET_CART_ANG_VEL:
RobotStatusCodeInfo(MX_ST_GET_CART_ANG_VEL, "MX_ST_GET_CART_ANG_VEL", is_error=False),
MX_ST_GET_CART_ACC:
RobotStatusCodeInfo(MX_ST_GET_CART_ACC, "MX_ST_GET_CART_ACC", is_error=False),
MX_ST_GET_CHECKPOINT:
RobotStatusCodeInfo(MX_ST_GET_CHECKPOINT, "MX_ST_GET_CHECKPOINT", is_error=False),
MX_ST_GET_GRIPPER_FORCE:
RobotStatusCodeInfo(MX_ST_GET_GRIPPER_FORCE, "MX_ST_GET_GRIPPER_FORCE", is_error=False),
MX_ST_GET_GRIPPER_VEL:
RobotStatusCodeInfo(MX_ST_GET_GRIPPER_VEL, "MX_ST_GET_GRIPPER_VEL", is_error=False),
MX_ST_GET_TORQUE_LIMITS_CFG:
RobotStatusCodeInfo(MX_ST_GET_TORQUE_LIMITS_CFG, "MX_ST_GET_TORQUE_LIMITS_CFG", is_error=False),
MX_ST_GET_TORQUE_LIMITS:
RobotStatusCodeInfo(MX_ST_GET_TORQUE_LIMITS, "MX_ST_GET_TORQUE_LIMITS", is_error=False),
MX_ST_RT_TARGET_JOINT_POS:
RobotStatusCodeInfo(MX_ST_RT_TARGET_JOINT_POS, "MX_ST_RT_TARGET_JOINT_POS", is_error=False),
MX_ST_RT_TARGET_CART_POS:
RobotStatusCodeInfo(MX_ST_RT_TARGET_CART_POS, "MX_ST_RT_TARGET_CART_POS", is_error=False),
MX_ST_RT_TARGET_JOINT_VEL:
RobotStatusCodeInfo(MX_ST_RT_TARGET_JOINT_VEL, "MX_ST_RT_TARGET_JOINT_VEL", is_error=False),
MX_ST_RT_TARGET_JOINT_TORQ:
RobotStatusCodeInfo(MX_ST_RT_TARGET_JOINT_TORQ, "MX_ST_RT_TARGET_JOINT_TORQ", is_error=False),
MX_ST_RT_TARGET_CART_VEL:
RobotStatusCodeInfo(MX_ST_RT_TARGET_CART_VEL, "MX_ST_RT_TARGET_CART_VEL", is_error=False),
MX_ST_RT_TARGET_CONF:
RobotStatusCodeInfo(MX_ST_RT_TARGET_CONF, "MX_ST_RT_TARGET_CONF", is_error=False),
MX_ST_RT_TARGET_CONF_TURN:
RobotStatusCodeInfo(MX_ST_RT_TARGET_CONF_TURN, "MX_ST_RT_TARGET_CONF_TURN", is_error=False),
MX_ST_RT_JOINT_POS:
RobotStatusCodeInfo(MX_ST_RT_JOINT_POS, "MX_ST_RT_JOINT_POS", is_error=False),
MX_ST_RT_CART_POS:
RobotStatusCodeInfo(MX_ST_RT_CART_POS, "MX_ST_RT_CART_POS", is_error=False),
MX_ST_RT_JOINT_VEL:
RobotStatusCodeInfo(MX_ST_RT_JOINT_VEL, "MX_ST_RT_JOINT_VEL", is_error=False),
MX_ST_RT_JOINT_TORQ:
RobotStatusCodeInfo(MX_ST_RT_JOINT_TORQ, "MX_ST_RT_JOINT_TORQ", is_error=False),
MX_ST_RT_CART_VEL:
RobotStatusCodeInfo(MX_ST_RT_CART_VEL, "MX_ST_RT_CART_VEL", is_error=False),
MX_ST_RT_CONF:
RobotStatusCodeInfo(MX_ST_RT_CONF, "MX_ST_RT_CONF", is_error=False),
MX_ST_RT_CONF_TURN:
RobotStatusCodeInfo(MX_ST_RT_CONF_TURN, "MX_ST_RT_CONF_TURN", is_error=False),
MX_ST_RT_ACCELEROMETER:
RobotStatusCodeInfo(MX_ST_RT_ACCELEROMETER, "MX_ST_RT_ACCELEROMETER", is_error=False),
MX_ST_RT_GRIPPER_FORCE:
RobotStatusCodeInfo(MX_ST_RT_GRIPPER_FORCE, "MX_ST_RT_GRIPPER_FORCE", is_error=False),
MX_ST_RT_EXTTOOL_STATUS:
RobotStatusCodeInfo(MX_ST_RT_EXTTOOL_STATUS, "MX_ST_RT_EXTTOOL_STATUS", is_error=False),
MX_ST_RT_GRIPPER_STATE:
RobotStatusCodeInfo(MX_ST_RT_GRIPPER_STATE, "MX_ST_RT_GRIPPER_STATE", is_error=False),
MX_ST_RT_VALVE_STATE:
RobotStatusCodeInfo(MX_ST_RT_VALVE_STATE, "MX_ST_RT_VALVE_STATE", is_error=False),
MX_ST_RT_CHECKPOINT:
RobotStatusCodeInfo(MX_ST_RT_CHECKPOINT, "MX_ST_RT_CHECKPOINT", is_error=False),
MX_ST_RT_WRF:
RobotStatusCodeInfo(MX_ST_RT_WRF, "MX_ST_RT_WRF", is_error=False),
MX_ST_RT_TRF:
RobotStatusCodeInfo(MX_ST_RT_TRF, "MX_ST_RT_TRF", is_error=False),
MX_ST_RT_CYCLE_END:
RobotStatusCodeInfo(MX_ST_RT_CYCLE_END, "MX_ST_RT_CYCLE_END", is_error=False),
MX_ST_CONNECTED:
RobotStatusCodeInfo(MX_ST_CONNECTED, "MX_ST_CONNECTED", is_error=False),
MX_ST_USER_ALREADY:
RobotStatusCodeInfo(MX_ST_USER_ALREADY, "MX_ST_USER_ALREADY", is_error=True),
MX_ST_UPGRADE_IN_PROGRESS:
RobotStatusCodeInfo(MX_ST_UPGRADE_IN_PROGRESS, "MX_ST_UPGRADE_IN_PROGRESS", is_error=False),
MX_ST_CMD_TOO_LONG:
RobotStatusCodeInfo(MX_ST_CMD_TOO_LONG, "MX_ST_CMD_TOO_LONG", is_error=True),
MX_ST_EOM:
RobotStatusCodeInfo(MX_ST_EOM, "MX_ST_EOM", is_error=False),
MX_ST_ERROR_MOTION:
RobotStatusCodeInfo(MX_ST_ERROR_MOTION, "MX_ST_ERROR_MOTION", is_error=True),
MX_ST_INIT_FAILED:
RobotStatusCodeInfo(MX_ST_INIT_FAILED, "MX_ST_INIT_FAILED", is_error=True),
MX_ST_EOB:
RobotStatusCodeInfo(MX_ST_EOB, "MX_ST_EOB", is_error=False),
MX_ST_END_OFFLINE:
RobotStatusCodeInfo(MX_ST_END_OFFLINE, "MX_ST_END_OFFLINE", is_error=False),
MX_ST_CANT_SAVE_OFFLINE:
RobotStatusCodeInfo(MX_ST_CANT_SAVE_OFFLINE, "MX_ST_CANT_SAVE_OFFLINE", is_error=True),
MX_ST_IGNORING_CMD:
RobotStatusCodeInfo(MX_ST_IGNORING_CMD, "MX_ST_IGNORING_CMD", is_error=True),
MX_ST_NO_OFFLINE_SAVED:
RobotStatusCodeInfo(MX_ST_NO_OFFLINE_SAVED, "MX_ST_NO_OFFLINE_SAVED", is_error=True),
MX_ST_OFFLINE_LOOP:
RobotStatusCodeInfo(MX_ST_OFFLINE_LOOP, "MX_ST_OFFLINE_LOOP", is_error=False),
MX_ST_ERROR_GRIPPER:
RobotStatusCodeInfo(MX_ST_ERROR_GRIPPER, "MX_ST_ERROR_GRIPPER", is_error=True),
MX_ST_ERROR_VBOX:
RobotStatusCodeInfo(MX_ST_ERROR_VBOX, "MX_ST_ERROR_VBOX", is_error=True),
MX_ST_MAINTENANCE_CHECK:
RobotStatusCodeInfo(MX_ST_MAINTENANCE_CHECK, "MX_ST_MAINTENANCE_CHECK", is_error=True),
MX_ST_INTERNAL_ERROR:
RobotStatusCodeInfo(MX_ST_INTERNAL_ERROR, "MX_ST_INTERNAL_ERROR", is_error=True),
MX_ST_EXCESSIVE_TRQ:
RobotStatusCodeInfo(MX_ST_EXCESSIVE_TRQ, "MX_ST_EXCESSIVE_TRQ", is_error=True),
MX_ST_CHECKPOINT_REACHED:
RobotStatusCodeInfo(MX_ST_CHECKPOINT_REACHED, "MX_ST_CHECKPOINT_REACHED", is_error=False),
MX_ST_TEXT_API_ERROR:
RobotStatusCodeInfo(MX_ST_TEXT_API_ERROR, "MX_ST_TEXT_API_ERROR", is_error=True),
MX_ST_PSTOP:
RobotStatusCodeInfo(MX_ST_PSTOP, "MX_ST_PSTOP", is_error=True),
MX_ST_NO_VALID_CFG:
RobotStatusCodeInfo(MX_ST_NO_VALID_CFG, "MX_ST_NO_VALID_CFG", is_error=True),
MX_ST_TRACE_LVL_CHANGED:
RobotStatusCodeInfo(MX_ST_TRACE_LVL_CHANGED, "MX_ST_TRACE_LVL_CHANGED", is_error=False),
MX_ST_TCP_DUMP_STARTED:
RobotStatusCodeInfo(MX_ST_TCP_DUMP_STARTED, "MX_ST_TCP_DUMP_STARTED", is_error=False),
MX_ST_TCP_DUMP_DONE:
RobotStatusCodeInfo(MX_ST_TCP_DUMP_DONE, "MX_ST_TCP_DUMP_DONE", is_error=False),
}
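# A small lookup sketch for the table above (illustrative only): codes that
# are not in the dict simply return None.
if __name__ == "__main__":
    info = robot_status_code_info.get(MX_ST_NOT_HOMED)
    if info is not None and info.is_error:
        print("%s (%d) is an error code" % (info.name, info.code))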
|
149636
|
from __future__ import annotations
from typing import List, Tuple, Union
import numpy as np
# currently only supports a 2x2 matrix or a 2x1 column vector
class Matrix():
def __init__(self,
arr: Union[List[float], np.ndarray],
data_type: str = 'mat',
row: int = 2,
col: int = 2):
self._data_type: str = data_type
self._val: np.ndarray = np.array(arr).reshape(
row, 1 if data_type == 'vec' else col)
# unary operator
def __neg__(self) -> Matrix:
return Matrix(-self._val, self._data_type)
def __pos__(self) -> Matrix:
return Matrix(self._val, self._data_type)
def __invert__(self):
raise NotImplementedError
# binary operator
def __add__(self, other: Union[float, int, Matrix]) -> Matrix:
if isinstance(other, float) or isinstance(other, int):
return Matrix(self._val + other, self._data_type)
else:
return Matrix(self._val + other._val, self._data_type)
def __sub__(self, other: Union[float, int, Matrix]) -> Matrix:
if isinstance(other, float) or isinstance(other, int):
return Matrix(self._val - other, self._data_type)
else:
return Matrix(self._val - other._val, self._data_type)
def __mul__(self, other: Union[float, int, Matrix]) -> Matrix:
if isinstance(other, float) or isinstance(other, int):
return Matrix(self._val * other, self._data_type)
else:
assert self._val.shape[1] == other._val.shape[0]
return Matrix(self._val @ other._val, other._data_type)
def __truediv__(self, other: float) -> Matrix:
assert not np.isclose(other, 0)
return Matrix(self._val / other, self._data_type)
def __floordiv__(self, other):
raise NotImplementedError
def __mod__(self, other):
raise NotImplementedError
def __pow__(self, other):
raise NotImplementedError
def __rshift__(self, other):
raise NotImplementedError
def __lshift__(self, other):
raise NotImplementedError
def __and__(self, other):
raise NotImplementedError
def __or__(self, other):
raise NotImplementedError
def __xor__(self, other):
raise NotImplementedError
    # comparison operators
def __lt__(self, other):
raise NotImplementedError
def __gt__(self, other):
raise NotImplementedError
def __le__(self, other):
raise NotImplementedError
def __ge__(self, other):
raise NotImplementedError
def __eq__(self, other) -> bool:
return np.isclose(self._val, other._val).all()
def __ne__(self, other) -> bool:
return not np.isclose(self._val, other._val).all()
# assignment operator
def __isub__(self, other: Union[float, int, Matrix]) -> Matrix:
if isinstance(other, float) or isinstance(other, int):
self._val -= other
else:
self._val -= other._val
return self
def __iadd__(self, other: Union[float, int, Matrix]) -> Matrix:
if isinstance(other, float) or isinstance(other, int):
self._val += other
else:
self._val += other._val
return self
def __imul__(self, other: Union[float, int, Matrix]) -> Matrix:
if isinstance(other, float) or isinstance(other, int):
self._val *= other
else:
assert self._val.shape[1] == other._val.shape[0]
self._val = self._val @ other._val
return self
def __idiv__(self, other: float) -> Matrix:
assert not np.isclose(other, 0)
self._val /= other
return self
def __ifloordiv__(self, other):
raise NotImplementedError
def __imod__(self, other):
raise NotImplementedError
def __ipow__(self, other):
raise NotImplementedError
def __irshift__(self, other):
raise NotImplementedError
def __ilshift__(self, other):
raise NotImplementedError
def __iand__(self, other):
raise NotImplementedError
def __ior__(self, other):
raise NotImplementedError
def __ixor__(self, other):
raise NotImplementedError
def __str__(self) -> str:
res: str = ''
for i in self._val:
res += str(i) + '\n'
return res
@property
def x(self) -> float:
'''extern interface for the 2d vector's x pos
Returns
-------
float
x pos of the vector
'''
return self._val[0, 0]
@x.setter
def x(self, val: float):
self._val[0, 0] = val
@property
def y(self) -> float:
'''extern interface for the 2d vector's y pos
Returns
-------
float
y pos of the vector
'''
if self._val.shape == (2, 1):
return self._val[1, 0]
elif self._val.shape == (1, 2):
return self._val[0, 1]
else:
raise ValueError
    @y.setter
    def y(self, val: float):
        # Mirror the getter: handle both column and row vector layouts.
        if self._val.shape == (2, 1):
            self._val[1, 0] = val
        elif self._val.shape == (1, 2):
            self._val[0, 1] = val
        else:
            raise ValueError
@property
def shape(self) -> Tuple[int, ...]:
return self._val.shape
@property
def size(self) -> int:
return self._val.size
@property
def row1(self) -> Matrix:
assert self._val.shape == (2, 2)
return Matrix(self._val[0], 'vec')
@property
def row2(self) -> Matrix:
assert self._val.shape == (2, 2)
return Matrix(self._val[1], 'vec')
def reshape(self, row: int, col: int) -> Matrix:
self._val = self._val.reshape(row, col)
return self
    def value(self, row: int = 0, col: int = 0) -> float:
        assert self._val.shape == (2, 2)
        assert 0 <= row < self._val.shape[0]
        assert 0 <= col < self._val.shape[1]
        return self._val[row, col]
def determinant(self) -> float:
assert self._val.shape == (2, 2)
return np.linalg.det(self._val)
def transpose(self) -> Matrix:
self._val = self._val.T
return self
def invert(self) -> Matrix:
assert self._val.shape == (2, 2)
self._val = np.linalg.inv(self._val)
return self
def skew_symmetric_mat(self, vec: Matrix) -> Matrix:
assert self._val.shape == (2, 2)
return Matrix([0, -vec._val[1, 0], vec._val[0, 0], 0])
def identity_mat(self) -> Matrix:
assert self._val.shape == (2, 2)
return Matrix([1, 0, 0, 1])
def len_square(self) -> float:
return np.square(self._val).sum()
def len(self) -> float:
return np.sqrt(self.len_square())
def theta(self) -> float:
assert self._val.shape == (2, 1)
assert not np.isclose(self._val[0, 0], 0)
return np.arctan2(self._val[1, 0], self._val[0, 0])
def set_value(self, val: Union[List[float], Matrix]) -> Matrix:
if isinstance(val, list):
self._val = np.array(val).reshape(self._val.shape)
elif isinstance(val, Matrix):
self._val = val._val
return self
def clear(self) -> Matrix:
if self._val.shape == (2, 2):
self.set_value([0.0, 0.0, 0.0, 0.0])
else:
self.set_value([0.0, 0.0])
return self
def negate(self) -> Matrix:
self._val = -self._val
return self
def negative(self) -> Matrix:
return Matrix(-self._val, self._data_type)
def swap(self, other: Matrix) -> Matrix:
assert self._data_type == other._data_type
assert self._val.shape == other._val.shape
self._val, other._val = other._val, self._val
return self
def normalize(self) -> Matrix:
assert not np.isclose(self.len(), 0)
self._val /= self.len()
return self
def normal(self) -> Matrix:
assert not np.isclose(self.len(), 0)
return Matrix(self._val / self.len(), self._data_type)
def is_origin(self) -> bool:
assert self._val.shape == (2, 1)
return np.isclose(self._val, [0, 0]).all()
def dot(self, other: Matrix) -> float:
assert self._val.shape == (2, 1)
assert other._val.shape == (2, 1)
return np.dot(self._val.T, other._val)[0, 0]
def cross(self, other: Matrix) -> float:
assert self._val.shape == (2, 1)
assert other._val.shape == (2, 1)
# NOTE: same as the cross_product method
return np.cross(self._val.reshape(2), other._val.reshape(2)).tolist()
def perpendicular(self) -> Matrix:
assert self._val.shape == (2, 1)
return Matrix([-self._val[1, 0], self._val[0, 0]], self._data_type)
@staticmethod
def dot_product(veca: Matrix, vecb: Matrix) -> float:
assert veca._val.shape == (2, 1)
assert vecb._val.shape == (2, 1)
return np.dot(veca._val.T, vecb._val)[0, 0]
@staticmethod
def cross_product(veca: Matrix, vecb: Matrix) -> float:
assert veca._val.shape == (2, 1)
assert vecb._val.shape == (2, 1)
# NOTE: just hack this impl to output scalar val otherwise vector
# to pass mypy check
return np.cross(veca._val.reshape(2), vecb._val.reshape(2)).tolist()
@staticmethod
def cross_product2(lhs: Union[Matrix, float], rhs: Union[Matrix,
float]) -> Matrix:
if isinstance(lhs, float) and isinstance(rhs, Matrix):
assert rhs._val.shape == (2, 1)
return Matrix([-rhs.y, rhs.x], 'vec') * lhs
elif isinstance(lhs, Matrix) and isinstance(rhs, float):
assert lhs._val.shape == (2, 1)
return Matrix([lhs.y, -lhs.x], 'vec') * rhs
else:
raise TypeError
@staticmethod
def rotate_mat(radian: float) -> Matrix:
res: List[float] = []
cos_val: float = np.cos(radian)
sin_val: float = np.sin(radian)
res.append(cos_val)
res.append(-sin_val)
res.append(sin_val)
res.append(cos_val)
return Matrix(res)
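# A minimal usage sketch of the Matrix class above (illustrative only):
if __name__ == "__main__":
    v = Matrix([1.0, 0.0], 'vec')        # 2x1 column vector
    rot = Matrix.rotate_mat(np.pi / 2)   # 90-degree rotation matrix
    rotated = rot * v                    # matrix @ vector -> 2x1 vector
    print(rotated.x, rotated.y)          # approximately (0.0, 1.0)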
|
149647
|
import os
from dragoneye.cloud_scanner.base_cloud_scanner import CloudScanSettings, CloudProvider
class GcpCloudScanSettings(CloudScanSettings):
def __init__(self,
commands_path: str,
account_name: str,
project_id: str,
output_path: str = os.getcwd(),
                 should_clean_before_scan: bool = True):
        """
        The settings that the GCP scanner uses for GCP scanning.
        :param commands_path: The path of a YAML file that describes the scan commands to be used.
        :param account_name: A name for the scan results.
        :param project_id: The project ID of the project to scan.
        :param output_path: The directory where results will be saved. Defaults to the current working directory.
        :param should_clean_before_scan: A flag that determines whether prior results of this specific account
                                         (identified by account_name) should be deleted before scanning.
        """
super().__init__(CloudProvider.GCP, account_name, should_clean_before_scan, output_path, commands_path)
self.project_id: str = project_id
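# A minimal usage sketch (the YAML path and project ID below are hypothetical;
# per the docstring above, the resulting settings object is what the GCP
# scanner consumes):
#
#     settings = GcpCloudScanSettings(
#         commands_path="gcp_commands.yaml",
#         account_name="my-scan",
#         project_id="my-gcp-project",
#     )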
|
149675
|
import gdb
class GetFrameSizesCommand(gdb.Command):
"""Gets the size of each frame, and of the whole stack.
Useful debugging tool to find out which functions have a fat stackframe.
"""
def __init__(self):
super(GetFrameSizesCommand, self).__init__(
"get-frame-sizes",
gdb.COMMAND_STATUS,
gdb.COMPLETE_NONE,
False)
    def invoke(self, args, from_tty):
        # Start one frame above the newest so every visited frame has a newer
        # neighbour whose stack pointer bounds it from below.
        frame = gdb.newest_frame()
        frame = frame.older()
        total = 0
        while frame is not None:
            newer_frame = frame.newer()
            # Assumes a 32-bit x86 target ("esp"); use "rsp" on x86-64.
            bottom_esp = int(newer_frame.read_register("esp"))
            top_esp = int(frame.read_register("esp"))
            function_name = frame.function()
            print(str(function_name) + ": " + hex(top_esp - bottom_esp))
            total += top_esp - bottom_esp
            frame = frame.older()
        print("Total stack size: " + hex(total))
GetFrameSizesCommand()
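# Usage sketch (the file name is hypothetical): source this script inside gdb
# and run the command while the inferior is stopped with a live call stack.
#
#     (gdb) source get_frame_sizes.py
#     (gdb) get-frame-sizes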
|
149685
|
from charts.GanttChart import GanttChart
REDDIT_CSV_ROUTE = "../../output/reddit/reddit.csv"
REDDIT_CSV_OUTLIERS_ROUTE = "../../output/reddit/reddit_filtered.csv"
REDDIT_CSV_LIVEVARS_ROUTE = "../../output/reddit/reddit.csv"
SOF_CSV_ROUTE = "../../output/stackoverflow/stackoverflow.csv"
SOF_CSV_OUTLIERS_ROUTE = "../../output/stackoverflow/stackoverflow_outliers.csv"
SOF_CSV_LIVEVARS_ROUTE = "../../output/stackoverflow/stackoverflow_livevars.csv"
def main():
    chartCreator = GanttChart(REDDIT_CSV_ROUTE)
    chartCreator.showCharts()
if __name__ == "__main__":
    main()
|
149688
|
import os
from shutil import copyfileobj
import urllib3
from urllib3.util import parse_url
from checksumming_io.checksumming_io import ChecksummingSink
from .parallel_logger import logger
from .utils import sizeof_fmt, measure_duration_and_rate
from .s3 import S3Location, S3Agent, S3ObjectTagger
from bundle_tools import LocalBundle, File, DataFile, MetadataFile, SubmissionInfo
class BundleMissingDataFile(Exception):
pass
def report_duration_and_rate(func, *args, size):
retval, duration, rate = measure_duration_and_rate(func, *args, size=size)
logger.output(" (%.1f sec, %.1f MiB/sec) " % (duration, rate))
return retval
s3 = S3Agent()
http = urllib3.PoolManager()
class BundleStager:
def __init__(self, bundle: LocalBundle, target_bucket: str):
self.bundle = bundle
self.target_bucket = target_bucket
def stage(self, comment=""):
logger.output(f"\nBundle: {self.bundle.path} {comment}", "B")
try:
self.bundle.submission_info = SubmissionInfo(self.target_bucket, self.bundle)
self.bundle.submission_info.load()
self.bundle.enumerate_local_metadata_files()
self.bundle.enumerate_data_files_using_manifest()
self._stage_files_of_type(DataFile)
self._stage_files_of_type(MetadataFile)
if self.bundle.submission_info.save():
logger.output("\n Writing submission.json", progress_char='*')
except BundleMissingDataFile as e:
logger.output(f" -> {str(e)}\n", "!")
logger.flush()
def _stage_files_of_type(self, file_class):
files = [file for file in self.bundle.files.values() if type(file) == file_class]
logger.output(f"\n {file_class.__name__}s ({len(files)}):")
for file in files:
logger.output(f"\n {file.name} ({sizeof_fmt(file.size)}) ")
if type(file) == DataFile:
DataFileStager(file).stage_file(self.target_bucket)
else:
MetadataFileStager(file).stage(self.target_bucket)
class DataFileStager:
def __init__(self, file):
self.file = file
self.bundle = file.bundle
self.target_url = None
def stage_file(self, target_bucket):
self.target_url = f"s3://{target_bucket}/{self.file.path()}"
if self._obj_is_at_target_location():
logger.output("=present ", progress_char="โ๏ธ")
else:
src_location = self.source_data_file()
self.copy_file_to_target_location(src_location)
self._delete_downloaded_file(src_location)
self.file.staged_url = self.target_url
self._ensure_checksum_tags()
def _obj_is_at_target_location(self):
obj = s3.get_object(self.target_url)
if obj:
if obj.content_length == self.file.size:
return self.etag_matches_or_not_present(obj)
else:
logger.output("\n exists at target but has different size: %d / %d" %
(self.file.size, obj.content_length))
return False
def etag_matches_or_not_present(self, obj):
s3_etag = obj.e_tag.strip('"')
tags = s3.get_tagging(self.target_url)
if tags.get('hca-dss-s3_etag'):
if s3_etag == tags['hca-dss-s3_etag']:
return True
else:
logger.output("\n exists at target but has wrong etag: %s / %s" %
(s3_etag, tags['hca-dss-s3_etag']))
logger.output("\n copy to itself to correct etag... ", progress_char='โป๏ธ')
report_duration_and_rate(s3.copy_between_buckets,
self.target_url, self.target_url, self.file.size,
size=self.file.size)
return True
else:
# File size matches but file has no checksum tag.
# Assume file is good and proceed so we compute new checksums.
return True
def source_data_file(self):
location = self._find_locally() or self._download_from_origin()
if location:
logger.output(f"\n found at {location}")
return location
raise BundleMissingDataFile(f"Cannot find source for {self.file.name}")
def copy_file_to_target_location(self, source_location):
if parse_url(source_location).scheme == 's3':
self.copy_s3_file_to_target_location(source_location)
elif parse_url(source_location).scheme == 'file':
self.copy_local_file_to_target_location(source_location)
else:
raise RuntimeError(f"Unrecognized scheme: {source_location}")
def copy_s3_file_to_target_location(self, source_location):
logger.output(f"\n copy to {self.target_url} ", "C")
report_duration_and_rate(s3.copy_between_buckets,
source_location,
self.target_url,
self.file.size,
size=self.file.size)
S3ObjectTagger(self.target_url).complete_tags()
def copy_local_file_to_target_location(self, source_location):
local_path = parse_url(source_location).path.lstrip('/')
logger.output(f"\n upload to {self.target_url} ", "โฌ๏ธ")
self.file.checksums = report_duration_and_rate(s3.upload_and_checksum,
local_path,
self.target_url,
self.file.size,
size=self.file.size)
S3ObjectTagger(self.target_url).tag_using_these_checksums(self.file.checksums)
logger.output("+tagging ")
def _find_locally(self):
local_path = self.file.path()
if os.path.isfile(local_path) and os.stat(local_path).st_size == self.file.size:
return f"file:///{local_path}"
return None
def _download_from_origin(self):
logger.output(f"\n downloading {self.file.origin_url}", "โ")
dest_path = self.file.path()
try:
report_duration_and_rate(self._download, self.file.origin_url, dest_path, size=self.file.size)
return f"file:///{dest_path}"
# except urllib.error.HTTPError:
except Exception as e:
logger.output(f" error downloading ({str(e)})", "!")
os.remove(dest_path)
return None
def _ensure_checksum_tags(self):
if S3ObjectTagger(self.target_url).complete_tags():
logger.progress("+")
@staticmethod
def _delete_downloaded_file(location):
urlbits = parse_url(location)
if urlbits.scheme == 'file':
logger.output(f"\n Deleting {location}")
os.remove(urlbits.path.lstrip('/'))
@staticmethod
def _download(src_url: str, dest_path: str):
with open(dest_path, 'wb') as out_file:
# TODO now that we switched from urlopen(), this will fail with FTP files
with http.request('GET', src_url, preload_content=False) as in_stream:
copyfileobj(in_stream, out_file)
class MetadataFileStager:
def __init__(self, file: File):
self.file = file
self.bundle = file.bundle
def stage(self, bucket):
self.target = f"s3://{bucket}/{self.file.path()}"
if self._obj_is_at_target_location():
logger.output("=present ", progress_char="โ")
S3ObjectTagger(self.target).complete_tags()
else:
logger.output("+uploading ", progress_char="โ")
checksums = s3.upload_and_checksum(self.file.path(), self.target, self.file.size)
S3ObjectTagger(self.target).tag_using_these_checksums(checksums)
logger.output("+tagging ")
self.file.staged_url = self.target
def _obj_is_at_target_location(self):
obj = s3.get_object(self.target)
if obj:
local_checksums = self._checksum_local_file()
if local_checksums['s3_etag'] == obj.e_tag.strip('"'):
return True
else:
                logger.output("\n  exists at target but has a different ETag")
return False
def _checksum_local_file(self):
with ChecksummingSink() as sink:
with open(self.file.path(), 'rb') as fh:
copyfileobj(fh, sink)
return sink.get_checksums()
|
149715
|
import os
import io
import glob
import matplotlib.pyplot as plt
import imageio
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
SIZE_GUIDANCE = {
'images': 20
}
def plot_alignment(log_dir, save_dir):
tf_event = glob.glob(os.path.join(log_dir, "events.*"))[0]
event_acc = EventAccumulator(tf_event, size_guidance=SIZE_GUIDANCE)
event_acc.Reload()
alignments = event_acc.Images('alignment')
for alignment in alignments:
img_str = alignment.encoded_image_string
step = alignment.step
f = io.BytesIO(img_str)
img = plt.imread(f)
plt.imshow(img)
plt.axis('off')
plt.title('step {:0>5d}'.format(step))
plt.tight_layout()
plt.savefig('{}/{:0>5d}.png'.format(save_dir, step))
plt.clf()
def make_alignment_gif(log_dir):
    # Basename of the log directory (robust to a missing trailing separator,
    # which made the original split(os.sep)[-2] raise IndexError).
    save_dir = os.path.basename(os.path.normpath(log_dir)) + '-alignments'
    gif_fp = os.path.join(save_dir, 'alignments.gif')
os.makedirs(save_dir, exist_ok=True)
plot_alignment(log_dir, save_dir)
png_fns = sorted([fn for fn in os.listdir(save_dir) if fn.endswith('.png')])
images = []
for fn in png_fns:
png_fp = os.path.join(save_dir, fn)
images.append(imageio.imread(png_fp))
imageio.mimsave(gif_fp, images, duration=0.5)
if __name__ == '__main__':
log_dir1 = 'NV-tacotron2-log'
make_alignment_gif(log_dir1)
|
149723
|
import pytest
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from catboost import CatBoostClassifier
from pydrift import DataDriftChecker, ModelDriftChecker, DriftCheckerEstimator
from pydrift.exceptions import (ColumnsNotMatchException,
DriftEstimatorException)
from pydrift.models import cat_features_fillna
from pydrift.constants import PATH_DATA, RANDOM_STATE
TARGET = 'Survived'
df_titanic = pd.read_csv(PATH_DATA / 'titanic.csv')
X = df_titanic.drop(columns=['PassengerId', 'Name', 'Ticket', 'Cabin', TARGET])
y = df_titanic[TARGET]
X_women = X[X['Sex'] == 'female']
X_men = X[X['Sex'] == 'male']
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=.5, random_state=RANDOM_STATE, stratify=y
)
cat_features = list(
X
.select_dtypes(include=['category', 'object'])
.columns
)
X_filled = cat_features_fillna(X, cat_features)
X_filled_train, X_filled_test, y_filled_train, y_filled_test = (
train_test_split(
X_filled, y, test_size=.5, random_state=RANDOM_STATE, stratify=y
)
)
df_left_data = pd.concat([X_filled_train, y_filled_train], axis=1)
df_right_data = pd.concat([X_filled_test, y_filled_test], axis=1)
def test_columns_not_match_exception():
    """Tests that the columns-not-match custom
    exception is raised correctly"""
with pytest.raises(ColumnsNotMatchException):
DataDriftChecker(
X_train.drop(columns='Sex'), X_test, minimal=True, verbose=False
)
DataDriftChecker(
X_train, X_test.drop(columns='Cabin'), minimal=True, verbose=False
)
def test_estimator_drift_ko():
    """Tests that drifted data is correctly flagged
    in a pipeline
    """
with pytest.raises(DriftEstimatorException):
df_train_filled = pd.concat([X_filled_train, y_train], axis=1)
df_train_filled_drifted = df_train_filled[
(df_train_filled['Pclass'] > 1) & (
df_train_filled['Fare'] > 10)
].copy()
X_train_filled_drifted = df_train_filled_drifted.drop(columns=TARGET)
y_train_filled_drifted = df_train_filled_drifted[TARGET]
df_test_filled = pd.concat([X_filled_test, y_test], axis=1)
df_test_filled_drifted = df_test_filled[
~(df_test_filled['Pclass'] > 1) & (
df_test_filled['Fare'] > 10)
].copy()
X_test_filled_drifted = df_test_filled_drifted.drop(columns=TARGET)
ml_classifier_model = CatBoostClassifier(
num_trees=5,
max_depth=3,
cat_features=cat_features,
random_state=RANDOM_STATE,
verbose=False
)
pipeline_catboost_drift_checker = make_pipeline(
DriftCheckerEstimator(ml_classifier_model=ml_classifier_model,
column_names=X.columns.tolist(),
minimal=True)
)
pipeline_catboost_drift_checker.fit(X_train_filled_drifted,
y_train_filled_drifted)
pipeline_catboost_drift_checker.predict_proba(X_test_filled_drifted)
def test_data_drift_ok():
    """Tests that non-drifted data passes the checks"""
data_drift_checker_ok = DataDriftChecker(
X_train, X_test, minimal=True, verbose=False
)
data_drift_checker_ok.check_categorical_columns()
assert not data_drift_checker_ok.ml_model_can_discriminate()
assert not data_drift_checker_ok.check_numerical_columns()
assert not data_drift_checker_ok.check_categorical_columns()
def test_data_drift_ko():
    """Tests that drifted data is correctly detected"""
    data_drift_checker_ko = DataDriftChecker(
        X_women, X_men, minimal=True, verbose=False
    )
    assert data_drift_checker_ko.ml_model_can_discriminate()
    assert data_drift_checker_ko.check_numerical_columns()
    assert data_drift_checker_ko.check_categorical_columns()
def test_model_drift_ok():
    """Tests that a non-drifted model passes the check"""
ml_classifier_model = CatBoostClassifier(
num_trees=5,
max_depth=3,
cat_features=cat_features,
random_state=RANDOM_STATE,
verbose=False
)
ml_classifier_model.fit(X_filled_train, y_filled_train)
model_drift_checker_ok = ModelDriftChecker(
df_left_data, df_right_data, ml_classifier_model,
target_column_name=TARGET, minimal=True, verbose=False
)
assert not model_drift_checker_ok.check_model()
def test_model_drift_ko():
    """Tests that a drifted model is correctly detected"""
ml_classifier_model_drifted = CatBoostClassifier(
num_trees=10,
max_depth=6,
cat_features=cat_features,
random_state=RANDOM_STATE,
verbose=False
)
ml_classifier_model_drifted.fit(X_filled_train, y_filled_train)
model_drift_checker_ko = ModelDriftChecker(
df_left_data, df_right_data, ml_classifier_model_drifted,
target_column_name=TARGET, minimal=True, verbose=False
)
assert model_drift_checker_ko.check_model()
def test_estimator_drift_ok():
    """Tests that non-drifted data passes the checks
    in a pipeline
    """
ml_classifier_model = CatBoostClassifier(
num_trees=5,
max_depth=3,
cat_features=cat_features,
random_state=RANDOM_STATE,
verbose=False
)
pipeline_catboost_drift_checker = make_pipeline(
DriftCheckerEstimator(ml_classifier_model=ml_classifier_model,
column_names=X.columns.tolist(),
minimal=True)
)
pipeline_catboost_drift_checker.fit(X_filled_train, y_filled_train)
pipeline_catboost_drift_checker.predict_proba(X_filled_test)
|
149732
|
import pytest
from cryptotik import Hitbtc
from decimal import Decimal
from cryptotik.exceptions import APIError
private = pytest.mark.skipif(
not pytest.config.getoption("--apikey"),
reason="needs --apikey option to run."
)
hit = Hitbtc(pytest.config.getoption("--apikey"),
pytest.config.getoption("--secret"))
def test_format_pair():
    '''test string formatting to match API expectations'''
assert hit.format_pair("ppc-usd") == "PPCUSD"
def test_get_markets():
'''test get_markets'''
assert isinstance(hit.get_markets(), list)
assert "ppcusd" in hit.get_markets()
def test_get_market_ticker():
'''test get_market_ticker'''
ticker = hit.get_market_ticker("PPC-USD")
assert isinstance(ticker, dict)
assert sorted(ticker.keys()) == ['ask', 'bid', 'high', 'last', 'low', 'open', 'symbol', 'timestamp', 'volume', 'volumeQuote']
def test_get_market_orders():
    '''test get_market_orders'''
market_orders = hit.get_market_orders("ppc-usd")
assert isinstance(market_orders, dict)
assert isinstance(market_orders["ask"], list)
assert isinstance(market_orders["bid"], list)
def test_get_market_trade_history():
'''test get_market_trade_history'''
trade_history = hit.get_market_trade_history("ppc-usd", 10)
assert isinstance(trade_history, list)
assert len(trade_history) == 10
assert sorted(trade_history[0].keys()) == sorted(['id', 'price', 'quantity', 'side', 'timestamp'])
@private
def test_get_balances(apikey, secret):
balances = hit.get_balances()
assert isinstance(balances, list)
@private
def test_get_deposit_address(apikey, secret):
assert isinstance(hit.get_deposit_address("ppc"), dict)
@private
def test_get_withdraw_history(apikey, secret):
assert isinstance(hit.get_withdraw_history("ppc"), list)
@private
def test_withdraw(apikey, secret):
with pytest.raises(APIError):
hit.withdraw("ppc", 1, 'PpcEaT3Rd0NTsendftMKDAKr331DXgHe3L')
@private
def test_buy_limit(apikey, secret):
with pytest.raises(APIError):
hit.buy_limit("ppc-btc", 0.05, 1)
@private
def test_sell_limit(apikey, secret):
with pytest.raises(APIError):
hit.sell_limit("ltc_btc", 1, 0.25)
@private
def test_cancel_order(apikey, secret):
with pytest.raises(APIError):
hit.cancel_order('invalid')
|
149754
|
from typing import List


class Solution:
    def processQueries(self, queries: List[int], m: int) -> List[int]:
        # Brute force: keep the permutation in a list, report each queried
        # value's index, then move that value to the front.
        mp = list(range(1, m + 1))
        res = []
        for v in queries:
            j = mp.index(v)
            res.append(j)
            del mp[j]
            mp.insert(0, v)
        return res
# Binary Indexed Tree (Fenwick tree) solution:
# index 0 is a dummy; positions start from 1
class Fenwick:
    def __init__(self, n):
        self.n = 2 * n + 1
        self.data = [0] * self.n
    def update(self, i, x):
        # Add x at position i and propagate to every range covering i.
        while i < self.n:
            self.data[i] += x
            i += i & -i
    def sum(self, i):
        # Prefix sum over positions 1..i.
        s = 0
        while i > 0:
            s += self.data[i]
            i -= i & -i
        return s
class Solution:
    def processQueries(self, queries: List[int], m: int) -> List[int]:
        # Reserve slots 1..m for future "move to front" positions and place
        # the initial permutation in slots m+1..2m.
        tree = Fenwick(m * 2)
        indexMap = {}
        for i in range(1, m + 1):
            tree.update(i + m, 1)
            indexMap[i] = i + m
        res = []
        cur = m
        for q in queries:
            i = indexMap.pop(q)
            rank = tree.sum(i - 1)  # how many elements currently precede q
            res.append(rank)
            indexMap[q] = cur
            tree.update(i, -1)
            tree.update(cur, 1)
            cur -= 1
        return res
"""
BIT TREE:
1
|
v
2
|    3
v   /    5
4 <-    /
|      6
v     /
8 <- 7
For a tree with 8 nodes, updates propagate as follows:
update 1 -> 2 -> 4 -> 8
update 2 -> 4 -> 8
update 3 (11) -> 4 (11 + 1 = 100) -> 8
update 4 (100) -> 8 (100 + 100 = 1000)
update 5 -> 6 -> 8
update 6 (110) -> 8 (110 + 10 = 1000)
update 7 -> 8
When the low bits are a run of consecutive 1s, adding the lowest set bit jumps
straight past the run, e.g. 110 -> 1000, 1110 -> 10000, 1100 -> 10000.
Reason: the nodes for those 1-bits already store the needed partial sums. For
example 110 stores the sum of (5, 6), so 110 -> 1000 ensures node 8 covers
(5, 6); and 111 -> 1000 because 110 stores (5, 6) and 100 stores (1..4), so
node 111 only needs to store element 7 itself.
Prefix sums:
sum(1): 1
sum(2): 2
sum(3): 3 + 2 (11, 10)
sum(4): 4
sum(5): 5 + 4 (101, 100)
sum(6): 6 + 4 (110, 100)
sum(7): 7 + 6 + 4 (111, 110, 100)
sum(8): 8
Node n stores the sum of the lowbit(n) positions ending at n:
e.g. node 1110 stores 1101 + 1110,
node 1100 stores 1001 + 1010 + 1011 + 1100,
node 100 stores 1 + 10 + 11 + 100.
"""
|
149814
|
from collections import defaultdict
from braces.views import LoginRequiredMixin
from django.contrib import messages
from django.contrib.auth.context_processors import PermWrapper
from django.db.models import Count, F, Q
from django.forms.utils import timezone
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import get_object_or_404
from django.template.loader import get_template
from django.urls import resolve, reverse
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy as _l
from django.views.generic import DetailView
from django.views.generic.detail import SingleObjectTemplateResponseMixin
from django.views.generic.edit import ModelFormMixin, ProcessFormView
from listable.views import SELECT_MULTI, TEXT, BaseListableView
from qatrack.attachments.models import Attachment
from qatrack.attachments.views import listable_attachment_tags
from qatrack.parts import forms as p_forms
from qatrack.parts import models as p_models
def parts_searcher(request):
p_search = request.GET['q']
parts = p_models.Part.objects\
.filter(Q(part_number__icontains=p_search) | Q(name__icontains=p_search)) \
.order_by('part_number')[0:50]\
.values_list('id', 'part_number', 'alt_part_number', 'name', 'quantity_current')
return JsonResponse({'data': list(parts)}, safe=False)
def parts_storage_searcher(request):
    try:
        p_id = int(request.GET['p_id'])
    except (KeyError, ValueError):
        return JsonResponse({'data': '__clear__'}, safe=False)
psc = p_models.PartStorageCollection.objects \
.filter(part=p_id) \
.select_related('storage', 'storage__room', 'storage__room__site') \
.order_by('storage__room__site__name', 'storage__room__name', 'storage__location')[0:50] \
.values_list('storage_id', 'storage__room__site__name', 'storage__room__name', 'storage__location', 'quantity')
return JsonResponse({'data': list(psc)}, safe=False)
def room_location_searcher(request):
    try:
        r_id = int(request.GET['r_id'])
    except (KeyError, ValueError):
        return JsonResponse({'data': '__clear__'}, safe=False)
storage = p_models.Storage.objects \
.filter(room=r_id, location__isnull=False) \
.select_related('room', 'room__site') \
.order_by('room__site__name', 'room__name', 'location') \
.values_list('id', 'location', 'description')
    # "_created" avoids shadowing the gettext alias "_" imported above.
    storage_no_location, _created = p_models.Storage.objects \
        .get_or_create(room=p_models.Room.objects.get(pk=r_id), location__isnull=True)
return JsonResponse({'storage': list(storage), 'storage_no_location': storage_no_location.id}, safe=False)
class PartUpdateCreate(LoginRequiredMixin, SingleObjectTemplateResponseMixin, ModelFormMixin, ProcessFormView):
model = p_models.Part
template_name = 'parts/part_update.html'
form_class = p_forms.PartForm
def dispatch(self, request, *args, **kwargs):
self.user = request.user
return super(PartUpdateCreate, self).dispatch(request, *args, **kwargs)
def get_object(self, queryset=None):
try:
return super(PartUpdateCreate, self).get_object(queryset)
except AttributeError:
return None
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super(PartUpdateCreate, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
return super(PartUpdateCreate, self).post(request, *args, **kwargs)
def get_context_data(self, *args, **kwargs):
context_data = super(PartUpdateCreate, self).get_context_data(**kwargs)
if self.request.method == 'POST':
context_data['supplier_formset'] = p_forms.PartSupplierCollectionFormset(
self.request.POST,
instance=self.object,
prefix='supplier'
)
context_data['storage_formset'] = p_forms.PartStorageCollectionFormset(
self.request.POST,
instance=self.object,
prefix='storage'
)
else:
context_data['supplier_formset'] = p_forms.PartSupplierCollectionFormset(instance=self.object, prefix='supplier') # noqa: E501
context_data['storage_formset'] = p_forms.PartStorageCollectionFormset(instance=self.object, prefix='storage') # noqa: E501
context_data['attachments'] = self.object.attachment_set.all() if self.object else []
return context_data
def form_invalid(self, form):
messages.add_message(self.request, messages.ERROR, _('Please correct the errors below.'))
return super().form_invalid(form)
def edit_part_attachments(self, part):
for idx, f in enumerate(self.request.FILES.getlist('part_attachments')):
Attachment.objects.create(
attachment=f,
comment="Uploaded %s by %s" % (timezone.now(), self.request.user.username),
label=f.name,
part=part,
created_by=self.request.user
)
a_ids = self.request.POST.get('part_attachments_delete_ids', '').split(',')
if a_ids != ['']:
Attachment.objects.filter(id__in=a_ids).delete()
def form_valid(self, form):
context = self.get_context_data()
supplier_formset = context['supplier_formset']
storage_formset = context['storage_formset']
if not supplier_formset.is_valid() or not storage_formset.is_valid():
messages.add_message(self.request, messages.ERROR, _('Please correct the errors below.'))
return self.render_to_response(context)
part = form.save(commit=False)
if not part.pk:
message = _('New part %(description)s added') % {'description': str(part)}
else:
message = _('Part %(description)s updated') % {'description': str(part)}
messages.add_message(request=self.request, level=messages.SUCCESS, message=message)
part.save()
for sup_form in supplier_formset:
delete = sup_form.cleaned_data.get('DELETE')
is_new = sup_form.instance.id is None
psc_instance = sup_form.instance
if delete and not is_new:
psc_instance.delete()
continue
elif sup_form.has_changed():
psc_instance.part = part
psc_instance.save()
for sto_form in storage_formset:
delete = sto_form.cleaned_data.get('DELETE')
is_new = sto_form.instance.id is None
psc_instance = sto_form.instance
if delete and not is_new:
psc_instance.delete()
continue
elif sto_form.has_changed():
psc_instance.part = part
storage_field = sto_form.cleaned_data['storage_field']
if isinstance(storage_field, str):
psc_instance.storage = p_models.Storage.objects.create(
room=sto_form.cleaned_data['room'],
location=storage_field
)
else:
psc_instance.storage = storage_field
psc_instance.save()
part.set_quantity_current()
self.edit_part_attachments(part)
if 'submit_add_another' in self.request.POST:
return HttpResponseRedirect(reverse('part_new'))
return HttpResponseRedirect(reverse('parts_list'))
class PartDetails(DetailView):
model = p_models.Part
template_name = 'parts/part_details.html'
class PartsList(BaseListableView):
page_title = _l("All Parts")
model = p_models.Part
template_name = 'parts/parts_list.html'
paginate_by = 50
# order_by = ['part_number']
kwarg_filters = None
multi_separator = '<span class="padding-0-10">|</span>'
fields = (
'actions',
'name',
'part_number',
'new_or_used',
'quantity_current',
'quantity_min',
'part_category__name',
'locations',
'attachments',
)
headers = {
'actions': _l('Actions'),
'name': _l('Name'),
'part_number': _l('Part Number'),
'new_or_used': _l('New or Used'),
'quantity_current': _l('In Storage'),
'quantity_min': _l('Min Quantity'),
'locations': _l("Locations"),
'part_category__name': _l('Category'),
"attachments": mark_safe('<i class="fa fa-paperclip fa-fw" aria-hidden="true"></i>'),
}
widgets = {
'actions': None,
'name': TEXT,
'part_number': TEXT,
'new_or_used': SELECT_MULTI,
'quantity_min': None,
'quantity_current': None,
'locations': None,
'part_category__name': SELECT_MULTI,
'attachments': None,
}
search_fields = {
'actions': False,
'quantity_current': False,
'quantity_min': False,
'locations': False,
'attachments': False,
}
order_fields = {
'actions': False,
'part_category__name': False,
'locations': "partstoragecollection__storage__room__site__name",
"attachments": "attachment_count",
}
select_related = ('part_category',)
prefetch_related = ("attachment_set",)
def get_icon(self):
return 'fa-cog'
def get(self, request, *args, **kwargs):
if self.kwarg_filters is None:
self.kwarg_filters = kwargs.pop('f', None)
return super(PartsList, self).get(request, *args, **kwargs)
def get_queryset(self):
qs = super(PartsList, self).get_queryset()
return qs.order_by("part_number").annotate(attachment_count=Count("attachment"))
def format_col(self, field, obj):
col = super(PartsList, self).format_col(field, obj)
return col
def get_context_data(self, *args, **kwargs):
context = super(PartsList, self).get_context_data(*args, **kwargs)
current_url = resolve(self.request.path_info).url_name
context['view_name'] = current_url
context['icon'] = self.get_icon()
context['page_title'] = self.page_title
return context
def actions(self, p):
template = get_template('parts/table_context_p_actions.html')
mext = reverse('parts_list')
perms = PermWrapper(self.request.user)
c = {'p': p, 'request': self.request, 'next': mext, 'perms': perms}
return template.render(c)
def locations(self, obj):
return self.parts_locations_cache.get(obj.id, _("None in storage"))
@property
def parts_locations_cache(self):
if not hasattr(self, "_parts_locations_cache"):
self._generate_parts_locations()
return self._parts_locations_cache
def _generate_parts_locations(self):
psc = p_models.PartStorageCollection.objects.order_by(
"storage__room__site__name",
"storage__room__name",
"storage__location",
).values_list(
"part_id",
"quantity",
"storage__location",
"storage__room__site__name",
"storage__room__name",
)
tmp_cache = defaultdict(list)
for part_id, quantity, loc, site_name, room_name in psc:
site = "%s:" % site_name if site_name else ""
text = (
'<div style="display: inline-block; white-space: nowrap;">'
'%s%s:%s <span class="badge">%d</span>'
'</div>'
) % (site, room_name, loc or "", quantity)
tmp_cache[part_id].append(text)
self._parts_locations_cache = {}
for k, v in tmp_cache.items():
self._parts_locations_cache[k] = ', '.join(v)
def part_number(self, part):
return part.part_number or "<em>N/A</em>"
def attachments(self, part):
return listable_attachment_tags(part)
class LowInventoryPartsList(PartsList):
page_title = _l("Low Inventory Parts")
def get_queryset(self):
qs = super().get_queryset()
return qs.filter(quantity_current__lt=F('quantity_min'))
class SuppliersList(BaseListableView):
model = p_models.Supplier
template_name = 'service_log/service_event_list.html'
paginate_by = 50
# order_by = ['part_number']
kwarg_filters = None
fields = (
'actions',
'name',
'phone_number',
'get_website_tag',
)
headers = {
'actions': _l('Actions'),
'name': _l('Name'),
'phone_number': _l("Phone Number"),
'get_website_tag': _l("Website"),
}
widgets = {
'actions': None,
'name': TEXT,
'phone_number': TEXT,
'website': TEXT,
}
search_fields = {
'actions': False,
'get_website_tag': 'website',
}
order_fields = {
'actions': False,
'get_website_tag': 'website',
}
def get_context_data(self, *args, **kwargs):
context = super(SuppliersList, self).get_context_data(*args, **kwargs)
current_url = resolve(self.request.path_info).url_name
context['view_name'] = current_url
context['icon'] = 'fa-microchip'
context['page_title'] = _l("All Suppliers")
return context
def actions(self, supplier):
template = get_template('parts/table_context_suppliers_actions.html')
mext = reverse('suppliers_list')
c = {
'supplier': supplier,
'request': self.request,
'next': mext,
'perms': PermWrapper(self.request.user),
}
return template.render(c)
class SupplierDetails(PartsList):
template_name = 'parts/supplier_details.html'
def get_queryset(self):
return super().get_queryset().filter(partsuppliercollection__supplier__id=self.kwargs['pk'])
def get_context_data(self, *args, **kwargs):
context = super().get_context_data(*args, **kwargs)
context['supplier'] = get_object_or_404(p_models.Supplier, pk=self.kwargs['pk'])
return context
|
149828
|
import numpy as np
import unittest
import os
import shutil
# Data from Geradin
# time[s] theta[rad]
geradin_FoR0 = np.array([[-0.0117973, 1.56808],
[0.0816564, 1.5394],
[0.171988, 1.41698],
[0.235203, 1.31521],
[0.307327, 1.09265],
[0.427399, 0.601124],
[0.526338, 0.0899229],
[0.646417, -0.394903],
[0.748531, -0.765465],
[0.868959, -0.94209],
[0.905131, -0.956217],
[0.965504, -0.903829],
[1.06828, -0.69149],
[1.16508, -0.425429],
[1.29798, -0.240495],
[1.42483, -0.0688408],
[1.56382, 0.169571],
[1.78751, 0.634087],
[1.89627, 0.806105],
[1.98075, 0.844608],
[2.10125, 0.734983],
[2.19143, 0.478564],
[2.26934, 0.0347871],
[2.37751, -0.315796],
[2.52803, -0.546627],
[2.60034, -0.601682],
[2.76314, -0.645155],
[2.88987, -0.580701],
[3.05893, -0.423295],
[3.24611, -0.239453],
[3.43335, -0.00201041],
[3.51194, 0.157214],
[3.59668, 0.423517],
[3.6815, 0.756821],
[3.73591, 0.87633],
[3.81435, 0.901554],
[3.93481, 0.751729],
[4.04305, 0.468146],
[4.17525, 0.0366783],
[4.34949, -0.556439],
[4.44551, -0.987179],
[4.57784, -1.29805],
[4.65016, -1.3531],
[4.70444, -1.35419],
[4.78294, -1.27537],
[4.86762, -1.06267],
[4.99464, -0.743611]])
geradin_FoR1 = np.array([[0.00756934, 0.0266485],
[0.134225, 0.0241027],
[0.309222, 0.100987],
[0.418117, 0.393606],
[0.490855, 0.713752],
[0.533195, 0.820103],
[0.635787, 0.871642],
[0.762124, 0.587696],
[0.85826, 0.264156],
[0.918194, -0.0720575],
[0.996205, -0.422034],
[1.05008, -0.784926],
[1.09792, -1.1477],
[1.1639, -1.47063],
[1.27207, -1.82121],
[1.38636, -2.09152],
[1.47067, -2.20042],
[1.53694, -2.26875],
[1.67582, -2.12414],
[1.8028, -1.84528],
[1.89365, -1.5121],
[1.97843, -1.2056],
[2.00271, -1.07208],
[2.08146, -0.765457],
[2.14818, -0.431789],
[2.19686, -0.0575583],
[2.24552, 0.303273],
[2.29421, 0.690904],
[2.37299, 1.02433],
[2.44573, 1.34447],
[2.55464, 1.65049],
[2.65749, 1.92983],
[2.7904, 2.12817],
[2.94135, 2.27254],
[3.03182, 2.27072],
[3.18853, 2.17376],
[3.30891, 1.95694],
[3.42312, 1.61964],
[3.50121, 1.33666],
[3.56714, 0.973525],
[3.61495, 0.583954],
[3.66883, 0.221062],
[3.71673, -0.0881084],
[3.80076, -0.451606],
[3.87271, -0.828262],
[3.95678, -1.15156],
[3.98681, -1.25937],
[4.08307, -1.47571],
[4.13729, -1.5304],
[4.27618, -1.38579],
[4.36701, -1.066],
[4.4217, -0.705294],
[4.50652, -0.37199],
[4.59132, -0.0520868],
[4.68815, 0.240774],
[4.79703, 0.519993],
[4.91188, 0.74549],
[4.98432, 0.797635]])
class TestDoublePendulum(unittest.TestCase):
"""
Validation of a double pendulum with a mass at each tip position
    Reference case: <NAME> and <NAME>, "Flexible multibody dynamics: a finite element approach"
"""
def setUp(self):
import sharpy.utils.generate_cases as gc
from sharpy.utils.constants import deg2rad
# Structural properties
mass_per_unit_length = 1.
mass_iner = 1e-4
EA = 1e9
GA = 1e9
GJ = 1e9
EI = 1e9
# Beam1
global nnodes1
nnodes1 = 11
l1 = 1.0
m1 = 1.0
theta_ini1 = 90.*deg2rad
# Beam2
nnodes2 = nnodes1
l2 = l1
m2 = m1
theta_ini2 = 00.*deg2rad
# airfoils
airfoil = np.zeros((1,20,2),)
airfoil[0,:,0] = np.linspace(0.,1.,20)
# Simulation
numtimesteps = 10
dt = 0.01
# Create the structure
beam1 = gc.AeroelasticInformation()
r1 = np.linspace(0.0, l1, nnodes1)
node_pos1 = np.zeros((nnodes1,3),)
node_pos1[:, 0] = r1*np.sin(theta_ini1)
node_pos1[:, 2] = -r1*np.cos(theta_ini1)
beam1.StructuralInformation.generate_uniform_sym_beam(node_pos1, mass_per_unit_length, mass_iner, EA, GA, GJ, EI, num_node_elem = 3, y_BFoR = 'y_AFoR', num_lumped_mass=1)
beam1.StructuralInformation.body_number = np.zeros((beam1.StructuralInformation.num_elem,), dtype = int)
beam1.StructuralInformation.boundary_conditions[0] = 1
beam1.StructuralInformation.boundary_conditions[-1] = -1
beam1.StructuralInformation.lumped_mass_nodes = np.array([nnodes1-1], dtype = int)
beam1.StructuralInformation.lumped_mass = np.ones((1,))*m1
beam1.StructuralInformation.lumped_mass_inertia = np.zeros((1,3,3))
beam1.StructuralInformation.lumped_mass_position = np.zeros((1,3))
beam1.AerodynamicInformation.create_one_uniform_aerodynamics(
beam1.StructuralInformation,
chord = 1.,
twist = 0.,
sweep = 0.,
num_chord_panels = 4,
m_distribution = 'uniform',
elastic_axis = 0.25,
num_points_camber = 20,
airfoil = airfoil)
beam2 = gc.AeroelasticInformation()
r2 = np.linspace(0.0, l2, nnodes2)
node_pos2 = np.zeros((nnodes2,3),)
node_pos2[:, 0] = r2*np.sin(theta_ini2) + node_pos1[-1, 0]
node_pos2[:, 2] = -r2*np.cos(theta_ini2) + node_pos1[-1, 2]
beam2.StructuralInformation.generate_uniform_sym_beam(node_pos2, mass_per_unit_length, mass_iner, EA, GA, GJ, EI, num_node_elem = 3, y_BFoR = 'y_AFoR', num_lumped_mass=1)
beam2.StructuralInformation.body_number = np.zeros((beam1.StructuralInformation.num_elem,), dtype = int)
beam2.StructuralInformation.boundary_conditions[0] = 1
beam2.StructuralInformation.boundary_conditions[-1] = -1
beam2.StructuralInformation.lumped_mass_nodes = np.array([nnodes2-1], dtype = int)
beam2.StructuralInformation.lumped_mass = np.ones((1,))*m2
beam2.StructuralInformation.lumped_mass_inertia = np.zeros((1,3,3))
beam2.StructuralInformation.lumped_mass_position = np.zeros((1,3))
beam2.AerodynamicInformation.create_one_uniform_aerodynamics(
beam2.StructuralInformation,
chord = 1.,
twist = 0.,
sweep = 0.,
num_chord_panels = 4,
m_distribution = 'uniform',
elastic_axis = 0.25,
num_points_camber = 20,
airfoil = airfoil)
beam1.assembly(beam2)
# Simulation details
SimInfo = gc.SimulationInformation()
SimInfo.set_default_values()
SimInfo.define_uinf(np.array([0.0,1.0,0.0]), 1.)
SimInfo.solvers['SHARPy']['flow'] = ['BeamLoader',
'AerogridLoader',
# 'InitializeMultibody',
'DynamicCoupled']
global name
name = 'double_pendulum_geradin'
SimInfo.solvers['SHARPy']['case'] = 'double_pendulum_geradin'
SimInfo.solvers['SHARPy']['write_screen'] = 'off'
SimInfo.solvers['SHARPy']['route'] = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + '/'
SimInfo.solvers['SHARPy']['log_folder'] = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + '/'
SimInfo.set_variable_all_dicts('dt', dt)
SimInfo.define_num_steps(numtimesteps)
SimInfo.set_variable_all_dicts('rho', 0.0)
SimInfo.set_variable_all_dicts('velocity_field_input', SimInfo.solvers['SteadyVelocityField'])
SimInfo.set_variable_all_dicts('output', os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + '/output/')
SimInfo.solvers['BeamLoader']['unsteady'] = 'on'
SimInfo.solvers['AerogridLoader']['unsteady'] = 'on'
SimInfo.solvers['AerogridLoader']['mstar'] = 2
SimInfo.solvers['AerogridLoader']['wake_shape_generator'] = 'StraightWake'
SimInfo.solvers['AerogridLoader']['wake_shape_generator_input'] = {'u_inf':1.,
'u_inf_direction': np.array([0., 1., 0.]),
'dt': dt}
SimInfo.solvers['WriteVariablesTime']['FoR_number'] = np.array([0, 1], dtype = int)
SimInfo.solvers['WriteVariablesTime']['FoR_variables'] = ['mb_quat']
SimInfo.solvers['WriteVariablesTime']['structure_nodes'] = np.array([nnodes1-1, nnodes1+nnodes2-1], dtype = int)
SimInfo.solvers['WriteVariablesTime']['structure_variables'] = ['pos']
SimInfo.solvers['NonLinearDynamicMultibody']['gravity_on'] = True
SimInfo.solvers['NonLinearDynamicMultibody']['newmark_damp'] = 0.15
SimInfo.solvers['BeamPlot']['include_FoR'] = True
SimInfo.solvers['DynamicCoupled']['structural_solver'] = 'NonLinearDynamicMultibody'
SimInfo.solvers['DynamicCoupled']['structural_solver_settings'] = SimInfo.solvers['NonLinearDynamicMultibody']
SimInfo.solvers['DynamicCoupled']['aero_solver'] = 'StepUvlm'
SimInfo.solvers['DynamicCoupled']['aero_solver_settings'] = SimInfo.solvers['StepUvlm']
SimInfo.solvers['DynamicCoupled']['postprocessors'] = ['WriteVariablesTime', 'BeamPlot', 'AerogridPlot']
SimInfo.solvers['DynamicCoupled']['postprocessors_settings'] = {'WriteVariablesTime': SimInfo.solvers['WriteVariablesTime'],
'BeamPlot': SimInfo.solvers['BeamPlot'],
'AerogridPlot': SimInfo.solvers['AerogridPlot']}
SimInfo.solvers['DynamicCoupled']['postprocessors_settings']['WriteVariablesTime']['folder'] = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + '/output/'
SimInfo.solvers['DynamicCoupled']['postprocessors_settings']['BeamPlot']['folder'] = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + '/output/'
SimInfo.solvers['DynamicCoupled']['postprocessors_settings']['AerogridPlot']['folder'] = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + '/output/'
SimInfo.with_forced_vel = False
SimInfo.with_dynamic_forces = False
# Create the MB and BC files
LC1 = gc.LagrangeConstraint()
LC1.behaviour = 'hinge_FoR'
LC1.body_FoR = 0
LC1.rot_axis_AFoR = np.array([0.0,1.0,0.0])
LC2 = gc.LagrangeConstraint()
LC2.behaviour = 'hinge_node_FoR'
LC2.node_in_body = nnodes1-1
LC2.body = 0
LC2.body_FoR = 1
LC2.rot_axisB = np.array([0.0,1.0,0.0])
LC = []
LC.append(LC1)
LC.append(LC2)
MB1 = gc.BodyInformation()
MB1.body_number = 0
MB1.FoR_position = np.zeros((6,),)
MB1.FoR_velocity = np.zeros((6,),)
MB1.FoR_acceleration = np.zeros((6,),)
MB1.FoR_movement = 'free'
MB1.quat = np.array([1.0,0.0,0.0,0.0])
MB2 = gc.BodyInformation()
MB2.body_number = 1
MB2.FoR_position = np.array([node_pos2[0, 0], node_pos2[0, 1], node_pos2[0, 2], 0.0, 0.0, 0.0])
MB2.FoR_velocity = np.zeros((6,),)
MB2.FoR_acceleration = np.zeros((6,),)
MB2.FoR_movement = 'free'
MB2.quat = np.array([1.0,0.0,0.0,0.0])
MB = []
MB.append(MB1)
MB.append(MB2)
# Write files
gc.clean_test_files(SimInfo.solvers['SHARPy']['route'], SimInfo.solvers['SHARPy']['case'])
SimInfo.generate_solver_file()
SimInfo.generate_dyn_file(numtimesteps)
beam1.generate_h5_files(SimInfo.solvers['SHARPy']['route'], SimInfo.solvers['SHARPy']['case'])
gc.generate_multibody_file(LC, MB,SimInfo.solvers['SHARPy']['route'], SimInfo.solvers['SHARPy']['case'])
def test_doublependulum(self):
import sharpy.sharpy_main
solver_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + '/double_pendulum_geradin.sharpy')
sharpy.sharpy_main.main(['', solver_path])
# read output and compare
output_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__))) + '/output/double_pendulum_geradin/WriteVariablesTime/'
pos_tip_data = np.loadtxt(("%sstruct_pos_node%d.dat" % (output_path, nnodes1*2-1)), )
self.assertAlmostEqual(pos_tip_data[-1, 1], 1.051004, 4)
self.assertAlmostEqual(pos_tip_data[-1, 2], 0.000000, 4)
self.assertAlmostEqual(pos_tip_data[-1, 3], -0.9986984, 4)
def tearDown(self):
solver_path = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
solver_path += '/'
files_to_delete = [name + '.aero.h5',
name + '.dyn.h5',
name + '.fem.h5',
name + '.mb.h5',
name + '.sharpy']
for f in files_to_delete:
os.remove(solver_path + f)
shutil.rmtree(solver_path + 'output/')
|
149852
|
import numpy as np
import random
import os
import datetime
def str_list_to_float(str_list):
return [float(item) for item in str_list]
def str_list_to_int(str_list):
return [int(item) for item in str_list]
def read_embeddings(filename, n_node, n_embed):
with open(filename, "r") as f:
embedding_matrix = np.random.rand(n_node, n_embed)
f.readline() # skip the first line
for line in f:
emd = line.split()
embedding_matrix[int(emd[0]), :] = str_list_to_float(emd[1:])
return embedding_matrix
def read_embeddings_with_id_convert(filename, graph, n_embed):
with open(filename, "r") as f:
embedding_matrix = np.random.rand(graph.n_node, n_embed)
f.readline() # skip the first line
for line in f:
emd = line.split()
embedding_matrix[graph.name2id[emd[0]], :] = str_list_to_float(emd[1:])
return embedding_matrix
def agm(x):  # x is a 1d-array
    # Map non-negative scores to probabilities via 1 - exp(-x) (an
    # affiliation-graph-model style link probability), with NaNs zeroed and the
    # result clipped away from exact 0 for numerical stability.
    agm_x = 1 - np.exp(-x)
    agm_x[np.isnan(agm_x)] = 0
    return np.clip(agm_x, 1e-6, 1)
def agm_softmax(x): # x is 1d-array
agm_x = 1 - np.exp(-x)
agm_x[np.isnan(agm_x)] = 0
agm_x = np.clip(agm_x, 1e-6, 1)
return agm_x / agm_x.sum()
def read_edges_from_file(filename):
with open(filename, "r") as f:
lines = f.readlines()
        edges = [str_list_to_int(line.split()) for line in lines if line.strip() and not line.startswith('#')]
return edges
def create_file_dir_in_config(config):
for k, v in config.__dict__.items():
if not k.startswith('_') and 'filename' in k:
if not isinstance(v, list):
v = [v]
for path in v:
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
def shuffle(*args):
idx = list(range(len(args[0])))
random.shuffle(idx)
results = []
for array in args:
results.append([array[i] for i in idx])
return tuple(results)
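# Example: `shuffle` permutes several parallel lists with one shared random
# order, preserving element pairing across lists:
#
#   xs, ys = shuffle([1, 2, 3], ['a', 'b', 'c'])
#   # e.g. xs == [2, 1, 3] and ys == ['b', 'a', 'c']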
def generate_tmp_filename(config):
    # Note: hash() is salted per interpreter run, so this name is only stable
    # within a single process.
    return ('tmp-' + str(hash(str(config.__dict__))) + str(datetime.datetime.now()) + '.pkl').replace(' ', '_').replace(':', '_')
|
149865
|
import asyncio
HELP_DESC = ("""
!listplugins\t\t\t-\tlist addable plugins
!addplugin plugin [plugin2 ...]\t-\tadd plugin(s) (--all for all)
!remplugin plugin [plugin2 ...]\t-\tremove plugin(s)
""")
blacklisted = ["help", "meta"]
async def register_to(plugin):
"""
TODO: don't add plugins twice, don't remove meta plugin etc
"""
async def listplugins_callback(room, event):
available = plugin.mroom.bot.available_plugins
pluginlist = ""
        for k, v in available.items():
            indented = "\t" + v[:-1].replace("\n", "\n\t") + v[-1]
            pluginlist += f"{k}:\n{indented}\n"
        await plugin.send_html(f"""<pre><code>{pluginlist}</code></pre>""")
listplugins_handler = plugin.CommandHandler("listplugins", listplugins_callback)
plugin.add_handler(listplugins_handler)
async def addplugin_callback(room, event):
curr_plugins = [p.pluginname for p in plugin.mroom.plugins]
args = event.source['content']['body'].split()
if len(args) > 1 and args[1] == "--all":
await asyncio.gather(*(plugin.mroom.add_plugin(pname) for pname in plugin.mroom.bot.available_plugins if pname not in curr_plugins))
else:
await asyncio.gather(*(plugin.mroom.add_plugin(pname) for pname in args[1:] if pname not in curr_plugins))
await plugin.send_text("Call !help to see new plugins")
addplugin_handler = plugin.CommandHandler("addplugin", addplugin_callback)
plugin.add_handler(addplugin_handler)
async def remplugin_callback(room, event):
args = plugin.extract_args(event)
torem = list(filter(lambda x: x not in blacklisted, args[1:]))
await asyncio.gather(*(plugin.mroom.remove_plugin(pname) for pname in torem))
await plugin.send_text("Call !help to see new plugins")
remplugin_handler = plugin.CommandHandler("remplugin", remplugin_callback)
plugin.add_handler(remplugin_handler)
# async def reload_callback(room, event):
    # # if some plugins are still in the register_to function, they will not
# # be stopped :(
# await plugin.mroom.bot.read_plugins() # look for new available plugins
# await asyncio.gather(*(p.stop_all_tasks() for p in plugin.mroom.plugins)) # stop running tasks
# await plugin.mroom.load_plugins()
# # await plugin.send_text("Reloaded Plugins.")
#
# reload_handler = plugin.CommandHandler("reload", reload_callback)
# plugin.add_handler(reload_handler)
|
149869
|
from numpy import genfromtxt
from matplotlib.pyplot import figure, legend, loglog, savefig, xlabel, ylabel
from sys import argv
input_file = str(argv[1])
output_file = str(argv[2])
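# Expected input (inferred from the parsing below): a CSV file with one header
# row and two columns -- degrees of freedom, then L2 error.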
data = genfromtxt(input_file, skip_header=1, delimiter=',')
N = data[:,0]
err = data[:,1]
fig=figure()
loglog(N,1/N, '--', label='slope -1')
loglog(N,err, 'o-', label='L2 error')
xlabel('degrees of freedom')
ylabel('error')
legend()
savefig(output_file)
print('Generated ' + output_file)
|
149900
|
import pickle
import sys
import zlib
from scrapy.crawler import Crawler
from scrapy.utils.conf import build_component_list
from scrapy.utils.project import get_project_settings
from .utils import get_spider_class
class Cassette:
"""
Helper class to store request, response and output data.
"""
FIXTURE_VERSION = 2
def __init__(
self,
spider=None,
spider_name=None,
request=None,
response=None,
init_attrs=None,
input_attrs=None,
output_attrs=None,
output_data=None,
middlewares=None,
included_settings=None,
python_version=None,
filename=None,
):
self.spider_name = spider_name
self.middlewares = middlewares
self.included_settings = included_settings
if spider:
self.spider_name = spider.name
self.middlewares = self._get_middlewares(spider.settings)
self.included_settings = self._get_included_settings(spider.settings)
self.request = request
self.response = response
self.init_attrs = init_attrs
self.input_attrs = input_attrs
self.output_attrs = output_attrs
self.output_data = output_data
self.filename = filename
self.python_version = python_version or sys.version_info.major
@classmethod
def from_fixture(cls, fixture):
with open(fixture, 'rb') as f:
binary = f.read()
cassette = pickle.loads(zlib.decompress(binary))
return cassette
def _get_middlewares(self, settings):
full_list = build_component_list(settings.getwithbase('SPIDER_MIDDLEWARES'))
autounit_mw_path = list(filter(lambda x: x.endswith('AutounitMiddleware'), full_list))[0]
start = full_list.index(autounit_mw_path)
mw_paths = [mw for mw in full_list[start:] if mw != autounit_mw_path]
return mw_paths
def _get_included_settings(self, settings):
# Use the new setting, if empty, try the deprecated one
names = settings.getlist('AUTOUNIT_RECORD_SETTINGS', [])
if not names:
names = settings.getlist('AUTOUNIT_INCLUDED_SETTINGS', [])
included = {name: settings.get(name) for name in names}
return included
def get_spider(self):
settings = get_project_settings()
spider_cls = get_spider_class(self.spider_name, settings)
spider_cls.update_settings(settings)
for k, v in self.included_settings.items():
settings.set(k, v, priority=50)
crawler = Crawler(spider_cls, settings)
spider = spider_cls.from_crawler(crawler, **self.init_attrs)
return spider
def pack(self):
return zlib.compress(pickle.dumps(self, protocol=2))
def to_dict(self):
return {
'spider_name': self.spider_name,
'request': self.request,
'response': self.response,
'output_data': self.output_data,
'middlewares': self.middlewares,
'settings': self.included_settings,
'init_attrs': self.init_attrs,
'input_attrs': self.input_attrs,
'output_attrs': self.output_attrs,
}
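# Usage sketch (the fixture path below is hypothetical; `from_fixture`,
# `get_spider` and `pack` are the methods defined above):
#
#   cassette = Cassette.from_fixture('autounit/tests/myspider/parse/fixture1.bin')
#   spider = cassette.get_spider()
#   binary = cassette.pack()  # recompressed pickle, ready to write back to disk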
|
149956
|
import pandas as pd
import tensorflow as tf
def get_top_5(pred):
_, indexes = tf.math.top_k(pred, k=5)
indexes = indexes.numpy().astype(str)
parsed = " ".join(indexes)
return parsed
def _decode_predictions(predictions):
preds = [get_top_5(pred) for pred in predictions]
return preds
def generate_submission(instance_ids, predictions, csv_file):
predictions = _decode_predictions(predictions)
df = pd.DataFrame(list(zip(instance_ids, predictions)),
columns=['Id', 'Predicted'])
df = df.sort_values('Id')
df.to_csv(csv_file, index=False, header=True, sep=',')
return df
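# Usage sketch (assumed input: `scores` is an (n_samples, n_classes) array of
# class scores; the ids and output path are hypothetical):
#
#   import numpy as np
#   scores = np.random.rand(2, 10)
#   generate_submission([101, 102], scores, 'submission.csv')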
|
149967
|
import json
import os
from fibber import log
from fibber.datasets.downloadable_datasets import downloadable_dataset_urls
from fibber.download_utils import download_file, get_root_dir
logger = log.setup_custom_logger(__name__)
def download_and_preprocess_mr():
"""preprocess raw movie review dataset to Fibber's JSON format."""
root_dir = get_root_dir()
dataset_dir = "datasets/mr/"
download_file(subdir=os.path.join(dataset_dir, "raw"),
**downloadable_dataset_urls["mr-raw"])
logger.info("Start processing data.")
with open(os.path.join(root_dir, dataset_dir, "raw/rt-polaritydata/rt-polarity.neg"),
encoding="utf-8", errors="ignore") as f:
neg = f.readlines()
with open(os.path.join(root_dir, dataset_dir, "raw/rt-polaritydata/rt-polarity.pos"),
encoding="utf-8", errors="ignore") as f:
pos = f.readlines()
train = {
"label_mapping": ["negative", "positive"],
"cased": False,
"paraphrase_field": "text0",
}
test = {
"label_mapping": ["negative", "positive"],
"cased": False,
"paraphrase_field": "text0",
}
trainlist = []
testlist = []
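    # Every 10th example goes to the test split; the rest go to train.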
    for idx, item in enumerate(neg):
        if idx % 10 == 0:
            testlist.append({
                "label": 0,
                "text0": item.strip()})
        else:
            trainlist.append({
                "label": 0,
                "text0": item.strip()})
    for idx, item in enumerate(pos):
        if idx % 10 == 0:
            testlist.append({
                "label": 1,
                "text0": item.strip()})
        else:
            trainlist.append({
                "label": 1,
                "text0": item.strip()})
train["data"] = trainlist
test["data"] = testlist
with open(os.path.join(root_dir, dataset_dir, "train.json"), "w") as f:
json.dump(train, f, indent=2)
with open(os.path.join(root_dir, dataset_dir, "test.json"), "w") as f:
json.dump(test, f, indent=2)
if __name__ == "__main__":
download_and_preprocess_mr()
|
149992
|
import binascii
import re
legacy_syntax_names = {
('\t', 'simple'): 'TSV (Rainbow)',
(',', 'quoted'): 'CSV (Rainbow)'
}
filename_policy_map = {'simple': 'Simple', 'quoted': 'Standard', 'quoted_rfc': 'quoted_rfc'}
def encode_delim(delim):
return binascii.hexlify(delim.encode('utf-8')).decode('ascii')
def decode_delim(delim):
return binascii.unhexlify(delim.encode('ascii')).decode('utf-8')
def get_syntax_file_basename(delim, policy):
for k, v in legacy_syntax_names.items():
if (delim, policy) == k:
return v + '.sublime-syntax'
return 'Rainbow_CSV_hex_{}_{}.sublime-syntax'.format(encode_delim(delim), filename_policy_map[policy])
simple_header_template = '''%YAML 1.2
---
name: '{}'
file_extensions: [{}]
scope: text.{}
contexts:
main:
- match: '^'
push: rainbow1
'''
standard_header_template = '''%YAML 1.2
---
name: '{}'
file_extensions: [{}]
scope: text.{}
contexts:
main:
- match: '^'
push: rainbow1
quoted_field:
- match: '""'
scope: meta.rainbow.double-quote-escaped
- match: '"'
pop: true
'''
non_rfc_endline_rule = ''' - match: '$'\n pop: true\n'''
rainbow_scope_names = [
'rainbow1',
'keyword.rainbow2',
'entity.name.rainbow3',
'comment.rainbow4',
'string.rainbow5',
'entity.name.tag.rainbow6',
'storage.type.rainbow7',
'support.rainbow8',
'constant.language.rainbow9',
'variable.language.rainbow10'
]
def oniguruma_regular_escape_single_char(delim_char):
single_escape_chars = r'\/|.$^*()[]+?'
if single_escape_chars.find(delim_char) != -1:
return r'\{}'.format(delim_char)
if delim_char == '\t':
return r'\t'
return delim_char
def oniguruma_regular_escape(delim):
return ''.join([oniguruma_regular_escape_single_char(d) for d in delim])
def get_syntax_name(delim, policy):
for k, v in legacy_syntax_names.items():
if (delim, policy) == k:
return v
ui_delim = delim.replace('\t', 'tab')
hr_policy_map = {'simple': 'Simple', 'quoted': 'Standard', 'quoted_rfc': 'RFC'}
return 'Rainbow CSV {} {}'.format(ui_delim, hr_policy_map[policy])
def yaml_escape(data):
return data.replace("'", "''")
def get_context_name(context_id):
return "rainbow{}".format(context_id + 1)
def make_simple_context(delim, context_id, num_contexts, indent=' '):
result_lines = []
next_context_id = (context_id + 1) % num_contexts
context_header = "{}:".format(get_context_name(context_id))
# We use `meta_content_scope` instead of `meta_scope` to prevent wrong separator color bug, see https://github.com/mechatroner/sublime_rainbow_csv/issues/31
result_lines.append("- meta_content_scope: {}".format(rainbow_scope_names[context_id]))
result_lines.append("- match: '{}'".format(yaml_escape(oniguruma_regular_escape(delim))))
result_lines.append(" set: {}".format(get_context_name(next_context_id)))
result_lines.append("- match: '$'")
result_lines.append(" pop: true")
result_lines = [indent + v for v in result_lines]
result_lines = [context_header] + result_lines
result_lines = [indent + v for v in result_lines]
return '\n'.join(result_lines) + '\n'
def make_standard_context(delim, context_id, num_contexts, indent=' '):
result_lines = []
next_context_id = (context_id + 1) % num_contexts
context_header = "{}:".format(get_context_name(context_id))
# We use `meta_content_scope` instead of `meta_scope` to prevent wrong separator color bug, see https://github.com/mechatroner/sublime_rainbow_csv/issues/31
result_lines.append("- meta_content_scope: {}".format(rainbow_scope_names[context_id]))
result_lines.append("- match: '{}'".format(yaml_escape(oniguruma_regular_escape(delim))))
result_lines.append(" set: {}".format(get_context_name(next_context_id)))
result_lines.append("- match: '$'")
result_lines.append(" pop: true")
result_lines.append("- match: '\"'")
result_lines.append(" push: quoted_field")
result_lines = [indent + v for v in result_lines]
result_lines = [context_header] + result_lines
result_lines = [indent + v for v in result_lines]
return '\n'.join(result_lines) + '\n'
def make_sublime_syntax_simple(delim):
scope = 'rbcsmn' + ''.join([str(ord(d)) for d in delim])
name = get_syntax_name(delim, 'simple')
result = simple_header_template.format(yaml_escape(name), scope, scope)
num_contexts = len(rainbow_scope_names)
for context_id in range(num_contexts):
result += '\n'
result += make_simple_context(delim, context_id, num_contexts)
return result
def make_sublime_syntax_standard(delim, policy):
assert policy in ['quoted', 'quoted_rfc']
scope = 'rbcstn' + ''.join([str(ord(d)) for d in delim])
name = get_syntax_name(delim, policy)
result = standard_header_template.format(yaml_escape(name), scope, scope)
if policy == 'quoted':
result += non_rfc_endline_rule
num_contexts = len(rainbow_scope_names)
for context_id in range(num_contexts):
result += '\n'
result += make_standard_context(delim, context_id, num_contexts)
return result
def make_sublime_syntax(delim, policy):
assert policy in filename_policy_map.keys()
if policy == 'quoted':
return make_sublime_syntax_standard(delim, policy)
elif policy == 'quoted_rfc':
return make_sublime_syntax_standard(delim, policy)
else:
return make_sublime_syntax_simple(delim)
def get_pregenerated_delims():
delims = [chr(i) for i in range(32, 127)]
delims.append('\t')
delims = [delim for delim in delims if re.match('^[a-zA-Z0-9]$', delim) is None]
return delims
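# Usage sketch: generate a syntax definition for semicolon-separated files and
# the file name it should be saved under (both helpers are defined above):
#
#   syntax_text = make_sublime_syntax(';', 'quoted')
#   basename = get_syntax_file_basename(';', 'quoted')
#   # -> 'Rainbow_CSV_hex_3b_Standard.sublime-syntax'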
|
150034
|
import functools
from datetime import datetime
try:
from datetime import timezone
utc = timezone.utc
except ImportError:
from datetime import timedelta, tzinfo
class UTC(tzinfo):
def utcoffset(self, dt):
return timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dst):
return timedelta(0)
utc = UTC()
try:
from werkzeug.utils import invalidate_cached_property
except ImportError:
from werkzeug._internal import _missing
def invalidate_cached_property(obj, name):
obj.__dict__[name] = _missing
class FakeCache:
"""
An object that mimics just enough of Flask-Caching's API to be compatible
with our needs, but does nothing.
"""
def get(self, key):
return None
def set(self, key, value):
return None
def delete(self, key):
return None
def first(iterable, default=None, key=None):
"""
Return the first truthy value of an iterable.
Shamelessly stolen from https://github.com/hynek/first
"""
if key is None:
for el in iterable:
if el:
return el
else:
for el in iterable:
if key(el):
return el
return default
sentinel = object()
def getattrd(obj, name, default=sentinel):
"""
Same as getattr(), but allows dot notation lookup
Source: http://stackoverflow.com/a/14324459
"""
try:
return functools.reduce(getattr, name.split("."), obj)
except AttributeError as e:
if default is not sentinel:
return default
raise
def timestamp_from_datetime(dt):
"""
Given a datetime, in UTC, return a float that represents the timestamp for
that datetime.
http://stackoverflow.com/questions/8777753/converting-datetime-date-to-utc-timestamp-in-python#8778548
"""
dt = dt.replace(tzinfo=utc)
if hasattr(dt, "timestamp") and callable(dt.timestamp):
return dt.replace(tzinfo=utc).timestamp()
return (dt - datetime(1970, 1, 1, tzinfo=utc)).total_seconds()
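# Example: the Unix epoch maps to 0.0 and one hour later to 3600.0:
#
#   assert timestamp_from_datetime(datetime(1970, 1, 1)) == 0.0
#   assert timestamp_from_datetime(datetime(1970, 1, 1, 1)) == 3600.0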
|
150045
|
import os
import numpy as np
import torch
import torch.nn.functional as F
import torch.distributed as dist
class DistDataError(Exception):
"""Defines an empty exception to throw when some other rank hit a real exception."""
pass
class DistData(object):
def __init__(self, backend='gloo'):
assert backend in ['gloo', 'mpi'], f"torch.distributed backend '{backend}' is not supported, valid options are 'gloo' or 'mpi'"
dist.init_process_group(backend, init_method="env://")
# lookup our process rank and the group size
self.rank = dist.get_rank()
self.numranks = dist.get_world_size()
def allassert(self, cond, msg):
"""Check that cond is True on all ranks, assert with msg everywhere if not.
To prevent deadlocks in cases where an assertion might only fail on one rank,
this executes an allreduce to ensure that if any rank finds that an assertion
has been violated, all ranks fail an assertion check.
The condition must be true on all ranks for this not to assert.
"""
alltrue = self.alltrue(cond)
assert alltrue, msg
def allraise_if(self, err):
"""Raise exception if err is not None on any rank.
Similarly to allassert, this raises an exception on all ranks if err
is set to an exception on any rank. Rank(s) where err is not None
re-raise err as exception, and ranks where err is None raise DistDataError.
Thus all ranks raise an exception if any rank has an active exception,
which helps avoid deadlocks in cases where an exception may be raised
on a subset of ranks.
"""
alltrue = self.alltrue(err is None)
if not alltrue:
# At least one rank raised an exception.
# Re-raise the actual exception if this rank threw one.
if err is not None:
raise err
# TODO: is there a better exception to use here?
# On other ranks, raise an "empty" exception to indicate
# that we're only failing because someone else did.
raise DistDataError
def barrier(self):
"""Globally synchronize all processes"""
dist.barrier()
def bcast(self, val, root):
"""Broadcast a scalar value from root to all ranks"""
vals = [val]
dist.broadcast_object_list(vals, src=root)
return vals[0]
    def scatterv_(self, invals: np.ndarray, counts: list, root: int = 0):
"""Scatter int64 values from invals according to counts array, return received portion in a new tensor"""
self.allassert(len(counts) == self.numranks,
f"Length of counts list {len(counts)} does not match number of ranks {self.numranks}")
# Define list of tensors to scatter on the root.
# torch.distributed.scatter requires each tensor to be the same shape,
# so find the max size across all count values and pad.
max_size = max(counts)
scatterlist = None
if self.rank == root:
slices = list(torch.split(torch.from_numpy(invals), counts))
scatterlist = [F.pad(s, (0, max_size - len(s))) for s in slices]
# Receive a tensor of the max count size from the root,
# then copy values into output numpy array, which may be smaller.
recvtensor = torch.zeros(max_size, dtype=torch.int64)
dist.scatter(recvtensor, scatterlist, src=root)
return recvtensor[:counts[self.rank]]
def alltrue(self, val):
"""Returns True if all procs input True, False otherwise"""
# torch.dist does not support reductions with bool types
# so we cast to int and cast the result back to bool
tensor = torch.tensor([int(val)], dtype=torch.int32)
dist.all_reduce(tensor, op=dist.ReduceOp.BAND)
return bool(tensor[0])
def sum(self, val):
"""Compute sum of a scalar val, and return total on all ranks."""
tensor = torch.tensor([val])
dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
return tensor[0]
def exscan(self, val: int):
"""Compute prefix sum (exclusive scan) of int64 val, and return offset of each rank."""
# torch.distributed doesn't have a scan, so fallback to allreduce
tensor = torch.zeros(self.numranks, dtype=torch.int64)
tensor[self.rank:] = val
dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
return int(tensor[self.rank]) - val
def min(self, val):
"""Return minimum of scalar val to all ranks."""
tensor = torch.tensor([val])
dist.all_reduce(tensor, op=dist.ReduceOp.MIN)
return tensor[0]
def minrank(self, cond):
"""Find first rank whose condition is True, return that rank if any, None otherwise."""
minrank = self.numranks
if cond:
minrank = self.rank
minrank = self.min(minrank)
if minrank < self.numranks:
return minrank
return None
def bcast_first(self, val):
"""Broadcast val from first rank where it is not None, return val if any, None otherwise"""
# Find the first rank with a valid value.
minrank = self.minrank(val is not None)
# If there is no rank with a valid value, return None
if minrank is None:
return None
# Otherwise broadcast the value from the first valid rank.
val = self.bcast(val, root=minrank)
return val
    def all_sum_(self, vals: np.ndarray):
"""Sums values in numpy array vals element-wise and update vals in place with final result on all ranks"""
# Builds torch.tensor with from_numpy to use same underlying memory as numpy array.
tensor = torch.from_numpy(vals)
dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
def open(self, filename, truncate=None):
"""Create, truncate, and open a file shared by all ranks."""
# Don't truncate existing file until all ranks reach this point
self.barrier()
# We'll capture any exception in this variable
err = None
# Rank 0 creates and truncates file.
if self.rank == 0:
try:
f = open(filename, 'wb')
# Some file systems like GPFS deliver faster write speed
# if the file size is known before data is written to the file.
if truncate is not None:
f.truncate(truncate)
except Exception as e:
err = e
# Verify that rank 0 created the file
self.allraise_if(err)
# Wait for rank 0 to open (and truncate) file,
# then have all ranks open file for writing.
if self.rank != 0:
try:
f = open(filename, 'r+b')
except Exception as e:
err = e
# Verify that all ranks successfully opened the file
self.allraise_if(err)
return f
def remove(self, filename):
"""Remove a shared file."""
# Don't remove the file until all are ready
self.barrier()
# We'll capture any exception in this variable
err = None
# Rank 0 removes the file if it exists.
if self.rank == 0:
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as e:
err = e
# Verify that rank 0 successfully removed the file.
self.allraise_if(err)
def rename(self, srcfile, destfile):
"""Rename a shared file."""
# Don't rename until all are ready
self.barrier()
# We'll capture any exception in this variable
err = None
# Rank 0 renames the file.
if self.rank == 0:
try:
if os.path.exists(srcfile):
os.rename(srcfile, destfile)
except Exception as e:
err = e
# Verify that the rename succeeded
self.allraise_if(err)
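# Usage sketch (assumes the env:// variables expected by init_process_group are
# set, e.g. by launching with `torchrun --nproc_per_node=2 this_script.py`):
#
#   dd = DistData(backend='gloo')
#   offset = dd.exscan(10)   # rank 0 -> 0, rank 1 -> 10, ...
#   total = dd.sum(1)        # equals dd.numranks on every rank
#   ok = dd.alltrue(True)    # True only if every rank passed True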
|
150055
|
from datetime import timedelta
import azure.batch.models as batch_models
from aztk import models
from aztk.utils import constants, helpers
def submit_job(
core_job_operations,
job_configuration,
start_task,
job_manager_task,
autoscale_formula,
software_metadata_key: str,
vm_image_model,
application_metadata,
):
"""
Job Submission
:param job_configuration -> aztk_sdk.spark.models.JobConfiguration
:param start_task -> batch_models.StartTask
:param job_manager_task -> batch_models.TaskAddParameter
:param autoscale_formula -> str
:param software_metadata_key -> str
:param vm_image_model -> aztk_sdk.models.VmImage
:returns None
"""
core_job_operations.get_cluster_data(job_configuration.id).save_cluster_config(
job_configuration.to_cluster_config())
# get a verified node agent sku
sku_to_use, image_ref_to_use = helpers.select_latest_verified_vm_image_with_node_agent_sku(
vm_image_model.publisher, vm_image_model.offer, vm_image_model.sku, core_job_operations.batch_client)
# set up subnet if necessary
network_conf = None
if job_configuration.subnet_id:
network_conf = batch_models.NetworkConfiguration(subnet_id=job_configuration.subnet_id)
# set up a schedule for a recurring job
auto_pool_specification = batch_models.AutoPoolSpecification(
pool_lifetime_option=batch_models.PoolLifetimeOption.job_schedule,
auto_pool_id_prefix=job_configuration.id,
keep_alive=False,
pool=batch_models.PoolSpecification(
display_name=job_configuration.id,
virtual_machine_configuration=batch_models.VirtualMachineConfiguration(
image_reference=image_ref_to_use, node_agent_sku_id=sku_to_use),
vm_size=job_configuration.vm_size,
enable_auto_scale=True,
auto_scale_formula=autoscale_formula,
auto_scale_evaluation_interval=timedelta(minutes=5),
start_task=start_task,
enable_inter_node_communication=not job_configuration.mixed_mode(),
network_configuration=network_conf,
max_tasks_per_node=4,
metadata=[
batch_models.MetadataItem(name=constants.AZTK_SOFTWARE_METADATA_KEY, value=software_metadata_key),
batch_models.MetadataItem(
name=constants.AZTK_MODE_METADATA_KEY, value=constants.AZTK_JOB_MODE_METADATA),
],
),
)
# define job specification
job_spec = batch_models.JobSpecification(
pool_info=batch_models.PoolInformation(auto_pool_specification=auto_pool_specification),
display_name=job_configuration.id,
on_all_tasks_complete=batch_models.OnAllTasksComplete.terminate_job,
job_manager_task=job_manager_task,
metadata=[batch_models.MetadataItem(name="applications", value=application_metadata)],
)
# define schedule
schedule = batch_models.Schedule(
do_not_run_until=None, do_not_run_after=None, start_window=None, recurrence_interval=None)
# create job schedule and add task
setup = batch_models.JobScheduleAddParameter(id=job_configuration.id, schedule=schedule, job_specification=job_spec)
core_job_operations.batch_client.job_schedule.add(setup)
if job_configuration.scheduling_target != models.SchedulingTarget.Any:
core_job_operations.create_task_table(job_configuration.id)
return core_job_operations.batch_client.job_schedule.get(job_schedule_id=job_configuration.id)
|
150071
|
import json
import os
from twisted.internet.defer import inlineCallbacks, returnValue, DeferredQueue
from twisted.internet.task import Clock
from twisted.web.client import Agent
from twisted.web.server import NOT_DONE_YET
import certifi
from vumi.message import TransportUserMessage
from vumi.tests.fake_connection import FakeHttpServer
from vumi.tests.helpers import VumiTestCase
from vumi.transports.tests.helpers import TransportHelper
from vumi.transports.vumi_bridge import GoConversationTransport
from vumi.config import ConfigError
from vumi.utils import http_request_full
class TestGoConversationTransportBase(VumiTestCase):
transport_class = None
def setUp(self):
self.tx_helper = self.add_helper(TransportHelper(self.transport_class))
self.fake_http = FakeHttpServer(self.handle_inbound_request)
self.clock = Clock()
self._request_queue = DeferredQueue()
self._pending_reqs = []
self.add_cleanup(self.finish_requests)
@inlineCallbacks
def get_transport(self, start=True, **config):
defaults = {
'account_key': 'account-key',
'conversation_key': 'conversation-key',
'access_token': 'access-token',
'publish_status': True,
}
defaults.update(config)
transport = yield self.tx_helper.get_transport(defaults, start=False)
transport.agent_factory = self.fake_http.get_agent
if start:
yield transport.startWorker()
returnValue(transport)
@inlineCallbacks
def finish_requests(self):
for req in self._pending_reqs:
if not req.finished:
yield req.finish()
def handle_inbound_request(self, request):
self._request_queue.put(request)
return NOT_DONE_YET
@inlineCallbacks
def get_next_request(self):
req = yield self._request_queue.get()
self._pending_reqs.append(req)
returnValue(req)
class TestGoConversationTransport(TestGoConversationTransportBase):
transport_class = GoConversationTransport
def test_server_settings_without_configs(self):
return self.assertFailure(self.get_transport(), ConfigError)
def get_configured_transport(self, start=True):
return self.get_transport(start=start, web_path='test', web_port='0')
def post_msg(self, url, msg_json):
data = msg_json.encode('utf-8')
return http_request_full(
url.encode('utf-8'), data=data, headers={
'Content-Type': 'application/json; charset=utf-8',
})
@inlineCallbacks
def test_receiving_messages(self):
transport = yield self.get_configured_transport()
url = transport.get_transport_url('messages.json')
msg = self.tx_helper.make_inbound("inbound")
resp = yield self.post_msg(url, msg.to_json())
self.assertEqual(resp.code, 200)
[received_msg] = yield self.tx_helper.wait_for_dispatched_inbound(1)
self.assertEqual(received_msg['message_id'], msg['message_id'])
[status] = yield self.tx_helper.wait_for_dispatched_statuses(1)
self.assertEquals(status['status'], 'ok')
self.assertEquals(status['component'], 'received-from-vumi-go')
self.assertEquals(status['type'], 'good_request')
self.assertEquals(status['message'], 'Good request received')
@inlineCallbacks
def test_receive_bad_message(self):
transport = yield self.get_configured_transport()
url = transport.get_transport_url('messages.json')
resp = yield self.post_msg(url, 'This is not JSON.')
self.assertEqual(resp.code, 400)
[failure] = self.flushLoggedErrors()
self.assertTrue('No JSON object' in str(failure))
[status] = yield self.tx_helper.wait_for_dispatched_statuses(1)
self.assertEquals(status['status'], 'down')
self.assertEquals(status['component'], 'received-from-vumi-go')
self.assertEquals(status['type'], 'bad_request')
self.assertEquals(status['message'], 'Bad request received')
@inlineCallbacks
def test_receiving_ack_events(self):
transport = yield self.get_configured_transport()
url = transport.get_transport_url('events.json')
# prime the mapping
yield transport.map_message_id('remote', 'local')
ack = self.tx_helper.make_ack(event_id='event-id')
ack['user_message_id'] = 'remote'
resp = yield self.post_msg(url, ack.to_json())
self.assertEqual(resp.code, 200)
[received_ack] = yield self.tx_helper.wait_for_dispatched_events(1)
self.assertEqual(received_ack['event_id'], ack['event_id'])
self.assertEqual(received_ack['user_message_id'], 'local')
self.assertEqual(received_ack['sent_message_id'], 'remote')
statuses = yield self.tx_helper.wait_for_dispatched_statuses(1)
self.assertEqual(len(statuses), 2)
self.assertEquals(statuses[0]['status'], 'ok')
self.assertEquals(statuses[0]['component'], 'sent-by-vumi-go')
self.assertEquals(statuses[0]['type'], 'vumi_go_sent')
self.assertEquals(statuses[0]['message'], 'Sent by Vumi Go')
self.assertEquals(statuses[1]['status'], 'ok')
self.assertEquals(statuses[1]['component'], 'vumi-go-event')
self.assertEquals(statuses[1]['type'], 'good_request')
self.assertEquals(statuses[1]['message'],
'Good event received from Vumi Go')
@inlineCallbacks
def test_receiving_nack_events(self):
transport = yield self.get_configured_transport()
url = transport.get_transport_url('events.json')
# prime the mapping
yield transport.map_message_id('remote', 'local')
nack = self.tx_helper.make_nack(event_id='event-id')
nack['user_message_id'] = 'remote'
resp = yield self.post_msg(url, nack.to_json())
self.assertEqual(resp.code, 200)
[received_nack] = yield self.tx_helper.wait_for_dispatched_events(1)
self.assertEqual(received_nack['event_id'], nack['event_id'])
self.assertEqual(received_nack['user_message_id'], 'local')
self.assertEqual(received_nack['sent_message_id'], 'remote')
statuses = yield self.tx_helper.wait_for_dispatched_statuses(1)
self.assertEqual(len(statuses), 2)
self.assertEquals(statuses[0]['status'], 'down')
self.assertEquals(statuses[0]['component'], 'sent-by-vumi-go')
self.assertEquals(statuses[0]['type'], 'vumi_go_failed')
self.assertEquals(statuses[0]['message'], 'Vumi Go failed to send')
self.assertEquals(statuses[1]['status'], 'ok')
self.assertEquals(statuses[1]['component'], 'vumi-go-event')
self.assertEquals(statuses[1]['type'], 'good_request')
self.assertEquals(statuses[1]['message'],
'Good event received from Vumi Go')
@inlineCallbacks
def test_receive_bad_event(self):
transport = yield self.get_configured_transport()
url = transport.get_transport_url('events.json')
resp = yield self.post_msg(url, 'This is not JSON.')
self.assertEqual(resp.code, 400)
[failure] = self.flushLoggedErrors()
self.assertTrue('No JSON object' in str(failure))
[status] = yield self.tx_helper.wait_for_dispatched_statuses(1)
self.assertEquals(status['status'], 'down')
self.assertEquals(status['component'], 'vumi-go-event')
self.assertEquals(status['type'], 'bad_request')
self.assertEquals(status['message'], 'Bad event received from Vumi Go')
@inlineCallbacks
def test_cacerts_installed(self):
yield self.get_configured_transport()
self.assertEqual(os.environ["SSL_CERT_FILE"], certifi.where())
@inlineCallbacks
def test_sending_messages(self):
yield self.get_configured_transport()
msg = self.tx_helper.make_outbound(
"outbound", session_event=TransportUserMessage.SESSION_CLOSE)
d = self.tx_helper.dispatch_outbound(msg)
req = yield self.get_next_request()
received_msg = json.loads(req.content.read())
self.assertEqual(received_msg, {
'content': msg['content'],
'in_reply_to': None,
'to_addr': msg['to_addr'],
'message_id': msg['message_id'],
'session_event': TransportUserMessage.SESSION_CLOSE,
'helper_metadata': {},
})
remote_id = TransportUserMessage.generate_id()
reply = msg.copy()
reply['message_id'] = remote_id
req.write(reply.to_json().encode('utf-8'))
req.finish()
yield d
[ack] = yield self.tx_helper.wait_for_dispatched_events(1)
self.assertEqual(ack['user_message_id'], msg['message_id'])
self.assertEqual(ack['sent_message_id'], remote_id)
[status] = yield self.tx_helper.wait_for_dispatched_statuses(1)
self.assertEquals(status['status'], 'ok')
self.assertEquals(status['component'], 'submitted-to-vumi-go')
self.assertEquals(status['type'], 'good_request')
self.assertEquals(status['message'], 'Message accepted by Vumi Go')
@inlineCallbacks
def test_sending_bad_messages(self):
yield self.get_configured_transport()
msg = self.tx_helper.make_outbound(
"outbound", session_event=TransportUserMessage.SESSION_CLOSE)
self.tx_helper.dispatch_outbound(msg)
req = yield self.get_next_request()
req.setResponseCode(400, "Bad Request")
req.finish()
[status] = yield self.tx_helper.wait_for_dispatched_statuses(1)
self.assertEquals(status['status'], 'down')
self.assertEquals(status['component'], 'submitted-to-vumi-go')
self.assertEquals(status['type'], 'bad_request')
self.assertEquals(status['message'],
'Message submission rejected by Vumi Go')
@inlineCallbacks
def test_teardown_before_start(self):
transport = yield self.get_configured_transport(start=False)
yield transport.teardown_transport()
def test_agent_factory_default(self):
self.assertTrue(isinstance(
GoConversationTransport.agent_factory(), Agent))
|
150117
|
from concurrent import futures
class FooError(Exception):
pass
def foo(x, y):
if x > y:
raise FooError
return y - x
if __name__ == '__main__':
    # The guard is required for ProcessPoolExecutor on platforms that spawn workers.
    with futures.ProcessPoolExecutor() as pool:
        running = [pool.submit(foo, 5, 8),
                   pool.submit(foo, 6, 9)]
        while running:
            done, running = futures.wait(running, timeout=2.5)
            for future in done:
                data = future.result()  # re-raises FooError if the task failed
                print('result:', data)
|
150168
|
import numpy as np
from numpy.testing import assert_allclose
from autumn.models.covid_19.mixing_matrix.mixing_adjusters.location_adjuster import LocationMixingAdjuster
MM = np.ones([16, 16])
HOME_MM = MM * 0.1
OTHER_LOCATIONS_MM = MM * 0.2
SCHOOL_MM = MM * 0.3
WORK_MM = MM * 0.6
MIXING_MATRICES = {
'all_locations': MM,
'home': HOME_MM,
'other_locations': OTHER_LOCATIONS_MM,
'school': SCHOOL_MM,
'work': WORK_MM
}
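# Behaviour assumed by the tests below: for each location with a mobility
# and/or microdistancing function, the adjuster applies
#   adjusted = all_locations + sum_loc location_mm * (mobility(t) * microdistancing(t) - 1)
# where a missing function is treated as the constant 1.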
def test_location_adjuster__with_no_data():
"""
    Ensure there is no change if no mixing data has been supplied.
"""
mobility_funcs = {}
microdistancing_funcs = {}
adjuster = LocationMixingAdjuster(MIXING_MATRICES, mobility_funcs, microdistancing_funcs)
mm = np.ones([16, 16])
adj_mm = adjuster.get_adjustment(0, mm)
assert_allclose(mm, adj_mm, atol=0.01, verbose=True)
def test_location_adjuster__with_only_mobility_data():
mobility_funcs = {"work": lambda t: 0.3 * t, "school": lambda t: 0.2 * t}
microdistancing_funcs = {}
adjuster = LocationMixingAdjuster(MIXING_MATRICES, mobility_funcs, microdistancing_funcs)
mm = np.ones([16, 16])
adj_mm = adjuster.get_adjustment(1, mm)
work_component = WORK_MM * (0.3 - 1)
school_component = SCHOOL_MM * (0.2 - 1)
expect_mm = MM + work_component + school_component
assert_allclose(expect_mm, adj_mm, atol=0.01, verbose=True)
def test_location_adjuster__with_only_microdistancing_data():
mobility_funcs = {}
microdistancing_funcs = {"work": lambda t: 0.3 * t, "school": lambda t: 0.2 * t}
adjuster = LocationMixingAdjuster(MIXING_MATRICES, mobility_funcs, microdistancing_funcs)
mm = np.ones([16, 16])
adj_mm = adjuster.get_adjustment(1, mm)
work_component = WORK_MM * (0.3 - 1)
school_component = SCHOOL_MM * (0.2 - 1)
expect_mm = MM + work_component + school_component
assert_allclose(expect_mm, adj_mm, atol=0.01, verbose=True)
def test_location_adjuster__with_microdistancing_and_mobility_data():
mobility_funcs = {"work": lambda t: 0.3 * t, "home": lambda t: 0.5}
microdistancing_funcs = {"school": lambda t: 0.2 * t, "home": lambda t: 0.7}
adjuster = LocationMixingAdjuster(MIXING_MATRICES, mobility_funcs, microdistancing_funcs)
mm = np.ones([16, 16])
adj_mm = adjuster.get_adjustment(1, mm)
work_component = WORK_MM * (0.3 - 1)
school_component = SCHOOL_MM * (0.2 - 1)
home_component = HOME_MM * (0.5 * 0.7 - 1)
expect_mm = MM + work_component + school_component + home_component
assert_allclose(expect_mm, adj_mm, atol=0.01, verbose=True)
def _get_country_mixing_matrix(sheet_type, iso3):
    if sheet_type == "home":
        return HOME_MM
    elif sheet_type == "other_locations":
        return OTHER_LOCATIONS_MM
    elif sheet_type == "school":
        return SCHOOL_MM
    elif sheet_type == "work":
        return WORK_MM
    else:
        return MM
|
150188
|
from .db_utils import PostgresController
from .enums import Action, Change
__all__ = ['PostgresController', 'Action', 'Change']
|
150204
|
from django.shortcuts import render
### Rest API setup
import rest_framework.routers
import rest_framework.viewsets
import rest_framework.filters
from voxel_globe.websockets.serializers import LogMessageSerializer
from voxel_globe.websockets.models import LogMessage
router = rest_framework.routers.DefaultRouter()
class LogMessageViewSet(rest_framework.viewsets.ModelViewSet):
queryset = LogMessage.objects.all()
serializer_class = LogMessageSerializer
filter_backends = (rest_framework.filters.DjangoFilterBackend,)
filter_fields = ['message_text', 'message_type', 'task_id']
def get_queryset(self):
return super(LogMessageViewSet, self).get_queryset().filter(owner=self.request.user)
|
150205
|
import itertools
import os
import sys
TEMPLATE = '''\
#!/bin/bash
#PBS -N {job_name}
#PBS -J 1-{num_jobs:d}
echo "Activating environment {env_name}"
source /export/vcl-nfs1-data2/shared/euichuls/miniconda3/bin/activate {env_name}
echo "PBS_ARRAY_INDEX: $PBS_ARRAY_INDEX"
echo "Hostname: $HOSTNAME"
source /export/vcl-nfs2/shared/common/jobs/gpu_select.sh
echo {job_name}
cd {base_dir}
'''
def main():
job_name = 'spider_att3'
commands = []
for output_from, upd_steps in itertools.product(('true', 'false'), (0, 1, 2)):
for qenc, ctenc, tinc in itertools.product(('e', 'eb'), ('e', 'eb', 'ebs'),
('true', 'false')):
if (output_from, qenc, ctenc, tinc, upd_steps) not in (
('false', 'eb', 'eb', 'true', 2),
('true', 'eb', 'ebs', 'false', 0),
('true', 'eb', 'ebs', 'false', 2),
('true', 'eb', 'ebs', 'true', 2),
('true', 'e', 'e', 'true', 1)):
continue
commands.append('python train.py --config configs/spider-20190205/nl2code-0214.jsonnet --config-args "{{output_from: {}, qenc: \'{}\', ctenc: \'{}\', tinc: {}, upd_steps: {}}}" --logdir logdirs/20190214'.format(
output_from, qenc, ctenc, tinc, upd_steps))
    with open('experiments/PBS_20190214_{}.sh'.format(job_name), 'w') as f:
        f.write(TEMPLATE.format(
            job_name=job_name,
            env_name='seq2s',
            num_jobs=len(commands),
            base_dir=os.path.realpath(os.getcwd())))
        for i, cmd in enumerate(commands):
            f.write('if [[ $PBS_ARRAY_INDEX == {i} ]]; then {cmd}; fi\n'.format(
                i=i + 1, cmd=cmd))
if __name__ == '__main__':
main()
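# Hedged illustration (not part of the original script): for i=0 and
# cmd='python train.py', the loop above emits the dispatch line
#   if [[ $PBS_ARRAY_INDEX == 1 ]]; then python train.py; fi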
|
150207
|
from datetime import datetime
import os
from .testutils import FullStackTests
# ============================================================================
class TestWebRecCollsAPI(FullStackTests):
@classmethod
def setup_class(cls):
super(TestWebRecCollsAPI, cls).setup_class()
def test_create_anon_coll(self):
res = self.testapp.post_json('/api/v1/collections?user={user}'.format(user=self.anon_user), params={'title': 'Temp'})
assert self.testapp.cookies['__test_sesh'] != ''
assert res.json['collection']['id'] == 'temp'
assert res.json['collection']['title'] == 'Temp'
def test_create_anon_coll_dup_error(self):
res = self.testapp.post_json('/api/v1/collections?user={user}'.format(user=self.anon_user),
params={'title': 'Temp'}, status=400)
assert self.testapp.cookies['__test_sesh'] != ''
assert res.json == {'error': 'duplicate_name'}
def test_get_anon_coll(self):
res = self.testapp.get('/api/v1/collection/temp?user={user}'.format(user=self.anon_user))
assert res.json['collection']
coll = res.json['collection']
assert coll['size'] == 0
assert coll['id'] == 'temp'
assert coll['slug'] == 'temp'
assert coll['title'] == 'Temp'
assert coll['slug_matched'] == True
assert coll['created_at'] <= datetime.utcnow().isoformat()
assert self.ISO_DT_RX.match(coll['created_at'])
assert self.ISO_DT_RX.match(coll['updated_at'])
assert coll['recordings'] == []
assert coll['public'] == False
assert coll['public_index'] == False
def test_get_anon_coll_wrong_user(self):
res = self.testapp.get('/api/v1/collection/temp?user=temp-ABC', status=404)
assert res.json['error'] == 'no_such_user'
def test_list_anon_collections(self):
res = self.testapp.get('/api/v1/collections?user={user}'.format(user=self.anon_user))
colls = res.json['collections']
assert len(colls) == 1
colls.sort(key=lambda x: x['id'])
assert colls[0]['id'] == 'temp'
assert colls[0]['title'] == 'Temp'
assert 'pages' not in colls[0]
assert 'recordings' not in colls[0]
assert 'lists' not in colls[0]
#assert colls[0]['download_url'] == 'http://localhost:80/{user}/temp/$download'.format(user=self.anon_user)
def test_error_no_title(self):
res = self.testapp.post_json('/api/v1/collections?user={user}'.format(user=self.anon_user), status=400)
assert res.json['error'] == 'invalid_coll_name'
def test_error_invalid_temp_title(self):
res = self.testapp.post_json('/api/v1/collections?user={user}'.format(user=self.anon_user),
params={'title': 'new'}, status=400)
assert res.json['error'] == 'invalid_temp_coll_name'
def test_error_external_not_allowed(self):
params = {'external': True,
'title': 'temp'
}
res = self.testapp.post_json('/api/v1/collections?user={user}'.format(user=self.anon_user), params=params,
status=403)
assert res.json == {'error': 'external_not_allowed'}
def test_error_no_such_coll(self):
res = self.testapp.get('/api/v1/collection/blah@$?user={user}'.format(user=self.anon_user), status=404)
assert res.json == {'error': 'no_such_collection'}
def test_error_missing_user_coll(self):
res = self.testapp.post_json('/api/v1/collections', params={'title': 'Recording'}, status=400)
assert res.json == {'error': 'no_user_specified'}
def test_error_invalid_user_coll(self):
res = self.testapp.post_json('/api/v1/collections?user=user', params={'title': 'Example'}, status=404)
assert res.json == {'error': 'no_such_user'}
def test_error_invalid_user_coll_2(self):
res = self.testapp.post_json('/api/v1/collections?user=temp$123', params={'title': 'Example'}, status=404)
assert res.json == {'error': 'no_such_user'}
def test_delete_coll(self):
res = self.testapp.delete('/api/v1/collection/temp?user={user}'.format(user=self.anon_user))
assert res.json == {'deleted_id': 'temp'}
|
150212
|
from conftest import DeSECAPIV1Client, NSLordClient, random_domainname
def test_create(api_user: DeSECAPIV1Client):
assert len(api_user.domain_list()) == 0
assert api_user.domain_create(random_domainname()).status_code == 201
assert len(api_user.domain_list()) == 1
def test_get(api_user_domain: DeSECAPIV1Client):
domain = api_user_domain.get(f"/domains/{api_user_domain.domain}/").json()
assert NSLordClient.query(api_user_domain.domain, 'CDS')[1] == set(domain['keys'][0]['ds'])
assert domain['name'] == api_user_domain.domain
def test_destroy(api_user_domain: DeSECAPIV1Client):
n = len(api_user_domain.domain_list())
assert api_user_domain.domain_destroy(api_user_domain.domain).status_code == 204
assert len(api_user_domain.domain_list()) == n - 1
|
150237
|
def pytest_addoption(parser):
group = parser.getgroup("pypyjit options")
group.addoption("--pypy", action="store", default=None, dest="pypy_c",
help="the location of the JIT enabled pypy-c")
|
150241
|
import logging as log
from pathlib import Path
from fud.stages import Stage, SourceType
from fud.utils import TmpDir
from fud import errors
class HwEmulationStage(Stage):
name = "wdb"
def __init__(self, config):
super().__init__(
src_state="xclbin",
target_state="wdb",
input_type=SourceType.Path,
output_type=SourceType.Path,
config=config,
description="Runs Vivado hw emulation",
)
xilinx_location = self.config["stages", self.name, "xilinx_location"]
xrt_location = self.config["stages", self.name, "xrt_location"]
self.setup_commands = (
f"source {xilinx_location}/settings64.sh && source {xrt_location}/setup.sh"
)
self.host_cpp = self.config["stages", self.name, "host"]
self.xrt = (
Path(self.config["global", "futil_directory"])
/ "fud"
/ "bitstream"
/ "xrt.ini"
)
self.sim_script = (
Path(self.config["global", "futil_directory"])
/ "fud"
/ "bitstream"
/ "sim_script.tcl"
)
self.mode = self.config["stages", self.name, "mode"]
self.device = "xilinx_u50_gen3x16_xdma_201920_3"
# remote execution
self.SSHClient = None
self.SCPClient = None
self.ssh_host = self.config["stages", self.name, "ssh_host"]
self.ssh_user = self.config["stages", self.name, "ssh_username"]
self.temp_location = self.config["stages", "xclbin", "temp_location"]
self.setup()
def _define_steps(self, input_data):
@self.step()
def import_libs():
"""Import remote libs."""
try:
from paramiko import SSHClient
from scp import SCPClient
self.SSHClient = SSHClient
self.SCPClient = SCPClient
except ModuleNotFoundError:
raise errors.RemoteLibsNotInstalled
@self.step()
def check_host_cpp():
"""
Make sure that `-s wdb.host` is provided
"""
if self.host_cpp is None:
raise errors.MissingDynamicConfiguration("wdb.host")
@self.step()
def establish_connection() -> SourceType.UnTyped:
"""
Establish SSH connection
"""
client = self.SSHClient()
client.load_system_host_keys()
client.connect(self.ssh_host, username=self.ssh_user)
return client
@self.step()
def make_remote_tmpdir(client: SourceType.UnTyped) -> SourceType.String:
"""
        Execute `mktemp -d` on the server.
"""
_, stdout, _ = client.exec_command(f"mktemp -d -p {self.temp_location}")
return stdout.read().decode("ascii").strip()
@self.step()
def send_files(
client: SourceType.UnTyped,
tmpdir: SourceType.String,
xclbin: SourceType.Path,
):
"""
Copy files over ssh channel
"""
with self.SCPClient(client.get_transport()) as scp:
scp.put(xclbin, remote_path=f"{tmpdir}/kernel.xclbin")
scp.put(self.host_cpp, remote_path=f"{tmpdir}/host.cpp")
scp.put(self.xrt, remote_path=f"{tmpdir}/xrt.ini")
scp.put(self.sim_script, remote_path=f"{tmpdir}/sim_script.tcl")
@self.step()
def setup_environment(client: SourceType.UnTyped):
"""
Source Xilinx scripts
"""
@self.step()
def compile_host(client: SourceType.UnTyped, tmpdir: SourceType.String):
"""
Compile the host code
"""
_, stdout, stderr = client.exec_command(
" ".join(
[
f"cd {tmpdir}",
"&&",
"g++",
"-I/opt/xilinx/xrt/include",
"-I/scratch/opt/Xilinx/Vivado/2020.2/include",
"-Wall -O0 -g -std=c++14 -fmessage-length=0",
"host.cpp",
"-o 'host'",
"-L/opt/xilinx/xrt/lib -lOpenCL -lpthread -lrt -lstdc++",
]
)
)
for chunk in iter(lambda: stdout.readline(2048), ""):
log.debug(chunk.strip())
log.debug(stderr.read().decode("UTF-8").strip())
@self.step()
def generate_emconfig(client: SourceType.UnTyped, tmpdir: SourceType.String):
"""
Generate emconfig.json
"""
_, stdout, stderr = client.exec_command(
" ".join(
[
f"cd {tmpdir}",
"&&",
"/scratch/opt/Xilinx/Vitis/2020.2/bin/emconfigutil",
f"--platform {self.device}",
"--od .",
]
)
)
for chunk in iter(lambda: stdout.readline(2048), ""):
log.debug(chunk.strip())
log.debug(stderr.read().decode("UTF-8").strip())
@self.step()
def emulate(client: SourceType.UnTyped, tmpdir: SourceType.String):
"""
        Emulate the xclbin
"""
_, stdout, stderr = client.exec_command(
" ".join(
[
f"cd {tmpdir}",
"&&",
self.setup_commands,
"&&",
f"XCL_EMULATION_MODE={self.mode}",
"./host",
"kernel.xclbin",
self.device,
]
)
)
for chunk in iter(lambda: stdout.readline(2048), ""):
log.debug(chunk.strip())
log.debug(stderr.read().decode("UTF-8").strip())
@self.step()
def download_wdb(
client: SourceType.UnTyped,
tmpdir: SourceType.String,
) -> SourceType.Stream:
"""
        Download the generated wdb file
"""
local_tmpdir = TmpDir()
wdb_path = Path(local_tmpdir.name) / "kernel.wdb"
with self.SCPClient(client.get_transport()) as scp:
scp.get(
f"{tmpdir}/xilinx_u50_gen3x16_xdma_201920_3-0-kernel.wdb",
local_path=str(wdb_path),
)
return wdb_path.open("rb")
@self.step()
def cleanup(client: SourceType.UnTyped, tmpdir: SourceType.String):
"""
Close SSH Connection and cleanup temporaries.
"""
if self.config["stages", self.name, "save_temps"] is None:
client.exec_command("rm -r {tmpdir}")
else:
print(tmpdir)
client.close()
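        # Hedged note (an assumption about fud's Stage API): invoking the
        # decorated steps below is taken to record a lazy dataflow
        # (client -> tmpdir -> files -> wdb) rather than execute immediately.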
        import_libs()
        check_host_cpp()
client = establish_connection()
tmpdir = make_remote_tmpdir(client)
send_files(client, tmpdir, input_data)
compile_host(client, tmpdir)
generate_emconfig(client, tmpdir)
emulate(client, tmpdir)
wdb = download_wdb(client, tmpdir)
cleanup(client, tmpdir)
return wdb
|
150254
|
from rest_framework.filters import BaseFilterBackend, SearchFilter
from django_filters import rest_framework as filters
from core.models import MaintenanceRecord
from api.permissions import CanEditOrReadOnly
from api.v2.serializers.details import MaintenanceRecordSerializer
from api.v2.views.base import AuthOptionalViewSet
class MaintenanceRecordFilterBackend(BaseFilterBackend):
"""
Filter MaintenanceRecords using the request_user and 'query_params'
"""
def filter_queryset(self, request, queryset, view):
request_params = request.query_params
active = request_params.get('active')
if isinstance(active, basestring) and active.lower() == 'true'\
or isinstance(active, bool) and active:
queryset = MaintenanceRecord.active()
return queryset
class MaintenanceRecordViewSet(AuthOptionalViewSet):
"""
API endpoint that allows records to be viewed or edited.
"""
http_method_names = [
'get', 'post', 'put', 'patch', 'delete', 'head', 'options', 'trace'
]
queryset = MaintenanceRecord.objects.order_by('-start_date')
permission_classes = (CanEditOrReadOnly, )
serializer_class = MaintenanceRecordSerializer
filter_backends = (
filters.DjangoFilterBackend, SearchFilter,
MaintenanceRecordFilterBackend
)
|
150268
|
import bgl
from .cui_shapes import *
from .cui_bezier_box import *
from .cui_items import *
#
#
#
class CUIContainer(CUIRectWidget):
def __init__(self):
super().__init__()
self.horizontal_margin = 0
self.vertical_margin = 0
self.draw_backdrop = True
return
#
def draw(self, color_override=None):
if self.draw_backdrop:
if color_override:
super().draw(color_override)
else:
super().draw()
#
def set_margins(self, horizontal=None, vertical=None):
if horizontal != None:
self.horizontal_margin = horizontal
if vertical != None:
self.vertical_margin = vertical
return
#
def __str__(self):
return 'CUI Container'
class CUIBoxContainer(CUIContainer):
def __init__(self):
super().__init__()
self.containers = []
self.color_font = (0.0, 0.0, 1.0, 1.0)
self.color_box = (0.0, 0.0, 0.2, 1.0)
self.color_row = (0.0, 0.0, 0.25, 1.0)
self.color_item = (0.0, 0.0, 0.3, 0.7)
self.color_hover = (0.0, 0.0, 0.5, 0.7)
self.color_click = (0.0, 0.0, 0.6, 1.0)
self.scroll_offset = 0
self.scrolling = False
self.scroll_bar_size = 14
self.scroll_max_dist = 0
self.scroll_max_move = 0
self.scroll_bar = None
self.scroll_click = False
self.scroll_prev_mouse = [0, 0]
self.horizontal_margin = 4
self.vertical_margin = 4
self.collapse = False
self.header_box = None
self.header = None
self.type = 'BOX'
self.container_separation = 4
return
#
def create_shape_data(self):
total_height = 0
total_height += self.vertical_margin
for c, cont in enumerate(self.containers):
cont.width = self.width - self.horizontal_margin*2
cont.create_shape_data()
cont.pos_offset[0] = self.horizontal_margin
cont.pos_offset[1] = -total_height
if self.header:
if len(self.header.shapes) > 0:
height = self.header.height
arrow_size = height*0.333
if self.collapse:
self.header.shapes[0].set_base_points([[arrow_size*0.5, -height/2+arrow_size/2], [
arrow_size*1.5, -height/2], [arrow_size*0.5, -height/2-arrow_size/2]])
else:
self.header.shapes[0].set_base_points([[arrow_size*0.5, -height/2+arrow_size/2], [
arrow_size*1.5, -height/2+arrow_size/2], [arrow_size, -height/2-arrow_size/2]])
self.header_box.width = self.width - self.horizontal_margin*2
self.header_box.create_shape_data()
total_height += cont.height
if c < len(self.containers)-1:
total_height += self.container_separation
max_height = None
if self.max_height != None and self.type == 'PANEL':
max_height = self.max_height/self.scale
if max_height != None:
if total_height > max_height:
self.scrolling = True
else:
self.scrolling = False
self.scroll_offset = 0
total_height += self.vertical_margin
self.height = total_height
if self.scrolling and self.type == 'PANEL':
self.scroll_max_dist = self.height - max_height
if self.scroll_offset > self.scroll_max_dist:
self.scroll_offset = self.scroll_max_dist
for cont in self.containers:
cont.resize_width(
self.width-self.horizontal_margin*2-self.scroll_bar_size-4, 0.0)
cont.create_shape_data()
cont.set_scale(self.scale)
self.scroll_bar = CUIButton(
max_height*(max_height/self.height), '')
self.scroll_bar.width = self.scroll_bar_size
self.scroll_bar.set_color(color=self.color_item, color_hover=self.color_hover,
color_click=self.color_click, color_font=self.color_font)
self.scroll_bar.pos_offset[0] = self.width - \
self.scroll_bar_size - self.horizontal_margin
self.scroll_bar.pos_offset[1] = -self.vertical_margin
self.scroll_max_move = max_height - \
self.vertical_margin*2 - self.scroll_bar.height
self.scroll_bar.create_shape_data()
if self.collapse:
self.height = self.vertical_margin*2
if self.header:
self.height += self.header_box.height
if self.scrolling and self.type == 'PANEL':
self.scroll_bar.height = self.height - self.vertical_margin * 2
self.scroll_bar.create_shape_data()
super().create_shape_data()
return
def update_batches(self, position=[0, 0]):
super().update_batches(position)
pos = [position[0]+self.scale_pos_offset[0],
position[1]+self.scale_pos_offset[1]]
if self.scroll_bar:
scroll_offset = int(
(self.scroll_max_move*(self.scroll_offset/self.scroll_max_dist)) * self.scale)
self.scroll_bar.update_batches([pos[0], pos[1]-scroll_offset])
for cont in self.containers:
cont.update_batches(
[pos[0], pos[1]+self.scroll_offset * self.scale])
return
def draw(self, position=[0, 0]):
if self.visible:
super().draw()
if self.scroll_bar and self.scrolling:
self.scroll_bar.draw()
if self.scrolling and self.type == 'PANEL':
# cur_scissor = None
# if bgl.glIsEnabled(bgl.GL_SCISSOR_TEST) == 1:
# cur_scissor = bgl.Buffer(bgl.GL_INT, 4)
# bgl.glGetIntegerv(bgl.GL_SCISSOR_BOX, cur_scissor)
# bgl.glDisable(bgl.GL_SCISSOR_TEST)
pos = [position[0]+self.scale_pos_offset[0],
position[1]+self.scale_pos_offset[1]]
clip_pos = [pos[0]+self.horizontal_margin,
pos[1]-self.vertical_margin]
offset = 0
# if pos[1]+self.scroll_offset > position[1]:
# offset = pos[1]+self.scroll_offset-position[1]
bgl.glEnable(bgl.GL_SCISSOR_TEST)
bgl.glScissor(
round(pos[0]+self.horizontal_margin*self.scale),
round(pos[1]-self.height*self.scale +
self.vertical_margin*self.scale),
round((self.width-self.horizontal_margin*2)*self.scale),
round((self.height-self.vertical_margin*2-offset)*self.scale)
)
for cont in self.containers:
if self.collapse == False or cont == self.header_box:
cont.draw()
if self.scrolling and self.type == 'PANEL':
bgl.glDisable(bgl.GL_SCISSOR_TEST)
# if cur_scissor:
# bgl.glEnable(bgl.GL_SCISSOR_TEST)
# bgl.glScissor(cur_scissor[0],cur_scissor[1],cur_scissor[2],cur_scissor[3])
return
#
def add_box(self, color=None):
box = CUIBoxContainer()
box.set_color(color=self.color_box,
color_outline=self.color_outline, color_font=self.color_font)
box.set_style_color(color_box=self.color_box, color_row=self.color_row,
color_item=self.color_item, color_hover=self.color_hover, color_click=self.color_click)
if color:
box.set_color(color=color)
box.width = self.width - self.horizontal_margin*2
self.containers.append(box)
return box
def add_header(self, collapsable, header_text, height, use_backdrop, hor_marg=4, vert_marg=4, backdrop_color=None, button_color=None):
box = CUIBoxContainer()
box.set_color(color=self.color_box,
color_outline=self.color_outline, color_font=self.color_font)
box.set_style_color(color_box=self.color_box, color_row=self.color_row,
color_item=self.color_item, color_hover=self.color_hover, color_click=self.color_click)
if backdrop_color:
box.set_color(color=backdrop_color)
box.width = self.width - self.horizontal_margin*2
box.horizontal_margin = hor_marg
box.vertical_margin = vert_marg
box.draw_backdrop = use_backdrop
row = CUIRowContainer()
row.width = box.width - box.horizontal_margin*2
row.set_color(color=self.color_row,
color_outline=self.color_outline, color_font=self.color_font)
row.set_style_color(color_item=self.color_item,
color_hover=self.color_hover, color_click=self.color_click)
header = CUIButton(height, header_text)
header.set_color(color=self.color_item, color_hover=self.color_hover,
color_click=self.color_click, color_font=self.color_font)
if button_color:
header.set_color(color=button_color)
if collapsable:
header.set_click_up_func(self.toggle_collapse)
arrow_size = height*0.333
poly = header.add_poly_shape([[arrow_size/2, -height/2+arrow_size/2],
[arrow_size*1.5, -height/2], [arrow_size/2, -height/2-arrow_size/2]])
poly.set_color(color=[0.0, 0.0, 0.9, 1.0])
row.items.append(header)
box.containers.append(row)
self.header_box = box
self.header = header
self.containers.insert(0, box)
return
def add_text_row(self, height, text, font_size=10):
row = CUIRowContainer()
row.width = self.width - self.horizontal_margin*2
row.set_color(color=self.color_row,
color_outline=self.color_outline, color_font=self.color_font)
row.set_style_color(color_item=self.color_item,
color_hover=self.color_hover, color_click=self.color_click)
label = row.add_label(height, text)
label.set_font_size(font_size)
self.containers.append(row)
return row
def add_row(self):
row = CUIRowContainer()
row.width = self.width - self.horizontal_margin*2
row.set_color(color=self.color_row,
color_outline=self.color_outline, color_font=self.color_font)
row.set_style_color(color_item=self.color_item,
color_hover=self.color_hover, color_click=self.color_click)
self.containers.append(row)
return row
#
def test_click_down(self, mouse_co, shift, pos, arguments=None):
status = None
if self.hover:
if self.scroll_bar and self.scrolling:
if self.scroll_bar.hover:
scroll_offset = int(
self.scroll_max_move*(self.scroll_offset/self.scroll_max_dist))
self.scroll_click = True
self.scroll_prev_mouse = mouse_co
status = ['BOX_SCROLL', None]
for cont in self.containers:
if cont.hover:
status = cont.test_click_down(mouse_co, shift, [
pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]+self.scroll_offset * self.scale], arguments)
if self.header and self.header_box == cont:
if self.header_box.hover:
if self.type == 'PANEL':
status = ['PANEL_HEADER', None]
else:
status = ['BOX_HEADER', None]
if status == None:
status = ['PANEL', None]
return status
def test_click_up(self, mouse_co, shift, pos, arguments=None):
status = None
if self.hover:
if self.scroll_bar and self.scrolling:
if self.scroll_bar.hover:
scroll_offset = int(
self.scroll_max_move*(self.scroll_offset/self.scroll_max_dist))
self.scroll_click = False
self.scroll_prev_mouse = [0, 0]
status = ['BOX_SCROLL', None]
for cont in self.containers:
if cont.hover:
status = cont.test_click_up(mouse_co, shift, [
pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]+self.scroll_offset * self.scale], arguments)
if self.header and self.header_box == cont:
if self.header_box.hover:
if self.type == 'PANEL':
status = ['PANEL_HEADER', None]
else:
status = ['BOX_HEADER', None]
if status == None:
status = ['PANEL', None]
return status
def click_down_move(self, mouse_co, shift, pos, arguments=None):
if self.hover:
test_containers = True
if self.scroll_bar and self.scrolling:
if self.scroll_bar.hover and self.scroll_click:
test_containers = False
scroll_offset = int(
self.scroll_max_move*(self.scroll_offset/self.scroll_max_dist))
fac = self.scroll_max_dist/self.scroll_max_move
offset = int(
(self.scroll_prev_mouse[1] - mouse_co[1]) * fac)
new_offset = int(
self.scroll_max_move*((self.scroll_offset+offset)/self.scroll_max_dist))
if abs(new_offset-scroll_offset) > 0:
self.scroll_box(offset)
self.update_batches()
self.scroll_prev_mouse = mouse_co
if test_containers:
for cont in self.containers:
cont.click_down_move(mouse_co, shift, [
pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]+self.scroll_offset * self.scale], arguments)
return
#
def toggle_collapse(self, modal, arguments=None):
self.collapse = not self.collapse
self.scroll_offset = 0
return
def test_hover(self, mouse_co, pos):
self.clear_hover()
status = None
if self.visible:
super().test_hover(mouse_co, [pos[0], pos[1]])
if self.hover:
position = [pos[0]+self.scale_pos_offset[0],
pos[1]+self.scale_pos_offset[1]]
status = 'PANEL'
if self.scroll_bar and self.scrolling:
scroll_offset = int(
(self.scroll_max_move*(self.scroll_offset/self.scroll_max_dist)) * self.scale)
self.scroll_bar.test_hover(
mouse_co, [position[0], position[1]-scroll_offset])
if self.scroll_bar.hover:
return 'PANEL_SCROLL'
test_containers = False
if self.scrolling and self.type == 'PANEL':
clip_pos = [position[0]+self.horizontal_margin *
self.scale, position[1]-self.vertical_margin*self.scale]
if clip_pos[0] < mouse_co[0] < clip_pos[0]+(self.width-self.horizontal_margin*2)*self.scale:
if clip_pos[1] > mouse_co[1] > clip_pos[1]-(self.height+self.vertical_margin*2)*self.scale:
test_containers = True
else:
test_containers = True
if test_containers:
for cont in self.containers:
c_status = cont.test_hover(
mouse_co, [position[0], position[1]+self.scroll_offset * self.scale])
if c_status:
if self.header and self.header_box == cont and self.header.hover:
status = 'PANEL_HEADER'
else:
status = c_status
return status
def resize_width(self, width, move_fac):
prev_width = self.width*self.scale
self.width = width
if self.min_width != None:
if self.width < self.min_width:
self.width = self.min_width
if self.max_width != None:
if self.width > self.max_width:
self.width = self.max_width
if self.type == 'PANEL':
self.set_new_position(
[self.position[0] + (self.width*self.scale - prev_width)*move_fac, self.position[1]])
for cont in self.containers:
cont.resize_width(self.width-self.horizontal_margin*2, move_fac)
return
def scroll_box(self, value):
self.scroll_offset += value
if self.scroll_offset < 0:
self.scroll_offset = 0
elif self.scroll_offset > self.scroll_max_dist and self.collapse == False:
self.scroll_offset = self.scroll_max_dist
elif self.scroll_offset > 0 and self.collapse:
self.scroll_offset = 0
return
#
def remove_container(self, index):
if index < len(self.containers):
self.containers.pop(index)
return
def clear_rows(self):
self.containers.clear()
return
def reset_item_states(self, clear_hover):
for cont in self.containers:
cont.reset_item_states(clear_hover)
return
def filter_change_custom_id(self, tar_id, new_id):
for cont in self.containers:
cont.filter_change_custom_id(tar_id, new_id)
return
def clear_hover(self):
self.hover = False
if self.scroll_bar:
self.scroll_bar.clear_hover()
for cont in self.containers:
cont.clear_hover()
return
#
def type_add_key(self, key):
for cont in self.containers:
cont.type_add_key(key)
return
def type_delete_key(self):
for cont in self.containers:
cont.type_delete_key()
return
def type_move_pos(self, value):
for cont in self.containers:
cont.type_move_pos(value)
return
def type_confirm(self, arguments=None):
for cont in self.containers:
cont.type_confirm(arguments)
return
def type_cancel(self):
for cont in self.containers:
cont.type_cancel()
return
def type_backspace_key(self):
for cont in self.containers:
cont.type_backspace_key()
return
#
def bezier_box_delete_points(self, pos, arguments=None):
status = None
bez_id = None
if self.hover:
for cont in self.containers:
if cont.hover:
status, bez_id = cont.bezier_box_delete_points(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]+self.scroll_offset * self.scale], arguments)
if status:
break
return status, bez_id
def bezier_box_sharpen_points(self, pos, offset, arguments=None):
status = False
hov_status = False
bez_id = None
if self.hover:
for cont in self.containers:
if cont.hover:
hov_status, bez_id, status = cont.bezier_box_sharpen_points(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]+self.scroll_offset * self.scale], offset, arguments)
if status:
break
return hov_status, bez_id, status
def bezier_box_rotate_points(self, pos, angle, arguments=None):
status = False
hov_status = False
mid_co = None
bez_id = None
if self.hover:
for cont in self.containers:
if cont.hover:
hov_status, bez_id, status, mid_co = cont.bezier_box_rotate_points(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]+self.scroll_offset * self.scale], angle, arguments)
if status:
break
return hov_status, bez_id, status, mid_co
def bezier_box_clear_sharpness(self, pos, arguments=None):
status = None
hov_status = False
if self.hover:
for cont in self.containers:
if cont.hover:
hov_status, status = cont.bezier_box_clear_sharpness(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]+self.scroll_offset * self.scale], arguments)
if status:
break
return hov_status, status
def bezier_box_clear_rotation(self, pos, arguments=None):
status = None
hov_status = False
if self.hover:
for cont in self.containers:
if cont.hover:
hov_status, status = cont.bezier_box_clear_rotation(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]+self.scroll_offset * self.scale], arguments)
if status:
break
return hov_status, status
def bezier_box_reset_sharpness(self, pos, arguments=None):
status = None
hov_status = False
if self.hover:
for cont in self.containers:
if cont.hover:
hov_status, status = cont.bezier_box_reset_sharpness(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]+self.scroll_offset * self.scale], arguments)
if status:
break
return hov_status, status
def bezier_box_reset_rotation(self, pos, arguments=None):
status = None
hov_status = False
if self.hover:
for cont in self.containers:
if cont.hover:
hov_status, status = cont.bezier_box_reset_rotation(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]+self.scroll_offset * self.scale], arguments)
if status:
break
return hov_status, status
def bezier_box_confirm_sharpness(self):
hov_status = False
if self.hover:
for cont in self.containers:
if cont.hover:
hov_status = cont.bezier_box_confirm_sharpness()
if hov_status:
break
return hov_status
def bezier_box_confirm_rotation(self):
hov_status = False
if self.hover:
for cont in self.containers:
if cont.hover:
hov_status = cont.bezier_box_confirm_rotation()
if hov_status:
break
return hov_status
def bezier_box_select_points(self, status):
hov_status = False
if self.hover:
for cont in self.containers:
if cont.hover:
hov_status = cont.bezier_box_select_points(status)
break
return hov_status
#
def set_scale(self, scale):
super().set_scale(scale)
if self.scroll_bar:
self.scroll_bar.set_scale(scale)
for cont in self.containers:
cont.set_scale(scale)
return
def set_header_bev(self, size, res):
if self.header:
self.header.set_bev(size, res)
return
def set_header_color(self, color=None, color_hover=None, color_click=None, color_font=None):
if self.header:
self.header.set_color(color=color, color_hover=color_hover,
color_click=color_click, color_font=color_font)
return
def set_header_font_size(self, size):
if self.header:
self.header.set_font_size(size)
return
def set_separation(self, sep):
self.container_separation = sep
return
def set_collapsed(self, status):
self.collapse = status
return
def set_button_bool(self, status, custom_id_filter=None):
for cont in self.containers:
cont.set_button_bool(status, custom_id_filter)
return
def set_color(self, color=None, color_outline=None, color_font=None):
if color_font:
self.color_font = color_font
super().set_color(color=color, color_outline=color_outline)
return
def set_style_color(self, color_box=None, color_row=None, color_item=None, color_hover=None, color_click=None):
if color_box != None:
self.color_box = color_box
if color_row != None:
self.color_row = color_row
if color_item != None:
self.color_item = color_item
if color_hover != None:
self.color_hover = color_hover
if color_click != None:
self.color_click = color_click
return
def set_header_icon_data(self, image=None, width=None, height=None, text_side=None):
if self.header != None:
self.header.set_icon_data(
image=image, width=width, height=height, text_side=text_side)
return
#
def __str__(self):
return 'CUI Box Container'
class CUIRowContainer(CUIContainer):
def __init__(self):
super().__init__()
self.items = []
self.color_font = (0.0, 0.0, 1.0, 1.0)
self.color_item = (0.0, 0.0, 0.3, 0.7)
self.color_hover = (0.0, 0.0, 0.5, 0.7)
self.color_click = (0.0, 0.0, 0.6, 1.0)
self.type = 'ROW'
self.items_separations = 4
self.draw_backdrop = False
return
#
def create_shape_data(self):
total_height = 0
total_height += self.vertical_margin
x_pos = 0
x_pos += self.horizontal_margin
highest = 0
        # calc width of buttons:
        # if the even division is bigger than an item's max width, remove that
        # width from the total and reconfigure for the non-maxed items
avail_width = self.width - self.horizontal_margin * \
2 - self.items_separations*(len(self.items)-1)
widths = []
max_widths = 0
rem_items = 0
for i, item in enumerate(self.items):
if item.item_type == 'BOOLEAN' and item.use_button == False:
if avail_width/len(self.items) > item.parts[0].bool_box_size:
widths.append(item.parts[0].bool_box_size)
max_widths += item.parts[0].bool_box_size
else:
widths.append(None)
rem_items += 1
elif item.max_width != None:
if avail_width/len(self.items) > item.max_width:
widths.append(item.max_width)
max_widths += item.max_width
else:
widths.append(None)
rem_items += 1
else:
widths.append(None)
rem_items += 1
new_avail = avail_width - max_widths
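        # Hedged worked example: with avail_width 300 and three items where one
        # caps at max_width 60, widths is [60, None, None], max_widths is 60,
        # new_avail is 240 and each remaining item gets 120 below.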
# place items in row
for i, item in enumerate(self.items):
if widths[i] != None:
item.width = widths[i]
# Not sure what this was here for but caused issues with max width button
# x_pos += new_avail/2
else:
item.width = new_avail/rem_items
item.pos_offset[0] = x_pos
item.pos_offset[1] = -total_height
item.create_shape_data()
x_pos += item.width
if i < len(self.items)-1:
x_pos += self.items_separations
if item.height > highest:
highest = item.height
if self.width-self.horizontal_margin > x_pos:
for i, item in enumerate(self.items):
item.pos_offset[0] += (self.width -
self.horizontal_margin-x_pos)/2
        # center items that are shorter than the tallest one vertically within the row
for i, item in enumerate(self.items):
if item.height < highest:
offset = int((highest-item.height)/2)
item.pos_offset[1] -= offset
total_height += highest
total_height += self.vertical_margin
self.height = total_height
super().create_shape_data()
return
def update_batches(self, position=[0, 0]):
super().update_batches(position)
for item in self.items:
item.update_batches(
[position[0]+self.scale_pos_offset[0], position[1]+self.scale_pos_offset[1]])
return
def draw(self):
super().draw()
if self.visible:
for item in self.items:
item.draw()
return
#
def add_button(self, height, text):
but = CUIButton(height, text)
but.set_color(color=self.color_item, color_hover=self.color_hover,
color_click=self.color_click, color_font=self.color_font)
self.items.append(but)
return but
def add_bool(self, height, text, default=False):
boolean = CUIBoolProp(height, text, default_val=default)
boolean.set_color(color=self.color_item, color_hover=self.color_hover,
color_click=self.color_click, color_font=self.color_font)
self.items.append(boolean)
return boolean
def add_label(self, height, text):
label = CUILabel(height, text)
label.set_color(color=self.color_item, color_hover=self.color_hover,
color_click=self.color_click, color_font=self.color_font)
self.items.append(label)
return label
def add_number(self, height, text, default, decimals, step, min, max):
num = CUINumProp(height, text, default, decimals, step, min, max)
num.set_color(color=self.color_item, color_hover=self.color_hover,
color_click=self.color_click, color_font=self.color_font)
num.set_value(default)
self.items.append(num)
return num
def add_bezier_box(self, height, type, points=None):
        # Fall back to a default two-point curve when fewer than two points
        # are supplied
        use_default = points is None or len(points) < 2
if use_default:
bez = CUIBezierBox(height, type, [(0, 1), (1, 0)])
else:
bez = CUIBezierBox(height, type, points)
self.items.append(bez)
return bez
#
def test_click_down(self, mouse_co, shift, pos, arguments=None):
status = None
if self.hover:
for item in self.items:
if item.hover:
item.click_down = True
status = item.click_down_func(mouse_co, shift, [
pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]], arguments)
return status
def test_click_up(self, mouse_co, shift, pos, arguments=None):
status = None
if self.hover:
for item in self.items:
if item.hover:
item.click_down = False
status = item.click_up_func(mouse_co, shift, [
pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]], arguments)
return status
def click_down_move(self, mouse_co, shift, pos, arguments=None):
if self.hover:
for item in self.items:
item.click_down_move(mouse_co, shift, [
pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]], arguments)
return
#
def resize_width(self, width, move_fac):
self.width = width
return
def test_hover(self, mouse_co, pos):
self.clear_hover()
status = None
super().test_hover(mouse_co, [pos[0], pos[1]])
if self.hover:
for item in self.items:
i_status = item.test_hover(
mouse_co, [pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]])
if i_status:
status = i_status
return status
def filter_change_custom_id(self, tar_id, new_id):
for item in self.items:
if item.custom_id == tar_id:
item.custom_id = new_id
return
#
def clear_hover(self):
self.hover = False
for item in self.items:
item.clear_hover()
return
def reset_item_states(self, clear_hover):
for item in self.items:
item.reset_item_states(clear_hover)
return
def remove_item(self, index):
if index < len(self.items):
self.items.pop(index)
return
#
def type_add_key(self, key):
for item in self.items:
if item.item_type == 'NUMBER':
item.type_add_key(key)
return
def type_delete_key(self):
for item in self.items:
if item.item_type == 'NUMBER':
item.type_delete_key()
return
def type_move_pos(self, value):
for item in self.items:
if item.item_type == 'NUMBER':
item.type_move_pos(value)
return
def type_confirm(self, arguments=None):
for item in self.items:
if item.item_type == 'NUMBER':
item.type_confirm(arguments)
return
def type_cancel(self):
for item in self.items:
if item.item_type == 'NUMBER':
item.type_cancel()
return
def type_backspace_key(self):
for item in self.items:
if item.item_type == 'NUMBER':
item.type_backspace_key()
return
#
def bezier_box_delete_points(self, pos, arguments=None):
status = None
bez_id = None
if self.hover:
for item in self.items:
if item.hover and item.item_type == 'BEZIER_BOX':
item.bezier_box_delete_points(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]], arguments)
status = True
bez_id = item.custom_id
break
return status, bez_id
def bezier_box_sharpen_points(self, pos, offset, arguments=None):
status = False
hov_status = False
bez_id = None
if self.hover:
for item in self.items:
if item.hover and item.item_type == 'BEZIER_BOX':
status = item.bezier_box_sharpen_points(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]], offset, arguments)
hov_status = True
bez_id = item.custom_id
break
return hov_status, bez_id, status
def bezier_box_rotate_points(self, pos, angle, arguments=None):
status = False
hov_status = False
mid_co = None
bez_id = None
if self.hover:
for item in self.items:
if item.hover and item.item_type == 'BEZIER_BOX':
status, mid_co = item.bezier_box_rotate_points(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]], angle, arguments)
hov_status = True
bez_id = item.custom_id
break
return hov_status, bez_id, status, mid_co
def bezier_box_clear_sharpness(self, pos, arguments=None):
status = None
hov_status = False
if self.hover:
for item in self.items:
if item.hover and item.item_type == 'BEZIER_BOX':
status = item.bezier_box_clear_sharpness(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]], arguments)
hov_status = True
break
return hov_status, status
def bezier_box_clear_rotation(self, pos, arguments=None):
status = None
hov_status = False
if self.hover:
for item in self.items:
if item.hover and item.item_type == 'BEZIER_BOX':
status = item.bezier_box_clear_rotation(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]], arguments)
hov_status = True
break
return hov_status, status
def bezier_box_reset_sharpness(self, pos, arguments=None):
status = None
hov_status = False
if self.hover:
for item in self.items:
if item.hover and item.item_type == 'BEZIER_BOX':
status = item.bezier_box_reset_sharpness(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]], arguments)
hov_status = True
break
return hov_status, status
def bezier_box_reset_rotation(self, pos, arguments=None):
status = None
hov_status = False
if self.hover:
for item in self.items:
if item.hover and item.item_type == 'BEZIER_BOX':
status = item.bezier_box_reset_rotation(
[pos[0]+self.scale_pos_offset[0], pos[1]+self.scale_pos_offset[1]], arguments)
hov_status = True
break
return hov_status, status
def bezier_box_confirm_sharpness(self):
hov_status = False
if self.hover:
for item in self.items:
if item.hover and item.item_type == 'BEZIER_BOX':
item.bezier_box_confirm_sharpness()
hov_status = True
break
return hov_status
def bezier_box_confirm_rotation(self):
hov_status = False
if self.hover:
for item in self.items:
if item.hover and item.item_type == 'BEZIER_BOX':
item.bezier_box_confirm_rotation()
hov_status = True
break
return hov_status
def bezier_box_select_points(self, status):
hov_status = False
if self.hover:
for item in self.items:
if item.hover and item.item_type == 'BEZIER_BOX':
item.bezier_box_select_points(status)
hov_status = True
break
return hov_status
#
def set_scale(self, scale):
super().set_scale(scale)
for item in self.items:
item.set_scale(scale)
return
def set_button_bool(self, status, custom_id_filter=None):
for item in self.items:
if item.item_type == 'BUTTON':
if custom_id_filter != None:
if item.custom_id == custom_id_filter:
item.set_bool(status)
else:
item.set_bool(status)
return
def set_color(self, color=None, color_outline=None, color_font=None):
if color_font:
self.color_font = color_font
super().set_color(color=color, color_outline=color_outline)
return
def set_style_color(self, color_item=None, color_hover=None, color_click=None):
if color_item != None:
self.color_item = color_item
if color_hover != None:
self.color_hover = color_hover
if color_click != None:
self.color_click = color_click
return
#
def __str__(self):
return 'CUI Row Container'
|
150291
|
import logging
import time
from queue import Queue
from typing import Dict, Optional, TYPE_CHECKING
from manta_lab.base.packet import RequestPacket
from .pusher import FilePusher, RecordPusher
from .thread import InternalManager, InternalManagerThread
if TYPE_CHECKING:
from threading import Event
from manta_lab import Settings
from manta_lab.api import MantaAPI
from manta_lab.base.packet import Packet
from manta_lab.sdk.manta_run import Run
logger = logging.getLogger(__name__)
class SendManager(InternalManager):
_api: "MantaAPI"
_settings: "Settings"
_record_q: "Queue[Packet]"
_result_q: "Queue[Packet]"
_run: "Optional[Run]"
_entity: "Optional[str]"
_project: "Optional[str]"
_file_pusher: "Optional[FilePusher]"
_record_pusher: "Optional[RecordPusher]"
def __init__(
self,
api: "MantaAPI",
settings: "Settings",
record_q: "Queue[Packet]",
result_q: "Queue[Packet]",
) -> None:
self._api = api
self._settings = settings
self._record_q = record_q
self._result_q = result_q
self._file_pusher = None
self._record_pusher = None
self._exit_code = 0
self._start_pusher_threads()
def process(self, record: "Packet") -> None:
assert record.key is not None
record_type = record.key
if isinstance(record, RequestPacket):
send_func = getattr(self, f"_send_{record_type}_request", None)
else:
send_func = getattr(self, f"_send_{record_type}", None)
if record_type != "console":
logger.debug("send: {}".format(record_type))
        assert send_func, f"Can't find an appropriate send function for [{record_type}]"
send_func(record)
def finish(self) -> None:
logger.info("shutting down sender")
if self._file_pusher:
self._file_pusher.finish()
self._file_pusher = None
if self._record_pusher:
self._record_pusher.finish()
self._record_pusher = None
def debounce(self) -> None:
pass
def _start_pusher_threads(self) -> None:
self._file_pusher = FilePusher(api=self._api)
self._file_pusher.start()
self._record_pusher = RecordPusher(api=self._api)
self._record_pusher.start()
def _send_run(self, packet: "Packet"):
self._run = packet.run
# TODO: self._start_pusher_threads() here or __init__?
# FIXME: timestamp values are injected. want to change here
def _send_history(self, packet: "Packet"):
history = packet.history.as_dict()["item"] # TODO: item pops
history["_timestamp"] = time.time() * 1000
self._record_pusher.push("histories", history)
def _send_stats(self, packet: "Packet"):
# self._flatten(row)
# row["_timestamp"] = now
# row["_runtime"] = int(now - self._run.start_time.ToSeconds())
stats = packet.stats.as_dict()["item"] # TODO: item pops
stats["_timestamp"] = time.time() * 1000
self._record_pusher.push("systems", stats)
def _send_console(self, packet: "Packet"):
console = packet.console.as_dict()
console["_timestamp"] = time.time() * 1000
self._record_pusher.push("logs", console)
def _send_meta(self, packet: "Packet"):
meta = packet.meta.as_dict()
self._api.update_run_meta(meta)
def _send_config(self, packet: "Packet"):
config = packet.config.as_dict()
self._api.update_run_config(config)
def _send_summary(self, packet: "Packet"):
summary = packet.summary.as_dict()
self._api.update_run_summary(summary)
def _send_artifact_request(self, packet: "Packet"):
artifact = packet.artifact
self._file_pusher.push(artifact)
def _send_file(self, packet: "Packet"):
pass
class SenderThread(InternalManagerThread):
"""Read records from queue and dispatch to sender routines."""
_record_q: "Queue[Packet]"
_result_q: "Queue[Packet]"
_manager: "SendManager"
def __init__(
self,
api: "MantaAPI",
settings: "Settings",
record_q: "Queue[Packet]",
result_q: "Queue[Packet]",
stopped: "Event",
debounce_interval_ms: "float" = 30000,
) -> None:
super().__init__(
input_record_q=record_q,
result_q=result_q,
stopped=stopped,
debounce_interval_ms=debounce_interval_ms,
)
self.name = "SenderThread"
self._api = api
self._settings = settings
self._record_q = record_q
self._result_q = result_q
def _setup(self) -> None:
self._manager = SendManager(
api=self._api,
settings=self._settings,
record_q=self._record_q,
result_q=self._result_q,
)
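# Hedged sketch (illustrative, not part of manta_lab): how SendManager.process
# resolves its handler from a packet key. A packet with key 'history' routes
# to _send_history; a RequestPacket with key 'artifact' routes to
# _send_artifact_request.
def _resolve_send_func(manager: "SendManager", record: "Packet"):
    suffix = "_request" if isinstance(record, RequestPacket) else ""
    return getattr(manager, f"_send_{record.key}{suffix}", None)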
|
150327
|
from unittest import TestCase, mock
from ...core.error import BaseError
from .. import classloader as test_module
from ..classloader import ClassLoader, ClassNotFoundError, ModuleLoadError
class TestClassLoader(TestCase):
def test_import_loaded(self):
assert ClassLoader.load_module("unittest")
def test_import_local(self):
with mock.patch.object(test_module.sys, "modules", {}):
assert (
ClassLoader.load_module("aries_cloudagent.transport").__name__
== "aries_cloudagent.transport"
)
def test_import_relative(self):
with mock.patch.object(test_module.sys, "modules", {}):
assert (
ClassLoader.load_module("transport", "aries_cloudagent").__name__
== "aries_cloudagent.transport"
)
with mock.patch.object(test_module.sys, "modules", {}):
assert (
ClassLoader.load_module(".transport", "aries_cloudagent").__name__
== "aries_cloudagent.transport"
)
with mock.patch.object(test_module.sys, "modules", {}):
assert (
ClassLoader.load_module(
"..transport", "aries_cloudagent.config"
).__name__
== "aries_cloudagent.transport"
)
def test_import_missing(self):
with mock.patch.object(test_module.sys, "modules", {}):
assert ClassLoader.load_module("aries_cloudagent.not") is None
with mock.patch.object(test_module.sys, "modules", {}):
assert ClassLoader.load_module("aries_cloudagent.not.a-module") is None
with mock.patch.object(test_module.sys, "modules", {}):
assert ClassLoader.load_module("aries_cloudagent", "not.a-module") is None
def test_import_error(self):
with mock.patch.object(
test_module, "import_module", autospec=True
) as import_module, mock.patch.object(test_module.sys, "modules", {}):
import_module.side_effect = ModuleNotFoundError
with self.assertRaises(ModuleLoadError):
ClassLoader.load_module("aries_cloudagent.config")
def test_load_class(self):
assert ClassLoader.load_class("TestCase", "unittest") is TestCase
assert ClassLoader.load_class("unittest.TestCase") is TestCase
def test_load_class_missing(self):
with self.assertRaises(ClassNotFoundError):
# with no default module
assert ClassLoader.load_class("NotAClass")
with self.assertRaises(ClassNotFoundError):
assert ClassLoader.load_class("aries_cloudagent.NotAClass")
with self.assertRaises(ClassNotFoundError):
assert ClassLoader.load_class("not-a-module.NotAClass")
with self.assertRaises(ClassNotFoundError):
# should be a string, not a type
assert ClassLoader.load_class("aries_cloudagent.version.__version__")
def test_load_subclass(self):
assert ClassLoader.load_subclass_of(BaseError, "aries_cloudagent.config.base")
def test_load_subclass_missing(self):
with self.assertRaises(ClassNotFoundError):
assert ClassLoader.load_subclass_of(
TestCase, "aries_cloudagent.config.base"
)
with self.assertRaises(ClassNotFoundError):
assert ClassLoader.load_subclass_of(
TestCase, "aries_cloudagent.not-a-module"
)
def test_scan_packages(self):
pkgs = ClassLoader.scan_subpackages("aries_cloudagent")
assert "aries_cloudagent.transport" in pkgs
pkgs = ClassLoader.scan_subpackages("aries_cloudagent.transport")
assert "aries_cloudagent.transport.inbound" in pkgs
def test_scan_packages_missing(self):
with self.assertRaises(ModuleLoadError):
ClassLoader.scan_subpackages("aries_cloudagent.not-a-module")
|
150349
|
from django.conf import settings
GCM_DEVICE_MODEL = getattr(settings, 'GCM_DEVICE_MODEL', 'gcm.models.Device')
GCM_APIKEY = getattr(settings, 'GCM_APIKEY', None)
GCM_MAX_RECIPIENTS = getattr(settings, 'GCM_MAX_RECIPIENTS', 1000)
|
150350
|
import pandas as pd
import talib
df = pd.read_csv('data/train/EURUSD_H1_2010-2019_train.csv')
close = df['close'].astype('float')
volume = df['volume'].astype('float')
# OBV (On-Balance Volume) takes price and volume series; talib.MA is a moving
# average whose second argument is a timeperiod, not a volume series.
obv = talib.OBV(close, volume)
print(obv)
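# Hedged aside (illustrative, not in the original script): a moving average of
# the close series is computed with an explicit timeperiod instead.
ma30 = talib.MA(close, timeperiod=30)
print(ma30)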
|
150354
|
from django.core.management.base import BaseCommand
from cms.serializers import get_slug_page_serializer
from cms.models import SlugPage
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('pages', nargs='*')
def handle(self, *args, **options):
pages = options['pages']
slug_pages = SlugPage.objects.filter(slug__in=pages) if pages else SlugPage.objects.all()
for slug_page in slug_pages:
serializer_class = get_slug_page_serializer(slug_page)
initial_data = serializer_class().to_representation(slug_page, use_fake=True)
initial_fields = serializer_class().to_internal_value(initial_data)['fields']
kept_fields = {
key: value for key, value in slug_page.fields.items() if key in initial_fields
}
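            # Hedged worked example: if slug_page.fields is {'a': 1, 'zombie': 2}
            # and initial_fields is {'a': 0, 'b': 0}, kept_fields is {'a': 1}
            # and the merged result below is {'a': 1, 'b': 0}.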
slug_page.fields = {**initial_fields, **kept_fields}
slug_page.save()
|
150377
|
import xgboost as xgb
from xgboost.dask import DaskDMatrix
from dask.distributed import Client
from dask.distributed import LocalCluster
from dask import array as da
def main(client):
# generate some random data for demonstration
m = 100000
n = 100
X = da.random.random(size=(m, n), chunks=100)
y = da.random.random(size=(m, ), chunks=100)
# DaskDMatrix acts like normal DMatrix, works as a proxy for local
# DMatrix scatter around workers.
dtrain = DaskDMatrix(client, X, y)
# Use train method from xgboost.dask instead of xgboost. This
# distributed version of train returns a dictionary containing the
# resulting booster and evaluation history obtained from
# evaluation metrics.
output = xgb.dask.train(client,
{'verbosity': 1,
'tree_method': 'hist'},
dtrain,
num_boost_round=4, evals=[(dtrain, 'train')])
bst = output['booster']
history = output['history']
# you can pass output directly into `predict` too.
prediction = xgb.dask.predict(client, bst, dtrain)
print('Evaluation history:', history)
return prediction
if __name__ == '__main__':
# or use other clusters for scaling
with LocalCluster(n_workers=7, threads_per_worker=4) as cluster:
with Client(cluster) as client:
main(client)
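# Hedged usage note (illustrative): xgb.dask.predict returns a dask array, so
# the value returned by main() can be materialized locally with .compute().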
|
150504
|
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
import os
import shutil
from tempfile import TemporaryDirectory
import sys
import re
import json
from widgets.textviewer import *
from urllib.parse import urlparse
import requests
import threading
import webbrowser
import platform
from utilities import *
from widgets.progressbar import ProgressBar
class ApiViewer(QDialog):
logmessage = Signal(str)
def __init__(self, parent=None):
super(ApiViewer,self).__init__(parent)
# Main window
self.mainWindow = parent
self.setWindowTitle("API Viewer")
self.setMinimumWidth(700)
self.setMinimumHeight(600)
# Properties
self.folder = os.path.join(os.path.expanduser("~"), 'Facepager', 'APIs')
self.folderDefault = os.path.join(os.path.expanduser("~"), 'Facepager', 'DefaultAPIs')
self.filesSuffix = ['.oa3.json']
self.lastSelected = None
# Hold the list of loaded ApiDocs,
# indexed by filename
self.apis = {}
# List of top nodes in the view,
# indexed by filename
# deprecated: use self.apis instead
        self.topNodes = {}
self.detailTables = {}
self.detailWidgets = {}
self.allFilesLoaded = False
self.filesDownloaded = False
#layout
layout = QVBoxLayout(self)
self.setLayout(layout)
#loading indicator
self.loadingLock = threading.Lock()
self.loadingIndicator = QLabel('Loading...please wait a second.')
self.loadingIndicator.hide()
layout.addWidget(self.loadingIndicator)
#Middle
central = QSplitter(self)
layout.addWidget(central,1)
#list view
self.itemList = QTreeWidget(self)
self.itemList.setHeaderHidden(True)
self.itemList.setColumnCount(1)
self.itemList.setIndentation(15)
self.itemList.itemSelectionChanged.connect(self.currentChanged)
central.addWidget(self.itemList)
central.setStretchFactor(0, 0)
#detail view
self.detailView=QScrollArea()
self.detailView.setWidgetResizable(True)
self.detailWidget = QWidget()
self.detailWidget.setAutoFillBackground(True)
self.detailWidget.setStyleSheet("background-color: rgb(255,255,255);")
self.detailLayout=QVBoxLayout()
self.detailWidget.setLayout(self.detailLayout)
self.detailView.setWidget(self.detailWidget)
central.addWidget(self.detailView)
central.setStretchFactor(1, 2)
self.detailName = QLabel('')
self.detailName.setWordWrap(True)
self.detailName.setStyleSheet("QLabel {font-size:15pt;font-weight:bold;}")
self.detailLayout.addWidget(self.detailName)
self.detailDescription = TextViewer()
#self.detailDescription .setStyleSheet("QTextViewer {padding-left:0px;}")
self.detailLayout.addWidget(self.detailDescription)
self.detailLayout.addStretch(100)
#buttons
buttons= QHBoxLayout() #QDialogButtonBox()
self.folderButton = QPushButton("")
self.folderButton.setFlat(True)
self.folderButton.setText(self.folder)
self.folderButton.clicked.connect(self.folderClicked)
buttons.addWidget(self.folderButton)
buttons.addStretch()
self.reloadButton=QPushButton('Reload')
self.reloadButton.clicked.connect(self.reloadDocs)
self.reloadButton.setToolTip("Reload all API files.")
buttons.addWidget(self.reloadButton)
self.rejectButton=QPushButton('Close')
self.rejectButton.clicked.connect(self.close)
self.rejectButton.setToolTip("Close the window")
buttons.addWidget(self.rejectButton)
self.applyButton=QPushButton('Apply')
self.applyButton.setDefault(True)
self.applyButton.clicked.connect(self.applyItem)
self.applyButton.setToolTip("Apply the selected path.")
buttons.addWidget(self.applyButton)
layout.addLayout(buttons)
#status bar
#self.statusbar = QStatusBar()
#self.statusbar.insertWidget(0,self.folderButton)
#layout.addWidget(self.statusbar)
def folderClicked(self):
if not os.path.exists(self.folder):
os.makedirs(self.folder)
if platform.system() == "Windows":
webbrowser.open(self.folder)
elif platform.system() == "Darwin":
webbrowser.open('file:///' + self.folder)
else:
webbrowser.open('file:///' + self.folder)
def reloadDocs(self):
self.filesDownloaded = False
self.downloadDefaultFiles()
self.clear()
self.topNodes = {}
self.apis = {}
self.initDocs()
for i in range(0, self.mainWindow.RequestTabs.count()):
tab = self.mainWindow.RequestTabs.widget(i)
tab.reloadDoc()
def showWindow(self):
self.show()
QApplication.processEvents()
# Load files
self.initDocs()
# Select item
if (self.lastSelected is None) or (self.lastSelected not in self.topNodes):
selected = self.itemList.topLevelItem(0)
else:
selected = self.topNodes.get(self.lastSelected)
self.itemList.setCurrentItem(selected)
self.itemList.setFocus()
#self.applyButton.setDefault(True)
self.raise_()
def showDoc(self, module, basepath, path, field = None):
# Show
self.show()
QApplication.processEvents()
self.initDocs()
# Find file / module / api
selectedItem = self.getApiNode(module, basepath, path)
self.itemList.setCurrentItem(selectedItem)
self.itemList.setFocus()
# Focus field
if field is not None:
params = self.detailWidgets.get('Response',{})
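            # Hedged worked example: for field 'data.user.name' the loop tries
            # 'data.user.name', then 'data.user', then 'data', stopping at the
            # first key present in the Response widgets.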
            while (field not in params) and (field != ''):
field = field.rsplit('.', 1)
field = field[0] if len(field) > 1 else ''
if field in params:
valuewidget = params.get(field)
valuewidget.setStyleSheet("border: 2px solid blue;font-weight:bold;")
self.detailView.ensureWidgetVisible(valuewidget)
#self.exec_()
def addDetailTable(self, caption):
detailForm=QFormLayout()
        detailForm.setRowWrapPolicy(QFormLayout.DontWrapRows)
        detailForm.setFieldGrowthPolicy(QFormLayout.AllNonFixedFieldsGrow)
        detailForm.setFormAlignment(Qt.AlignLeft | Qt.AlignTop)
        detailForm.setLabelAlignment(Qt.AlignLeft)
self.detailLayout.insertLayout(self.detailLayout.count()-1, detailForm,1)
self.detailTables[caption] = detailForm
caption = QLabel(caption)
caption.setWordWrap(True)
caption.setStyleSheet("QLabel {font-size:12pt;margin-top:1em;margin-bottom:0.5em;font-weight:bold;}")
detailForm.addRow(caption)
def addDetailText(self,value):
detailCaption, detailForm = list(self.detailTables.items())[-1]
caption = QLabel(value)
caption.setStyleSheet("QLabel {padding-left:0.4em;}")
caption.setWordWrap(True)
detailForm.addRow(caption)
def addDetailRow(self,name,value):
detailCaption, detailForm = list(self.detailTables.items())[-1]
nameWidget = QLabel(name)
nameWidget.setWordWrap(True)
nameWidget.setStyleSheet("QLabel {padding-left:0.4em;}")
valueWidget = TextViewer()
valueWidget.setText(value)
detailForm.addRow(nameWidget,valueWidget)
if not detailCaption in self.detailWidgets:
self.detailWidgets[detailCaption] = {}
self.detailWidgets[detailCaption][name] = nameWidget
def currentChanged(self):
self.clearDetails()
current = self.itemList.currentItem()
if current and current.isSelected():
data = current.data(0,Qt.UserRole)
self.lastSelected = os.path.join(data.get('folder',''),data.get('filename',''))
# Caption
if data.get('type', '') == 'file':
title = getDictValue(data, 'info.title')
self.detailName.setText(title)
# Description
self.detailDescription.setText(getDictValue(data,'info.description'))
# Info
self.addDetailTable('Paths')
self.addDetailRow('Documentation: ',getDictValue(data, 'info.externalDocs.url'))
self.addDetailRow('Base path: ', getDictValue(data, 'info.servers.0.url'))
elif data.get('type', '') == 'path':
title = getDictValue(data, 'info.title') + " " + data['path']
self.detailName.setText(title)
operation = getDictValue(data, 'operations.get', False)
if operation:
# Description
self.detailDescription.setText(getDictValue(operation, 'summary'))
# Info
self.addDetailTable('Paths')
self.addDetailRow('Documentation: ', getDictValue(operation, 'externalDocs.url'))
self.addDetailRow('Base path: ', getDictValue(data, 'info.servers.0.url'))
self.addDetailRow('Resource path: ', getDictValue(data, 'path'))
# Parameters
params = operation.get('parameters',{})
if params:
self.addDetailTable('Parameters')
for param in params:
paramname = param.get('name')
if param.get('in','query') == 'path':
paramname = '<'+paramname+'>'
# Description
                            description = param.get('description') or ''
# Options
schema = param.get('schema',{})
items = schema.get('items', {}) if schema.get('type') == 'array' else schema
enum = items.get('enum', [])
oneof = [value.get('const', '') for value in items.get('oneOf', [])]
                            boolopts = ['0', '1'] if items.get('type') == 'boolean' else []
                            options = ",".join(enum + oneof + boolopts)
if options != '':
description += "\n\nOptions: "+options
self.addDetailRow(paramname, description)
# Response
self.addDetailTable('Response')
self.addDetailText(getDictValue(operation, 'responses.200.description', ''))
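                    # Recursively add one detail row per schema property, keyed as dotted paths;
                    # arrays descend into their 'items' schema under a '*.' prefix.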
def addDetailProperties(schema, key = ''):
if not isinstance(schema, dict):
return False
if schema.get('type', None) == 'object':
properties = schema.get('properties',None)
if isinstance(properties, dict):
ref = properties.get("$ref",None)
if ref is not None:
properties = self.getSchemaComponent(data, ref)
if isinstance(properties, dict):
for name, value in properties.items():
if not isinstance(value,dict):
return False
self.addDetailRow(key + name, value.get('description', ''))
if value.get("type",None) == "object":
addDetailProperties(value,key+name+".")
elif value.get("type",None) == "array":
addDetailProperties(value,key+name+".")
elif schema.get('type', None) == 'array':
items = schema.get('items',{})
addDetailProperties(items, key+'*.')
schema = getDictValue(operation, 'responses.200.content.application/json.schema', None)
addDetailProperties(schema)
self.detailWidget.show()
def clearDetails(self):
self.detailWidget.hide()
self.detailName.setText("")
self.detailDescription.setText("")
for detailCaption,detailForm in self.detailTables.items():
while detailForm.rowCount() > 0:
detailForm.removeRow(0)
self.detailLayout.removeItem(detailForm)
self.detailTables = {}
self.detailWidgets = {}
def clear(self):
self.allFilesLoaded=False
self.itemList.clear()
self.clearDetails()
def checkDefaultFiles(self):
if not os.path.exists(self.folderDefault):
self.reloadDocs()
elif len(os.listdir(self.folderDefault)) == 0:
self.reloadDocs()
def downloadDefaultFiles(self,silent=False):
with self.loadingLock:
if self.filesDownloaded:
return False
# Progress
progress = ProgressBar("Downloading default API definitions from GitHub...", self) if not silent else None
QApplication.processEvents()
# Create temporary download folder
tmp = TemporaryDirectory(suffix='FacepagerDefaultAPIs')
try:
#Download
files = requests.get("https://api.github.com/repos/strohne/Facepager/contents/apis").json()
files = [f['path'] for f in files if f['path'].endswith(tuple(self.filesSuffix))]
if progress is not None:
progress.setMaximum(len(files))
for filename in files:
response = requests.get("https://raw.githubusercontent.com/strohne/Facepager/master/"+filename)
if response.status_code != 200:
raise(f"GitHub is not available (status code {response.status_code})")
with open(os.path.join(tmp.name, os.path.basename(filename)), 'wb') as f:
f.write(response.content)
if progress is not None:
progress.step()
# Create folder
if not os.path.exists(self.folderDefault):
os.makedirs(self.folderDefault)
# Clear folder
for filename in os.listdir(self.folderDefault):
os.remove(os.path.join(self.folderDefault, filename))
# Move files from tempfolder
for filename in os.listdir(tmp.name):
shutil.move(os.path.join(tmp.name, filename), self.folderDefault)
self.logmessage.emit("Default API definitions downloaded from GitHub.")
except Exception as e:
if not silent:
QMessageBox.information(self,"Facepager","Error downloading default API definitions:"+str(e))
self.logmessage.emit("Error downloading default API definitions:"+str(e))
return False
else:
self.filesDownloaded = True
return True
finally:
tmp.cleanup()
if progress is not None:
progress.close()
def initDocs(self):
if self.allFilesLoaded:
return False
self.loadingIndicator.show()
QApplication.processEvents()
try:
#self.downloadDefaultFiles()
if os.path.exists(self.folderDefault):
files = [f for f in os.listdir(self.folderDefault) if f.endswith(tuple(self.filesSuffix))]
for filename in files:
self.loadFile(self.folderDefault, filename, True)
if os.path.exists(self.folder):
files = [f for f in os.listdir(self.folder) if f.endswith(tuple(self.filesSuffix))]
for filename in files:
self.loadFile(self.folder, filename)
self.itemList.sortItems(0, Qt.AscendingOrder)
self.allFilesLoaded = True
        except Exception:
            return False
finally:
self.loadingIndicator.hide()
return True
def loadFiles(self,folder, module, default=False):
# self.downloadDefaultFiles(True)
# Create folders
if not os.path.exists(folder):
os.makedirs(folder)
module = module.replace(" ", "")
for filename in os.listdir(folder):
            if filename.startswith(module) and filename.endswith(tuple(self.filesSuffix)):
self.loadFile(folder, filename, default)
def loadFile(self, folder, filename, default=False):
if os.path.join(folder, filename) in self.topNodes:
return self.topNodes[os.path.join(folder, filename)]
if not os.path.isfile(os.path.join(folder, filename)):
return None
try:
            with open(os.path.join(folder, filename), 'r', encoding="utf-8") as infile:
                data = json.load(infile)
if not isinstance(data,dict):
return None
data['x-facepager-default'] = default
self.apis[os.path.join(folder, filename)] = data
# Prepare node
            itemData = {k: v for (k, v) in data.items() if k.startswith('x-facepager-')}
itemData['type'] = 'file'
itemData['filename'] = filename
itemData['folder'] = folder
itemData['default'] = default
itemData['info'] = data.get('info',{})
itemData['info']['externalDocs'] = data.get('externalDocs',{})
itemData['info']['servers'] = data.get('servers', [])
itemData['module'] = data.get("x-facepager-module", "Generic")
# Root node in the view
if default:
itemData['caption'] = itemData['info'].get('title', '') +" *"
else:
itemData['caption'] = itemData['info'].get('title', '')
topItem = ApiWidgetItem()
topItem.setText(0,itemData['caption'])
ft = topItem.font(0)
ft.setWeight(QFont.Bold)
topItem.setFont(0,ft)
if default:
topItem.setForeground(0, QBrush(QColor("darkblue")))
topItem.setData(0,Qt.UserRole,itemData)
self.itemList.addTopLevelItem(topItem)
self.topNodes[os.path.join(folder, filename)] = topItem
# Path nodes
for path,operations in data.get('paths',{}).items():
path = path.replace("{", "<").replace("}", ">")
pathItemData = itemData.copy()
pathItemData['type'] = 'path'
pathItemData['caption'] = path
pathItemData['path'] = path
pathItemData['operations'] = operations
pathItemData['components'] = data.get('components',{})
newItem = ApiWidgetItem()
newItem.setText(0,path)
newItem.setData(0,Qt.UserRole, pathItemData)
topItem.addChild(newItem)
QApplication.processEvents()
return topItem
except Exception as e:
QMessageBox.information(self,"Facepager","Error loading items:"+str(e))
return None
def getApiBasePaths(self, module):
urls = []
try:
# Load files
self.loadFiles(self.folder, module)
self.loadFiles(self.folderDefault, module)
# Extract urls
for k,v in self.apis.items():
api_module = getDictValue(v,'x-facepager-module','Generic')
if (api_module == module):
api_urls = getDictValue(v, 'servers.*.url', [])
urls.extend(api_urls)
urls= list(set(urls))
except Exception as e:
self.logmessage(f"Error loading base paths: {str(e)}")
return urls
def getApiDoc(self, module, basepath = ''):
try:
# Documentation
self.loadFiles(self.folder, module)
self.loadFiles(self.folderDefault, module, True)
# Get best match based on module and basepath
api = None
for k,v in self.apis.items():
api_module = getDictValue(v,'x-facepager-module','Generic')
api_urls = getDictValue(v,'servers.*.url',[])
api_default = getDictValue(v,'x-facepager-default',False)
# Prio 1: user defined docs matching module and basepath
if (api_module == module) and (basepath in api_urls) and (not api_default):
api = v
break
# Prio 2: default docs matching module and basepath
elif (api_module == module) and (basepath in api_urls):
api = v
# Prio 3: docs matching module
elif (api_module == module):
api = v if api is None else api
return api
except:
return None
def getApiNode(self, module, basepath, path):
node = None
for idx_file in range(self.itemList.topLevelItemCount()):
topItem = self.itemList.topLevelItem(idx_file)
topData = topItem.data(0, Qt.UserRole)
# Find path
# TODO: prioritize non default apis
# TODO: fuzzy match (module matches, basepath is similar)
api_module = topData.get('module', 'Generic')
api_urls = getDictValue(topData, 'info.servers.*.url', [])
api_default = topData['default']
if (api_module == module) and (basepath in api_urls):
node = topItem
for idx_path in range(topItem.childCount()):
pathItem = topItem.child(idx_path)
pathData = pathItem.data(0, Qt.UserRole)
if pathData.get('path', None) == path:
return pathItem
return node
def getApiField(self, module = '', basepath = '', path='', field=''):
try:
data = self.getApiDoc(module, basepath)
if data is not None:
                basepath = getDictValue(data, "servers.0.url")
                paths = data.get('paths', {})
# Operation response
path = path.replace("<", "{").replace(">", "}")
if path in paths:
operation = paths.get(path)
elif path.replace(basepath,"") in paths:
operation = paths.get(path.replace(basepath,""))
else:
operation = None
operation = getDictValue(operation,"get.responses.200",False) if operation is not None else {}
            # No specific field requested: return the overall response description
if field is None and operation is not None and isinstance(operation, dict):
return operation.get('description',None)
# Field
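            # Walk the response schema along the dotted field path; a '*' segment steps into array 'items'.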
def findFieldProperties(key, schema):
if not isinstance(schema, dict):
return schema
keys = key.split('.', 1)
if keys[0] == '':
return schema
if schema.get('type', None) == 'object':
properties = schema.get('properties', None)
if isinstance(properties, dict):
ref = properties.get("$ref", None)
if ref is not None:
properties = self.getSchemaComponent(data, ref)
if isinstance(properties, dict):
value = properties.get(keys[0],{})
if len(keys) == 1:
return value
else:
return findFieldProperties(keys[1], value)
elif (schema.get('type', None) == 'array') and (keys[0] == '*'):
value = schema.get('items', {})
if len(keys) == 1:
return value
else:
return findFieldProperties(keys[1], value)
return schema
schema = getDictValue(operation, 'content.application/json.schema', None)
fieldprops = findFieldProperties(field, schema)
if fieldprops is not None:
return fieldprops.get('description','')
# response = getDictValue(operation, 'content.application/json.schema.properties', False)
# if not response:
# response = getDictValue(operation, 'content.application/json.schema.items.properties', False)
#
# if response and isinstance(response, dict):
# if not field in response:
# parts = field.split(".")
# field = parts[0] if len(parts) > 0 else None
#
# if field is not None and field in response:
# return response.get(field).get('description')
return None
except:
return None
def getSchemaComponent(self, data, key):
# eg "#components/schema/user/properties
key = key.replace("#", "").replace("/", ".")
return getDictValue(data, key, False)
def applyItem(self):
if not self.itemList.currentItem():
return False
# Find API module
data = self.itemList.currentItem().data(0,Qt.UserRole)
module = data.get('module', None)
if module is None:
return False
tab = self.mainWindow.getModule(module)
if tab is not None:
path = data.get('path', '')
options = {
'basepath' : getDictValue(data, 'info.servers.0.url',''),
'resource' : path
}
# Will pull the default settings from the API doc
tab.setSettings(options)
self.mainWindow.RequestTabs.setCurrentWidget(tab)
self.close()
class ApiWidgetItem(QTreeWidgetItem):
def __lt__(self, other):
data1 = self.data(0,Qt.UserRole)
data2 = other.data(0,Qt.UserRole)
if data1.get('iscategory',False) and data2.get('iscategory',False):
return data1.get('name','') < data2.get('name','')
elif data1.get('default',False) != data2.get('default',False):
return data1.get('default',False)
else:
return data1.get('name','') < data2.get('name','')
|
150522
|
import os
import logging
import boto3
from botocore.exceptions import ClientError
logger = logging.getLogger()
logger.setLevel(logging.INFO)
cp = boto3.client("codepipeline")
def lambda_handler(event, context):
logger.debug("## Environment Variables ##")
logger.debug(os.environ)
logger.debug("## Event ##")
logger.debug(event)
pipeline_name = os.environ["PIPELINE_NAME"]
logger.info(f"Starting Coninuous Training release change for {pipeline_name}")
try:
response = cp.start_pipeline_execution(
name=pipeline_name
)
logger.info(f'Release Change ExecutionId: {response["pipelineExecutionId"]}')
except ClientError as e:
error_message = e.response["Error"]["Message"]
logger.error(error_message)
raise Exception(error_message)
return {
"statusCode": 200,
"ExecutionId": response["pipelineExecutionId"]
}
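
# Example (hypothetical) wiring: deploy with the PIPELINE_NAME environment variable set
# (e.g. PIPELINE_NAME=my-training-pipeline) and trigger the handler from an EventBridge
# schedule; a successful invocation returns {"statusCode": 200, "ExecutionId": "<execution id>"}.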
|
150601
|
import os
from leapp.libraries.common.config import architecture
from leapp.libraries.stdlib import api
from leapp.models import CopyFile, TargetUserSpaceUpgradeTasks, UpgradeInitramfsTasks
DASD_CONF = '/etc/dasd.conf'
def process():
if not architecture.matches_architecture(architecture.ARCH_S390X):
return
if os.path.isfile(DASD_CONF):
# the file has to be copied into the targetuserspace container first,
# then it can be included into the initramfs ==> both messages are
# needed to be produced
api.produce(TargetUserSpaceUpgradeTasks(copy_files=[CopyFile(src=DASD_CONF)]))
api.produce(UpgradeInitramfsTasks(include_files=[DASD_CONF]))
else:
api.current_logger().warning(
"The {} file has not been discovered. DASD not used?"
.format(DASD_CONF)
)
|
150666
|
import argparse
import glob
import math
import os
import pdb
import pickle
import random
import shutil
import sys
import time
import warnings
from copy import deepcopy
from random import randint

import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
from tqdm import tqdm, trange

sys.path.append(os.path.join(os.getcwd(), 'dataset'))

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch.optim import lr_scheduler
from torch.utils.data import Dataset, DataLoader, RandomSampler
from torchsummary import summary

from models import resnext
from model import generate_model
from opts import parse_opts
import utils
from spatial_transforms import *
from temporal_transforms import *
import dataset_class

# os.environ['CUDA_VISIBLE_DEVICES']='3'
warnings.filterwarnings("ignore")
annot_dir = './dataset'
save_dir = 'output_task0'
model_test_dir = 'models/task0_model'
args = parse_opts()
os.environ['CUDA_VISIBLE_DEVICES']=args.cuda_id
device = 'cuda:0'
if isinstance(args.cuda_id, list):
device_ids = [i for i in eval(args.cuda_id)]
else:
device_ids = [eval(args.cuda_id)]
if not os.path.exists(save_dir):
os.makedirs(save_dir)
def forward(model, data):
rgbs, depths, labels = data
if args.modality == 'RGB':
inputs = rgbs.to(device, non_blocking=True).float()
elif args.modality == 'Depth':
inputs = depths.to(device, non_blocking=True).float()
elif args.modality == 'RGB-D':
inputs = torch.cat((rgbs, depths), 1).to(device, non_blocking=True).float()
probs, logits = model(inputs)
labels = labels.to(device, non_blocking=True).long()
return probs, logits, labels
def model_test(model, save_dir, filename, dataloader, num_class):
model.module.fc = nn.Linear(model.module.fc.in_features, num_class)
model.module.fc.to(device)
checkpoint = utils.load_checkpoint(save_dir, filename)
# model = checkpoint['model']
model.load_state_dict(checkpoint['state_dict'])
model.eval()
print('Evaluating for model {}........'.format(filename))
acc = utils.AverageMeter()
for data in tqdm(dataloader):
probs, logits, labels = forward(model, data)
acc.update(utils.calculate_accuracy(probs, labels))
print('val_acc:{:.3f}'.format(acc.avg))
def model_train(model, save_dir, dataloader_train, dataloader_val):
model.train()
num_epochs = 50
criterion = nn.CrossEntropyLoss()
# determine optimizer
fc_lr_layers = list(map(id, model.module.fc.parameters()))
pretrained_lr_layers = [p for p in model.parameters()
                            if id(p) not in fc_lr_layers and p.requires_grad]
# pretrained_lr_layers = filter(lambda p:
# id(p) not in fc_lr_layers, model.parameters())
# optimizer = torch.optim.SGD([
# {"params": model.module.fc.parameters()},
# {"params": pretrained_lr_layers, "lr": 1e-4, 'weight_decay':1e-3}
# ], lr=1e-3, momentum=0.9, weight_decay=1e-3)
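    # NOTE: the per-group optimizer sketched above is kept for reference; a single SGD
    # group over all parameters (one shared lr) is what is actually used below.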
learning_rate = 1e-3
# lr_steps = [10,15,20]
lr_steps = [25]
optimizer = torch.optim.SGD(model.parameters(),lr=learning_rate, momentum=0.9, weight_decay=1e-3)
train_logger = utils.Logger(os.path.join(save_dir, '{}-{}-{}.log'.format(args.arch, args.n_frames_per_clip, args.modality)),
['step', 'train_loss', 'train_acc', 'val_loss', 'val_acc',
'lr_feature', 'lr_fc'])
# scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
train_loss = utils.AverageMeter()
train_acc = utils.AverageMeter()
val_loss = utils.AverageMeter()
val_acc = utils.AverageMeter()
step = 0
for epoch in trange(num_epochs): # loop over the dataset multiple times
train_loss.reset()
train_acc.reset()
for data in dataloader_train:
probs, outputs, labels = forward(model, data)
optimizer.zero_grad()
loss_ = criterion(outputs, labels)
loss_.backward()
optimizer.step()
train_loss.update(loss_.item())
train_acc.update(utils.calculate_accuracy(probs, labels))
if step % 100 == 0:
val_loss.reset()
val_acc.reset()
model.eval()
for data_val in dataloader_val:
probs_val, outputs_val, labels_val = forward(model, data_val)
val_loss_ = criterion(outputs_val, labels_val)
val_loss.update(val_loss_.item())
val_acc.update(utils.calculate_accuracy(probs_val, labels_val))
model.train()
print('epoch{}/{} train_acc:{:.3f} train_loss:{:.3f} val_acc:{:.3f} val_loss:{:.3f}'.format(
epoch + 1, num_epochs,
train_acc.val, train_loss.val,
val_acc.avg, val_loss.avg
))
train_logger.log({
'step': step,
'train_loss': train_loss.val,
'train_acc': train_acc.val,
'val_loss': val_loss.avg,
'val_acc': val_acc.avg,
# 'lr_feature': optimizer.param_groups[1]['lr'],
'lr_feature': 0,
'lr_fc': optimizer.param_groups[0]['lr']
})
step += 1
utils.save_checkpoint(model, optimizer, step, save_dir,
'{}-{}-{}.pth'.format(args.arch, args.n_frames_per_clip, args.modality))
# scheduler.step()
utils.adjust_learning_rate(learning_rate, optimizer, epoch, lr_steps)
if __name__ == '__main__':
    # keep shuffling constant across runs
seed = 1
torch.manual_seed(seed)
# torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
# activitynet mean value
mean = [114.7, 107.7, 99.4]
# std = [38.7568578 / 255, 37.88248729 / 255, 40.02898126 / 255]
# kinetics
# [110.6, 103.1, 96.2]
# norm_method1 = Normalize(mean, std)
norm_method = Normalize(mean, [1, 1, 1])
# scales = [1, 1/(math.pow(2, .25)), 1/(math.pow(2, .75)), 1/2]
scales = [args.initial_scale]
for i in range(1, args.n_scales):
scales.append(scales[-1] * args.scale_step)
trans_train = Compose([
Scale([112,112]),
MultiScaleRandomCrop(scales, [112,112]),
SpatialElasticDisplacement(),
# RandomHorizontalFlip(),
ToTensor(1), norm_method
])
temporal_transform_train = Compose([
TemporalRandomCrop(args.n_frames_per_clip)
])
trans_test = Compose([
Scale([112,112]),
CenterCrop([112, 112]),
ToTensor(1), norm_method
])
temporal_transform_test = Compose([
TemporalCenterCrop(args.n_frames_per_clip)
])
# load dataset
if args.is_train:
print('Loading training data.....')
class_id1 = [i for i in range(1, 41)]
dataset_train = dataset_class.dataset_video_class(annot_dir, 'train_plus_val',
class_id = class_id1,
n_frames_per_clip=args.n_frames_per_clip,
img_size=(args.w, args.h),
reverse=False, transform=trans_train,
temporal_transform = temporal_transform_train)
dataloader_train = DataLoader(dataset_train, batch_size=args.batch_size,
shuffle=True,
num_workers=args.num_workers, pin_memory=True)
print('\n')
print('Loading validating data.....')
dataset_val = dataset_class.dataset_video_class(annot_dir, 'test',
class_id = class_id1,
n_frames_per_clip=args.n_frames_per_clip,
img_size=(args.w, args.h),
reverse=False, transform=trans_test,
temporal_transform = temporal_transform_test)
dataloader_val = DataLoader(dataset_val, batch_size=args.batch_size_val,
num_workers=args.num_workers,pin_memory=True)
else:
print('Loading testing data.....')
class_id1 = [i for i in range(1, 41)]
dataset_test = dataset_class.dataset_video_class(annot_dir, 'test',
class_id = class_id1,
n_frames_per_clip=args.n_frames_per_clip,
img_size=(args.w, args.h),
reverse=False, transform=trans_test,
temporal_transform = temporal_transform_test)
dataloader_test = DataLoader(dataset_test, batch_size=args.batch_size_val,
num_workers=args.num_workers,pin_memory=True)
model, parameters = generate_model(args)
model.to(device)
if args.is_train:
if args.modality == 'RGB':
summary(model, (3,args.n_frames_per_clip,112,112))
elif args.modality == 'Depth':
summary(model, (1,args.n_frames_per_clip,112,112))
elif args.modality == 'RGB-D':
summary(model, (4,args.n_frames_per_clip,112,112))
model_train(model, save_dir, dataloader_train, dataloader_val)
pdb.set_trace()
else:
model_test(model, model_test_dir, '{}-{}-{}.pth'.format(args.arch, args.n_frames_per_clip, args.modality), dataloader_test, args.n_finetune_classes)
pdb.set_trace()
|
150674
|
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:'
}
}
SECRET_KEY = '<PASSWORD>'
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'nonrelated_inlines.tests.testapp'
]
ROOT_URLCONF = 'nonrelated_inlines.tests.urls'
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware'
]
TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}]
|
150698
|
import datetime
import random
from nonebot import CommandGroup, logger
from nonebot.plugin.export import export
from nonebot.typing import T_State
from nonebot.adapters.cqhttp.bot import Bot
from nonebot.adapters.cqhttp.event import GroupMessageEvent
from nonebot.adapters.cqhttp.permission import GROUP
from nonebot.adapters.cqhttp.message import Message, MessageSegment
from omega_miya.utils.omega_plugin_utils import init_export, init_processor_state
from .utils import maybe, sp, sp_event
# from ._oldalmanac import old_almanac
# Custom plugin usage text
__plugin_custom_name__ = '求签'
__plugin_usage__ = r'''【求签 (Fortune Draw)】
Draw a daily fortune for anything, including but not limited to gacha pulls, meals, sleeping in, and DD-ing.
Everyone who asks about the same thing on the same day gets the same result!
No superstition-defying redraws!
Group chat only
**Permission**
Command & Lv.10
or AuthNode
**AuthNode**
basic
**Usage**
/求签 [the matter to ask about]
/帮我选 [option1 option2 ...]'''
# Init plugin export
init_export(export(), __plugin_custom_name__, __plugin_usage__)
# Register the event responders
Maybe = CommandGroup(
'maybe',
    # Use a run_preprocessor to intercept permission checks; plugin permissions are initialized in default_state
state=init_processor_state(
name='maybe',
command=True,
level=10),
permission=GROUP,
priority=10,
block=True)
luck = Maybe.command('luck', aliases={'求签'})
# Override the default argument parser
@luck.args_parser
async def parse(bot: Bot, event: GroupMessageEvent, state: T_State):
args = str(event.get_plaintext()).strip().split()
if not args:
        await luck.reject('You do not seem to have sent a valid argument QAQ, please resend:')
state[state["_current_key"]] = args[0]
    if state[state["_current_key"]] == '取消':  # user typed "cancel"
        await luck.finish('Operation cancelled')
@luck.handle()
async def handle_first_receive(bot: Bot, event: GroupMessageEvent, state: T_State):
args = str(event.get_plaintext()).strip().split()
if not args:
pass
elif args and len(args) == 1:
state['draw'] = args[0]
else:
        await luck.finish('Invalid arguments QAQ')
@luck.got('draw', prompt='What would you like to ask about?')
async def handle_luck(bot: Bot, event: GroupMessageEvent, state: T_State):
user_id = event.user_id
_draw = state['draw']
    # Use the asker's group card as the display name, falling back to their nickname
draw_user = event.sender.card
if not draw_user:
draw_user = event.sender.nickname
    # Check for special events
if _draw in sp.keys():
draw_result = sp_event(_draw)
else:
draw_result = maybe(draw=_draw, user_id=user_id)
    # Send the result back to the user
    today = datetime.date.today().strftime('%Y-%m-%d')
    msg = f'Today is {today}\n{draw_user}{draw_result}'
    logger.info(f'{event.group_id}/{event.user_id} performed a fortune draw')
await luck.finish(msg)
help_choice = Maybe.command('choice', aliases={'帮我选', '选择困难症'})
# Override the default argument parser
@help_choice.args_parser
async def parse(bot: Bot, event: GroupMessageEvent, state: T_State):
args = str(event.get_plaintext()).strip()
if not args:
        await help_choice.reject('You do not seem to have sent a valid argument QAQ, please resend:')
state[state["_current_key"]] = args
    if state[state["_current_key"]] == '取消':  # user typed "cancel"
        await help_choice.finish('Operation cancelled')
@help_choice.handle()
async def handle_first_receive(bot: Bot, event: GroupMessageEvent, state: T_State):
args = str(event.get_plaintext()).strip()
if not args:
pass
else:
state['choices'] = args
@help_choice.got('choices', prompt="What are the options? Send them over and I'll pick one for you~")
async def handle_help_choice(bot: Bot, event: GroupMessageEvent, state: T_State):
choices = state['choices']
result = random.choice(str(choices).split())
    result_text = f'''I picked from "{'", "'.join(str(choices).split())}" for you:\n\n"{result}"'''
msg = Message(MessageSegment.at(user_id=event.user_id)).append(result_text)
await help_choice.finish(msg)
# almanac = Maybe.command('almanac', aliases={'DD老黄历', 'dd老黄历'})
#
#
# @almanac.handle()
# async def handle_first_receive(bot: Bot, event: GroupMessageEvent, state: T_State):
# args = str(event.get_plaintext()).strip().lower().split()
# if not args:
# pass
# else:
# await almanac.finish('ๅๆฐ้่ฏฏQAQ')
#
# user_id = event.user_id
#
#     # Use the asker's group card as the display name, falling back to their nickname
# draw_user = event.sender.card
# if not draw_user:
# draw_user = event.sender.nickname
#
# draw_result = old_almanac(user_id=user_id)
#
#     # Send the result back to the user
#     today = datetime.date.today().strftime('%Y-%m-%d')
#     msg = f"Today is {today}\n{draw_user} today:\n{'='*12}\n{draw_result}"
#     logger.info(f'{event.group_id}/{event.user_id} queried the dd almanac')
# await almanac.finish(msg)
|
150728
|
from typing import Optional
from django.utils import timezone
from request_logging.base import append_to_list, retrieve
from request_logging.models import RequestLog
import logging
logger = logging.getLogger("ablator.functionality")
LOK_KEY = "list-of-timestamp-keys"
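# Storage key under which every generated timestamp key is recorded (via the
# append_to_list/retrieve helpers), so request logs can be enumerated later.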
def save_new_timestamp_key(new_key):
append_to_list(LOK_KEY, new_key)
def list_timestamp_keys():
return retrieve(LOK_KEY)
def generate_key_for_id_hourly(func_id: str, timestamp=None):
"""
Generate a string to be used as key for grouping request logs. The string is returned, and it
is stored in the list of existing valid keys.
:param func_id: A functionality id
:param timestamp: a DateTime object for the requested timestamp
:return: A string to be used as key. Contains the year, month, day, and hour of the timestamp
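
    Example (illustrative): for func_id "abc" at 2021-05-01 13:37 the returned key
    is "abc-2021-05-01-13".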
"""
time_stamp_for_key = timestamp if timestamp else timezone.now()
current_key = "{}-{}".format(func_id, time_stamp_for_key.strftime("%Y-%m-%d-%H"))
save_new_timestamp_key(current_key)
return current_key
def save_request_log_entry(
functionality_id: str, flavor_id: Optional[str], action: str, client_user_id: str = None, elapsed_time: float = None
):
current_key = generate_key_for_id_hourly(functionality_id)
timestamp = timezone.now()
f_action = {
"functionality_id": functionality_id,
"flavor_id": flavor_id,
"timestamp": timestamp,
"action": action,
"client_user_id": client_user_id,
"elapsed_time": elapsed_time,
}
append_to_list(current_key, f_action)
logger.info(action, extra=f_action)
def get_request_logs(timestamp_key):
request_log_dicts = retrieve(timestamp_key)
request_logs = [
RequestLog(d["functionality_id"], d["flavor_id"], d["timestamp"], d["action"], d["client_user_id"], d["elapsed_time"])
for d in request_log_dicts
]
return request_logs
def get_request_logs_for_functionality_id(functionality_id):
timestamp_keys = list_timestamp_keys()
logs = {}
for timestamp_key in timestamp_keys:
if functionality_id in timestamp_key:
logs[timestamp_key] = get_request_logs(timestamp_key)
|
150805
|
class Sequence:
transcription_table = {'A':'U', 'T':'A', 'C':'G' , 'G':'C'}
enz_dict = {'EcoRI':'GAATTC', 'EcoRV':'GATATC'}
def __init__(self, seqstring):
self.seqstring = seqstring.upper()
def restriction(self, enz):
try:
enz_target = Sequence.enz_dict[enz]
return self.seqstring.count(enz_target)
except KeyError:
return 0
def transcription(self):
tt = ""
for letter in self.seqstring:
if letter in 'ATCG':
tt += self.transcription_table[letter]
return tt
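
if __name__ == "__main__":
    # Minimal usage sketch; the example sequence below is an assumption, not from the original source.
    seq = Sequence("ggaattc")
    print(seq.restriction("EcoRI"))  # -> 1 (one GAATTC site)
    print(seq.transcription())       # -> CCUUAAG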
|
150808
|
from kratos import *
from lake.attributes.sram_port_attr import SRAMPortAttr
class SRAMStubGenerator(Generator):
##########################
# Generation #
##########################
def __init__(self,
sram_name,
data_width,
width_mult,
depth):
super().__init__(sram_name)
# for provided external sram macro
self.external = True
self.data_width = data_width
self.width_mult = width_mult
self.depth = depth
############################
# Clock and Reset #
############################
self._clk = self.clock("sram_clk")
self._clk.add_attribute(SRAMPortAttr("sram clk"))
############################
# Inputs #
############################
# attribute indicates that all these ports will be renamed to match
# the port names for external sram macro
self._wen = self.input("sram_wen", 1)
self._wen.add_attribute(SRAMPortAttr("sram wen"))
self._cen = self.input("sram_cen", 1)
self._cen.add_attribute(SRAMPortAttr("sram cen"))
self._addr = self.input("sram_addr", clog2(self.depth))
self._addr.add_attribute(SRAMPortAttr("sram addr"))
self._data_in = self.input("sram_data_in",
self.data_width * self.width_mult)
self._data_in.add_attribute(SRAMPortAttr("sram data in"))
############################
# Outputs #
############################
self._data_out = self.output("sram_data_out",
self.data_width * self.width_mult)
self._data_out.add_attribute(SRAMPortAttr("sram data out"))
self._wtsel = self.input("sram_wtsel", 2)
self._wtsel.add_attribute(SRAMPortAttr("sram wtsel"))
self._rtsel = self.input("sram_rtsel", 2)
self._rtsel.add_attribute(SRAMPortAttr("sram rtsel"))
if __name__ == "__main__":
# to see interface, mark self.use_stub = True and self.external = False
dut = SRAMStubGenerator(sram_name="TSMC",
data_width=16,
width_mult=1,
depth=124)
verilog(dut, filename="tsmc_macro.sv")
|
150818
|
import enum
class block(enum.Enum):
"""
block
* acacia_button
* acacia_door
* acacia_fence
* acacia_fence_gate
* acacia_leaves
* acacia_log
* acacia_planks
* acacia_pressure_plate
* acacia_sapling
* acacia_sign
* acacia_slab
* acacia_stairs
* acacia_trapdoor
* acacia_wall_sign
* acacia_wood
* activator_rail
* air
* allium
* amethyst_block
* amethyst_cluster
* ancient_debris
* andesite
* andesite_slab
* andesite_stairs
* andesite_wall
* anvil
* attached_melon_stem
* attached_pumpkin_stem
* azalea
* azalea_leaves
* azure_bluet
* bamboo
* bamboo_sapling
* barrel
* barrier
* basalt
* beacon
* bedrock
* bee_nest
* beehive
* beetroots
* bell
* big_dripleaf
* big_dripleaf_stem
* birch_button
* birch_door
* birch_fence
* birch_fence_gate
* birch_leaves
* birch_log
* birch_planks
* birch_pressure_plate
* birch_sapling
* birch_sign
* birch_slab
* birch_stairs
* birch_trapdoor
* birch_wall_sign
* birch_wood
* black_banner
* black_bed
* black_candle
* black_candle_cake
* black_carpet
* black_concrete
* black_concrete_powder
* black_glazed_terracotta
* black_shulker_box
* black_stained_glass
* black_stained_glass_pane
* black_terracotta
* black_wall_banner
* black_wool
* blackstone
* blackstone_slab
* blackstone_stairs
* blackstone_wall
* blast_furnace
* blue_banner
* blue_bed
* blue_candle
* blue_candle_cake
* blue_carpet
* blue_concrete
* blue_concrete_powder
* blue_glazed_terracotta
* blue_ice
* blue_orchid
* blue_shulker_box
* blue_stained_glass
* blue_stained_glass_pane
* blue_terracotta
* blue_wall_banner
* blue_wool
* bone_block
* bookshelf
* brain_coral
* brain_coral_block
* brain_coral_fan
* brain_coral_wall_fan
* brewing_stand
* brick_slab
* brick_stairs
* brick_wall
* bricks
* brown_banner
* brown_bed
* brown_candle
* brown_candle_cake
* brown_carpet
* brown_concrete
* brown_concrete_powder
* brown_glazed_terracotta
* brown_mushroom
* brown_mushroom_block
* brown_shulker_box
* brown_stained_glass
* brown_stained_glass_pane
* brown_terracotta
* brown_wall_banner
* brown_wool
* bubble_column
* bubble_coral
* bubble_coral_block
* bubble_coral_fan
* bubble_coral_wall_fan
* budding_amethyst
* cactus
* cake
* calcite
* campfire
* candle
* candle_cake
* carrots
* cartography_table
* carved_pumpkin
* cauldron
* cave_air
* cave_vines
* cave_vines_plant
* chain
* chain_command_block
* chest
* chipped_anvil
* chiseled_deepslate
* chiseled_nether_bricks
* chiseled_polished_blackstone
* chiseled_quartz_block
* chiseled_red_sandstone
* chiseled_sandstone
* chiseled_stone_bricks
* chorus_flower
* chorus_plant
* clay
* coal_block
* coal_ore
* coarse_dirt
* cobbled_deepslate
* cobbled_deepslate_slab
* cobbled_deepslate_stairs
* cobbled_deepslate_wall
* cobblestone
* cobblestone_slab
* cobblestone_stairs
* cobblestone_wall
* cobweb
* cocoa
* command_block
* comparator
* composter
* conduit
* copper_block
* copper_ore
* cornflower
* cracked_deepslate_bricks
* cracked_deepslate_tiles
* cracked_nether_bricks
* cracked_polished_blackstone_bricks
* cracked_stone_bricks
* crafting_table
* creeper_head
* creeper_wall_head
* crimson_button
* crimson_door
* crimson_fence
* crimson_fence_gate
* crimson_fungus
* crimson_hyphae
* crimson_nylium
* crimson_planks
* crimson_pressure_plate
* crimson_roots
* crimson_sign
* crimson_slab
* crimson_stairs
* crimson_stem
* crimson_trapdoor
* crimson_wall_sign
* crying_obsidian
* cut_copper
* cut_copper_slab
* cut_copper_stairs
* cut_red_sandstone
* cut_red_sandstone_slab
* cut_sandstone
* cut_sandstone_slab
* cyan_banner
* cyan_bed
* cyan_candle
* cyan_candle_cake
* cyan_carpet
* cyan_concrete
* cyan_concrete_powder
* cyan_glazed_terracotta
* cyan_shulker_box
* cyan_stained_glass
* cyan_stained_glass_pane
* cyan_terracotta
* cyan_wall_banner
* cyan_wool
* damaged_anvil
* dandelion
* dark_oak_button
* dark_oak_door
* dark_oak_fence
* dark_oak_fence_gate
* dark_oak_leaves
* dark_oak_log
* dark_oak_planks
* dark_oak_pressure_plate
* dark_oak_sapling
* dark_oak_sign
* dark_oak_slab
* dark_oak_stairs
* dark_oak_trapdoor
* dark_oak_wall_sign
* dark_oak_wood
* dark_prismarine
* dark_prismarine_slab
* dark_prismarine_stairs
* daylight_detector
* dead_brain_coral
* dead_brain_coral_block
* dead_brain_coral_fan
* dead_brain_coral_wall_fan
* dead_bubble_coral
* dead_bubble_coral_block
* dead_bubble_coral_fan
* dead_bubble_coral_wall_fan
* dead_bush
* dead_fire_coral
* dead_fire_coral_block
* dead_fire_coral_fan
* dead_fire_coral_wall_fan
* dead_horn_coral
* dead_horn_coral_block
* dead_horn_coral_fan
* dead_horn_coral_wall_fan
* dead_tube_coral
* dead_tube_coral_block
* dead_tube_coral_fan
* dead_tube_coral_wall_fan
* deepslate
* deepslate_brick_slab
* deepslate_brick_stairs
* deepslate_brick_wall
* deepslate_bricks
* deepslate_coal_ore
* deepslate_copper_ore
* deepslate_diamond_ore
* deepslate_emerald_ore
* deepslate_gold_ore
* deepslate_iron_ore
* deepslate_lapis_ore
* deepslate_redstone_ore
* deepslate_tile_slab
* deepslate_tile_stairs
* deepslate_tile_wall
* deepslate_tiles
* detector_rail
* diamond_block
* diamond_ore
* diorite
* diorite_slab
* diorite_stairs
* diorite_wall
* dirt
* dirt_path
* dispenser
* dragon_egg
* dragon_head
* dragon_wall_head
* dried_kelp_block
* dripstone_block
* dropper
* emerald_block
* emerald_ore
* enchanting_table
* end_gateway
* end_portal
* end_portal_frame
* end_rod
* end_stone
* end_stone_brick_slab
* end_stone_brick_stairs
* end_stone_brick_wall
* end_stone_bricks
* ender_chest
* exposed_copper
* exposed_cut_copper
* exposed_cut_copper_slab
* exposed_cut_copper_stairs
* farmland
* fern
* fire
* fire_coral
* fire_coral_block
* fire_coral_fan
* fire_coral_wall_fan
* fletching_table
* flower_pot
* flowering_azalea
* flowering_azalea_leaves
* frosted_ice
* furnace
* gilded_blackstone
* glass
* glass_pane
* glow_lichen
* glowstone
* gold_block
* gold_ore
* granite
* granite_slab
* granite_stairs
* granite_wall
* grass
* grass_block
* gravel
* gray_banner
* gray_bed
* gray_candle
* gray_candle_cake
* gray_carpet
* gray_concrete
* gray_concrete_powder
* gray_glazed_terracotta
* gray_shulker_box
* gray_stained_glass
* gray_stained_glass_pane
* gray_terracotta
* gray_wall_banner
* gray_wool
* green_banner
* green_bed
* green_candle
* green_candle_cake
* green_carpet
* green_concrete
* green_concrete_powder
* green_glazed_terracotta
* green_shulker_box
* green_stained_glass
* green_stained_glass_pane
* green_terracotta
* green_wall_banner
* green_wool
* grindstone
* hanging_roots
* hay_block
* heavy_weighted_pressure_plate
* honey_block
* honeycomb_block
* hopper
* horn_coral
* horn_coral_block
* horn_coral_fan
* horn_coral_wall_fan
* ice
* infested_chiseled_stone_bricks
* infested_cobblestone
* infested_cracked_stone_bricks
* infested_deepslate
* infested_mossy_stone_bricks
* infested_stone
* infested_stone_bricks
* iron_bars
* iron_block
* iron_door
* iron_ore
* iron_trapdoor
* jack_o_lantern
* jigsaw
* jukebox
* jungle_button
* jungle_door
* jungle_fence
* jungle_fence_gate
* jungle_leaves
* jungle_log
* jungle_planks
* jungle_pressure_plate
* jungle_sapling
* jungle_sign
* jungle_slab
* jungle_stairs
* jungle_trapdoor
* jungle_wall_sign
* jungle_wood
* kelp
* kelp_plant
* ladder
* lantern
* lapis_block
* lapis_ore
* large_amethyst_bud
* large_fern
* lava
* lava_cauldron
* lectern
* lever
* light
* light_blue_banner
* light_blue_bed
* light_blue_candle
* light_blue_candle_cake
* light_blue_carpet
* light_blue_concrete
* light_blue_concrete_powder
* light_blue_glazed_terracotta
* light_blue_shulker_box
* light_blue_stained_glass
* light_blue_stained_glass_pane
* light_blue_terracotta
* light_blue_wall_banner
* light_blue_wool
* light_gray_banner
* light_gray_bed
* light_gray_candle
* light_gray_candle_cake
* light_gray_carpet
* light_gray_concrete
* light_gray_concrete_powder
* light_gray_glazed_terracotta
* light_gray_shulker_box
* light_gray_stained_glass
* light_gray_stained_glass_pane
* light_gray_terracotta
* light_gray_wall_banner
* light_gray_wool
* light_weighted_pressure_plate
* lightning_rod
* lilac
* lily_of_the_valley
* lily_pad
* lime_banner
* lime_bed
* lime_candle
* lime_candle_cake
* lime_carpet
* lime_concrete
* lime_concrete_powder
* lime_glazed_terracotta
* lime_shulker_box
* lime_stained_glass
* lime_stained_glass_pane
* lime_terracotta
* lime_wall_banner
* lime_wool
* lodestone
* loom
* magenta_banner
* magenta_bed
* magenta_candle
* magenta_candle_cake
* magenta_carpet
* magenta_concrete
* magenta_concrete_powder
* magenta_glazed_terracotta
* magenta_shulker_box
* magenta_stained_glass
* magenta_stained_glass_pane
* magenta_terracotta
* magenta_wall_banner
* magenta_wool
* magma_block
* medium_amethyst_bud
* melon
* melon_stem
* moss_block
* moss_carpet
* mossy_cobblestone
* mossy_cobblestone_slab
* mossy_cobblestone_stairs
* mossy_cobblestone_wall
* mossy_stone_brick_slab
* mossy_stone_brick_stairs
* mossy_stone_brick_wall
* mossy_stone_bricks
* moving_piston
* mushroom_stem
* mycelium
* nether_brick_fence
* nether_brick_slab
* nether_brick_stairs
* nether_brick_wall
* nether_bricks
* nether_gold_ore
* nether_portal
* nether_quartz_ore
* nether_sprouts
* nether_wart
* nether_wart_block
* netherite_block
* netherrack
* note_block
* oak_button
* oak_door
* oak_fence
* oak_fence_gate
* oak_leaves
* oak_log
* oak_planks
* oak_pressure_plate
* oak_sapling
* oak_sign
* oak_slab
* oak_stairs
* oak_trapdoor
* oak_wall_sign
* oak_wood
* observer
* obsidian
* orange_banner
* orange_bed
* orange_candle
* orange_candle_cake
* orange_carpet
* orange_concrete
* orange_concrete_powder
* orange_glazed_terracotta
* orange_shulker_box
* orange_stained_glass
* orange_stained_glass_pane
* orange_terracotta
* orange_tulip
* orange_wall_banner
* orange_wool
* oxeye_daisy
* oxidized_copper
* oxidized_cut_copper
* oxidized_cut_copper_slab
* oxidized_cut_copper_stairs
* packed_ice
* peony
* petrified_oak_slab
* pink_banner
* pink_bed
* pink_candle
* pink_candle_cake
* pink_carpet
* pink_concrete
* pink_concrete_powder
* pink_glazed_terracotta
* pink_shulker_box
* pink_stained_glass
* pink_stained_glass_pane
* pink_terracotta
* pink_tulip
* pink_wall_banner
* pink_wool
* piston
* piston_head
* player_head
* player_wall_head
* podzol
* pointed_dripstone
* polished_andesite
* polished_andesite_slab
* polished_andesite_stairs
* polished_basalt
* polished_blackstone
* polished_blackstone_brick_slab
* polished_blackstone_brick_stairs
* polished_blackstone_brick_wall
* polished_blackstone_bricks
* polished_blackstone_button
* polished_blackstone_pressure_plate
* polished_blackstone_slab
* polished_blackstone_stairs
* polished_blackstone_wall
* polished_deepslate
* polished_deepslate_slab
* polished_deepslate_stairs
* polished_deepslate_wall
* polished_diorite
* polished_diorite_slab
* polished_diorite_stairs
* polished_granite
* polished_granite_slab
* polished_granite_stairs
* poppy
* potatoes
* potted_acacia_sapling
* potted_allium
* potted_azalea_bush
* potted_azure_bluet
* potted_bamboo
* potted_birch_sapling
* potted_blue_orchid
* potted_brown_mushroom
* potted_cactus
* potted_cornflower
* potted_crimson_fungus
* potted_crimson_roots
* potted_dandelion
* potted_dark_oak_sapling
* potted_dead_bush
* potted_fern
* potted_flowering_azalea_bush
* potted_jungle_sapling
* potted_lily_of_the_valley
* potted_oak_sapling
* potted_orange_tulip
* potted_oxeye_daisy
* potted_pink_tulip
* potted_poppy
* potted_red_mushroom
* potted_red_tulip
* potted_spruce_sapling
* potted_warped_fungus
* potted_warped_roots
* potted_white_tulip
* potted_wither_rose
* powder_snow
* powder_snow_cauldron
* powered_rail
* prismarine
* prismarine_brick_slab
* prismarine_brick_stairs
* prismarine_bricks
* prismarine_slab
* prismarine_stairs
* prismarine_wall
* pumpkin
* pumpkin_stem
* purple_banner
* purple_bed
* purple_candle
* purple_candle_cake
* purple_carpet
* purple_concrete
* purple_concrete_powder
* purple_glazed_terracotta
* purple_shulker_box
* purple_stained_glass
* purple_stained_glass_pane
* purple_terracotta
* purple_wall_banner
* purple_wool
* purpur_block
* purpur_pillar
* purpur_slab
* purpur_stairs
* quartz_block
* quartz_bricks
* quartz_pillar
* quartz_slab
* quartz_stairs
* rail
* raw_copper_block
* raw_gold_block
* raw_iron_block
* red_banner
* red_bed
* red_candle
* red_candle_cake
* red_carpet
* red_concrete
* red_concrete_powder
* red_glazed_terracotta
* red_mushroom
* red_mushroom_block
* red_nether_brick_slab
* red_nether_brick_stairs
* red_nether_brick_wall
* red_nether_bricks
* red_sand
* red_sandstone
* red_sandstone_slab
* red_sandstone_stairs
* red_sandstone_wall
* red_shulker_box
* red_stained_glass
* red_stained_glass_pane
* red_terracotta
* red_tulip
* red_wall_banner
* red_wool
* redstone_block
* redstone_lamp
* redstone_ore
* redstone_torch
* redstone_wall_torch
* redstone_wire
* repeater
* repeating_command_block
* respawn_anchor
* rooted_dirt
* rose_bush
* sand
* sandstone
* sandstone_slab
* sandstone_stairs
* sandstone_wall
* scaffolding
* sculk_sensor
* sea_lantern
* sea_pickle
* seagrass
* shroomlight
* shulker_box
* skeleton_skull
* skeleton_wall_skull
* slime_block
* small_amethyst_bud
* small_dripleaf
* smithing_table
* smoker
* smooth_basalt
* smooth_quartz
* smooth_quartz_slab
* smooth_quartz_stairs
* smooth_red_sandstone
* smooth_red_sandstone_slab
* smooth_red_sandstone_stairs
* smooth_sandstone
* smooth_sandstone_slab
* smooth_sandstone_stairs
* smooth_stone
* smooth_stone_slab
* snow
* snow_block
* soul_campfire
* soul_fire
* soul_lantern
* soul_sand
* soul_soil
* soul_torch
* soul_wall_torch
* spawner
* sponge
* spore_blossom
* spruce_button
* spruce_door
* spruce_fence
* spruce_fence_gate
* spruce_leaves
* spruce_log
* spruce_planks
* spruce_pressure_plate
* spruce_sapling
* spruce_sign
* spruce_slab
* spruce_stairs
* spruce_trapdoor
* spruce_wall_sign
* spruce_wood
* sticky_piston
* stone
* stone_brick_slab
* stone_brick_stairs
* stone_brick_wall
* stone_bricks
* stone_button
* stone_pressure_plate
* stone_slab
* stone_stairs
* stonecutter
* stripped_acacia_log
* stripped_acacia_wood
* stripped_birch_log
* stripped_birch_wood
* stripped_crimson_hyphae
* stripped_crimson_stem
* stripped_dark_oak_log
* stripped_dark_oak_wood
* stripped_jungle_log
* stripped_jungle_wood
* stripped_oak_log
* stripped_oak_wood
* stripped_spruce_log
* stripped_spruce_wood
* stripped_warped_hyphae
* stripped_warped_stem
* structure_block
* structure_void
* sugar_cane
* sunflower
* sweet_berry_bush
* tall_grass
* tall_seagrass
* target
* terracotta
* tinted_glass
* tnt
* torch
* trapped_chest
* tripwire
* tripwire_hook
* tube_coral
* tube_coral_block
* tube_coral_fan
* tube_coral_wall_fan
* tuff
* turtle_egg
* twisting_vines
* twisting_vines_plant
* vine
* void_air
* wall_torch
* warped_button
* warped_door
* warped_fence
* warped_fence_gate
* warped_fungus
* warped_hyphae
* warped_nylium
* warped_planks
* warped_pressure_plate
* warped_roots
* warped_sign
* warped_slab
* warped_stairs
* warped_stem
* warped_trapdoor
* warped_wall_sign
* warped_wart_block
* water
* water_cauldron
* waxed_copper_block
* waxed_cut_copper
* waxed_cut_copper_slab
* waxed_cut_copper_stairs
* waxed_exposed_copper
* waxed_exposed_cut_copper
* waxed_exposed_cut_copper_slab
* waxed_exposed_cut_copper_stairs
* waxed_oxidized_copper
* waxed_oxidized_cut_copper
* waxed_oxidized_cut_copper_slab
* waxed_oxidized_cut_copper_stairs
* waxed_weathered_copper
* waxed_weathered_cut_copper
* waxed_weathered_cut_copper_slab
* waxed_weathered_cut_copper_stairs
* weathered_copper
* weathered_cut_copper
* weathered_cut_copper_slab
* weathered_cut_copper_stairs
* weeping_vines
* weeping_vines_plant
* wet_sponge
* wheat
* white_banner
* white_bed
* white_candle
* white_candle_cake
* white_carpet
* white_concrete
* white_concrete_powder
* white_glazed_terracotta
* white_shulker_box
* white_stained_glass
* white_stained_glass_pane
* white_terracotta
* white_tulip
* white_wall_banner
* white_wool
* wither_rose
* wither_skeleton_skull
* wither_skeleton_wall_skull
* yellow_banner
* yellow_bed
* yellow_candle
* yellow_candle_cake
* yellow_carpet
* yellow_concrete
* yellow_concrete_powder
* yellow_glazed_terracotta
* yellow_shulker_box
* yellow_stained_glass
* yellow_stained_glass_pane
* yellow_terracotta
* yellow_wall_banner
* yellow_wool
* zombie_head
* zombie_wall_head
"""
acacia_button = "minecraft:acacia_button"
acacia_door = "minecraft:acacia_door"
acacia_fence = "minecraft:acacia_fence"
acacia_fence_gate = "minecraft:acacia_fence_gate"
acacia_leaves = "minecraft:acacia_leaves"
acacia_log = "minecraft:acacia_log"
acacia_planks = "minecraft:acacia_planks"
acacia_pressure_plate = "minecraft:acacia_pressure_plate"
acacia_sapling = "minecraft:acacia_sapling"
acacia_sign = "minecraft:acacia_sign"
acacia_slab = "minecraft:acacia_slab"
acacia_stairs = "minecraft:acacia_stairs"
acacia_trapdoor = "minecraft:acacia_trapdoor"
acacia_wall_sign = "minecraft:acacia_wall_sign"
acacia_wood = "minecraft:acacia_wood"
activator_rail = "minecraft:activator_rail"
air = "minecraft:air"
allium = "minecraft:allium"
amethyst_block = "minecraft:amethyst_block"
amethyst_cluster = "minecraft:amethyst_cluster"
ancient_debris = "minecraft:ancient_debris"
andesite = "minecraft:andesite"
andesite_slab = "minecraft:andesite_slab"
andesite_stairs = "minecraft:andesite_stairs"
andesite_wall = "minecraft:andesite_wall"
anvil = "minecraft:anvil"
attached_melon_stem = "minecraft:attached_melon_stem"
attached_pumpkin_stem = "minecraft:attached_pumpkin_stem"
azalea = "minecraft:azalea"
azalea_leaves = "minecraft:azalea_leaves"
azure_bluet = "minecraft:azure_bluet"
bamboo = "minecraft:bamboo"
bamboo_sapling = "minecraft:bamboo_sapling"
barrel = "minecraft:barrel"
barrier = "minecraft:barrier"
basalt = "minecraft:basalt"
beacon = "minecraft:beacon"
bedrock = "minecraft:bedrock"
bee_nest = "minecraft:bee_nest"
beehive = "minecraft:beehive"
beetroots = "minecraft:beetroots"
bell = "minecraft:bell"
big_dripleaf = "minecraft:big_dripleaf"
big_dripleaf_stem = "minecraft:big_dripleaf_stem"
birch_button = "minecraft:birch_button"
birch_door = "minecraft:birch_door"
birch_fence = "minecraft:birch_fence"
birch_fence_gate = "minecraft:birch_fence_gate"
birch_leaves = "minecraft:birch_leaves"
birch_log = "minecraft:birch_log"
birch_planks = "minecraft:birch_planks"
birch_pressure_plate = "minecraft:birch_pressure_plate"
birch_sapling = "minecraft:birch_sapling"
birch_sign = "minecraft:birch_sign"
birch_slab = "minecraft:birch_slab"
birch_stairs = "minecraft:birch_stairs"
birch_trapdoor = "minecraft:birch_trapdoor"
birch_wall_sign = "minecraft:birch_wall_sign"
birch_wood = "minecraft:birch_wood"
black_banner = "minecraft:black_banner"
black_bed = "minecraft:black_bed"
black_candle = "minecraft:black_candle"
black_candle_cake = "minecraft:black_candle_cake"
black_carpet = "minecraft:black_carpet"
black_concrete = "minecraft:black_concrete"
black_concrete_powder = "minecraft:black_concrete_powder"
black_glazed_terracotta = "minecraft:black_glazed_terracotta"
black_shulker_box = "minecraft:black_shulker_box"
black_stained_glass = "minecraft:black_stained_glass"
black_stained_glass_pane = "minecraft:black_stained_glass_pane"
black_terracotta = "minecraft:black_terracotta"
black_wall_banner = "minecraft:black_wall_banner"
black_wool = "minecraft:black_wool"
blackstone = "minecraft:blackstone"
blackstone_slab = "minecraft:blackstone_slab"
blackstone_stairs = "minecraft:blackstone_stairs"
blackstone_wall = "minecraft:blackstone_wall"
blast_furnace = "minecraft:blast_furnace"
blue_banner = "minecraft:blue_banner"
blue_bed = "minecraft:blue_bed"
blue_candle = "minecraft:blue_candle"
blue_candle_cake = "minecraft:blue_candle_cake"
blue_carpet = "minecraft:blue_carpet"
blue_concrete = "minecraft:blue_concrete"
blue_concrete_powder = "minecraft:blue_concrete_powder"
blue_glazed_terracotta = "minecraft:blue_glazed_terracotta"
blue_ice = "minecraft:blue_ice"
blue_orchid = "minecraft:blue_orchid"
blue_shulker_box = "minecraft:blue_shulker_box"
blue_stained_glass = "minecraft:blue_stained_glass"
blue_stained_glass_pane = "minecraft:blue_stained_glass_pane"
blue_terracotta = "minecraft:blue_terracotta"
blue_wall_banner = "minecraft:blue_wall_banner"
blue_wool = "minecraft:blue_wool"
bone_block = "minecraft:bone_block"
bookshelf = "minecraft:bookshelf"
brain_coral = "minecraft:brain_coral"
brain_coral_block = "minecraft:brain_coral_block"
brain_coral_fan = "minecraft:brain_coral_fan"
brain_coral_wall_fan = "minecraft:brain_coral_wall_fan"
brewing_stand = "minecraft:brewing_stand"
brick_slab = "minecraft:brick_slab"
brick_stairs = "minecraft:brick_stairs"
brick_wall = "minecraft:brick_wall"
bricks = "minecraft:bricks"
brown_banner = "minecraft:brown_banner"
brown_bed = "minecraft:brown_bed"
brown_candle = "minecraft:brown_candle"
brown_candle_cake = "minecraft:brown_candle_cake"
brown_carpet = "minecraft:brown_carpet"
brown_concrete = "minecraft:brown_concrete"
brown_concrete_powder = "minecraft:brown_concrete_powder"
brown_glazed_terracotta = "minecraft:brown_glazed_terracotta"
brown_mushroom = "minecraft:brown_mushroom"
brown_mushroom_block = "minecraft:brown_mushroom_block"
brown_shulker_box = "minecraft:brown_shulker_box"
brown_stained_glass = "minecraft:brown_stained_glass"
brown_stained_glass_pane = "minecraft:brown_stained_glass_pane"
brown_terracotta = "minecraft:brown_terracotta"
brown_wall_banner = "minecraft:brown_wall_banner"
brown_wool = "minecraft:brown_wool"
bubble_column = "minecraft:bubble_column"
bubble_coral = "minecraft:bubble_coral"
bubble_coral_block = "minecraft:bubble_coral_block"
bubble_coral_fan = "minecraft:bubble_coral_fan"
bubble_coral_wall_fan = "minecraft:bubble_coral_wall_fan"
budding_amethyst = "minecraft:budding_amethyst"
cactus = "minecraft:cactus"
cake = "minecraft:cake"
calcite = "minecraft:calcite"
campfire = "minecraft:campfire"
candle = "minecraft:candle"
candle_cake = "minecraft:candle_cake"
carrots = "minecraft:carrots"
cartography_table = "minecraft:cartography_table"
carved_pumpkin = "minecraft:carved_pumpkin"
cauldron = "minecraft:cauldron"
cave_air = "minecraft:cave_air"
cave_vines = "minecraft:cave_vines"
cave_vines_plant = "minecraft:cave_vines_plant"
chain = "minecraft:chain"
chain_command_block = "minecraft:chain_command_block"
chest = "minecraft:chest"
chipped_anvil = "minecraft:chipped_anvil"
chiseled_deepslate = "minecraft:chiseled_deepslate"
chiseled_nether_bricks = "minecraft:chiseled_nether_bricks"
chiseled_polished_blackstone = "minecraft:chiseled_polished_blackstone"
chiseled_quartz_block = "minecraft:chiseled_quartz_block"
chiseled_red_sandstone = "minecraft:chiseled_red_sandstone"
chiseled_sandstone = "minecraft:chiseled_sandstone"
chiseled_stone_bricks = "minecraft:chiseled_stone_bricks"
chorus_flower = "minecraft:chorus_flower"
chorus_plant = "minecraft:chorus_plant"
clay = "minecraft:clay"
coal_block = "minecraft:coal_block"
coal_ore = "minecraft:coal_ore"
coarse_dirt = "minecraft:coarse_dirt"
cobbled_deepslate = "minecraft:cobbled_deepslate"
cobbled_deepslate_slab = "minecraft:cobbled_deepslate_slab"
cobbled_deepslate_stairs = "minecraft:cobbled_deepslate_stairs"
cobbled_deepslate_wall = "minecraft:cobbled_deepslate_wall"
cobblestone = "minecraft:cobblestone"
cobblestone_slab = "minecraft:cobblestone_slab"
cobblestone_stairs = "minecraft:cobblestone_stairs"
cobblestone_wall = "minecraft:cobblestone_wall"
cobweb = "minecraft:cobweb"
cocoa = "minecraft:cocoa"
command_block = "minecraft:command_block"
comparator = "minecraft:comparator"
composter = "minecraft:composter"
conduit = "minecraft:conduit"
copper_block = "minecraft:copper_block"
copper_ore = "minecraft:copper_ore"
cornflower = "minecraft:cornflower"
cracked_deepslate_bricks = "minecraft:cracked_deepslate_bricks"
cracked_deepslate_tiles = "minecraft:cracked_deepslate_tiles"
cracked_nether_bricks = "minecraft:cracked_nether_bricks"
cracked_polished_blackstone_bricks = "minecraft:cracked_polished_blackstone_bricks"
cracked_stone_bricks = "minecraft:cracked_stone_bricks"
crafting_table = "minecraft:crafting_table"
creeper_head = "minecraft:creeper_head"
creeper_wall_head = "minecraft:creeper_wall_head"
crimson_button = "minecraft:crimson_button"
crimson_door = "minecraft:crimson_door"
crimson_fence = "minecraft:crimson_fence"
crimson_fence_gate = "minecraft:crimson_fence_gate"
crimson_fungus = "minecraft:crimson_fungus"
crimson_hyphae = "minecraft:crimson_hyphae"
crimson_nylium = "minecraft:crimson_nylium"
crimson_planks = "minecraft:crimson_planks"
crimson_pressure_plate = "minecraft:crimson_pressure_plate"
crimson_roots = "minecraft:crimson_roots"
crimson_sign = "minecraft:crimson_sign"
crimson_slab = "minecraft:crimson_slab"
crimson_stairs = "minecraft:crimson_stairs"
crimson_stem = "minecraft:crimson_stem"
crimson_trapdoor = "minecraft:crimson_trapdoor"
crimson_wall_sign = "minecraft:crimson_wall_sign"
crying_obsidian = "minecraft:crying_obsidian"
cut_copper = "minecraft:cut_copper"
cut_copper_slab = "minecraft:cut_copper_slab"
cut_copper_stairs = "minecraft:cut_copper_stairs"
cut_red_sandstone = "minecraft:cut_red_sandstone"
cut_red_sandstone_slab = "minecraft:cut_red_sandstone_slab"
cut_sandstone = "minecraft:cut_sandstone"
cut_sandstone_slab = "minecraft:cut_sandstone_slab"
cyan_banner = "minecraft:cyan_banner"
cyan_bed = "minecraft:cyan_bed"
cyan_candle = "minecraft:cyan_candle"
cyan_candle_cake = "minecraft:cyan_candle_cake"
cyan_carpet = "minecraft:cyan_carpet"
cyan_concrete = "minecraft:cyan_concrete"
cyan_concrete_powder = "minecraft:cyan_concrete_powder"
cyan_glazed_terracotta = "minecraft:cyan_glazed_terracotta"
cyan_shulker_box = "minecraft:cyan_shulker_box"
cyan_stained_glass = "minecraft:cyan_stained_glass"
cyan_stained_glass_pane = "minecraft:cyan_stained_glass_pane"
cyan_terracotta = "minecraft:cyan_terracotta"
cyan_wall_banner = "minecraft:cyan_wall_banner"
cyan_wool = "minecraft:cyan_wool"
damaged_anvil = "minecraft:damaged_anvil"
dandelion = "minecraft:dandelion"
dark_oak_button = "minecraft:dark_oak_button"
dark_oak_door = "minecraft:dark_oak_door"
dark_oak_fence = "minecraft:dark_oak_fence"
dark_oak_fence_gate = "minecraft:dark_oak_fence_gate"
dark_oak_leaves = "minecraft:dark_oak_leaves"
dark_oak_log = "minecraft:dark_oak_log"
dark_oak_planks = "minecraft:dark_oak_planks"
dark_oak_pressure_plate = "minecraft:dark_oak_pressure_plate"
dark_oak_sapling = "minecraft:dark_oak_sapling"
dark_oak_sign = "minecraft:dark_oak_sign"
dark_oak_slab = "minecraft:dark_oak_slab"
dark_oak_stairs = "minecraft:dark_oak_stairs"
dark_oak_trapdoor = "minecraft:dark_oak_trapdoor"
dark_oak_wall_sign = "minecraft:dark_oak_wall_sign"
dark_oak_wood = "minecraft:dark_oak_wood"
dark_prismarine = "minecraft:dark_prismarine"
dark_prismarine_slab = "minecraft:dark_prismarine_slab"
dark_prismarine_stairs = "minecraft:dark_prismarine_stairs"
daylight_detector = "minecraft:daylight_detector"
dead_brain_coral = "minecraft:dead_brain_coral"
dead_brain_coral_block = "minecraft:dead_brain_coral_block"
dead_brain_coral_fan = "minecraft:dead_brain_coral_fan"
dead_brain_coral_wall_fan = "minecraft:dead_brain_coral_wall_fan"
dead_bubble_coral = "minecraft:dead_bubble_coral"
dead_bubble_coral_block = "minecraft:dead_bubble_coral_block"
dead_bubble_coral_fan = "minecraft:dead_bubble_coral_fan"
dead_bubble_coral_wall_fan = "minecraft:dead_bubble_coral_wall_fan"
dead_bush = "minecraft:dead_bush"
dead_fire_coral = "minecraft:dead_fire_coral"
dead_fire_coral_block = "minecraft:dead_fire_coral_block"
dead_fire_coral_fan = "minecraft:dead_fire_coral_fan"
dead_fire_coral_wall_fan = "minecraft:dead_fire_coral_wall_fan"
dead_horn_coral = "minecraft:dead_horn_coral"
dead_horn_coral_block = "minecraft:dead_horn_coral_block"
dead_horn_coral_fan = "minecraft:dead_horn_coral_fan"
dead_horn_coral_wall_fan = "minecraft:dead_horn_coral_wall_fan"
dead_tube_coral = "minecraft:dead_tube_coral"
dead_tube_coral_block = "minecraft:dead_tube_coral_block"
dead_tube_coral_fan = "minecraft:dead_tube_coral_fan"
dead_tube_coral_wall_fan = "minecraft:dead_tube_coral_wall_fan"
deepslate = "minecraft:deepslate"
deepslate_brick_slab = "minecraft:deepslate_brick_slab"
deepslate_brick_stairs = "minecraft:deepslate_brick_stairs"
deepslate_brick_wall = "minecraft:deepslate_brick_wall"
deepslate_bricks = "minecraft:deepslate_bricks"
deepslate_coal_ore = "minecraft:deepslate_coal_ore"
deepslate_copper_ore = "minecraft:deepslate_copper_ore"
deepslate_diamond_ore = "minecraft:deepslate_diamond_ore"
deepslate_emerald_ore = "minecraft:deepslate_emerald_ore"
deepslate_gold_ore = "minecraft:deepslate_gold_ore"
deepslate_iron_ore = "minecraft:deepslate_iron_ore"
deepslate_lapis_ore = "minecraft:deepslate_lapis_ore"
deepslate_redstone_ore = "minecraft:deepslate_redstone_ore"
deepslate_tile_slab = "minecraft:deepslate_tile_slab"
deepslate_tile_stairs = "minecraft:deepslate_tile_stairs"
deepslate_tile_wall = "minecraft:deepslate_tile_wall"
deepslate_tiles = "minecraft:deepslate_tiles"
detector_rail = "minecraft:detector_rail"
diamond_block = "minecraft:diamond_block"
diamond_ore = "minecraft:diamond_ore"
diorite = "minecraft:diorite"
diorite_slab = "minecraft:diorite_slab"
diorite_stairs = "minecraft:diorite_stairs"
diorite_wall = "minecraft:diorite_wall"
dirt = "minecraft:dirt"
dirt_path = "minecraft:dirt_path"
dispenser = "minecraft:dispenser"
dragon_egg = "minecraft:dragon_egg"
dragon_head = "minecraft:dragon_head"
dragon_wall_head = "minecraft:dragon_wall_head"
dried_kelp_block = "minecraft:dried_kelp_block"
dripstone_block = "minecraft:dripstone_block"
dropper = "minecraft:dropper"
emerald_block = "minecraft:emerald_block"
emerald_ore = "minecraft:emerald_ore"
enchanting_table = "minecraft:enchanting_table"
end_gateway = "minecraft:end_gateway"
end_portal = "minecraft:end_portal"
end_portal_frame = "minecraft:end_portal_frame"
end_rod = "minecraft:end_rod"
end_stone = "minecraft:end_stone"
end_stone_brick_slab = "minecraft:end_stone_brick_slab"
end_stone_brick_stairs = "minecraft:end_stone_brick_stairs"
end_stone_brick_wall = "minecraft:end_stone_brick_wall"
end_stone_bricks = "minecraft:end_stone_bricks"
ender_chest = "minecraft:ender_chest"
exposed_copper = "minecraft:exposed_copper"
exposed_cut_copper = "minecraft:exposed_cut_copper"
exposed_cut_copper_slab = "minecraft:exposed_cut_copper_slab"
exposed_cut_copper_stairs = "minecraft:exposed_cut_copper_stairs"
farmland = "minecraft:farmland"
fern = "minecraft:fern"
fire = "minecraft:fire"
fire_coral = "minecraft:fire_coral"
fire_coral_block = "minecraft:fire_coral_block"
fire_coral_fan = "minecraft:fire_coral_fan"
fire_coral_wall_fan = "minecraft:fire_coral_wall_fan"
fletching_table = "minecraft:fletching_table"
flower_pot = "minecraft:flower_pot"
flowering_azalea = "minecraft:flowering_azalea"
flowering_azalea_leaves = "minecraft:flowering_azalea_leaves"
frosted_ice = "minecraft:frosted_ice"
furnace = "minecraft:furnace"
gilded_blackstone = "minecraft:gilded_blackstone"
glass = "minecraft:glass"
glass_pane = "minecraft:glass_pane"
glow_lichen = "minecraft:glow_lichen"
glowstone = "minecraft:glowstone"
gold_block = "minecraft:gold_block"
gold_ore = "minecraft:gold_ore"
granite = "minecraft:granite"
granite_slab = "minecraft:granite_slab"
granite_stairs = "minecraft:granite_stairs"
granite_wall = "minecraft:granite_wall"
grass = "minecraft:grass"
grass_block = "minecraft:grass_block"
gravel = "minecraft:gravel"
gray_banner = "minecraft:gray_banner"
gray_bed = "minecraft:gray_bed"
gray_candle = "minecraft:gray_candle"
gray_candle_cake = "minecraft:gray_candle_cake"
gray_carpet = "minecraft:gray_carpet"
gray_concrete = "minecraft:gray_concrete"
gray_concrete_powder = "minecraft:gray_concrete_powder"
gray_glazed_terracotta = "minecraft:gray_glazed_terracotta"
gray_shulker_box = "minecraft:gray_shulker_box"
gray_stained_glass = "minecraft:gray_stained_glass"
gray_stained_glass_pane = "minecraft:gray_stained_glass_pane"
gray_terracotta = "minecraft:gray_terracotta"
gray_wall_banner = "minecraft:gray_wall_banner"
gray_wool = "minecraft:gray_wool"
green_banner = "minecraft:green_banner"
green_bed = "minecraft:green_bed"
green_candle = "minecraft:green_candle"
green_candle_cake = "minecraft:green_candle_cake"
green_carpet = "minecraft:green_carpet"
green_concrete = "minecraft:green_concrete"
green_concrete_powder = "minecraft:green_concrete_powder"
green_glazed_terracotta = "minecraft:green_glazed_terracotta"
green_shulker_box = "minecraft:green_shulker_box"
green_stained_glass = "minecraft:green_stained_glass"
green_stained_glass_pane = "minecraft:green_stained_glass_pane"
green_terracotta = "minecraft:green_terracotta"
green_wall_banner = "minecraft:green_wall_banner"
green_wool = "minecraft:green_wool"
grindstone = "minecraft:grindstone"
hanging_roots = "minecraft:hanging_roots"
hay_block = "minecraft:hay_block"
heavy_weighted_pressure_plate = "minecraft:heavy_weighted_pressure_plate"
honey_block = "minecraft:honey_block"
honeycomb_block = "minecraft:honeycomb_block"
hopper = "minecraft:hopper"
horn_coral = "minecraft:horn_coral"
horn_coral_block = "minecraft:horn_coral_block"
horn_coral_fan = "minecraft:horn_coral_fan"
horn_coral_wall_fan = "minecraft:horn_coral_wall_fan"
ice = "minecraft:ice"
infested_chiseled_stone_bricks = "minecraft:infested_chiseled_stone_bricks"
infested_cobblestone = "minecraft:infested_cobblestone"
infested_cracked_stone_bricks = "minecraft:infested_cracked_stone_bricks"
infested_deepslate = "minecraft:infested_deepslate"
infested_mossy_stone_bricks = "minecraft:infested_mossy_stone_bricks"
infested_stone = "minecraft:infested_stone"
infested_stone_bricks = "minecraft:infested_stone_bricks"
iron_bars = "minecraft:iron_bars"
iron_block = "minecraft:iron_block"
iron_door = "minecraft:iron_door"
iron_ore = "minecraft:iron_ore"
iron_trapdoor = "minecraft:iron_trapdoor"
jack_o_lantern = "minecraft:jack_o_lantern"
jigsaw = "minecraft:jigsaw"
jukebox = "minecraft:jukebox"
jungle_button = "minecraft:jungle_button"
jungle_door = "minecraft:jungle_door"
jungle_fence = "minecraft:jungle_fence"
jungle_fence_gate = "minecraft:jungle_fence_gate"
jungle_leaves = "minecraft:jungle_leaves"
jungle_log = "minecraft:jungle_log"
jungle_planks = "minecraft:jungle_planks"
jungle_pressure_plate = "minecraft:jungle_pressure_plate"
jungle_sapling = "minecraft:jungle_sapling"
jungle_sign = "minecraft:jungle_sign"
jungle_slab = "minecraft:jungle_slab"
jungle_stairs = "minecraft:jungle_stairs"
jungle_trapdoor = "minecraft:jungle_trapdoor"
jungle_wall_sign = "minecraft:jungle_wall_sign"
jungle_wood = "minecraft:jungle_wood"
kelp = "minecraft:kelp"
kelp_plant = "minecraft:kelp_plant"
ladder = "minecraft:ladder"
lantern = "minecraft:lantern"
lapis_block = "minecraft:lapis_block"
lapis_ore = "minecraft:lapis_ore"
large_amethyst_bud = "minecraft:large_amethyst_bud"
large_fern = "minecraft:large_fern"
lava = "minecraft:lava"
lava_cauldron = "minecraft:lava_cauldron"
lectern = "minecraft:lectern"
lever = "minecraft:lever"
light = "minecraft:light"
light_blue_banner = "minecraft:light_blue_banner"
light_blue_bed = "minecraft:light_blue_bed"
light_blue_candle = "minecraft:light_blue_candle"
light_blue_candle_cake = "minecraft:light_blue_candle_cake"
light_blue_carpet = "minecraft:light_blue_carpet"
light_blue_concrete = "minecraft:light_blue_concrete"
light_blue_concrete_powder = "minecraft:light_blue_concrete_powder"
light_blue_glazed_terracotta = "minecraft:light_blue_glazed_terracotta"
light_blue_shulker_box = "minecraft:light_blue_shulker_box"
light_blue_stained_glass = "minecraft:light_blue_stained_glass"
light_blue_stained_glass_pane = "minecraft:light_blue_stained_glass_pane"
light_blue_terracotta = "minecraft:light_blue_terracotta"
light_blue_wall_banner = "minecraft:light_blue_wall_banner"
light_blue_wool = "minecraft:light_blue_wool"
light_gray_banner = "minecraft:light_gray_banner"
light_gray_bed = "minecraft:light_gray_bed"
light_gray_candle = "minecraft:light_gray_candle"
light_gray_candle_cake = "minecraft:light_gray_candle_cake"
light_gray_carpet = "minecraft:light_gray_carpet"
light_gray_concrete = "minecraft:light_gray_concrete"
light_gray_concrete_powder = "minecraft:light_gray_concrete_powder"
light_gray_glazed_terracotta = "minecraft:light_gray_glazed_terracotta"
light_gray_shulker_box = "minecraft:light_gray_shulker_box"
light_gray_stained_glass = "minecraft:light_gray_stained_glass"
light_gray_stained_glass_pane = "minecraft:light_gray_stained_glass_pane"
light_gray_terracotta = "minecraft:light_gray_terracotta"
light_gray_wall_banner = "minecraft:light_gray_wall_banner"
light_gray_wool = "minecraft:light_gray_wool"
light_weighted_pressure_plate = "minecraft:light_weighted_pressure_plate"
lightning_rod = "minecraft:lightning_rod"
lilac = "minecraft:lilac"
lily_of_the_valley = "minecraft:lily_of_the_valley"
lily_pad = "minecraft:lily_pad"
lime_banner = "minecraft:lime_banner"
lime_bed = "minecraft:lime_bed"
lime_candle = "minecraft:lime_candle"
lime_candle_cake = "minecraft:lime_candle_cake"
lime_carpet = "minecraft:lime_carpet"
lime_concrete = "minecraft:lime_concrete"
lime_concrete_powder = "minecraft:lime_concrete_powder"
lime_glazed_terracotta = "minecraft:lime_glazed_terracotta"
lime_shulker_box = "minecraft:lime_shulker_box"
lime_stained_glass = "minecraft:lime_stained_glass"
lime_stained_glass_pane = "minecraft:lime_stained_glass_pane"
lime_terracotta = "minecraft:lime_terracotta"
lime_wall_banner = "minecraft:lime_wall_banner"
lime_wool = "minecraft:lime_wool"
lodestone = "minecraft:lodestone"
loom = "minecraft:loom"
magenta_banner = "minecraft:magenta_banner"
magenta_bed = "minecraft:magenta_bed"
magenta_candle = "minecraft:magenta_candle"
magenta_candle_cake = "minecraft:magenta_candle_cake"
magenta_carpet = "minecraft:magenta_carpet"
magenta_concrete = "minecraft:magenta_concrete"
magenta_concrete_powder = "minecraft:magenta_concrete_powder"
magenta_glazed_terracotta = "minecraft:magenta_glazed_terracotta"
magenta_shulker_box = "minecraft:magenta_shulker_box"
magenta_stained_glass = "minecraft:magenta_stained_glass"
magenta_stained_glass_pane = "minecraft:magenta_stained_glass_pane"
magenta_terracotta = "minecraft:magenta_terracotta"
magenta_wall_banner = "minecraft:magenta_wall_banner"
magenta_wool = "minecraft:magenta_wool"
magma_block = "minecraft:magma_block"
medium_amethyst_bud = "minecraft:medium_amethyst_bud"
melon = "minecraft:melon"
melon_stem = "minecraft:melon_stem"
moss_block = "minecraft:moss_block"
moss_carpet = "minecraft:moss_carpet"
mossy_cobblestone = "minecraft:mossy_cobblestone"
mossy_cobblestone_slab = "minecraft:mossy_cobblestone_slab"
mossy_cobblestone_stairs = "minecraft:mossy_cobblestone_stairs"
mossy_cobblestone_wall = "minecraft:mossy_cobblestone_wall"
mossy_stone_brick_slab = "minecraft:mossy_stone_brick_slab"
mossy_stone_brick_stairs = "minecraft:mossy_stone_brick_stairs"
mossy_stone_brick_wall = "minecraft:mossy_stone_brick_wall"
mossy_stone_bricks = "minecraft:mossy_stone_bricks"
moving_piston = "minecraft:moving_piston"
mushroom_stem = "minecraft:mushroom_stem"
mycelium = "minecraft:mycelium"
nether_brick_fence = "minecraft:nether_brick_fence"
nether_brick_slab = "minecraft:nether_brick_slab"
nether_brick_stairs = "minecraft:nether_brick_stairs"
nether_brick_wall = "minecraft:nether_brick_wall"
nether_bricks = "minecraft:nether_bricks"
nether_gold_ore = "minecraft:nether_gold_ore"
nether_portal = "minecraft:nether_portal"
nether_quartz_ore = "minecraft:nether_quartz_ore"
nether_sprouts = "minecraft:nether_sprouts"
nether_wart = "minecraft:nether_wart"
nether_wart_block = "minecraft:nether_wart_block"
netherite_block = "minecraft:netherite_block"
netherrack = "minecraft:netherrack"
note_block = "minecraft:note_block"
oak_button = "minecraft:oak_button"
oak_door = "minecraft:oak_door"
oak_fence = "minecraft:oak_fence"
oak_fence_gate = "minecraft:oak_fence_gate"
oak_leaves = "minecraft:oak_leaves"
oak_log = "minecraft:oak_log"
oak_planks = "minecraft:oak_planks"
oak_pressure_plate = "minecraft:oak_pressure_plate"
oak_sapling = "minecraft:oak_sapling"
oak_sign = "minecraft:oak_sign"
oak_slab = "minecraft:oak_slab"
oak_stairs = "minecraft:oak_stairs"
oak_trapdoor = "minecraft:oak_trapdoor"
oak_wall_sign = "minecraft:oak_wall_sign"
oak_wood = "minecraft:oak_wood"
observer = "minecraft:observer"
obsidian = "minecraft:obsidian"
orange_banner = "minecraft:orange_banner"
orange_bed = "minecraft:orange_bed"
orange_candle = "minecraft:orange_candle"
orange_candle_cake = "minecraft:orange_candle_cake"
orange_carpet = "minecraft:orange_carpet"
orange_concrete = "minecraft:orange_concrete"
orange_concrete_powder = "minecraft:orange_concrete_powder"
orange_glazed_terracotta = "minecraft:orange_glazed_terracotta"
orange_shulker_box = "minecraft:orange_shulker_box"
orange_stained_glass = "minecraft:orange_stained_glass"
orange_stained_glass_pane = "minecraft:orange_stained_glass_pane"
orange_terracotta = "minecraft:orange_terracotta"
orange_tulip = "minecraft:orange_tulip"
orange_wall_banner = "minecraft:orange_wall_banner"
orange_wool = "minecraft:orange_wool"
oxeye_daisy = "minecraft:oxeye_daisy"
oxidized_copper = "minecraft:oxidized_copper"
oxidized_cut_copper = "minecraft:oxidized_cut_copper"
oxidized_cut_copper_slab = "minecraft:oxidized_cut_copper_slab"
oxidized_cut_copper_stairs = "minecraft:oxidized_cut_copper_stairs"
packed_ice = "minecraft:packed_ice"
peony = "minecraft:peony"
petrified_oak_slab = "minecraft:petrified_oak_slab"
pink_banner = "minecraft:pink_banner"
pink_bed = "minecraft:pink_bed"
pink_candle = "minecraft:pink_candle"
pink_candle_cake = "minecraft:pink_candle_cake"
pink_carpet = "minecraft:pink_carpet"
pink_concrete = "minecraft:pink_concrete"
pink_concrete_powder = "minecraft:pink_concrete_powder"
pink_glazed_terracotta = "minecraft:pink_glazed_terracotta"
pink_shulker_box = "minecraft:pink_shulker_box"
pink_stained_glass = "minecraft:pink_stained_glass"
pink_stained_glass_pane = "minecraft:pink_stained_glass_pane"
pink_terracotta = "minecraft:pink_terracotta"
pink_tulip = "minecraft:pink_tulip"
pink_wall_banner = "minecraft:pink_wall_banner"
pink_wool = "minecraft:pink_wool"
piston = "minecraft:piston"
piston_head = "minecraft:piston_head"
player_head = "minecraft:player_head"
player_wall_head = "minecraft:player_wall_head"
podzol = "minecraft:podzol"
pointed_dripstone = "minecraft:pointed_dripstone"
polished_andesite = "minecraft:polished_andesite"
polished_andesite_slab = "minecraft:polished_andesite_slab"
polished_andesite_stairs = "minecraft:polished_andesite_stairs"
polished_basalt = "minecraft:polished_basalt"
polished_blackstone = "minecraft:polished_blackstone"
polished_blackstone_brick_slab = "minecraft:polished_blackstone_brick_slab"
polished_blackstone_brick_stairs = "minecraft:polished_blackstone_brick_stairs"
polished_blackstone_brick_wall = "minecraft:polished_blackstone_brick_wall"
polished_blackstone_bricks = "minecraft:polished_blackstone_bricks"
polished_blackstone_button = "minecraft:polished_blackstone_button"
polished_blackstone_pressure_plate = "minecraft:polished_blackstone_pressure_plate"
polished_blackstone_slab = "minecraft:polished_blackstone_slab"
polished_blackstone_stairs = "minecraft:polished_blackstone_stairs"
polished_blackstone_wall = "minecraft:polished_blackstone_wall"
polished_deepslate = "minecraft:polished_deepslate"
polished_deepslate_slab = "minecraft:polished_deepslate_slab"
polished_deepslate_stairs = "minecraft:polished_deepslate_stairs"
polished_deepslate_wall = "minecraft:polished_deepslate_wall"
polished_diorite = "minecraft:polished_diorite"
polished_diorite_slab = "minecraft:polished_diorite_slab"
polished_diorite_stairs = "minecraft:polished_diorite_stairs"
polished_granite = "minecraft:polished_granite"
polished_granite_slab = "minecraft:polished_granite_slab"
polished_granite_stairs = "minecraft:polished_granite_stairs"
poppy = "minecraft:poppy"
potatoes = "minecraft:potatoes"
potted_acacia_sapling = "minecraft:potted_acacia_sapling"
potted_allium = "minecraft:potted_allium"
potted_azalea_bush = "minecraft:potted_azalea_bush"
potted_azure_bluet = "minecraft:potted_azure_bluet"
potted_bamboo = "minecraft:potted_bamboo"
potted_birch_sapling = "minecraft:potted_birch_sapling"
potted_blue_orchid = "minecraft:potted_blue_orchid"
potted_brown_mushroom = "minecraft:potted_brown_mushroom"
potted_cactus = "minecraft:potted_cactus"
potted_cornflower = "minecraft:potted_cornflower"
potted_crimson_fungus = "minecraft:potted_crimson_fungus"
potted_crimson_roots = "minecraft:potted_crimson_roots"
potted_dandelion = "minecraft:potted_dandelion"
potted_dark_oak_sapling = "minecraft:potted_dark_oak_sapling"
potted_dead_bush = "minecraft:potted_dead_bush"
potted_fern = "minecraft:potted_fern"
potted_flowering_azalea_bush = "minecraft:potted_flowering_azalea_bush"
potted_jungle_sapling = "minecraft:potted_jungle_sapling"
potted_lily_of_the_valley = "minecraft:potted_lily_of_the_valley"
potted_oak_sapling = "minecraft:potted_oak_sapling"
potted_orange_tulip = "minecraft:potted_orange_tulip"
potted_oxeye_daisy = "minecraft:potted_oxeye_daisy"
potted_pink_tulip = "minecraft:potted_pink_tulip"
potted_poppy = "minecraft:potted_poppy"
potted_red_mushroom = "minecraft:potted_red_mushroom"
potted_red_tulip = "minecraft:potted_red_tulip"
potted_spruce_sapling = "minecraft:potted_spruce_sapling"
potted_warped_fungus = "minecraft:potted_warped_fungus"
potted_warped_roots = "minecraft:potted_warped_roots"
potted_white_tulip = "minecraft:potted_white_tulip"
potted_wither_rose = "minecraft:potted_wither_rose"
powder_snow = "minecraft:powder_snow"
powder_snow_cauldron = "minecraft:powder_snow_cauldron"
powered_rail = "minecraft:powered_rail"
prismarine = "minecraft:prismarine"
prismarine_brick_slab = "minecraft:prismarine_brick_slab"
prismarine_brick_stairs = "minecraft:prismarine_brick_stairs"
prismarine_bricks = "minecraft:prismarine_bricks"
prismarine_slab = "minecraft:prismarine_slab"
prismarine_stairs = "minecraft:prismarine_stairs"
prismarine_wall = "minecraft:prismarine_wall"
pumpkin = "minecraft:pumpkin"
pumpkin_stem = "minecraft:pumpkin_stem"
purple_banner = "minecraft:purple_banner"
purple_bed = "minecraft:purple_bed"
purple_candle = "minecraft:purple_candle"
purple_candle_cake = "minecraft:purple_candle_cake"
purple_carpet = "minecraft:purple_carpet"
purple_concrete = "minecraft:purple_concrete"
purple_concrete_powder = "minecraft:purple_concrete_powder"
purple_glazed_terracotta = "minecraft:purple_glazed_terracotta"
purple_shulker_box = "minecraft:purple_shulker_box"
purple_stained_glass = "minecraft:purple_stained_glass"
purple_stained_glass_pane = "minecraft:purple_stained_glass_pane"
purple_terracotta = "minecraft:purple_terracotta"
purple_wall_banner = "minecraft:purple_wall_banner"
purple_wool = "minecraft:purple_wool"
purpur_block = "minecraft:purpur_block"
purpur_pillar = "minecraft:purpur_pillar"
purpur_slab = "minecraft:purpur_slab"
purpur_stairs = "minecraft:purpur_stairs"
quartz_block = "minecraft:quartz_block"
quartz_bricks = "minecraft:quartz_bricks"
quartz_pillar = "minecraft:quartz_pillar"
quartz_slab = "minecraft:quartz_slab"
quartz_stairs = "minecraft:quartz_stairs"
rail = "minecraft:rail"
raw_copper_block = "minecraft:raw_copper_block"
raw_gold_block = "minecraft:raw_gold_block"
raw_iron_block = "minecraft:raw_iron_block"
red_banner = "minecraft:red_banner"
red_bed = "minecraft:red_bed"
red_candle = "minecraft:red_candle"
red_candle_cake = "minecraft:red_candle_cake"
red_carpet = "minecraft:red_carpet"
red_concrete = "minecraft:red_concrete"
red_concrete_powder = "minecraft:red_concrete_powder"
red_glazed_terracotta = "minecraft:red_glazed_terracotta"
red_mushroom = "minecraft:red_mushroom"
red_mushroom_block = "minecraft:red_mushroom_block"
red_nether_brick_slab = "minecraft:red_nether_brick_slab"
red_nether_brick_stairs = "minecraft:red_nether_brick_stairs"
red_nether_brick_wall = "minecraft:red_nether_brick_wall"
red_nether_bricks = "minecraft:red_nether_bricks"
red_sand = "minecraft:red_sand"
red_sandstone = "minecraft:red_sandstone"
red_sandstone_slab = "minecraft:red_sandstone_slab"
red_sandstone_stairs = "minecraft:red_sandstone_stairs"
red_sandstone_wall = "minecraft:red_sandstone_wall"
red_shulker_box = "minecraft:red_shulker_box"
red_stained_glass = "minecraft:red_stained_glass"
red_stained_glass_pane = "minecraft:red_stained_glass_pane"
red_terracotta = "minecraft:red_terracotta"
red_tulip = "minecraft:red_tulip"
red_wall_banner = "minecraft:red_wall_banner"
red_wool = "minecraft:red_wool"
redstone_block = "minecraft:redstone_block"
redstone_lamp = "minecraft:redstone_lamp"
redstone_ore = "minecraft:redstone_ore"
redstone_torch = "minecraft:redstone_torch"
redstone_wall_torch = "minecraft:redstone_wall_torch"
redstone_wire = "minecraft:redstone_wire"
repeater = "minecraft:repeater"
repeating_command_block = "minecraft:repeating_command_block"
respawn_anchor = "minecraft:respawn_anchor"
rooted_dirt = "minecraft:rooted_dirt"
rose_bush = "minecraft:rose_bush"
sand = "minecraft:sand"
sandstone = "minecraft:sandstone"
sandstone_slab = "minecraft:sandstone_slab"
sandstone_stairs = "minecraft:sandstone_stairs"
sandstone_wall = "minecraft:sandstone_wall"
scaffolding = "minecraft:scaffolding"
sculk_sensor = "minecraft:sculk_sensor"
sea_lantern = "minecraft:sea_lantern"
sea_pickle = "minecraft:sea_pickle"
seagrass = "minecraft:seagrass"
shroomlight = "minecraft:shroomlight"
shulker_box = "minecraft:shulker_box"
skeleton_skull = "minecraft:skeleton_skull"
skeleton_wall_skull = "minecraft:skeleton_wall_skull"
slime_block = "minecraft:slime_block"
small_amethyst_bud = "minecraft:small_amethyst_bud"
small_dripleaf = "minecraft:small_dripleaf"
smithing_table = "minecraft:smithing_table"
smoker = "minecraft:smoker"
smooth_basalt = "minecraft:smooth_basalt"
smooth_quartz = "minecraft:smooth_quartz"
smooth_quartz_slab = "minecraft:smooth_quartz_slab"
smooth_quartz_stairs = "minecraft:smooth_quartz_stairs"
smooth_red_sandstone = "minecraft:smooth_red_sandstone"
smooth_red_sandstone_slab = "minecraft:smooth_red_sandstone_slab"
smooth_red_sandstone_stairs = "minecraft:smooth_red_sandstone_stairs"
smooth_sandstone = "minecraft:smooth_sandstone"
smooth_sandstone_slab = "minecraft:smooth_sandstone_slab"
smooth_sandstone_stairs = "minecraft:smooth_sandstone_stairs"
smooth_stone = "minecraft:smooth_stone"
smooth_stone_slab = "minecraft:smooth_stone_slab"
snow = "minecraft:snow"
snow_block = "minecraft:snow_block"
soul_campfire = "minecraft:soul_campfire"
soul_fire = "minecraft:soul_fire"
soul_lantern = "minecraft:soul_lantern"
soul_sand = "minecraft:soul_sand"
soul_soil = "minecraft:soul_soil"
soul_torch = "minecraft:soul_torch"
soul_wall_torch = "minecraft:soul_wall_torch"
spawner = "minecraft:spawner"
sponge = "minecraft:sponge"
spore_blossom = "minecraft:spore_blossom"
spruce_button = "minecraft:spruce_button"
spruce_door = "minecraft:spruce_door"
spruce_fence = "minecraft:spruce_fence"
spruce_fence_gate = "minecraft:spruce_fence_gate"
spruce_leaves = "minecraft:spruce_leaves"
spruce_log = "minecraft:spruce_log"
spruce_planks = "minecraft:spruce_planks"
spruce_pressure_plate = "minecraft:spruce_pressure_plate"
spruce_sapling = "minecraft:spruce_sapling"
spruce_sign = "minecraft:spruce_sign"
spruce_slab = "minecraft:spruce_slab"
spruce_stairs = "minecraft:spruce_stairs"
spruce_trapdoor = "minecraft:spruce_trapdoor"
spruce_wall_sign = "minecraft:spruce_wall_sign"
spruce_wood = "minecraft:spruce_wood"
sticky_piston = "minecraft:sticky_piston"
stone = "minecraft:stone"
stone_brick_slab = "minecraft:stone_brick_slab"
stone_brick_stairs = "minecraft:stone_brick_stairs"
stone_brick_wall = "minecraft:stone_brick_wall"
stone_bricks = "minecraft:stone_bricks"
stone_button = "minecraft:stone_button"
stone_pressure_plate = "minecraft:stone_pressure_plate"
stone_slab = "minecraft:stone_slab"
stone_stairs = "minecraft:stone_stairs"
stonecutter = "minecraft:stonecutter"
stripped_acacia_log = "minecraft:stripped_acacia_log"
stripped_acacia_wood = "minecraft:stripped_acacia_wood"
stripped_birch_log = "minecraft:stripped_birch_log"
stripped_birch_wood = "minecraft:stripped_birch_wood"
stripped_crimson_hyphae = "minecraft:stripped_crimson_hyphae"
stripped_crimson_stem = "minecraft:stripped_crimson_stem"
stripped_dark_oak_log = "minecraft:stripped_dark_oak_log"
stripped_dark_oak_wood = "minecraft:stripped_dark_oak_wood"
stripped_jungle_log = "minecraft:stripped_jungle_log"
stripped_jungle_wood = "minecraft:stripped_jungle_wood"
stripped_oak_log = "minecraft:stripped_oak_log"
stripped_oak_wood = "minecraft:stripped_oak_wood"
stripped_spruce_log = "minecraft:stripped_spruce_log"
stripped_spruce_wood = "minecraft:stripped_spruce_wood"
stripped_warped_hyphae = "minecraft:stripped_warped_hyphae"
stripped_warped_stem = "minecraft:stripped_warped_stem"
structure_block = "minecraft:structure_block"
structure_void = "minecraft:structure_void"
sugar_cane = "minecraft:sugar_cane"
sunflower = "minecraft:sunflower"
sweet_berry_bush = "minecraft:sweet_berry_bush"
tall_grass = "minecraft:tall_grass"
tall_seagrass = "minecraft:tall_seagrass"
target = "minecraft:target"
terracotta = "minecraft:terracotta"
tinted_glass = "minecraft:tinted_glass"
tnt = "minecraft:tnt"
torch = "minecraft:torch"
trapped_chest = "minecraft:trapped_chest"
tripwire = "minecraft:tripwire"
tripwire_hook = "minecraft:tripwire_hook"
tube_coral = "minecraft:tube_coral"
tube_coral_block = "minecraft:tube_coral_block"
tube_coral_fan = "minecraft:tube_coral_fan"
tube_coral_wall_fan = "minecraft:tube_coral_wall_fan"
tuff = "minecraft:tuff"
turtle_egg = "minecraft:turtle_egg"
twisting_vines = "minecraft:twisting_vines"
twisting_vines_plant = "minecraft:twisting_vines_plant"
vine = "minecraft:vine"
void_air = "minecraft:void_air"
wall_torch = "minecraft:wall_torch"
warped_button = "minecraft:warped_button"
warped_door = "minecraft:warped_door"
warped_fence = "minecraft:warped_fence"
warped_fence_gate = "minecraft:warped_fence_gate"
warped_fungus = "minecraft:warped_fungus"
warped_hyphae = "minecraft:warped_hyphae"
warped_nylium = "minecraft:warped_nylium"
warped_planks = "minecraft:warped_planks"
warped_pressure_plate = "minecraft:warped_pressure_plate"
warped_roots = "minecraft:warped_roots"
warped_sign = "minecraft:warped_sign"
warped_slab = "minecraft:warped_slab"
warped_stairs = "minecraft:warped_stairs"
warped_stem = "minecraft:warped_stem"
warped_trapdoor = "minecraft:warped_trapdoor"
warped_wall_sign = "minecraft:warped_wall_sign"
warped_wart_block = "minecraft:warped_wart_block"
water = "minecraft:water"
water_cauldron = "minecraft:water_cauldron"
waxed_copper_block = "minecraft:waxed_copper_block"
waxed_cut_copper = "minecraft:waxed_cut_copper"
waxed_cut_copper_slab = "minecraft:waxed_cut_copper_slab"
waxed_cut_copper_stairs = "minecraft:waxed_cut_copper_stairs"
waxed_exposed_copper = "minecraft:waxed_exposed_copper"
waxed_exposed_cut_copper = "minecraft:waxed_exposed_cut_copper"
waxed_exposed_cut_copper_slab = "minecraft:waxed_exposed_cut_copper_slab"
waxed_exposed_cut_copper_stairs = "minecraft:waxed_exposed_cut_copper_stairs"
waxed_oxidized_copper = "minecraft:waxed_oxidized_copper"
waxed_oxidized_cut_copper = "minecraft:waxed_oxidized_cut_copper"
waxed_oxidized_cut_copper_slab = "minecraft:waxed_oxidized_cut_copper_slab"
waxed_oxidized_cut_copper_stairs = "minecraft:waxed_oxidized_cut_copper_stairs"
waxed_weathered_copper = "minecraft:waxed_weathered_copper"
waxed_weathered_cut_copper = "minecraft:waxed_weathered_cut_copper"
waxed_weathered_cut_copper_slab = "minecraft:waxed_weathered_cut_copper_slab"
waxed_weathered_cut_copper_stairs = "minecraft:waxed_weathered_cut_copper_stairs"
weathered_copper = "minecraft:weathered_copper"
weathered_cut_copper = "minecraft:weathered_cut_copper"
weathered_cut_copper_slab = "minecraft:weathered_cut_copper_slab"
weathered_cut_copper_stairs = "minecraft:weathered_cut_copper_stairs"
weeping_vines = "minecraft:weeping_vines"
weeping_vines_plant = "minecraft:weeping_vines_plant"
wet_sponge = "minecraft:wet_sponge"
wheat = "minecraft:wheat"
white_banner = "minecraft:white_banner"
white_bed = "minecraft:white_bed"
white_candle = "minecraft:white_candle"
white_candle_cake = "minecraft:white_candle_cake"
white_carpet = "minecraft:white_carpet"
white_concrete = "minecraft:white_concrete"
white_concrete_powder = "minecraft:white_concrete_powder"
white_glazed_terracotta = "minecraft:white_glazed_terracotta"
white_shulker_box = "minecraft:white_shulker_box"
white_stained_glass = "minecraft:white_stained_glass"
white_stained_glass_pane = "minecraft:white_stained_glass_pane"
white_terracotta = "minecraft:white_terracotta"
white_tulip = "minecraft:white_tulip"
white_wall_banner = "minecraft:white_wall_banner"
white_wool = "minecraft:white_wool"
wither_rose = "minecraft:wither_rose"
wither_skeleton_skull = "minecraft:wither_skeleton_skull"
wither_skeleton_wall_skull = "minecraft:wither_skeleton_wall_skull"
yellow_banner = "minecraft:yellow_banner"
yellow_bed = "minecraft:yellow_bed"
yellow_candle = "minecraft:yellow_candle"
yellow_candle_cake = "minecraft:yellow_candle_cake"
yellow_carpet = "minecraft:yellow_carpet"
yellow_concrete = "minecraft:yellow_concrete"
yellow_concrete_powder = "minecraft:yellow_concrete_powder"
yellow_glazed_terracotta = "minecraft:yellow_glazed_terracotta"
yellow_shulker_box = "minecraft:yellow_shulker_box"
yellow_stained_glass = "minecraft:yellow_stained_glass"
yellow_stained_glass_pane = "minecraft:yellow_stained_glass_pane"
yellow_terracotta = "minecraft:yellow_terracotta"
yellow_wall_banner = "minecraft:yellow_wall_banner"
yellow_wool = "minecraft:yellow_wool"
zombie_head = "minecraft:zombie_head"
zombie_wall_head = "minecraft:zombie_wall_head"
|
150840
|
try:
from setuptools import setup
except ImportError:
raise ImportError("setuptools module required, please go to https://pypi.python.org/pypi/setuptools and follow the instructions for installing setuptools")
setup(
version='0.5.4',
url='https://github.com/datamade/probablepeople',
description='Parse romanized names & companies using advanced NLP methods',
name='probablepeople',
packages=['probablepeople'],
package_data={'probablepeople' : ['generic_learned_settings.crfsuite',
'person_learned_settings.crfsuite',
'company_learned_settings.crfsuite']},
license='The MIT License: http://www.opensource.org/licenses/mit-license.php',
install_requires=[
'python-crfsuite>=0.8',
'probableparsing',
'future>=0.14',
'doublemetaphone'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Information Analysis'],
long_description="""
probablepeople is a python library for parsing unstructured romanized name or company strings into components, using conditional random fields.
From the python interpreter:
>>> import probablepeople
>>> probablepeople.parse('Mr George "Gob" Bluth II')
[('Mr', 'PrefixMarital'),
('George', 'GivenName'),
('"Gob"', 'Nickname'),
('Bluth', 'Surname'),
('II', 'SuffixGenerational')]
>>> probablepeople.parse('Sitwell Housing Inc')
[('Sitwell', 'CorporationName'),
('Housing', 'CorporationName'),
('Inc', 'CorporationLegalType')]
"""
)
|
150841
|
class PageEffectiveImageMixin(object):
def get_effective_image(self):
if self.image:
return self.image
page = self.get_main_language_page()
if page.specific.image:
return page.specific.get_effective_image()
return ''
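# Usage sketch (hypothetical Wagtail-style page model; the field and base class
# names below are assumptions, not part of this module):
#
#     class ArticlePage(PageEffectiveImageMixin, Page):
#         image = models.ForeignKey('wagtailimages.Image', null=True, blank=True,
#                                   on_delete=models.SET_NULL, related_name='+')
#
# article.get_effective_image() returns the page's own image when set; otherwise
# it falls back to the main-language page's image, or '' if none exists.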
|
150851
|
import requests
import json
from snapshottest.file import FileSnapshot
import time
from enum import Enum
#API_BASE_URL = "https://api.consensys.space:8080"
API_BASE_URL = "http://127.0.0.1:8080"
TEST_OBJECT_IDENTIFIER = {'norad_number': 40538}
class RequestMethod(Enum):
GET = 1
POST = 2
def api_request(endpoint, expect_json_response, post_data, get_params):
"""
Issues a GET or a POST to an API endpoint, decoding the result and prettifying it if JSON.
Also prints timing info.
endpoint - the endpoint to append to API_BASE_URL
expect_json_response - True for JSON, false for text
post_data - If Truthey, a POST is issues with post_data in the body
If Falsey, a GET is issued and post_data is ignored
"""
start = time.time()
if post_data:
r = requests.post(API_BASE_URL + endpoint, json=post_data)
else:
r = requests.get(API_BASE_URL + endpoint, params=get_params, headers={'Accept': 'Cache-Control'})
end = time.time()
print(f"{'POST' if post_data else 'GET'} {endpoint} took {end - start} seconds")
    if expect_json_response:
output = json.loads(r.content)
formatted = json.dumps(output, sort_keys=True, indent=4, separators=(',', ': '))
else:
formatted = r.content.decode('utf-8')
# print(formatted)
# Munge out date from headers for reproducible tests
headers = r.headers
headers['Date'] = 'XXX'
return f"STATUS: {r.status_code}\n\nHEADERS:{headers}\n\nCONTENT:{formatted}"
def api_post(endpoint, data, expect_json_response=True):
"""
POSTs a resource to an API endpoint, decoding the response and prettifying it if JSON.
Also prints timing info.
"""
    return api_request(endpoint, expect_json_response, data, {})
def api_get(endpoint, is_json, params):
"""
GETs a resource from an API endpoint, decoding it and prettifying it if JSON.
Also prints timing info.
"""
return api_request(endpoint, is_json, False, params)
def api_get_json(endpoint):
"""
GETs JSON from an API endpoint, and prettifies it ready for comparison against a snapshot prettified snapshot
of a previous test run.
Also outputs timing info (to see this, run `pytest -s`)
"""
return api_get(endpoint, True,{})
def api_get_utf8(endpoint):
"""
GETs JSON from an API endpoint, and prettifies it ready for comparison against a snapshot prettified snapshot
of a previous test run.
Also outputs timing info (to see this, run `pytest -s`)
"""
return api_get(endpoint, False, {})
# START GET request section
def test_catalog_priorities(snapshot):
api_response = api_get_json('/catalog/priorities')
snapshot.assert_match(api_response)
def test_catalog_undisclosed(snapshot):
api_response = api_get_json('/catalog/undisclosed')
snapshot.assert_match(api_response)
def test_catalog_debris(snapshot):
api_response = api_get_json('/catalog/debris')
snapshot.assert_match(api_response)
def test_catalog_latest(snapshot):
api_response = api_get_json('/catalog/latest')
snapshot.assert_match(api_response)
def test_catalog_all(snapshot):
api_response = api_get_json('/catalog/all')
snapshot.assert_match(api_response)
def test_tle_trusat_all(snapshot):
api_response = api_get_utf8('/tle/trusat_all.txt')
snapshot.assert_match(api_response)
def test_tle_trusat_priorities(snapshot):
api_response = api_get_utf8('/tle/trusat_priorities.txt')
snapshot.assert_match(api_response)
def test_tle_trusat_high_confidence(snapshot):
api_response = api_get_utf8('/tle/trusat_high_confidence.txt')
snapshot.assert_match(api_response)
def test_astriagraph(snapshot):
api_response = api_get_utf8('/astriagraph')
snapshot.assert_match(api_response)
# END GET request section
# START GET-with-params request section (these tests pass query parameters, not a POST body)
def test_object_influence(snapshot):
api_response = api_get('/object/influence', True, params=TEST_OBJECT_IDENTIFIER)
snapshot.assert_match(api_response)
def test_object_info(snapshot):
api_response = api_get('/object/info', True, params=TEST_OBJECT_IDENTIFIER)
snapshot.assert_match(api_response)
def test_tle_object(snapshot):
api_response = api_get('/tle/object', False, params=TEST_OBJECT_IDENTIFIER)
snapshot.assert_match(api_response)
# END GET-with-params request section
|
150865
|
from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,
MaxIoUAssigner)
from .builder import build_assigner, build_bbox_coder, build_sampler
from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, PseudoBBoxCoder,
TBLRBBoxCoder)
from .iou_calculators import BboxOverlaps2D, bbox_overlaps
from .samplers import (BaseSampler, CombinedSampler,
InstanceBalancedPosSampler, IoUBalancedNegSampler,
PseudoSampler, RandomSampler, SamplingResult)
from .transforms import (bbox2distance, bbox2result, bbox2roi, bbox_flip,
bbox_mapping, bbox_mapping_back, distance2bbox,
roi2bbox)
from .transforms_obb import (poly2obb, rectpoly2obb, poly2hbb, obb2poly, obb2hbb,
hbb2poly, hbb2obb, bbox2type, hbb_flip, obb_flip, poly_flip,
hbb_warp, obb_warp, poly_warp, hbb_mapping, obb_mapping,
poly_mapping, hbb_mapping_back, obb_mapping_back,
poly_mapping_back, arb_mapping, arb_mapping_back,
get_bbox_type, get_bbox_dim, get_bbox_areas, choice_by_type,
arb2result, arb2roi, distance2obb, regular_theta, regular_obb,
mintheta_obb)
from .iou_calculators import OBBOverlaps, PolyOverlaps
from .samplers import (OBBSamplingResult, OBBBaseSampler, OBBRandomSampler,
OBBOHEMSampler)
from .coder import OBB2OBBDeltaXYWHTCoder, HBB2OBBDeltaXYWHTCoder
__all__ = [
'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner',
'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler',
'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
'SamplingResult', 'build_assigner', 'build_sampler', 'bbox_flip',
'bbox_mapping', 'bbox_mapping_back', 'bbox2roi', 'roi2bbox', 'bbox2result',
'distance2bbox', 'bbox2distance', 'build_bbox_coder', 'BaseBBoxCoder',
'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder',
'CenterRegionAssigner',
'poly2obb', 'rectpoly2obb', 'poly2hbb', 'obb2poly', 'obb2hbb', 'hbb2poly',
'hbb2obb', 'bbox2type', 'hbb_flip', 'obb_flip', 'poly_flip', 'hbb_warp', 'obb_warp',
'poly_warp', 'hbb_mapping', 'obb_mapping', 'poly_mapping', 'hbb_mapping_back',
'obb_mapping_back', 'poly_mapping_back', 'get_bbox_type', 'get_bbox_dim', 'get_bbox_areas',
'choice_by_type', 'arb2roi', 'arb2result', 'distance2obb', 'arb_mapping', 'arb_mapping_back',
'OBBOverlaps', 'PolyOverlaps', 'OBBSamplingResult', 'OBBBaseSampler', 'OBBRandomSampler',
'OBBOHEMSampler', 'OBB2OBBDeltaXYWHTCoder', 'HBB2OBBDeltaXYWHTCoder', 'regular_theta',
'regular_obb', 'mintheta_obb'
]
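# Usage sketch: converting oriented boxes to polygons. The (cx, cy, w, h, theta)
# column layout below is the usual OBBDetection convention and is assumed here.
#   import torch
#   obbs = torch.tensor([[50., 50., 20., 10., 0.5]])  # one box: cx, cy, w, h, theta
#   polys = obb2poly(obbs)                            # -> (1, 8) tensor of corner coords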
|
150884
|
from inclusive_django_range_fields.drf.fields import InclusiveIntegerRangeField, InclusiveDateRangeField
|
150888
|
import json
from rgd import datastore
from rgd.management.commands._data_helper import SynchronousTasksCommand
from . import _data_helper as helper
SUCCESS_MSG = 'Finished loading all landsat data.'
def _get_landsat_urls(count):
path = datastore.datastore.fetch('landsat_texas.json')
with open(path, 'r') as f:
scenes = json.loads(f.read())
if count:
urls = {}
for k in list(scenes.keys())[0:count]:
urls[k] = scenes[k]
else:
urls = scenes
rasters = []
for name, rf in urls.items():
rasters.append(
helper.make_raster_dict(
[(None, rf['R']), (None, rf['G']), (None, rf['B'])],
date=rf['acquisition'],
name=name,
cloud_cover=rf['cloud_cover'],
instrumentation='OLI_TIRS',
)
)
return rasters
class Command(SynchronousTasksCommand):
help = 'Populate database with demo landsat data from S3.'
def add_arguments(self, parser):
        parser.add_argument(
            '-c', '--count', type=int, help='Indicates the number of scenes to fetch.', default=0
        )
        parser.add_argument(
            '-g', '--get_count', action='store_true', help='Use to fetch the number of available rasters.'
        )
def handle(self, *args, **options):
self.set_synchronous()
if options.get('get_count', False):
n = len(_get_landsat_urls(0))
self.stdout.write(self.style.SUCCESS(f'Total of {n} rasters.'))
return
count = options.get('count', 0)
# Run the command
helper.load_raster_files(_get_landsat_urls(count))
self.stdout.write(self.style.SUCCESS(SUCCESS_MSG))
self.reset_celery()
|
150896
|
import math
def stripSuffix(name, delineator='_'):
    '''
    Return the portion of name minus the last element separated by the delineator.
    Useful for determining a name prefix from an existing object name.
    @param name: String name to strip the suffix from
    @type name: str
    @param delineator: String delineator to split the name with. Defaults to underscore.
    @type delineator: str
    '''
    # Check for Delineator in Name
    if not name.count(delineator): return name
    # Strip only the final delineated element (str.replace would remove every
    # occurrence of the suffix substring, not just the trailing one)
    return name.rsplit(delineator, 1)[0]
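# Illustrative values: stripSuffix('arm_joint_01') -> 'arm_joint';
# stripSuffix('arm.joint', delineator='.') -> 'arm'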
def stringIndex(index,padding=2):
'''
    Return the string equivalent for the specified integer index.
@param index: The index to get the string equivalent for
@type index: int
@param padding: The number of characters for the index string
@type padding: int
'''
# Convert to String
strInd = str(index)
# Prepend Padding
for i in range(padding-len(strInd)): strInd = '0'+strInd
# Return Result
return strInd
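# Illustrative values: stringIndex(7) -> '07'; stringIndex(7, padding=4) -> '0007'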
def alphaIndex(index, upper=True):
    '''
    Return the alpha string equivalent for the specified integer index.
    @param index: The index to get the alpha string equivalent for
    @type index: int
    @param upper: Return the result in upper case form
    @type upper: bool
    '''
    # Define Alpha List
    alpha = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
    # Build Alpha Index (bijective base-26: 0 -> 'a', 25 -> 'z', 26 -> 'aa', ...)
    alphaInd = alpha[index % 26]
    index = index // 26
    while index:
        # Subtract one before each carry so 'z' rolls over to 'aa' rather than 'ba'
        index -= 1
        alphaInd = alpha[index % 26] + alphaInd
        index = index // 26
    # Check Case
    if upper: alphaInd = alphaInd.upper()
    # Return Result
    return alphaInd
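# Illustrative values: alphaIndex(0) -> 'A', alphaIndex(25) -> 'Z',
# alphaIndex(26) -> 'AA', alphaIndex(2, upper=False) -> 'c'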
|
150915
|
import logging
from flask import flash
from flask import request
from flask import (url_for, redirect)
from flask.views import MethodView
from cert_viewer import helpers
from cert_viewer.forms import BitcoinForm, SimpleRegistrationForm
from cert_viewer.notifier import Notifier
from cert_viewer.views.__init__ import render
TEMPLATE = 'request.html'
class RequestView(MethodView):
def post(self):
recipient_form = SimpleRegistrationForm(request.form)
if recipient_form.validate():
user_data = recipient_form.to_user_data()
from cert_viewer import introduction_store_bridge
introduction_store_bridge.insert_introduction(user_data)
succeeded = True
if not succeeded:
# error_message = str(r.content)
# logging.error('Problem processing introduction, %s', error_message)
return 'Problem processing introduction', 500
sent = Notifier.factory().notify(
recipient_form.email.data,
recipient_form.first_name.data,
recipient_form.last_name.data)
logging.debug('finished requesting certificate')
hidden_email = helpers.obfuscate_email_display(recipient_form.email.data)
if sent:
flash('We just sent a confirmation email to %s.' % hidden_email)
else:
flash('We received your request and will respond to %s.' % hidden_email)
return redirect(url_for('index'))
else:
bitcoin_form = BitcoinForm(request.form)
return render(TEMPLATE, form=recipient_form, registered=False, bitcoin=bitcoin_form)
def get(self):
"""Request an introduction. Forwarding to intro endpoint for backcompat"""
recipient_form = SimpleRegistrationForm(request.form)
bitcoin_form = BitcoinForm(request.form)
return render(TEMPLATE, form=recipient_form, registered=False, bitcoin=bitcoin_form)
|
150923
|
import pytest
from pycfmodel.model.cf_model import CFModel
from cfripper.model.enums import RuleGranularity, RuleMode, RuleRisk
from cfripper.model.result import Failure
from cfripper.rules.iam_roles import IAMRolesOverprivilegedRule
from tests.utils import compare_lists_of_failures, get_cfmodel_from
@pytest.fixture()
def valid_role_inline_policy() -> CFModel:
return get_cfmodel_from("rules/IAMRolesOverprivilegedRule/valid_role_inline_policy.json").resolve()
@pytest.fixture()
def invalid_role_inline_policy() -> CFModel:
return get_cfmodel_from("rules/IAMRolesOverprivilegedRule/invalid_role_inline_policy.json").resolve()
@pytest.fixture()
def invalid_role_inline_policy_resource_as_array() -> CFModel:
return get_cfmodel_from(
"rules/IAMRolesOverprivilegedRule/invalid_role_inline_policy_resource_as_array.json"
).resolve()
@pytest.fixture()
def valid_role_managed_policy() -> CFModel:
return get_cfmodel_from("rules/IAMRolesOverprivilegedRule/valid_role_managed_policy.json").resolve()
@pytest.fixture()
def invalid_role_managed_policy() -> CFModel:
return get_cfmodel_from("rules/IAMRolesOverprivilegedRule/invalid_role_managed_policy.json").resolve()
@pytest.fixture()
def invalid_role_inline_policy_fn_if() -> CFModel:
return get_cfmodel_from("rules/IAMRolesOverprivilegedRule/invalid_role_inline_policy_fn_if.json").resolve()
def test_with_valid_role_inline_policy(valid_role_inline_policy):
rule = IAMRolesOverprivilegedRule(None)
result = rule.invoke(valid_role_inline_policy)
assert result.valid
assert compare_lists_of_failures(result.failures, [])
def test_with_invalid_role_inline_policy(invalid_role_inline_policy):
rule = IAMRolesOverprivilegedRule(None)
result = rule.invoke(invalid_role_inline_policy)
assert not result.valid
assert compare_lists_of_failures(
result.failures,
[
Failure(
granularity=RuleGranularity.RESOURCE,
reason="Role 'RootRole' contains an insecure permission 'ec2:DeleteInternetGateway' in policy 'not_so_chill_policy'",
risk_value=RuleRisk.MEDIUM,
rule="IAMRolesOverprivilegedRule",
rule_mode=RuleMode.BLOCKING,
actions=None,
resource_ids={"RootRole"},
)
],
)
def test_with_invalid_role_inline_policy_resource_as_array(invalid_role_inline_policy_resource_as_array):
rule = IAMRolesOverprivilegedRule(None)
result = rule.invoke(invalid_role_inline_policy_resource_as_array)
assert not result.valid
assert compare_lists_of_failures(
result.failures,
[
Failure(
granularity=RuleGranularity.RESOURCE,
reason="Role 'RootRole' contains an insecure permission 'ec2:DeleteInternetGateway' in policy 'not_so_chill_policy'",
risk_value=RuleRisk.MEDIUM,
rule="IAMRolesOverprivilegedRule",
rule_mode=RuleMode.BLOCKING,
actions=None,
resource_ids={"RootRole"},
)
],
)
def test_with_valid_role_managed_policy(valid_role_managed_policy):
rule = IAMRolesOverprivilegedRule(None)
result = rule.invoke(valid_role_managed_policy)
assert result.valid
assert compare_lists_of_failures(result.failures, [])
def test_with_invalid_role_managed_policy(invalid_role_managed_policy):
rule = IAMRolesOverprivilegedRule(None)
result = rule.invoke(invalid_role_managed_policy)
assert not result.valid
assert compare_lists_of_failures(
result.failures,
[
Failure(
granularity=RuleGranularity.RESOURCE,
reason="Role RootRole has forbidden Managed Policy arn:aws:iam::aws:policy/AdministratorAccess",
risk_value=RuleRisk.MEDIUM,
rule="IAMRolesOverprivilegedRule",
rule_mode=RuleMode.BLOCKING,
actions=None,
resource_ids={"RootRole"},
)
],
)
def test_with_invalid_role_inline_policy_fn_if(invalid_role_inline_policy_fn_if):
rule = IAMRolesOverprivilegedRule(None)
result = rule.invoke(invalid_role_inline_policy_fn_if)
assert not result.valid
assert compare_lists_of_failures(
result.failures,
[
Failure(
granularity=RuleGranularity.RESOURCE,
reason="Role 'RootRole' contains an insecure permission 'ec2:DeleteVpc' in policy 'ProdCredentialStoreAccessPolicy'",
risk_value=RuleRisk.MEDIUM,
rule="IAMRolesOverprivilegedRule",
rule_mode=RuleMode.BLOCKING,
actions=None,
resource_ids={"RootRole"},
)
],
)
def test_rule_supports_filter_config(invalid_role_managed_policy, default_allow_all_config):
rule = IAMRolesOverprivilegedRule(default_allow_all_config)
result = rule.invoke(invalid_role_managed_policy)
assert result.valid
assert compare_lists_of_failures(result.failures, [])
|
150927
|
# Assumes the parent class Electrodomestico is defined elsewhere in the project.
class Television(Electrodomestico):
    def __init__(self, modelo, marca, color, numSerie, voltaje, pulgadas):
        super().__init__(modelo, marca, color, numSerie, voltaje)
        self.pulgadas = pulgadas
    # Getter and setter methods for pulgadas (screen size in inches)
    def get_pulgadas(self):
        return self.pulgadas
    def set_pulgadas(self, pulgadas):
        self.pulgadas = pulgadas
    # Method to connect to the internet; it only prints a message
    def conectarse_internet(self):
        print("Estas conectado a internet")
|
150940
|
import eyekit
sentence = "The quick brown fox [jump]{stem_1}[ed]{suffix_1} over the lazy dog."
txt = eyekit.TextBlock(
sentence, position=(100, 500), font_face="Times New Roman", font_size=36
)
seq = eyekit.FixationSequence(
[
[106, 490, 0, 100],
[190, 486, 100, 200],
[230, 505, 200, 300],
[298, 490, 300, 400],
[361, 497, 400, 500],
[430, 489, 500, 600],
[450, 505, 600, 700],
[492, 491, 700, 800],
[562, 505, 800, 900],
[637, 493, 900, 1000],
[712, 497, 1000, 1100],
[763, 487, 1100, 1200],
]
)
def test_initialization():
assert txt.position == (100, 500)
assert txt.font_face == "Times New Roman"
assert txt.font_size == 36
assert txt.line_height == 36
assert txt.align == "left"
assert txt.anchor == "left"
assert txt.alphabet is None
assert txt.autopad == True
assert txt.n_rows == 1
assert txt.n_cols == 45
assert len(txt) == 45
def test_zone_extraction():
assert len(list(txt.zones())) == 2
for zone in txt.zones():
assert zone.id in ["stem_1", "suffix_1"]
assert zone.text in ["jump", "ed"]
assert zone.baseline == 500
assert zone.height == 36
def test_zone_find():
assert txt["stem_1"].text == "jump"
assert txt["suffix_1"].text == "ed"
txt[0:4:19].id = "test_id"
assert txt["test_id"].text == "quick brown fox"
assert txt[0:4:19].id == "test_id"
def test_word_extraction():
assert len(list(txt.words())) == 9
for word in txt.words():
assert word.text in [
"The",
"quick",
"brown",
"fox",
"jumped",
"over",
"the",
"lazy",
"dog",
]
assert word.baseline == 500
assert word.height == 36
def test_arbitrary_extraction():
assert txt[0:0:3].text == "The"
assert txt["0:0:3"].text == "The"
assert txt[(0, 0, 3)].text == "The"
assert txt[0:41:45].text == "dog."
assert txt["0:41:45"].text == "dog."
assert txt[(0, 41, 45)].text == "dog."
assert txt[0:4:19].text == "quick brown fox"
assert txt["0:4:19"].text == "quick brown fox"
assert txt[(0, 4, 19)].text == "quick brown fox"
assert txt[0::3].text == "The"
assert txt[0:36:].text == "lazy dog."
def test_IA_location():
assert txt[0:0:3].location == (0, 0, 3)
assert txt[0:5:40].location == (0, 5, 40)
def test_IA_relative_positions():
assert txt["stem_1"].is_right_of(seq[0]) == True
assert txt["stem_1"].is_left_of(seq[-1]) == True
assert txt["stem_1"].is_after(seq[0]) == True
assert txt["stem_1"].is_before(seq[-1]) == True
def test_which_methods():
for fixation, answer in zip(
seq,
[
"The",
"quick",
"quick",
"brown",
"fox",
"jumped",
"jumped",
"jumped",
"over",
"the",
"lazy",
"dog",
],
):
assert txt.which_word(fixation).text == answer
for fixation, answer in zip(
seq, ["T", "u", "k", "o", "f", "u", "m", "e", "v", "e", "y", "g"]
):
assert txt.which_character(fixation).text == answer
assert txt.which_line(fixation).id == "0:0:45"
def test_iter_pairs():
interest_area = txt["stem_1"]
for curr_fixation, next_fixation in seq.iter_pairs():
if curr_fixation in interest_area and next_fixation not in interest_area:
assert next_fixation.x == 492
def test_serialize():
data = txt.serialize()
assert data["text"] == [sentence]
assert data["position"] == (100, 500)
assert data["font_face"] == "Times New Roman"
assert data["font_size"] == 36
assert data["line_height"] == 36
assert data["align"] == "left"
assert data["anchor"] == "left"
assert data["alphabet"] is None
assert data["autopad"] == True
def test_complex_font_selection():
txt = eyekit.TextBlock(
sentence,
position=(100, 500),
font_face="Times New Roman bold italic",
font_size=36,
)
assert txt._font.family == "Times New Roman"
assert txt._font.slant == "italic"
assert txt._font.weight == "bold"
assert txt._font.size == 36
def test_align_and_anchor():
positions = [
("left", "left", 1048),
("left", "center", 938),
("left", "right", 828),
("center", "left", 1060),
("center", "center", 950),
("center", "right", 840),
("right", "left", 1073),
("right", "center", 963),
("right", "right", 853),
]
for align, anchor, target_x in positions:
txt = eyekit.TextBlock(
text=["The quick brown", "fox [jumps]{target} over", "the lazy dog"],
position=(960, 540),
font_face="Arial",
font_size=30,
align=align,
anchor=anchor,
)
assert txt.align == align
assert txt.anchor == anchor
assert int(txt["target"].x) == target_x
def test_right_to_left():
txt = eyekit.TextBlock(
text=["ืื ืกืงืจื ืฉื ืื ืืื ืื,", "ืื ืืคืชืข ืคืืฉ ืืืืจื", "ื ืืืื ืฉืฆืฆื ืื."],
position=(960, 540),
font_face="Raanana bold",
font_size=100,
right_to_left=True,
anchor="center",
)
for word, (logical_word, display_word, x, y) in zip(
txt.words(),
[
("ืื", "ืื", 1334, 502),
("ืกืงืจื", "ืืจืงืก", 1176, 502),
("ืฉื", "ืืฉ", 997, 502),
("ืื", "ืื", 875, 502),
("ืืื", "ืืื", 745, 502),
("ืื", "ืื", 611, 502),
("ืื", "ืื", 1321, 602),
("ืืคืชืข", "ืขืชืคื", 1143, 602),
("ืคืืฉ", "ืฉืืค", 945, 602),
("ืืืืจื", "ืืจืืื", 732, 602),
("ื ืืืื", "ืืืืื ", 1254, 702),
("ืฉืฆืฆื", "ืืฆืฆืฉ", 1002, 702),
("ืื", "ืื", 824, 702),
],
):
assert word.text == logical_word
assert word.display_text == display_word
assert int(word.x) == x
assert int(word.y) == y
|
150971
|
import pytest
pytestmark = pytest.mark.page('non_control_elements.html')
class TestLis(object):
def test_with_selectors_returns_the_matching_elements(self, browser):
assert list(browser.lis(class_name='nonlink')) == \
[browser.li(class_name='nonlink')]
def test_returns_the_correct_number_of_lis(self, browser):
assert len(browser.lis()) == 18
def test_get_item_returns_the_li_at_the_given_index(self, browser):
assert browser.lis()[4].id == 'non_link_1'
def test_iterates_through_lis_correctly(self, browser):
count = 0
for index, li in enumerate(browser.lis()):
assert li.id == browser.li(index=index).id
assert li.value == browser.li(index=index).value
count += 1
assert count > 0
|
151018
|
import onnx
import torch
from onnx_pytorch.op_code_generators import OpCodeGenerator
class UpsampleOpCodeGenerator(OpCodeGenerator):
def __init__(self,
onnx_ver=onnx.defs.onnx_opset_version(),
torch_ver=torch.__version__):
super(UpsampleOpCodeGenerator, self).__init__(onnx_ver, torch_ver)
def gen(self, node, value_infos, initializers):
attr_value_dict = self.get_attr_value_dict(node)
inputs_str, outputs_str = self.gen_input_output_string(
node, initializers, self.rename_helper, self.tensor_inplace)
if node.input[1] in initializers:
scales = tuple(
onnx.numpy_helper.to_array(initializers[node.input[1]])[2:])
else:
scales = f"list({self.rename_helper.tensor_name_mapping.get(node.input[1], node.input[1])})[2:]"
align_corners = None
mode = attr_value_dict['mode'].decode()
d = len(value_infos[node.input[0]].type.tensor_type.shape.dim) - 2
    assert d < 4, "Only temporal (1D), spatial (2D) and volumetric (3D) sampling are currently supported."
if mode == "linear":
modes = ["linear", "bilinear", "trilinear"]
mode = modes[d - 1]
params_str = self.gen_params_str(
scale_factor=scales,
mode=f"'{mode}'",
align_corners=align_corners,
recompute_scale_factor=scales is not None,
)
init_str, forward_str = [], []
forward_str.append(
f"{outputs_str[0]} = F.interpolate({inputs_str[0]}, **{{{params_str}}})"
)
return {"init": init_str, "forward": forward_str}
|
151045
|
from django.conf import settings
from django.core import management
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = "Helpful command to load all fixtures"
def handle(self, *args, **options):
if not settings.DEBUG:
raise CommandError("Command not meant for production")
management.call_command("loaddata", "users")
management.call_command("loaddata", "tasks")
management.call_command("loaddata", "avatars")
|
151085
|
import tkinter as tk
from time import sleep
from playsound import playsound
import config
import fasttick
from helpmessage import fasttick_help_message
import misc
from tickerwindow import TickerWindow
class GUIfasttick(TickerWindow):
def __init__(self, app):
super().__init__(app)
misc.delete_ancient_pickles('fasttick_history')
self.draw_labels()
self.draw_buttons()
self.draw_lists()
self.draw_timer()
self.timer_update()
def draw_labels(self):
self.labelName.grid(row=3, column=0, sticky='NSWE')
self.labelChange.config(text='Rate')
self.labelChange.grid(row=3, column=1, sticky='NSWE')
self.labelVol.grid(row=3, column=2, sticky='NSWE')
self.labelBuf.grid(row=3, rowspan=2, column=3, columnspan=2, sticky='NSWE')
def draw_buttons(self):
self.sortByName.grid(row=4, column=0, sticky='NSWE')
self.sortByChange.grid(row=4, column=1, sticky='NSWE')
self.sortByVol.grid(row=4, column=2, sticky='NSWE')
self.notifyBell.grid(row=4, column=3, sticky='NSWE')
self.help.grid(row=3, column=4, sticky='E')
def on_click_help(self):
helpWindow = tk.Toplevel()
helpWindow.title('Help')
frameBuf = tk.Frame(helpWindow, width=192, bg=config.MAIN_BG)
frameBuf.grid(row=0, rowspan=4, column=0, columnspan=3)
message = tk.Message(frameBuf, bg=config.MAIN_BG, fg=config.TEXT_COLOR,
width=192, text=fasttick_help_message)
message.grid(row=0, columnspan=3)
dismissButton = tk.Button(frameBuf, text='Dismiss', command=helpWindow.destroy)
dismissButton.grid(row=1, column=1)
def draw_lists(self):
self.yScroll.grid(row=5, column=3, sticky='NSWE')
self.listName.grid(row=5, column=0, sticky='NSWE')
self.listChange.grid(row=5, column=1, sticky='NSWE')
self.listVol.grid(row=5, column=2, sticky='NSWE')
def draw_timer(self):
self.timerLabel.grid(row=5, column=4, ipadx=8)
self.timerFrame.grid(row=5, column=4, columnspan=3)
self.timerDisp.grid(row=5, column=4)
self.timerValue = config.FASTTICK_RATE
def timer_update(self):
if self.timerValue == 3:
            # "async" is a reserved word since Python 3.7, so avoid it as an attribute name
            self.async_result = self.pool.apply_async(fasttick.heartbeat)
if self.timerValue == 0:
while True:
                if self.async_result.ready():
break
for i in range(1, 4):
                    if self.async_result.ready():
break
self.timerDisp.config(text=f'{"." * i}', font=('', 20))
self.app.update()
sleep(1)
            self.ticker_data = self.async_result.get()
self.sort_ticker()
if self.notifyIsActive and self.ticker_data:
playsound('media/notification_sound.mp3')
self.timerValue = config.FASTTICK_RATE
values = divmod(self.timerValue, 60)
minutes = values[0]
seconds = values[1]
self.timerDisp.config(text=f'{minutes}:{seconds:0>2}', font=('', 20))
self.timerValue -= 1
self.app.after(1000, self.timer_update)
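# Note on the polling pattern in timer_update: fasttick.heartbeat is handed to
# the process pool 3 seconds before the countdown expires, and once the timer
# hits zero the method animates a "..." display (pumping the Tk event loop each
# second) until the async result is ready.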
|
151094
|
import gtimer as gt
from rlkit.core import logger
from ROLL.online_LSTM_replay_buffer import OnlineLSTMRelabelingBuffer
import rlkit.torch.vae.vae_schedules as vae_schedules
import ROLL.LSTM_schedule as lstm_schedules
from rlkit.torch.torch_rl_algorithm import (
TorchBatchRLAlgorithm,
)
import rlkit.torch.pytorch_util as ptu
from torch.multiprocessing import Process, Pipe
from threading import Thread
from test_latent_space.test_LSTM import compare_latent_distance
from test_latent_space.test_LSTM2 import test_lstm_traj
from test_latent_space.test_masked_traj import test_masked_traj_lstm
import os
import os.path as osp
import numpy as np
from multiworld.core.image_env import unormalize_image, normalize_image
class OnlineLSTMAlgorithm(TorchBatchRLAlgorithm):
def __init__(
self,
env_id,
vae_original,
lstm_segmented,
vae_trainer_original,
lstm_trainer_segmented,
*base_args,
vae_save_period=1,
lstm_save_period=1,
vae_training_schedule=vae_schedules.never_train,
lstm_training_schedule=lstm_schedules.never_train,
lstm_test_N=500,
lstm_segmentation_method='color',
oracle_data=False,
parallel_vae_train=False,
vae_min_num_steps_before_training=0,
uniform_dataset=None,
keep_train_segmentation_lstm=False,
keep_train_original_vae=True,
**base_kwargs
):
super().__init__(*base_args, **base_kwargs)
assert isinstance(self.replay_buffer, OnlineLSTMRelabelingBuffer)
self.vae_original = vae_original
self.lstm_segmented = lstm_segmented
self.vae_trainer_original = vae_trainer_original
self.lstm_trainer_segmented = lstm_trainer_segmented
self.vae_trainer_original.model = self.vae_original
self.lstm_trainer_segmented.model = self.lstm_segmented
self.vae_save_period = vae_save_period
self.lstm_save_period = lstm_save_period
self.vae_training_schedule = vae_training_schedule
self.lstm_training_schedule = lstm_training_schedule
self.oracle_data = oracle_data
self.parallel_vae_train = parallel_vae_train
self.vae_min_num_steps_before_training = vae_min_num_steps_before_training
self.uniform_dataset = uniform_dataset
self._vae_training_process = None
self._update_subprocess_vae_thread = None
self._vae_conn_pipe = None
self.keep_train_segmentation_lstm = keep_train_segmentation_lstm
self.keep_train_original_vae = keep_train_original_vae
# below is just used for testing the segmentation vae.
self.env_id = env_id
self.lstm_test_N = lstm_test_N
self.lstm_segmentation_method = lstm_segmentation_method
def _train(self):
super()._train()
self._cleanup()
def _end_epoch(self, epoch):
# self.check_replay_buffer()
self._train_vae(epoch)
gt.stamp('vae training')
super()._end_epoch(epoch)
def _log_stats(self, epoch):
self._log_vae_stats()
super()._log_stats(epoch)
def to(self, device):
self.vae_original.to(device)
self.lstm_segmented.to(device)
super().to(device)
def _get_snapshot(self):
snapshot = super()._get_snapshot()
assert 'vae' not in snapshot
snapshot['vae_original'] = self.vae_original
snapshot['lstm_segmented'] = self.lstm_segmented
return snapshot
"""
debug code
"""
def check_replay_buffer(self):
batch = self.replay_buffer.random_batch(
self.batch_size)
rewards = batch['rewards']
terminals = batch['terminals']
obs = batch['observations']
actions = batch['actions']
next_obs = batch['next_observations']
goals = batch['resampled_goals']
print("obs: ", type(obs))
print("obs shape: ", obs.shape)
decoded_obs = self.eval_env._decode(obs, self.eval_env.vae_original)
for idx in range(10):
self.eval_env.show_obs(decoded_obs[idx], "sac policy obs")
print("next_obs: ", type(next_obs))
print("next obs shape: ", next_obs.shape)
decoded_next_obs = self.eval_env._decode(next_obs, self.eval_env.vae_original)
for idx in range(10):
self.eval_env.show_obs(decoded_next_obs[idx], "sac policy next_obs")
decoded_goal = self.eval_env._decode(goals, self.eval_env.lstm_segmented)
for idx in range(10):
self.eval_env.show_obs(decoded_goal[idx], "sac policy goal")
"""
VAE-specific Code
"""
def _train_vae(self, epoch):
if self.parallel_vae_train and self._vae_training_process is None:
self.init_vae_training_subprocess()
should_train, amount_to_train = self.vae_training_schedule(epoch)
_, lstm_amount_to_train = self.lstm_training_schedule(epoch)
rl_start_epoch = int(self.min_num_steps_before_training / (
self.num_expl_steps_per_train_loop * self.num_train_loops_per_epoch
))
print(" _train_vae called, should_train, amount_to_train", should_train, amount_to_train)
if should_train or epoch <= (rl_start_epoch - 1):
if self.parallel_vae_train:
assert self._vae_training_process.is_alive()
# Make sure the last vae update has finished before starting
# another one
if self._update_subprocess_vae_thread is not None:
self._update_subprocess_vae_thread.join()
self._update_subprocess_vae_thread = Thread(
                    target=OnlineLSTMAlgorithm.update_vae_in_training_subprocess,
args=(self, epoch, ptu.device)
)
self._update_subprocess_vae_thread.start()
self._vae_conn_pipe.send((amount_to_train, epoch))
else:
if self.keep_train_original_vae:
_train_vae(
self.vae_trainer_original,
self.replay_buffer,
epoch,
amount_to_train,
key='image_observation'
)
_test_vae(
self.vae_trainer_original,
epoch,
self.replay_buffer,
vae_save_period=self.vae_save_period,
uniform_dataset=self.uniform_dataset,
save_prefix='r_original_'
)
if self.keep_train_segmentation_lstm:
_train_lstm(
lstm_trainer=self.lstm_trainer_segmented,
replay_buffer=self.replay_buffer,
epoch=epoch,
batches=lstm_amount_to_train,
oracle_data=False,
key='image_observation_segmented'
)
_test_lstm(
lstm_trainer=self.lstm_trainer_segmented,
epoch=epoch,
replay_buffer=self.replay_buffer,
env_id=self.env_id,
lstm_save_period=self.lstm_save_period,
uniform_dataset=None,
                        save_prefix='r_lstm_',
lstm_test_N=self.lstm_test_N,
lstm_segmentation_method=self.lstm_segmentation_method
)
# we only refresh goals if the segmentation lstm (used for goal sampling) has changed
self.replay_buffer.refresh_latents(epoch, refresh_goals=self.keep_train_segmentation_lstm)
def _log_vae_stats(self):
logger.record_dict(
self.vae_trainer_original.get_diagnostics(),
prefix='vae_trainer_original/',
)
logger.record_dict(
self.lstm_trainer_segmented.get_diagnostics(),
prefix='lstm_trainer_segmented/',
)
def _cleanup(self):
if self.parallel_vae_train:
self._vae_conn_pipe.close()
self._vae_training_process.terminate()
def init_vae_training_subprocess(self):
self._vae_conn_pipe, process_pipe = Pipe()
self._vae_training_process = Process(
target=subprocess_train_vae_loop,
args=(
process_pipe,
                self.vae_original,
                self.vae_original.state_dict(),
self.replay_buffer,
self.replay_buffer.get_mp_info(),
ptu.device,
)
)
self._vae_training_process.start()
        self._vae_conn_pipe.send(self.vae_trainer_original)
def update_vae_in_training_subprocess(self, epoch, device):
        self.vae_original.__setstate__(self._vae_conn_pipe.recv())
        self.vae_original.to(device)
        _test_vae(
            self.vae_trainer_original,
epoch,
self.replay_buffer,
vae_save_period=self.vae_save_period,
uniform_dataset=self.uniform_dataset,
)
def _train_vae(vae_trainer, replay_buffer, epoch, batches=50, oracle_data=False, key='image_observation'):
batch_sampler = replay_buffer.random_vae_training_data
if oracle_data:
batch_sampler = None
vae_trainer.train_epoch(
epoch,
sample_batch=batch_sampler,
batches=batches,
from_rl=True,
key=key,
)
def _train_lstm(lstm_trainer, replay_buffer, epoch, batches=50, oracle_data=False, key='image_observation_segmented'):
batch_sampler = replay_buffer.random_lstm_training_data
if oracle_data:
batch_sampler = None
lstm_trainer.train_epoch(
epoch,
sample_batch=batch_sampler,
batches=batches,
from_rl=True,
key=key,
)
def _test_vae(vae_trainer, epoch, replay_buffer, vae_save_period=1, uniform_dataset=None, save_prefix='r'):
save_imgs = epoch % vae_save_period == 0
log_fit_skew_stats = replay_buffer._prioritize_vae_samples and uniform_dataset is not None
if uniform_dataset is not None:
replay_buffer.log_loss_under_uniform(uniform_dataset, vae_trainer.batch_size, rl_logger=vae_trainer.vae_logger_stats_for_rl)
vae_trainer.test_epoch(
epoch,
from_rl=True,
save_reconstruction=save_imgs,
save_prefix=save_prefix
)
if save_imgs:
sample_save_prefix = save_prefix.replace('r', 's')
vae_trainer.dump_samples(epoch, save_prefix=sample_save_prefix)
if log_fit_skew_stats:
replay_buffer.dump_best_reconstruction(epoch)
replay_buffer.dump_worst_reconstruction(epoch)
replay_buffer.dump_sampling_histogram(epoch, batch_size=vae_trainer.batch_size)
if uniform_dataset is not None:
replay_buffer.dump_uniform_imgs_and_reconstructions(dataset=uniform_dataset, epoch=epoch)
def _test_lstm(lstm_trainer, epoch, replay_buffer, env_id, lstm_save_period=1, uniform_dataset=None,
save_prefix='r', lstm_segmentation_method='color', lstm_test_N=500, key='image_observation_segmented'):
batch_sampler = replay_buffer.random_lstm_training_data
save_imgs = epoch % lstm_save_period == 0
log_fit_skew_stats = replay_buffer._prioritize_vae_samples and uniform_dataset is not None
if uniform_dataset is not None:
replay_buffer.log_loss_under_uniform(uniform_dataset, lstm_trainer.batch_size, rl_logger=lstm_trainer.vae_logger_stats_for_rl)
lstm_trainer.test_epoch(
epoch,
from_rl=True,
key=key,
sample_batch=batch_sampler,
save_reconstruction=save_imgs,
save_prefix=save_prefix
)
if save_imgs:
sample_save_prefix = save_prefix.replace('r', 's')
lstm_trainer.dump_samples(epoch, save_prefix=sample_save_prefix)
if log_fit_skew_stats:
replay_buffer.dump_best_reconstruction(epoch)
replay_buffer.dump_worst_reconstruction(epoch)
replay_buffer.dump_sampling_histogram(epoch, batch_size=lstm_trainer.batch_size)
if uniform_dataset is not None:
replay_buffer.dump_uniform_imgs_and_reconstructions(dataset=uniform_dataset, epoch=epoch)
m = lstm_trainer.model
pjhome = os.environ['PJHOME']
seg_name = 'seg-' + 'color'
if env_id in ['SawyerPushNIPSEasy-v0', 'SawyerPushHurdle-v0', 'SawyerPushHurdleMiddle-v0']:
N = 500
data_file_path = osp.join(pjhome, 'data/local/pre-train-lstm', '{}-{}-{}-0.3-0.5.npy'.format(env_id, seg_name, N))
puck_pos_path = osp.join(pjhome, 'data/local/pre-train-lstm', '{}-{}-{}-0.3-0.5-puck-pos.npy'.format(env_id, seg_name, N))
if osp.exists(data_file_path):
all_data = np.load(data_file_path)
puck_pos = np.load(puck_pos_path)
all_data = normalize_image(all_data.copy())
compare_latent_distance(m, all_data, puck_pos, save_dir=logger.get_snapshot_dir(), obj_name='puck',
save_name='online_lstm_latent_distance_{}.png'.format(epoch))
elif env_id == 'SawyerDoorHookResetFreeEnv-v1':
N = 1000
seg_name = 'seg-' + 'unet'
data_file_path = osp.join(pjhome, 'data/local/pre-train-lstm', 'vae-only-{}-{}-{}-0-0.npy'.format(env_id, seg_name, N))
door_angle_path = osp.join(pjhome, 'data/local/pre-train-lstm', 'vae-only-{}-{}-{}-0-0-door-angle.npy'.format(env_id, seg_name, N))
if osp.exists(data_file_path):
all_data = np.load(data_file_path)
door_angle = np.load(door_angle_path)
all_data = normalize_image(all_data.copy())
compare_latent_distance(m, all_data, door_angle, save_dir=logger.get_snapshot_dir(), obj_name='door',
save_name='online_lstm_latent_distance_{}.png'.format(epoch))
elif env_id == 'SawyerPushHurdleResetFreeEnv-v0':
N = 2000
data_file_path = osp.join(pjhome, 'data/local/pre-train-lstm', 'vae-only-{}-{}-{}-0.3-0.5.npy'.format(env_id, seg_name, N))
puck_pos_path = osp.join(pjhome, 'data/local/pre-train-lstm', 'vae-only-{}-{}-{}-0.3-0.5-puck-pos.npy'.format(env_id, seg_name, N))
if osp.exists(data_file_path):
all_data = np.load(data_file_path)
puck_pos = np.load(puck_pos_path)
all_data = normalize_image(all_data.copy())
compare_latent_distance(m, all_data, puck_pos, save_dir=logger.get_snapshot_dir(), obj_name='puck',
save_name='online_lstm_latent_distance_{}.png'.format(epoch))
test_lstm_traj(env_id, m, save_path=logger.get_snapshot_dir(),
save_name='online_lstm_test_traj_{}.png'.format(epoch))
test_masked_traj_lstm(env_id, m, save_dir=logger.get_snapshot_dir(),
save_name='online_masked_test_{}.png'.format(epoch))
def subprocess_train_vae_loop(
conn_pipe,
vae,
vae_params,
replay_buffer,
mp_info,
device,
):
"""
The observations and next_observations of the replay buffer are stored in
shared memory. This loop waits until the parent signals to start vae
training, trains and sends the vae back, and then refreshes the latents.
Refreshing latents in the subprocess reflects in the main process as well
    since the latents are in shared memory. Since this happens asynchronously,
it is possible for the main process to see half the latents updated and half
not.
"""
ptu.device = device
vae_trainer = conn_pipe.recv()
vae.load_state_dict(vae_params)
vae.to(device)
vae_trainer.set_vae(vae)
replay_buffer.init_from_mp_info(mp_info)
replay_buffer.env.vae = vae
while True:
amount_to_train, epoch = conn_pipe.recv()
_train_vae(vae_trainer, replay_buffer, epoch, amount_to_train)
conn_pipe.send(vae_trainer.model.__getstate__())
replay_buffer.refresh_latents(epoch)
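# Pipe protocol sketch for the loop above, as wired up in
# OnlineLSTMAlgorithm.init_vae_training_subprocess: the parent first sends the
# trainer object, then repeatedly sends (amount_to_train, epoch) tuples; after
# each round of training the child sends back the model's __getstate__() and
# refreshes the latents held in shared memory.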
|
151099
|
import json
import unittest
from unittest.mock import PropertyMock, patch
import sys
import io
import os
from fzfaws.utils import FileLoader, BaseSession
from fzfaws.ec2 import EC2
from fzfaws.ec2.ls_instance import ls_instance, dump_response
import boto3
from botocore.stub import Stubber
from pathlib import Path
class TestEC2ls(unittest.TestCase):
def setUp(self):
self.capturedOutput = io.StringIO()
sys.stdout = self.capturedOutput
fileloader = FileLoader()
config_path = Path(__file__).resolve().parent.joinpath("../data/fzfaws.yml")
fileloader.load_config_file(config_path=str(config_path))
def tearDown(self):
sys.stdout = sys.__stdout__
def test_dump_response(self):
self.capturedOutput.truncate(0)
self.capturedOutput.seek(0)
test_dict = {"hello": "world", "ResponseMetadata": {"hello": "wold"}}
dump_response(test_dict)
self.assertEqual(self.capturedOutput.getvalue(), '{\n "hello": "world"\n}\n')
@patch.object(BaseSession, "client", new_callable=PropertyMock)
@patch.object(EC2, "set_ec2_instance")
def test_ls_instance(self, mocked_set_instance, mocked_client):
response_data = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "../data/ec2_instance.json"
)
with open(response_data, "r") as file:
response = json.load(file)
self.capturedOutput.truncate(0)
self.capturedOutput.seek(0)
ec2 = boto3.client("ec2")
stubber = Stubber(ec2)
stubber.add_response("describe_instances", response[0])
stubber.activate()
mocked_client.return_value = ec2
ls_instance(
ipv4=True, privateip=True, dns=True, az=True, keyname=True, instanceid=True
)
self.assertEqual(
self.capturedOutput.getvalue(), "None\nNone\nNone\nNone\nNone\nNone\n",
)
self.capturedOutput.truncate(0)
self.capturedOutput.seek(0)
ec2 = boto3.client("ec2")
stubber = Stubber(ec2)
stubber.add_response("describe_instances", response[0])
stubber.activate()
mocked_client.return_value = ec2
ls_instance()
self.assertRegex(self.capturedOutput.getvalue(), r"Reservations.*")
@patch.object(BaseSession, "client", new_callable=PropertyMock)
@patch.object(EC2, "get_vpc_id")
@patch.object(EC2, "set_ec2_instance")
def test_ls_vpc(self, mocked_set_instance, mocked_vpc, mocked_client):
response_data = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "../data/ec2_vpc.json"
)
with open(response_data, "r") as file:
response = json.load(file)
mocked_vpc.return_value = ["vpc-0f07bd18d891bc5c0"]
self.capturedOutput.truncate(0)
self.capturedOutput.seek(0)
ec2 = boto3.client("ec2")
stubber = Stubber(ec2)
stubber.add_response("describe_vpcs", response[0])
stubber.activate()
mocked_client.return_value = ec2
ls_instance(vpc=True)
self.assertRegex(
self.capturedOutput.getvalue(), r"State.*available",
)
self.capturedOutput.truncate(0)
self.capturedOutput.seek(0)
ls_instance(vpcid=True)
self.assertEqual(
self.capturedOutput.getvalue(), "vpc-0f07bd18d891bc5c0\n",
)
@patch.object(BaseSession, "client", new_callable=PropertyMock)
@patch.object(EC2, "get_volume_id")
@patch.object(EC2, "set_ec2_instance")
def test_ls_volume(self, mocked_set_instance, mocked_volume, mocked_client):
response_data = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "../data/ec2_volume.json"
)
with open(response_data, "r") as file:
response = json.load(file)
mocked_volume.return_value = ["vol-014718fdbcdf5ade8"]
self.capturedOutput.truncate(0)
self.capturedOutput.seek(0)
ec2 = boto3.client("ec2")
stubber = Stubber(ec2)
stubber.add_response("describe_volumes", response[0])
stubber.activate()
mocked_client.return_value = ec2
ls_instance(volume=True)
self.assertRegex(
self.capturedOutput.getvalue(), r"VolumeType.*gp2",
)
self.capturedOutput.truncate(0)
self.capturedOutput.seek(0)
ls_instance(volumeid=True)
self.assertEqual(
self.capturedOutput.getvalue(), "vol-014718fdbcdf5ade8\n",
)
@patch.object(BaseSession, "client", new_callable=PropertyMock)
@patch.object(EC2, "get_subnet_id")
@patch.object(EC2, "set_ec2_instance")
def test_ls_subnet(self, mocked_set_instance, mocked_subnet, mocked_client):
response_data = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "../data/ec2_subnet.json"
)
with open(response_data, "r") as file:
response = json.load(file)
mocked_subnet.return_value = ["subnet-0084be888f20fa8eb"]
self.capturedOutput.truncate(0)
self.capturedOutput.seek(0)
ec2 = boto3.client("ec2")
stubber = Stubber(ec2)
stubber.add_response("describe_subnets", response[0])
stubber.activate()
mocked_client.return_value = ec2
ls_instance(subnet=True)
self.assertRegex(
self.capturedOutput.getvalue(), r"State.*available",
)
self.capturedOutput.truncate(0)
self.capturedOutput.seek(0)
ls_instance(subnetid=True)
self.assertEqual(
self.capturedOutput.getvalue(), "subnet-0084be888f20fa8eb\n",
)
@patch.object(BaseSession, "client", new_callable=PropertyMock)
@patch.object(EC2, "get_security_groups")
@patch.object(EC2, "set_ec2_instance")
def test_ls_sg(self, mocked_set_instance, mocked_sg, mocked_client):
response_data = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "../data/ec2_sg.json"
)
with open(response_data, "r") as file:
response = json.load(file)
mocked_sg.return_value = ["sg-006ae18653dc5acd7"]
self.capturedOutput.truncate(0)
self.capturedOutput.seek(0)
ec2 = boto3.client("ec2")
stubber = Stubber(ec2)
stubber.add_response("describe_security_groups", response[0])
stubber.activate()
mocked_client.return_value = ec2
ls_instance(sg=True)
self.assertRegex(
self.capturedOutput.getvalue(),
r"GroupName.*hellotesting-EC2InstanceSecurityGroup",
)
mocked_sg.assert_called_with(
multi_select=True, return_attr="id", no_progress=True
)
self.capturedOutput.truncate(0)
self.capturedOutput.seek(0)
ls_instance(sgid=True)
self.assertEqual(
self.capturedOutput.getvalue(), "sg-006ae18653dc5acd7\n",
)
mocked_sg.assert_called_with(
multi_select=True, return_attr="id", no_progress=True
)
mocked_sg.return_value = ["hellotesting-EC2InstanceSecurityGroup"]
self.capturedOutput.truncate(0)
self.capturedOutput.seek(0)
ls_instance(sgname=True)
self.assertEqual(
self.capturedOutput.getvalue(), "hellotesting-EC2InstanceSecurityGroup\n",
)
mocked_sg.assert_called_with(
multi_select=True, return_attr="name", no_progress=True
)
|
151116
|
from mongonaut.sites import MongoAdmin
from articles.models import Post, User, NewUser
class PostAdmin(MongoAdmin):
def has_view_permission(self, request):
return True
def has_edit_permission(self, request):
return True
def has_add_permission(self, request):
return True
def has_delete_permission(self, request):
return True
search_fields = ('title', 'id')
    list_fields = ('title', 'author', 'published', 'pub_date', 'update_times')
class UserAdmin(MongoAdmin):
def has_view_permission(self, request):
return True
def has_edit_permission(self, request):
return True
def has_add_permission(self, request):
return True
    list_fields = ('first_name', 'last_name', 'email')
Post.mongoadmin = PostAdmin()
User.mongoadmin = UserAdmin()
NewUser.mongoadmin = UserAdmin()
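# Note: mongonaut discovers admin configuration through a `mongoadmin` attribute
# on each model class, so the three assignments above are what register these
# admins.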
|
151139
|
import logging
import os
import sys
import arguments
from app.core import files
from cli.main import start_cli
from gui.main import start_gui
def main():
app_path = files.get_app_path()
if not os.path.exists(app_path):
os.mkdir(app_path)
    if not os.path.isfile(files.get_config_path()):
        open(files.get_config_path(), 'w').close()
    if not os.path.isfile(files.get_accounts_path()):
        open(files.get_accounts_path(), 'w').close()
aws_path = files.get_aws_path()
if not os.path.exists(aws_path):
os.mkdir(aws_path)
args = arguments.parse(sys.argv[1:])
logging.basicConfig(level=logging.getLevelName(args.loglevel))
logger = logging.getLogger('logsmith')
logger.propagate = False
logger.setLevel(logging.DEBUG)
log_formatter = logging.Formatter("%(asctime)12s [%(levelname)s] %(threadName)-12.12s %(message)s")
file_handler = logging.FileHandler(f'{app_path}/app.log', mode='w')
file_handler.setFormatter(log_formatter)
logger.addHandler(file_handler)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_formatter)
logger.addHandler(console_handler)
logging.info(f'config dir {app_path}')
logging.info('start app')
if arguments.use_cli(args):
start_cli(args)
else:
start_gui()
if __name__ == '__main__':
main()
|
151181
|
from django.contrib.auth.models import AnonymousUser
from app.testing import register
@register
def user(self, **kwargs):
return self.mixer.blend('users.User', **kwargs)
@register
def anon(self, **kwargs):
return AnonymousUser()
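# A hedged usage sketch: assuming `register` exposes these factories on the test
# class's factory object (the attribute names here are hypothetical), a test
# might do:
#   user = self.factory.user(email='user@example.com')  # blended users.User
#   visitor = self.factory.anon()                       # plain AnonymousUser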
|
151186
|
from nox_poetry import Session, session
@session()
def tests(session: Session) -> None:
args = session.posargs or ["--cov=kiez/", "--cov-report=xml", "tests/"]
session.install(".[all]")
session.install("pytest")
session.install("pytest-cov")
session.run("pytest", *args)
locations = ["kiez", "tests", "noxfile.py"]
@session()
def lint(session: Session) -> None:
args = session.posargs or locations
session.run_always("poetry", "install", external=True)
session.run("pflake8", *args)
@session()
def type_checking(session: Session) -> None:
args = session.posargs or locations
session.run_always("poetry", "install", external=True)
session.run("mypy", "--ignore-missing-imports", *args)
|
151188
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import builtins
import math
import warnings
import inspect
from functools import partial
import tensorflow as tf
from trident.backend.common import TensorShape
from trident.backend.tensorflow_backend import *
from trident.backend.tensorflow_ops import *
from trident.backend.common import get_function, camel2snake
__all__ = ['kaiming_uniform', 'kaiming_normal', 'xavier_uniform', 'xavier_normal', 'trunc_normal', 'fill_zeros', 'fill_ones']
def calculate_gain(nonlinearity, param=None):
r"""Return the recommended gain value for the given nonlinearity function.
The values are as follows:
================= ====================================================
nonlinearity gain
================= ====================================================
Linear / Identity :math:`1`
Conv{1,2,3}D :math:`1`
Sigmoid :math:`1`
Tanh :math:`\frac{5}{3}`
ReLU :math:`\sqrt{2}`
Leaky Relu :math:`\sqrt{\frac{2}{1 + \text{negative\_slope}^2}}`
================= ====================================================
Args:
nonlinearity: the non-linear function (`nn.functional` name)
param: optional parameter for the non-linear function
Examples:
>>> gain = nn.init.calculate_gain('leaky_relu', 0.2) # leaky_relu with negative_slope=0.2
"""
linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']
if nonlinearity in linear_fns or nonlinearity == 'sigmoid':
return 1
elif nonlinearity == 'tanh':
return 5.0 / 3
elif nonlinearity == 'relu':
return math.sqrt(2.0)
elif nonlinearity == 'leaky_relu':
if param is None:
negative_slope = 0.01
    elif not isinstance(param, bool) and (isinstance(param, int) or isinstance(param, float)):
# True/False are instances of int, hence check above
negative_slope = param
else:
raise ValueError("negative_slope {} not a valid number".format(param))
return math.sqrt(2.0 / (1 + negative_slope ** 2))
else:
raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
def _calculate_fan_in_and_fan_out(tensor):
dimensions = len(tensor.shape)
if dimensions < 2:
raise ValueError("Fan in and fan out can not be computed for tensor with fewer than 2 dimensions")
num_input_fmaps = int_shape(tensor)[-1]
num_output_fmaps = int_shape(tensor)[0]
receptive_field_size = 1
    if dimensions > 2:
        # tf tensors do not provide torch's .numel() method, so count the
        # receptive-field elements with tf.size instead
        receptive_field_size = int(tf.size(tensor[0][0]))
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
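# Worked example: for a 2-D weight of shape (3, 5) the function above returns
# fan_in == 5 and fan_out == 3 (receptive_field_size stays 1); for tensors with
# more than 2 dimensions the receptive field size is the element count of
# tensor[0][0].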
def _calculate_correct_fan(tensor, mode):
mode = mode.lower()
valid_modes = ['fan_in', 'fan_out']
if mode not in valid_modes:
raise ValueError("Mode {} not supported, please use one of {}".format(mode, valid_modes))
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
return fan_in if mode == 'fan_in' else fan_out
def uniform(tensor, a=0., b=1.):
# type: (Tensor, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from the uniform
distribution :math:`\mathcal{U}(a, b)`.
    Args:
        tensor: an n-dimensional `tf.Variable` or `tf.Module`
        a: the lower bound of the uniform distribution
        b: the upper bound of the uniform distribution
    Examples:
        >>> w = zeros((3, 5))
        >>> uniform(w)
    """
    if isinstance(tensor, tf.Module):
        for name, weight in tensor.named_parameters():
            if weight.trainable and 'bias' not in name:
                weight.assign(random_uniform_like(weight, a=a, b=b))
    elif isinstance(tensor, tf.Variable) and tensor.trainable:
        tensor.assign(random_uniform_like(tensor, a=a, b=b))
def normal(tensor, mean=0., std=1.):
# type: (Tensor, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from the normal
distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`.
    Args:
        tensor: an n-dimensional `tf.Variable` or `tf.Module`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
    Examples:
        >>> w = zeros((3, 5))
        >>> normal(w)
    """
    if isinstance(tensor, tf.Module):
        for name, weight in tensor.named_parameters():
            if weight.trainable and 'bias' not in name:
                weight.assign(random_normal_like(weight, mean=mean, std=std))
    elif isinstance(tensor, tf.Variable) and tensor.trainable:
        tensor.assign(random_normal_like(tensor, mean=mean, std=std))
def fill_zeros(tensor):
# type: (Tensor) -> Tensor
r"""Fills the input Tensor with the scalar value `0`.
    Args:
        tensor: an n-dimensional `tf.Variable` or `tf.Module`
    Examples:
        >>> w = zeros((3, 5))
        >>> fill_zeros(w)
    """
    if isinstance(tensor, tf.Module):
        for name, weight in tensor.named_parameters():
            if weight.trainable:
                weight.assign(zeros_like(weight))
    elif isinstance(tensor, tf.Variable) and tensor.trainable:
        tensor.assign(zeros_like(tensor))
def fill_ones(tensor):
# type: (Tensor) -> Tensor
r"""Fills the input Tensor with the scalar value `1`.
    Args:
        tensor: an n-dimensional `tf.Variable` or `tf.Module`
    Examples:
        >>> w = zeros((3, 5))
        >>> fill_ones(w)
    """
    if isinstance(tensor, tf.Module):
        for name, weight in tensor.named_parameters():
            if weight.trainable and 'bias' not in name:
                weight.assign(ones_like(weight))
    elif isinstance(tensor, tf.Variable) and tensor.trainable:
        tensor.assign(ones_like(tensor))
def kaiming_uniform(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'):
r"""Fills the input `Tensor` with values according to the method
described in `Delving deep into rectifiers: Surpassing human-level
performance on ImageNet classification` - He, K. et al. (2015), using a
uniform distribution. The resulting tensor will have values sampled from
:math:`\mathcal{U}(-\text{bound}, \text{bound})` where
.. math::
\text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan\_mode}}}
Also known as He initialization.
Args:
        tensor: an n-dimensional `tf.Variable` or `tf.Module`
a: the negative slope of the rectifier used after this layer (only
used with ``'leaky_relu'``)
mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
preserves the magnitude of the variance of the weights in the
forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
backwards pass.
nonlinearity: the non-linear function (`nn.functional` name),
recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
Examples:
>>> w = zeros((3, 5))
>>> kaiming_uniform(w, mode='fan_in', nonlinearity='relu')
"""
    if isinstance(tensor, tf.Module):
        for name, weight in tensor.named_parameters():
            if weight.trainable and 'bias' not in name:
                kaiming_uniform(weight, a, mode, nonlinearity)
    elif isinstance(tensor, tf.Variable) and tensor.trainable:
tensor_data = tensor.value()
fan = to_numpy(_calculate_correct_fan(tensor_data, mode)).mean()
gain = calculate_gain(nonlinearity, a)
std = true_divide(gain, math.sqrt(fan))
bound = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
tensor.assign(random_uniform_like(tensor_data, -bound, bound, tensor_data.dtype))
def kaiming_normal(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'):
r"""Fills the input `Tensor` with values according to the method
described in `Delving deep into rectifiers: Surpassing human-level
    performance on ImageNet classification` - He, K. et al. (2015), using a
normal distribution. The resulting tensor will have values sampled from
:math:`\mathcal{N}(0, \text{std}^2)` where
.. math::
\text{std} = \frac{\text{gain}}{\sqrt{\text{fan\_mode}}}
Also known as He initialization.
Args:
        tensor: an n-dimensional `tf.Variable` or `tf.Module`
a: the negative slope of the rectifier used after this layer (only
used with ``'leaky_relu'``)
mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
preserves the magnitude of the variance of the weights in the
forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
backwards pass.
nonlinearity: the non-linear function (`nn.functional` name),
recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
    Examples:
        >>> w = zeros((3, 5))
        >>> kaiming_normal(w, mode='fan_out', nonlinearity='relu')
    """
    if isinstance(tensor, tf.Module):
        for name, weight in tensor.named_parameters():
            if weight.trainable and 'bias' not in name:
                kaiming_normal(weight, a, mode, nonlinearity)
    elif isinstance(tensor, tf.Variable) and tensor.trainable:
        tensor_data = tensor.value()
        fan = to_numpy(_calculate_correct_fan(tensor_data, mode)).mean()
        gain = calculate_gain(nonlinearity, a)
        std = true_divide(gain, math.sqrt(fan))
        tensor.assign(random_normal_like(tensor_data, 0, std, tensor_data.dtype))
def xavier_uniform(tensor, gain=1.):
# type: (Tensor, float) -> Tensor
r"""Fills the input `Tensor` with values according to the method
described in `Understanding the difficulty of training deep feedforward
    neural networks` - Glorot, X. & Bengio, Y. (2010), using a uniform
distribution. The resulting tensor will have values sampled from
:math:`\mathcal{U}(-a, a)` where
.. math::
a = \text{gain} \times \sqrt{\frac{6}{\text{fan\_in} + \text{fan\_out}}}
Also known as Glorot initialization.
    Args:
        tensor: an n-dimensional `tf.Variable` or `tf.Module`
        gain: an optional scaling factor
    Examples:
        >>> w = zeros((3, 5))
        >>> xavier_uniform(w, gain=calculate_gain('relu'))
    """
    if isinstance(tensor, tf.Module):
        for name, weight in tensor.named_parameters():
            if weight.trainable and 'bias' not in name:
                xavier_uniform(weight, gain)
    elif isinstance(tensor, tf.Variable) and tensor.trainable:
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
tensor.assign(random_uniform_like(tensor, -a, a))
def xavier_normal(tensor, gain=1.):
# type: (Tensor, float) -> Tensor
r"""Fills the input `Tensor` with values according to the method
described in `Understanding the difficulty of training deep feedforward
    neural networks` - Glorot, X. & Bengio, Y. (2010), using a normal
distribution. The resulting tensor will have values sampled from
:math:`\mathcal{N}(0, \text{std}^2)` where
.. math::
\text{std} = \text{gain} \times \sqrt{\frac{2}{\text{fan\_in} + \text{fan\_out}}}
Also known as Glorot initialization.
    Args:
        tensor: an n-dimensional `tf.Variable` or `tf.Module`
        gain: an optional scaling factor
    Examples:
        >>> w = zeros((3, 5))
        >>> xavier_normal(w)
    """
    if isinstance(tensor, tf.Module):
        for name, weight in tensor.named_parameters():
            if weight.trainable and 'bias' not in name:
                xavier_normal(weight, gain)
    elif isinstance(tensor, tf.Variable) and tensor.trainable:
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
std = gain * math.sqrt(2.0 / float(fan_in + fan_out))
tensor.assign(random_normal_like(tensor, 0, std))
def trunc_normal(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
    Args:
        tensor: an n-dimensional `tf.Variable` or `tf.Module`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value
    Examples:
        >>> w = zeros((3, 5))
        >>> trunc_normal(w)
    """
    # Note: tf.random.truncated_normal takes `stddev` (not `std`) and has no
    # `a`/`b` cutoff arguments; it always resamples values beyond two standard
    # deviations from the mean, which coincides with the default bounds a=-2,
    # b=2 only for mean=0, std=1.
    if isinstance(tensor, tf.Module):
        for name, weight in tensor.named_parameters():
            if weight.trainable and 'bias' not in name:
                weight.assign(tf.random.truncated_normal(weight.shape, mean=mean, stddev=std))
    elif isinstance(tensor, tf.Variable) and tensor.trainable:
        tensor.assign(tf.random.truncated_normal(tensor.shape, mean=mean, stddev=std))
def get_initializer(initializer, **kwargs):
    if isinstance(initializer, str):
        initializer_fn = get_function(camel2snake(initializer), ['trident.backend.tensorflow_initializers'])
        initializer_fn = partial(initializer_fn, **kwargs) if len(kwargs) > 0 else initializer_fn
        return initializer_fn
    elif inspect.isfunction(initializer) and getattr(initializer, '__module__', None) == 'trident.backend.tensorflow_initializers':
        initializer = partial(initializer, **kwargs) if len(kwargs) > 0 else initializer
        return initializer
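# A hedged usage sketch, assuming camel2snake maps 'KaimingNormal' to
# 'kaiming_normal' (which this module exports):
#   init_fn = get_initializer('kaiming_normal', mode='fan_out')
#   init_fn(weight_variable)  # the partial fills in mode at call time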
|