from odoo.tests.common import TransactionCase


class TestComputeDomain(TransactionCase):
    at_install = True
    post_install = True

    def setUp(self):
        super(TestComputeDomain, self).setUp()
        self.demo_user = self.env.ref("base.user_demo")
        self.env["ir.rule"].create(
            {
                "name": "test ir_rule_website",
                "model_id": self.env.ref("base.model_res_partner").id,
                "domain_force": "[('parent_id', 'in', [website_id])]",
            }
        )

    def _cached_compute_domain(self, website_id):
        test_domain = ("parent_id", "in", [website_id])
        domain = (
            self.env["ir.rule"]
            .sudo(user=self.demo_user.id)
            .with_context(website_id=website_id)
            ._compute_domain("res.partner")
        )
        self.assertTrue(test_domain in domain)

    def test_cache(self):
        self._cached_compute_domain(1)
        self._cached_compute_domain(2)
|
nilq/baby-python
|
python
|
#%%
import os, glob
import numpy as np
import tensorflow as tf
# from myDataset import *
# from myconfig import cfg
# from yoloNet import *
# model = yoloNetModle()
# tf.keras.utils.plot_model(model, show_shapes=True, show_layer_names=True)
# weight_reader = WeightReader('yolo.weights')
# print(weight_reader.all_weights[:4])
# conv_layer = model.get_layer('conv_1')
# for i in conv_layer.weights:
# print(i.shape)
# norm_layer = model.get_layer('norm_1')
# for i in norm_layer.weights:
# print(i.shape)
# print(i.name)
# %%
# for i in norm_layer.weights:
# print(i.shape)
# print(i.name)
# %%
bn1 = tf.keras.layers.BatchNormalization(name='bn1')
inTem = tf.convert_to_tensor([[1,2,3.0],[1,2,3.0]])
outTem = bn1(inTem)
print(outTem)
bn1.weights[1].assign(np.array([1,2,3], dtype=np.float32))
outTem = bn1(inTem)
print(outTem)
for i in bn1.weights:
    print(i)
    # print(i.shape)
    # print(i.name)
# %%
# %%
# bn1.weights[1].assign(np.array([[1,2,3]], dtype=np.float32))
bn1 = tf.keras.layers.BatchNormalization(name='bn1')
inTem = tf.convert_to_tensor([[1,2,3.0],[1,2,3.0]])
for i in range(10):
    outTem = bn1(inTem, training=True)
# print(outTem)
outTem = bn1(inTem, training=False) # False
print(outTem)
outTem = bn1(inTem, training=True) # False
print(outTem)
# %%
for i in bn1.weights:
    print(i)
# %%
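# %%
# --- Added sketch (not from the original notebook): a minimal check, on the same toy
# input, of what the training/inference switch above changes. In training mode the
# layer normalises with the current batch statistics and updates its moving averages;
# in inference mode it reuses the accumulated moving_mean / moving_variance.
bn2 = tf.keras.layers.BatchNormalization(name='bn2')
x = tf.convert_to_tensor([[1, 2, 3.0], [1, 2, 3.0]])
_ = bn2(x, training=True)           # one training-mode call updates the moving statistics
print(bn2.moving_mean.numpy())      # drifts from 0 towards the batch mean [1, 2, 3]
print(bn2.moving_variance.numpy())  # drifts from 1 towards the batch variance [0, 0, 0]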
|
nilq/baby-python
|
python
|
class Cliente:
    def __init__(self, nome, email, plano):
        self.nome = nome
        self.email = email
        # If this were a variable used only inside this method, it would not need "self."
        self.lista_planos = ["basic", "premium"]
        if plano in self.lista_planos:
            self.plano = plano
        else:
            raise Exception("Plano inválido!")

    def mudar_plano(self, novo_plano):
        if novo_plano in self.lista_planos:
            self.plano = novo_plano
        else:
            print("Plano inválido!")

    def ver_filme(self, filme, plano_filme):
        if self.plano == plano_filme:
            print(f'ver {filme}')
        elif self.plano == "premium":
            print(f'ver {filme}')
        else:
            print(f'filme não disponível no plano {self.plano}')


cliente1 = Cliente("Isaque", "isaque@gmal.com", "basic")
print(cliente1.plano)
cliente1.ver_filme("Homem-Aranha", "premium")
cliente1.mudar_plano("premium")
print(cliente1.plano)
cliente1.ver_filme("Homem-Aranha", "premium")
|
nilq/baby-python
|
python
|
import sys
f=open(sys.argv[1],'r')
patch_no=sys.argv[2]
#g=open('/Volumes/Unnamed/patched_function_old/'+patch_no,'r')
#method=g.readline().strip()
from unidiff import PatchSet
import os
def get_patched_class(patch_no):
    patchfile = os.path.join('../../patches', patch_no)
    patch = PatchSet.from_filename(patchfile, encoding='utf-8')
    source_file = patch[0].source_file
    #print(source_file)
    line_no_list = []
    tmp_file = 'tmp_result' + patch_no
    for hunki in range(len(patch[0])):
        for i in range(len(patch[0][hunki])):
            if not patch[0][hunki][i].is_context:
                line_no_list.append(str(patch[0][hunki][i-1].source_line_no+1))
                break
    os.system('cd .. && make PatchInfo ARGS="'+os.path.join('../source/',source_file)+' '+tmp_file+' '+','.join(line_no_list)+'" >/dev/null')
    f = open('../'+tmp_file)
    res = f.readlines()[0].strip()
    f.close()
    os.system('rm ../'+tmp_file)
    return res
method=get_patched_class(patch_no)
traces=[]
trace=[]
stackdepth=0
isinTargetMethod=False
#print('<Method_invoked,'+method+'>')
for line in f:
    if line.startswith('<Method_invoked,'+method+'>'):
        stackdepth = 1
        trace = []
        traces.append(trace)
        trace.append(line)
        isinTargetMethod = True
        continue
    if not isinTargetMethod:
        continue
    trace.append(line)
    if line.startswith('<Method_invoked,'):
        stackdepth += 1
        # print(line.strip())
        # print(stackdepth)
        # input()
        continue
    if line.startswith('<ReturnStatement>'):
        stackdepth -= 1
        # print(line.strip())
        # print(stackdepth)
        # input()
        if stackdepth == 0:
            isinTargetMethod = False
#print(len(traces))
for line in traces[0]:
    print(line.strip())
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Concrete VeiL api objects."""
from .cluster import VeilCluster
from .controller import VeilController
from .data_pool import VeilDataPool
from .domain import (DomainBackupConfiguration, DomainConfiguration, DomainTcpUsb,
DomainUpdateConfiguration, VeilDomain, VeilGuestAgentCmd)
from .library import VeilLibrary
from .node import VeilNode
from .resource_pool import VeilResourcePool
from .vdisk import VeilVDisk
from ..base import VeilRestPaginator
class VeilDomainExt(VeilDomain):
    """Extension of VeilDomain with methods related to VeilLibrary."""

    async def backup_list(self, paginator: VeilRestPaginator = None):
        """List of domain backup files."""
        lib = VeilLibrary(client=self._client,
                          retry_opts=self.retry_opts,
                          cache_opts=self.cache_opts)
        return await lib.list(domain=self.api_object_id,
                              paginator=paginator)


__all__ = (
    'DomainConfiguration', 'VeilGuestAgentCmd', 'DomainTcpUsb', 'VeilDomainExt',
    'DomainBackupConfiguration', 'VeilLibrary', 'VeilNode', 'VeilController',
    'VeilDataPool', 'VeilResourcePool', 'VeilVDisk', 'VeilCluster',
    'DomainUpdateConfiguration'
)
|
nilq/baby-python
|
python
|
from unittest import mock
from py.test.tools import (
    BaseTestCase,
)
from py.src.match.model.data import (
    GlobalRankedData,
)
from py.src.view_model import (
    GlobalViewModelCache,
)


class GlobalIntegrationTest(BaseTestCase):
    @classmethod
    def setUpChildClass(cls):
        cls.matches = cls.sample_matches()

    def _create_global_view_model(self, global_dict):
        with mock.patch(
            'py.src.view_model.get_global_ranked_match_cache',
        ) as mp:
            mp.return_value = global_dict
            return GlobalViewModelCache()

    def test_global(self):
        global_data = GlobalRankedData(global_dict=None)
        global_data._process_matches(self.matches, lambda m: True)
        vm_cache = self._create_global_view_model(global_data.to_dict())
        global_vm = vm_cache.get_by_leagues([], [])
        self.assertEqual(188, global_vm.data.game_count)
        self.assertEqual(94, global_vm.data.game_wins)
        self.assertEqual(47, global_vm.data.characters[14].game_count)

    def test_to_dict(self):
        global_data = GlobalRankedData(global_dict=None)
        global_data._process_matches(self.matches, lambda m: True)
        dict1 = global_data.to_dict()
        dict2 = GlobalRankedData(global_dict=dict1).to_dict()
        self.assertEqual(dict1, dict2)
|
nilq/baby-python
|
python
|
"""
Miscellanea utilities
"""
from __future__ import ( division, absolute_import, print_function, unicode_literals )
import sys, os, tempfile, logging
if sys.version_info >= (3,):
    import urllib.request as urllib2
    import urllib.parse as urlparse
else:
    import urllib2
    import urlparse
# Courtesy of https://stackoverflow.com/a/16518224/8053968
def download_file(url, dest=None):
    """
    Download and save a file specified by url to dest directory.
    """
    u = urllib2.urlopen(url)
    scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
    filename = os.path.basename(path)
    if not filename:
        # Raising a bare string is invalid in Python; raise a proper exception instead.
        raise ValueError("Couldn't get filename from URL")
    if dest:
        filename = os.path.join(dest, filename)
    with open(filename, 'wb') as f:
        meta = u.info()
        meta_func = meta.getheaders if hasattr(meta, 'getheaders') else meta.get_all
        meta_length = meta_func("Content-Length")
        file_size = None
        if meta_length:
            file_size = int(meta_length[0])
        print("Downloading: {0} Bytes: {1}".format(url, file_size))
        file_size_dl = 0
        block_sz = 8192
        while True:
            buffer = u.read(block_sz)
            if not buffer:
                break
            file_size_dl += len(buffer)
            f.write(buffer)
            status = "{0:16}".format(file_size_dl)
            if file_size:
                status += " [{0:6.2f}%]".format(file_size_dl * 100 / file_size)
            status += chr(13)
            print(status, end="")
    print()
    return filename


if __name__ == "__main__":  # Only run if this file is called directly
    print("Testing with 10MB download")
    url = "http://download.thinkbroadband.com/10MB.zip"
    filename = download_file(url)
    print(filename)
|
nilq/baby-python
|
python
|
'''
Author= 'Parampreet Singh'
Project= 'https://github.com/paramsingh96/TensorFlow-Tutorials'
Simple tutorial for understanding MNIST Data Sets
'''
# Importing different libraries
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import tensorflow.examples.tutorials.mnist.input_data as input_data
# MNIST is a dataset of handwritten digit images for the digits 0 to 9
# The data is split into three parts: 55,000 data points of training data,
# 10,000 data points of test data,
# and 5,000 data points of validation data
# Each image is 28 pixels by 28 pixels
mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
# Within each, we can access images, labels and num_examples
print(mnist.train.num_examples, mnist.test.num_examples, mnist.validation.num_examples)
# (55000, 784) (55000, 10)
print(mnist.train.images.shape, mnist.train.labels.shape)
# The range of values in each image is from 0 to 1
print(np.min(mnist.train.images), np.max(mnist.train.images))
# We can visualize any one of the images by reshaping it to a 28*28 image
plt.imshow(np.reshape(mnist.train.images[1000, :], (28, 28)), cmap='rainbow')
plt.show()
# Try out different data points and colormaps to get more insight. :)
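# Extra illustration (added; not part of the original tutorial): because the data was
# loaded with one_hot=True, each label is a length-10 vector with a single 1 marking
# the digit class of the corresponding image.
label = mnist.train.labels[1000]
print(label)             # e.g. a vector like [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.]
print(np.argmax(label))  # index of the 1, i.e. the digit shown in the image above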
|
nilq/baby-python
|
python
|
import concurrent.futures
import datetime
import itertools
import json
import logging
import os
from typing import Dict, Iterable, List
import kafka
from finitestate.common.retry_utils import retry
from finitestate.common.timer import CodeTimer
from finitestate.firmware.datasets import Dataset, Density, Format, Granularity
from finitestate.firmware.bundle import FirmwareDataBundler, DEFAULT_MAX_WORKERS, get_file_suffix, list_keys
__all__ = [
'DynamicFirmwareDataBundler'
]
logger = logging.getLogger(__name__)
class DynamicFirmwareDataBundler(FirmwareDataBundler):
"""
A FirmwareDataBundler that leverages kafka messages and the status tracker to build a bundle containing recent
FWAN plugin output. The intended use of this implementation is to build a "delta" bundle to a first pass that
includes all previously analyzed outputs (since the unpack process doesn't re-analyze previously-seen files).
"""
def __init__(self, firmware_metadata_bucket: str, *, max_workers: int = DEFAULT_MAX_WORKERS, kafka_bootstrap_server: str, topic: str, earliest_offset_time: datetime.datetime):
"""
Creates a new DynamicFirmwareDataBundler that will leverage kafka to determine which
paths should go in the bundle.
Note that instances of this class aren't re-usable, since they're associated to a specific timestamp.
:param firmware_metadata_bucket: The bucket from which to read objects
:param max_workers: The maximum number of parallel threads with which objects will be read from object storage.
:param earliest_offset_time: The earliest time at which a relevant message may be found on the topic.
"""
super().__init__(firmware_metadata_bucket=firmware_metadata_bucket, max_workers=max_workers)
self.kafka_bootstrap_server = kafka_bootstrap_server
self.topic = topic
self.earliest_offset_time = earliest_offset_time
def build_paths(self, firmware_hash: str, datasets: List[Dataset], file_hashes: Iterable[str], delta_to: Dict[str, str] = None) -> List[str]:
@retry(on='kafka.errors.NoBrokersAvailable')
def get_kafka_consumer():
return kafka.KafkaConsumer(bootstrap_servers=self.kafka_bootstrap_server, enable_auto_commit=False)
with CodeTimer('Connect to kafka'):
consumer = get_kafka_consumer()
with CodeTimer(f'Determine partitions and end offsets for {self.topic}'):
topicparts = [kafka.TopicPartition(self.topic, p) for p in consumer.partitions_for_topic(self.topic) or []]
end_offsets = consumer.end_offsets(topicparts)
logger.info(f'End offsets = {end_offsets}')
# Determine the start offsets using the earliest offset time
if self.earliest_offset_time is not None:
timestamp_millis = self.earliest_offset_time.timestamp() * 1000
with CodeTimer(f'Find offsets on {self.topic} for unpack timestamp'):
start_offset_times = consumer.offsets_for_times(
{topicpart: timestamp_millis for topicpart in topicparts}
)
logger.info(f'Start offset times = {start_offset_times}')
else:
logger.warning(f'Earliest offset time is not specified - consuming all messages on topic')
start_offset_times = {}
consumer = None
def get_paths_from_topicpart(topicpart: kafka.TopicPartition, datasets: List[Dataset], start_offset: int, stop_offset: int, delta_to: Dict[str, str] = None) -> List[str]:
consumer = get_kafka_consumer()
consumer.assign([topicpart])
if start_offset is not None:
consumer.seek(topicpart, offset=start_offset)
else:
consumer.seek_to_beginning(topicpart)
dataset_lookup = {d.name: d for d in datasets if d.granularity == Granularity.FILE}
paths = set([])
def get_path(record) -> str:
"""
Parses and creates a path from the record value, which is expected to look like
{
"output_location": "bin_info/exports",
"file_id": "<sha256 file hash>",
"etag": "object storage checksum of the object at the time it was written"
}
:param record: A kafka record
:return: An object storage path
"""
try:
entity = json.loads(record.value)
dataset = dataset_lookup.get(entity['output_location'])
if dataset:
file_hash = entity['file_id']
if file_hash in file_hashes:
path = f'{dataset.name}/{file_hash}{get_file_suffix(dataset.format)}'
if delta_to is None or entity['etag'] != delta_to.get(path):
return path
except json.JSONDecodeError as e:
logger.warning(f'Failed to de-serialize {record}')
except KeyError:
logger.warning(f'Failed to process {record}')
keep_polling = True
while keep_polling:
response = consumer.poll(max_records=1000, timeout_ms=60 * 1000).values()
if not response:
logger.warning(f'Timed out waiting for records on {topicpart} without hitting the stop offset - exiting polling loop')
break
for records in response:
for record in records:
paths.add(get_path(record))
if record.offset >= stop_offset:
logger.info(f'Reached stop offset for {topicpart} - exiting polling loop')
keep_polling = False
return sorted(filter(None, paths))
paths = []
with concurrent.futures.ThreadPoolExecutor(max_workers=self.max_workers) as executor:
futures = []
# Process FILE-level Datasets by looking for recent plugin outputs on Kafka
for topicpart in topicparts:
start_offset_time = start_offset_times.get(topicpart)
futures.append(
# stop_offset = end_offset - 1 because the end_offset points to the offset of the upcoming message,
# i.e. the offset of the last available message + 1
executor.submit(
get_paths_from_topicpart,
topicpart=topicpart,
datasets=datasets,
start_offset=start_offset_time.offset if start_offset_time is not None else None,
stop_offset=end_offsets[topicpart] - 1,
delta_to=delta_to
)
)
# Process FIRMWARE- and FIRMWARE_FILE-level outputs by directly accessing S3
for dataset in filter(lambda d: d.name != 'file_tree', datasets):
file_suffix = get_file_suffix(dataset.format)
if dataset.granularity == Granularity.FIRMWARE:
paths.append(f'{os.path.join(dataset.name, firmware_hash)}{file_suffix}')
elif dataset.granularity == Granularity.FIRMWARE_FILE:
futures.append(
executor.submit(
list_keys,
bucket=self.firmware_metadata_bucket,
prefix=f'{dataset.name}/{firmware_hash}/',
suffix=file_suffix,
delta_to=delta_to
)
)
return sorted(set(itertools.chain(paths, *[f.result() for f in concurrent.futures.as_completed(futures)])))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import sys, os
import errno
import arsdkparser
from common import *
#===============================================================================
# root file that includes all generated c files
#===============================================================================
def gen_root_jni(ctx, out):
out.write("/** Generated, do not edit ! */\n")
out.write("#include <stdio.h>\n")
out.write("#include <string.h>\n")
out.write("#include <jni.h>\n")
out.write("#include <arsdk/arsdk.h>\n")
for featureId in sorted(ctx.featuresById.keys()):
featureobj = ctx.featuresById[featureId]
out.write("#include \"%s\"\n", jni_file_name(featureobj.name))
#===============================================================================
def gen_jni_callback_cache(feature, cls, evts, out):
full_name = feature.name + ("_" + cls.name if cls else "")
java_cb_name = (
"Lcom/parrot/drone/sdkcore/arsdk/"
+ java_feature_class_name(feature.name)
+ ("$" + cls.name if cls else "")
+ "$Callback;"
)
# callback methods id cache
out.write("static struct {\n")
for evt in sorted(evts, key=lambda evt: evt.cmdId):
out.write("\tjmethodID jmid_%s;\n", evt.name)
out.write("} s_cb_%s_cache;\n\n", full_name)
# class init
out.write("JNIEXPORT void JNICALL\n%s_nativeClassInit(JNIEnv *env, jclass jcls) {\n",
jni_func_name(feature, cls))
for evt in sorted(evts, key=lambda evt: evt.cmdId):
out.write("\ts_cb_%s_cache.jmid_%s = (*env)->GetStaticMethodID(env, jcls, \"%s\", \"(%s%s)V\");\n",
full_name, evt.name, java_method_name(evt.name), java_cb_name, jni_method_signature(evt.args))
out.write("}\n\n")
#===============================================================================
def gen_jni_decode(feature, cls, evts, out):
full_name = feature.name + ("_" + cls.name if cls else "")
for evt in sorted(evts, key=lambda evt: evt.cmdId):
# static decode fn
out.write("static int evt_%s_%s(JNIEnv *env, jclass clazz, struct arsdk_cmd *cmd, jobject callback) {\n", full_name, evt.name)
for arg in evt.args:
out.write("\t%s %s;\n", c_arg_type(arg, feature), arg.name)
if evt.args:
out.write("\tint res = arsdk_cmd_dec_%s_%s(cmd, %s);\n", c_name(full_name), c_name(evt.name),
", ".join("&" + arg.name for arg in evt.args))
else:
out.write("\tint res = arsdk_cmd_dec_%s_%s(cmd);\n", c_name(full_name), c_name(evt.name))
out.write("\tif (res < 0)\n\t\treturn res;\n")
for arg in evt.args:
if arg.argType == arsdkparser.ArArgType.BINARY:
out.write("\tjbyteArray j_%s = (*env)->NewByteArray(env, %s.len);\n", arg.name, arg.name)
out.write("\t(*env)->SetByteArrayRegion(env, j_%s, 0, %s.len, %s.cdata);\n", arg.name, arg.name, arg.name)
out.write("\t(*env)->CallStaticVoidMethod(env, clazz, s_cb_%s_cache.jmid_%s, callback", full_name, evt.name)
for arg in evt.args:
out.write(", ")
if arg.argType == arsdkparser.ArArgType.STRING:
out.write("(*env)->NewStringUTF(env, %s)", arg.name)
elif arg.argType == arsdkparser.ArArgType.BINARY:
out.write("j_%s", arg.name)
else:
out.write("(%s)%s", jni_arg_type(arg), arg.name)
out.write(");\n")
out.write("\treturn 0;\n")
out.write("}\n\n")
# decode jni fn
out.write("JNIEXPORT jint JNICALL\n%s_nativeDecode(JNIEnv *env, jclass jcls, jlong nativeCmd, jobject callback) {\n",
jni_func_name(feature, cls))
out.write("\tstruct arsdk_cmd *cmd = (struct arsdk_cmd *)(uintptr_t)nativeCmd;\n")
out.write("\tif (cmd->prj_id != %d || cmd->cls_id != %d)\n\t\treturn -1;\n", feature.featureId,
cls.classId if cls else 0)
out.write("\tswitch(cmd->cmd_id) {\n")
for evt in sorted(evts, key=lambda evt: evt.cmdId):
out.write("\t\tcase %d: return evt_%s_%s(env, jcls, cmd, callback);\n", evt.cmdId, full_name, evt.name)
out.write("\t}\n")
out.write("\treturn -1;\n")
out.write("}\n\n")
#===============================================================================
def gen_jni_encode(feature, cls, cmds, out):
full_name = feature.name + ("_" + cls.name if cls else "")
for cmd in sorted(cmds, key=lambda cmd: cmd.cmdId):
out.write("JNIEXPORT jint JNICALL\n%s_nativeEncode%s(JNIEnv *env, jclass jcls, jlong nativeCmd",
jni_func_name(feature, cls), jni_method_name(cmd.name))
for arg in cmd.args:
out.write(", %s %s", jni_arg_type(arg), arg.name)
out.write(") {\n")
out.write("\tstruct arsdk_cmd *cmd = (struct arsdk_cmd *)(uintptr_t)nativeCmd;\n")
for arg in cmd.args:
if arg.argType == arsdkparser.ArArgType.STRING:
out.write("\tconst char* c_%s = (*env)->GetStringUTFChars(env, %s, NULL);\n", arg.name, arg.name)
elif arg.argType == arsdkparser.ArArgType.BINARY:
out.write("\tstruct arsdk_binary c_%s = {\n", arg.name)
out.write("\t\t.len = (*env)->GetArrayLength(env, %s),\n", arg.name)
out.write("\t\t.cdata = (*env)->GetPrimitiveArrayCritical(env, %s, NULL)\n", arg.name)
out.write("\t};\n")
out.write("\tint res = arsdk_cmd_enc_%s_%s(cmd", c_name(full_name), c_name(cmd.name))
for arg in cmd.args:
arg_prefix = ""
if arg.argType == arsdkparser.ArArgType.STRING:
arg_prefix = "c_"
elif arg.argType == arsdkparser.ArArgType.BINARY:
arg_prefix = "&c_"
out.write(", %s%s", arg_prefix, arg.name)
out.write(");\n")
for arg in cmd.args:
if arg.argType == arsdkparser.ArArgType.STRING:
out.write("\tif (c_%s != NULL) (*env)->ReleaseStringUTFChars(env, %s, c_%s);\n",
arg.name, arg.name, arg.name)
if arg.argType == arsdkparser.ArArgType.BINARY:
out.write("\t(*env)->ReleasePrimitiveArrayCritical(env, %s, (void *) c_%s.cdata, JNI_ABORT);\n", arg.name, arg.name)
out.write("\treturn res;\n")
out.write("}\n\n")
#===============================================================================
def _evts(msgs):
for msg in sorted(msgs, key=lambda msg: msg.cmdId):
if isinstance(msg, arsdkparser.ArEvt):
yield msg
def _cmds(msgs):
for msg in sorted(msgs, key=lambda msg: msg.cmdId):
if isinstance(msg, arsdkparser.ArCmd):
yield msg
#===============================================================================
def gen_jni_feature(feature, cls, evts, cmds, out):
if evts:
# decode evts
gen_jni_callback_cache(feature, cls, evts, out)
gen_jni_decode(feature, cls, evts, out)
if cmds:
# encode cmds
gen_jni_encode(feature, cls, cmds, out)
#===============================================================================
def gen_jni(feature, out):
out.write("/** Generated, do not edit ! */\n\n")
if feature.classes:
# Project based (old)
for clsId in sorted(feature.classesById.keys()):
cls = feature.classesById[clsId]
gen_jni_feature(feature, cls,
[evt for evt in feature.evts if clsId == evt.cls.classId],
[cmd for cmd in feature.cmds if clsId == cmd.cls.classId], out)
else:
# feature based (new)
gen_jni_feature(feature, None, feature.evts, feature.cmds, out)
#===============================================================================
#===============================================================================
def list_files(ctx, outdir, extra):
for featureId in sorted(ctx.featuresById.keys()):
featureobj = ctx.featuresById[featureId]
print(jni_file_name(featureobj.name))
#===============================================================================
#===============================================================================
def generate_files(ctx, outdir, extra):
# main file that include all other files
filepath = os.path.join(outdir, "arsdkgen.c")
with open(filepath, "w") as fileobj:
print("generating %s" % filepath)
gen_root_jni(ctx, Writer(fileobj))
# features
for featureId in sorted(ctx.featuresById.keys()):
featureobj = ctx.featuresById[featureId]
filepath = os.path.join(outdir, jni_file_name(featureobj.name))
try:
os.makedirs(os.path.dirname(filepath))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
print("generating %s" % filepath)
with open(filepath, "w") as fileobj:
gen_jni(featureobj, Writer(fileobj))
|
nilq/baby-python
|
python
|
# Generated by Django 3.2.2 on 2021-05-18 19:03
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Email',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sender', models.EmailField(max_length=254)),
                ('receiver', models.EmailField(max_length=254)),
                ('text', models.TextField()),
                ('date', models.DateField(default=datetime.date.today)),
                ('time', models.TimeField(null=True)),
                ('subject', models.CharField(blank=True, max_length=200)),
                ('attachment', models.URLField(blank=True)),
            ],
        ),
    ]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
__author__ = "Frank Becker <fb@alien8.de>"
__copyright__ = "Frank Becker"
__license__ = "mit"
import importlib
def test_module_imports():
    """Test if all modules can be imported."""
    modules = ["api", "auth", "awattar", "cli", "config", "defaults", "poller", "utils", "weather"]
    for module in modules:
        try:
            importlib.import_module("discovergy." + module)
        except ImportError:
            assert False
        else:
            assert True
|
nilq/baby-python
|
python
|
class Queue:
    """A simple FIFO (first-in, first-out) queue backed by a Python list."""

    def __init__(self):
        self.storage = []

    def size(self):
        return len(self.storage)

    def enqueue(self, item):
        # Add the item to the back of the queue.
        self.storage.append(item)

    def dequeue(self):
        # Remove and return the item at the front of the queue.
        # Note: list.pop(0) is O(n); collections.deque would give O(1) popleft.
        return self.storage.pop(0)
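
# Example usage (added for illustration; not part of the original file):
# items leave the queue in the same order they were added.
if __name__ == "__main__":
    q = Queue()
    q.enqueue("a")
    q.enqueue("b")
    q.enqueue("c")
    print(q.size())     # 3
    print(q.dequeue())  # a  -- the oldest item is removed first
    print(q.dequeue())  # b
    print(q.size())     # 1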
|
nilq/baby-python
|
python
|
# yellowbrick.utils
# Utility functions and helpers for the Yellowbrick library.
#
# Author: Jason Keung <jason.s.keung@gmail.com>
# Author: Patrick O'Melveny <pvomelveny@gmail.com>
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Author: Rebecca Bilbro <rbilbro@districtdatalabs.com>
# Created: Thu Jun 02 15:33:18 2016 -0500
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: __init__.py [79cd8cf] benjamin@bengfort.com $
"""
Utility functions and helpers for the Yellowbrick library.
"""
##########################################################################
## Imports
##########################################################################
from .helpers import *
from .types import *
|
nilq/baby-python
|
python
|
# Generated by Django 3.2.7 on 2021-11-26 08:30
from django.db import migrations
# Migration that creates a user group called "Download approved user"
def create_barrier_search_download_approved_group(apps, schema_editor):
    Group = apps.get_model("auth", "Group")
    Group.objects.create(name="Download approved user")

    # create a "download_barriers" Permission and add it to the group
    Permission = apps.get_model("auth", "Permission")
    ContentType = apps.get_model("contenttypes", "ContentType")
    Barrier = apps.get_model("barriers", "Barrier")
    barrier_content_type = ContentType.objects.get_for_model(Barrier)
    download_barriers_permission, created = Permission.objects.get_or_create(
        codename="download_barriers",
        defaults={
            "name": "Can download barriers",
            "content_type": barrier_content_type,
        },
    )
    group = Group.objects.get(name="Download approved user")
    group.permissions.add(download_barriers_permission)


def undo_create_barrier_search_download_approved_group(apps, schema_editor):
    Group = apps.get_model("auth", "Group")
    Group.objects.filter(name="Download approved user").delete()

    # remove the "download_barriers" Permission
    Permission = apps.get_model("auth", "Permission")
    permission = Permission.objects.get(codename="download_barriers")
    permission.delete()


class Migration(migrations.Migration):

    dependencies = [
        ("user", "0022_auto_20210510_1019"),
        ("barriers", "0113_alter_barrier_options"),
    ]

    operations = [
        migrations.RunPython(
            create_barrier_search_download_approved_group,
            undo_create_barrier_search_download_approved_group,
        ),
    ]
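
# --- Added note (not part of the original migration): once this migration has run,
# members of the "Download approved user" group carry the custom permission, which
# application code can check in the usual Django way, e.g.:
#
#   if request.user.has_perm("barriers.download_barriers"):
#       ...  # allow the barrier download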
|
nilq/baby-python
|
python
|
import numpy as np
#
# class Decoration:
# def __init__(self, s0, s1, s2, r0, r1, r2):
# self.s0 = s0
# self.s1 = s1
# self.s2 = s2
# self.r0 = r0
# self.r1 = r1
# self.r2 = r2
# assert len(s0) == 3, "s0 not a valid R^3 vector"
# assert len(s1) == 3, "s1 not a valid R^3 vector"
# assert len(s2) == 3, "s0 not a valid R^3 vector"
#
# def normalise_decoration(self):
# self.s0 = self.s0/np.linalg.norm(self.s0)
# self.s1 = self.s1/np.linalg.norm(self.s1)
# self.s2 = self.s2/np.linalg.norm(self.s2)
class Vertex:
def __init__(self,c,r, c_clover, r_clover):
self.c = c
self.r = r
self.c_clover = c_clover
self.r_clover = r_clover
self.edges = []
class Edge:
def __init__(self, v0,v1, connected):
self.v0 = v0
self.v1 = v1
self.v0.edges.append(self)
self.v1.edges.append(self)
self.ev0v1 = np.dot(self.v0.r,self.v1.c)
self.ev1v0 = np.dot(self.v1.r, self.v0.c)
self.triangle = []
self.connected = False
self.edge_connected = None
class Triangle:
def __init__(self, e0, e1, e2):
self.edges = [e0, e1, e2]
self.vertices = [e0.v0, e1.v0, e2.v0]
edge_index = 0
for edge in self.edges:
edge.triangle = self
edge.index = edge_index
edge_index+=1
[v0, v1, v2] = self.vertices
#print([v.c for v in self.vertices])
# if np.linalg.det([v0.c,v1.c,v2.c]) < 0:
# self.vertices = [v0, v2, v1]
# if np.linalg.det([v0.c,v1.c,v2.c]) < 0:
# for edge in self.edges:
# [edge.v0,edge.v1] = [edge.v1, edge.v0]
#print(np.linalg.det([self.edges[0].v0.c, self.edges[1].v0.c, self.edges[2].v0.c]))
#print(self.t)
self.neighbours = []
def add_neighbour(self, neighbour_triangle):
self.neighbours.append(neighbour_triangle)
class Surface:
def __init__(self, c0, c1, c2, r0, r1, r2, c0_clover, c1_clover, c2_clover, r0_clover, r1_clover, r2_clover):
vertices = [Vertex(c0,r0, c0_clover, r0_clover), Vertex(c1,r1, c1_clover, r1_clover), Vertex(c2,r2, c2_clover, r2_clover)]
edges = [Edge(vertices[0],vertices[1], False),Edge(vertices[1],vertices[2], False),Edge(vertices[2],vertices[0], False)]
#print(np.linalg.det([c0,c1,c2]))
initial_triangle = Triangle(edges[0],edges[1],edges[2])
self.triangles = [initial_triangle]
initial_triangle.distance_from_centre = 0
initial_triangle.index = 0
def add_triangle(self, connecting_edge, v0, v1, new_vertex):
# if np.linalg.det(np.array([v0.c,v1.c,new_vertex.c])) > 0:
# print(v0.c_clover,v1.c_clover, new_vertex.c_clover)
connecting_edge.connected = True
new_triangle = Triangle(Edge(v0, v1, True), Edge(v1,new_vertex, False), Edge(new_vertex,v0, False))
#print([v.c_clover for v in new_triangle.vertices])
connecting_edge.edge_connected = new_triangle.edges[0]
new_triangle.edges[0].edge_connected = connecting_edge
new_triangle.edges[0].connected = True
self.triangles.append(new_triangle)
new_triangle.add_neighbour(connecting_edge.triangle)
self.triangles[-1].add_neighbour(new_triangle)
new_triangle.index = connecting_edge.triangle.index+1
new_triangle.distance_from_centre = connecting_edge.triangle.distance_from_centre+1
return new_triangle
def connect_edges(self, e1,e2):
e1.edge_connected = e2
e2.edge_connected = e1
if e1.edge_connected:
e1.connected = True
if e2.edge_connected:
e2.connected = True
def flip_edge(self, edge):
edge_forward = edge.triangle.edges[(edge.index+1) % 3]
edge_backward = edge.triangle.edges[(edge.index-1) % 3]
edge_connected = edge.edge_connected
edge_connected_forward = edge_connected.triangle.edges[(edge_connected.index+1) % 3]
edge_connected_backward = edge_connected.triangle.edges[(edge_connected.index - 1)%3]
e_prime = Edge(edge_forward.v1, edge_connected_backward.v0, True)
e_prime_forward = edge_connected_backward
e_prime_backward = edge_forward
# if edge_connected_backward.edge_connected:
# self.connect_edges(e_prime_forward, edge_connected_backward.edge_connected)
# if edge_forward.edge_connected:
# self.connect_edges(e_prime_backward, edge_forward.edge_connected)
e_prime_connected = Edge(e_prime.v1, e_prime.v0, e_prime.connected)
self.connect_edges(e_prime,e_prime_connected)
e_prime_connected_forward = edge_backward
e_prime_connected_backward = edge_connected_forward
# if edge_backward.edge_connected:
# self.connect_edges(e_prime_connected_forward, edge_backward.edge_connected)
# if edge_connected_forward.edge_connected:
# self.connect_edges(e_prime_connected_backward, edge_connected_forward.edge_connected)
triangle_1 = edge.triangle
triangle_2 = edge_connected.triangle
for triangle_index in range(len(self.triangles)):
if self.triangles[triangle_index] == triangle_1:
new_triangle_1 = Triangle(e_prime, e_prime_forward, e_prime_backward)
new_triangle_1.index = triangle_1.index
self.triangles[triangle_index] = new_triangle_1
for neighbour_index in range(len(self.triangles[triangle_index].neighbours)):
if self.triangles[triangle_index].neighbours[neighbour_index] == triangle_1:
self.triangles[triangle_index].neighbours[neighbour_index] = self.triangles[triangle_index]
for neighbour in triangle_1.neighbours:
self.triangles[triangle_index].neighbours.append(neighbour)
for triangle_index in range(len(self.triangles)):
if self.triangles[triangle_index] == triangle_2:
new_triangle_2 = Triangle(e_prime_connected, e_prime_connected_forward, e_prime_connected_backward)
new_triangle_2.index = triangle_2.index
self.triangles[triangle_index] = new_triangle_2
for neighbour_index in range(len(self.triangles[triangle_index].neighbours)):
if self.triangles[triangle_index].neighbours[neighbour_index] == triangle_2:
self.triangles[triangle_index].neighbours[neighbour_index] = self.triangles[triangle_index]
for neighbour in triangle_2.neighbours:
self.triangles[triangle_index].neighbours.append(neighbour)
return e_prime
def normalise_vertices(self):
all_vertices = []
for triangle in self.triangles:
for vertex in triangle.vertices:
if vertex not in all_vertices:
all_vertices.append(vertex)
for vertex in all_vertices:
vertex.c = [vertex.c[0],vertex.c[1],vertex.c[2],1]
#vertex.c_clover = [vertex.c_clover[0], vertex.c_clover[1], vertex.c_clover[2], 1]
vertex.c = vertex.c/np.linalg.norm(vertex.c)
#vertex.c_clover = vertex.c_clover / np.linalg.norm(vertex.c_clover)
vertex.c = np.array(vertex.c[1:])/(1+vertex.c[0])
#vertex.c_clover = np.array(vertex.c_clover[1:])/(1+vertex.c_clover[0])
# def add_vertex(self, triangle, new_vertex):
# decoration = triangle.decoration
# decoration = np.array([np.transpose(decoration.s0), np.transpose(decoration.s1), np.transpose(decoration.s2)])
# distances = np.linalg.norm(np.repeat([new_vertex],3,axis=0)-decoration,axis=1)
# other_vertices = decoration[np.argsort(distances)[:2]]
# determinant = np.linalg.det([other_vertices[0],other_vertices[1],new_vertex])
# assert determinant != 0, 'New Vertex does not span a triangle.'
# if determinant > 0:
# self.add_triangle(triangle,Triangle(Decoration(other_vertices[0],other_vertices[1],new_vertex)))
# else:
# self.add_triangle(triangle, Triangle(Decoration(other_vertices[0],new_vertex,other_vertices[1])))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import cohesity_management_sdk.models.agent_information
import cohesity_management_sdk.models.datastore_information
import cohesity_management_sdk.models.vmware_object_id
import cohesity_management_sdk.models.vmware_tag_attributes
import cohesity_management_sdk.models.vcloud_director_info_information
import cohesity_management_sdk.models.virtual_disk_information
class VmwareProtectionSource(object):
"""Implementation of the 'VMware Protection Source.' model.
Specifies a Protection Source in a VMware environment.
Attributes:
agent_id (long|int): Specifies the id of the persistent agent.
agents (list of AgentInformation): Specifies the list of agent
information on the Virtual Machine. This is set only if the
Virtual Machine has persistent agent.
connection_state (ConnectionStateEnum): Specifies the connection state
of the Object and are only valid for ESXi hosts ('kHostSystem') or
Virtual Machines ('kVirtualMachine'). These enums are equivalent
to the connection states documented in VMware's reference
documentation. Examples of Cohesity connection states include
'kConnected', 'kDisconnected', 'kInacccessible', etc. 'kConnected'
indicates that server has access to virtual machine.
'kDisconnected' indicates that server is currently disconnected to
virtual machine. 'kInacccessible' indicates that one or more
configuration files are inacccessible. 'kInvalid' indicates that
virtual machine configuration is invalid. 'kOrphaned' indicates
that virtual machine is no longer registered on the host it is
associated with. 'kNotResponding' indicates that virtual machine
is failed to response due to external issues such as network
connectivity, hostd not running etc.
datastore_info (DatastoreInformation): TODO: type description here.
folder_type (FolderTypeEnum): Specifies the folder type for the
'kFolder' Object. 'kVMFolder' indicates folder can hold VMs or
vApps. 'kHostFolder' indicates folder can hold hosts and compute
resources. 'kDatastoreFolder' indicates folder can hold datastores
and storage pods. 'kNetworkFolder' indicates folder can hold
networks and switches. 'kRootFolder' indicates folder can hold
datacenters.
has_persistent_agent (bool): Set to true if a persistent agent is
running on the Virtual Machine. This is populated for entities of
type 'kVirtualMachine'.
host_type (HostType6Enum): Specifies the host type for the
'kVirtualMachine' Object. 'kLinux' indicates the Linux operating
system. 'kWindows' indicates the Microsoft Windows operating
system. 'kAix' indicates the IBM AIX operating system. 'kSolaris'
indicates the Oracle Solaris operating system.
id (VmwareObjectId): Specifies a unique Protection Source id across
Cohesity Clusters. It is derived from the id of the VMware
Protection Source.
is_vm_template (bool): IsTemplate specifies if the VM is a template or
not.
name (string): Specifies a human readable name of the Protection
Source.
tag_attributes (list of VmwareTagAttributes): Specifies the optional
list of VM Tag attributes associated with this Object.
tools_running_status (ToolsRunningStatusEnum): Specifies the status of
VMware Tools for the guest OS on the VM. This is only valid for
the 'kVirtualMachine' type. 'kGuestToolsRunning' means the VMware
tools are running on the guest OS. 'kGuestToolsNotRunning' means
the VMware tools are not running on the guest OS. 'kUnknown' means
the state of the VMware tools on the guest OS is not known.
'kGuestToolsExecutingScripts' means the guest OS is currently
executing scripts using VMware tools.
mtype (Type19Enum): Specifies the type of managed Object in a VMware
Protection Source. Examples of VMware Objects include 'kVCenter',
'kFolder', 'kDatacenter', 'kResourcePool', 'kDatastore',
'kVirtualMachine', etc. 'kVCenter' indicates the vCenter entity in
a VMware protection source type. 'kFolder indicates the folder
entity (of any kind) in a VMware protection source type.
'kDatacenter' indicates the datacenter entity in a VMware
protection source type. 'kComputeResource' indicates the physical
compute resource entity in a VMware protection source type.
'kResourcePool' indicates the set of physical resources within a
compute resource or cloudcompute resource. 'kDataStore' indicates
the datastore entity in a VMware protection source type.
'kHostSystem' indicates the ESXi host entity in a VMware
protection source type. 'kVirtualMachine' indicates the virtual
machine entity in a VMware protection source type. 'kVirtualApp'
indicates the virtual app entity in a VMware protection source
type. 'kStandaloneHost' indicates the standalone ESXi host entity
(not managed by vCenter) in a VMware protection source type.
'kStoragePod' indicates the storage pod entity in a VMware
protection source type. 'kNetwork' indicates the standard vSwitch
in a VMware protection source type. 'kDistributedVirtualPortgroup'
indicates a distributed vSwitch port group in a VMware protection
source type. 'kTagCategory' indicates a tag category entity in a
VMware protection source type. 'kTag' indicates a tag entity in a
VMware protection source type. 'kOpaqueNetwork' indicates a opaque
network which is created and managed by an entity outside of
vSphere. 'kVCloudDirector' indicates a vCloud director entity in a
VMware protection source type. 'kOrganization' indicates an
Organization under a vCD in a VMware protection source type.
'kVirtualDatacenter' indicates a virtual datacenter entity in a
VMware protection source type. 'kCatalog' indicates a VCD catalog
entity in a VMware protection source type. 'kOrgMetadata'
indicates an VCD organization metadata in a VMware protection
source type. 'kStoragePolicy' indicates a storage policy
associated with the vApp in a VMware protection source type.
vcloud_director_info (list of VcloudDirectorInfoInformation):
Specifies an array of vCenters to be registered
virtual_disks (list of VirtualDiskInformation): Specifies an array of
virtual disks that are part of the Virtual Machine. This is
populated for entities of type 'kVirtualMachine'.
"""
# Create a mapping from Model property names to API property names
_names = {
"agent_id":'agentId',
"agents":'agents',
"connection_state":'connectionState',
"datastore_info":'datastoreInfo',
"folder_type":'folderType',
"has_persistent_agent":'hasPersistentAgent',
"host_type":'hostType',
"id":'id',
"is_vm_template":'isVmTemplate',
"name":'name',
"tag_attributes":'tagAttributes',
"tools_running_status":'toolsRunningStatus',
"mtype":'type',
"vcloud_director_info":'vCloudDirectorInfo',
"virtual_disks":'virtualDisks'
}
def __init__(self,
agent_id=None,
agents=None,
connection_state=None,
datastore_info=None,
folder_type=None,
has_persistent_agent=None,
host_type=None,
id=None,
is_vm_template=None,
name=None,
tag_attributes=None,
tools_running_status=None,
mtype=None,
vcloud_director_info=None,
virtual_disks=None):
"""Constructor for the VmwareProtectionSource class"""
# Initialize members of the class
self.agent_id = agent_id
self.agents = agents
self.connection_state = connection_state
self.datastore_info = datastore_info
self.folder_type = folder_type
self.has_persistent_agent = has_persistent_agent
self.host_type = host_type
self.id = id
self.is_vm_template = is_vm_template
self.name = name
self.tag_attributes = tag_attributes
self.tools_running_status = tools_running_status
self.mtype = mtype
self.vcloud_director_info = vcloud_director_info
self.virtual_disks = virtual_disks
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
agent_id = dictionary.get('agentId')
agents = None
if dictionary.get('agents') != None:
agents = list()
for structure in dictionary.get('agents'):
agents.append(cohesity_management_sdk.models.agent_information.AgentInformation.from_dictionary(structure))
connection_state = dictionary.get('connectionState')
datastore_info = cohesity_management_sdk.models.datastore_information.DatastoreInformation.from_dictionary(dictionary.get('datastoreInfo')) if dictionary.get('datastoreInfo') else None
folder_type = dictionary.get('folderType')
has_persistent_agent = dictionary.get('hasPersistentAgent')
host_type = dictionary.get('hostType')
id = cohesity_management_sdk.models.vmware_object_id.VmwareObjectId.from_dictionary(dictionary.get('id')) if dictionary.get('id') else None
is_vm_template = dictionary.get('isVmTemplate')
name = dictionary.get('name')
tag_attributes = None
if dictionary.get('tagAttributes') != None:
tag_attributes = list()
for structure in dictionary.get('tagAttributes'):
tag_attributes.append(cohesity_management_sdk.models.vmware_tag_attributes.VmwareTagAttributes.from_dictionary(structure))
tools_running_status = dictionary.get('toolsRunningStatus')
mtype = dictionary.get('type')
vcloud_director_info = None
if dictionary.get('vCloudDirectorInfo') != None:
vcloud_director_info = list()
for structure in dictionary.get('vCloudDirectorInfo'):
vcloud_director_info.append(cohesity_management_sdk.models.vcloud_director_info_information.VcloudDirectorInfoInformation.from_dictionary(structure))
virtual_disks = None
if dictionary.get('virtualDisks') != None:
virtual_disks = list()
for structure in dictionary.get('virtualDisks'):
virtual_disks.append(cohesity_management_sdk.models.virtual_disk_information.VirtualDiskInformation.from_dictionary(structure))
# Return an object of this model
return cls(agent_id,
agents,
connection_state,
datastore_info,
folder_type,
has_persistent_agent,
host_type,
id,
is_vm_template,
name,
tag_attributes,
tools_running_status,
mtype,
vcloud_director_info,
virtual_disks)
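
# --- Added usage sketch (not part of the generated SDK module): building a model
# instance from an API-style dictionary via from_dictionary(). The field values
# below are illustrative only.
if __name__ == "__main__":
    sample = {
        'agentId': 42,
        'connectionState': 'kConnected',
        'hasPersistentAgent': True,
        'isVmTemplate': False,
        'name': 'example-vm',
        'type': 'kVirtualMachine',
    }
    source = VmwareProtectionSource.from_dictionary(sample)
    print(source.name, source.mtype, source.connection_state)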
|
nilq/baby-python
|
python
|
from adv_finance.multiprocess.multiprocess import mp_pandas_obj, process_jobs, process_jobs_
|
nilq/baby-python
|
python
|
import pytest
from icevision.all import *
@pytest.fixture
def coco_bbox_parser(coco_dir):
    return parsers.COCOBBoxParser(coco_dir / "annotations.json", coco_dir / "images")


@pytest.fixture
def coco_mask_parser(coco_dir):
    return parsers.COCOMaskParser(coco_dir / "annotations.json", coco_dir / "images")


def test_keypoints_parser(coco_dir, coco_keypoints_parser):
    records = coco_keypoints_parser.parse(data_splitter=SingleSplitSplitter())[0]
    assert len(records) == 2

    record = records[1]
    assert record.filepath == coco_dir / "images/000000404249.jpg"
    assert len(record.detection.keypoints) == 1
    assert record.detection.keypoints[0].n_visible_keypoints == 16
    assert record.detection.keypoints[0].y.max() == 485
    assert len(records[0].detection.keypoints) == 3


def test_bbox_parser(coco_dir, coco_bbox_parser):
    records = coco_bbox_parser.parse(data_splitter=SingleSplitSplitter())[0]
    assert len(records) == 5

    record = records[0]
    assert record.record_id == 343934
    assert record.filepath == coco_dir / "images/000000343934.jpg"
    assert record.width == 640
    assert record.height == 480
    assert len(record.detection.class_map) == 91
    assert record.detection.class_map.get_by_id(90) == "toothbrush"
    assert record.detection.label_ids == [4]
    assert pytest.approx(record.detection.bboxes[0].xyxy) == (
        175.14,
        175.68,
        496.2199,
        415.68,
    )
    assert record.detection.iscrowds == [0]
    assert pytest.approx(record.detection.areas) == [43522.805]


def test_mask_parser(coco_mask_parser):
    records = coco_mask_parser.parse(data_splitter=SingleSplitSplitter())[0]
    assert len(records) == 5

    record = records[0]
    assert len(record.detection.masks) == 1
    assert record.detection.masks[0].points == [
        [
457.3,
258.92,
458.38,
276.22,
467.03,
289.19,
473.51,
305.41,
483.24,
334.59,
492.97,
359.46,
496.22,
390.81,
492.97,
415.68,
478.92,
397.3,
472.43,
382.16,
464.86,
361.62,
452.97,
340,
437.84,
334.59,
428.11,
337.84,
415.14,
354.05,
387.03,
373.51,
362.16,
361.62,
339.46,
351.89,
311.35,
348.65,
307.03,
352.97,
301.62,
363.78,
295.14,
373.51,
283.24,
392.97,
274.59,
402.7,
231.35,
409.19,
210.81,
403.78,
190.27,
383.24,
182.7,
367.03,
185.95,
336.76,
197.84,
312.97,
205.41,
306.49,
229.19,
289.19,
254.05,
280.54,
256.22,
258.92,
234.59,
242.7,
220.54,
247.03,
208.65,
258.92,
202.16,
271.89,
180.54,
285.95,
177.3,
287.03,
175.14,
266.49,
178.38,
250.27,
176.22,
236.22,
175.14,
222.16,
188.11,
209.19,
212.97,
212.43,
234.59,
220,
256.22,
223.24,
297.3,
227.57,
318.92,
234.05,
330.81,
235.14,
351.35,
226.49,
360,
218.92,
376.22,
216.76,
398.92,
217.84,
418.38,
213.51,
420.54,
201.62,
419.46,
196.22,
435.68,
183.24,
449.73,
175.68,
468.11,
176.76,
464.86,
192.97,
460.54,
196.22,
458.38,
207.03,
464.86,
221.08,
468.11,
234.05,
469.19,
240.54,
468.11,
243.78,
]
        ]
    ]
def test_coco_base_parser_init_from_dict(coco_dir):
    annotations_dict = json.load(open(coco_dir / "annotations.json"))
    parser_init_by_dict = parsers.COCOBBoxParser(annotations_dict, coco_dir / "images")
    assert parser_init_by_dict.annotations_dict == annotations_dict
|
nilq/baby-python
|
python
|
################################################################################
#
# Package : AlphaPy
# Module : analysis
# Created : July 11, 2013
#
# Copyright 2017 ScottFree Analytics LLC
# Mark Conway & Robert D. Scott II
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
#
# Imports
#
from alphapy.__main__ import main_pipeline
from alphapy.frame import load_frames
from alphapy.frame import sequence_frame
from alphapy.frame import write_frame
from alphapy.globals import SSEP, TAG_ID, USEP
from alphapy.utilities import subtract_days
from datetime import timedelta
import logging
import pandas as pd
from pandas.tseries.offsets import BDay
#
# Initialize logger
#
logger = logging.getLogger(__name__)
#
# Function analysis_name
#
def analysis_name(gname, target):
    r"""Get the name of the analysis.

    Parameters
    ----------
    gname : str
        Group name.
    target : str
        Target of the analysis.

    Returns
    -------
    name : str
        Value for the corresponding key.

    """
    name = USEP.join([gname, target])
    return name
#
# Class Analysis
#
class Analysis(object):
    """Create a new analysis for a group. All analyses are stored
    in ``Analysis.analyses``. Duplicate keys are not allowed.

    Parameters
    ----------
    model : alphapy.Model
        Model object for the analysis.
    group : alphapy.Group
        The group of members in the analysis.

    Attributes
    ----------
    Analysis.analyses : dict
        Class variable for storing all known analyses

    """

    analyses = {}

    # __new__

    def __new__(cls,
                model,
                group):
        # set analysis name
        name = model.specs['directory'].split(SSEP)[-1]
        target = model.specs['target']
        an = analysis_name(name, target)
        if not an in Analysis.analyses:
            return super(Analysis, cls).__new__(cls)
        else:
            logger.info("Analysis %s already exists", an)

    # function __init__

    def __init__(self,
                 model,
                 group):
        # set analysis name
        name = model.specs['directory'].split(SSEP)[-1]
        target = model.specs['target']
        an = analysis_name(name, target)
        # initialize analysis
        self.name = an
        self.model = model
        self.group = group
        # add analysis to analyses list
        Analysis.analyses[an] = self

    # __str__

    def __str__(self):
        return self.name
#
# Function run_analysis
#
def run_analysis(analysis, lag_period, forecast_period, leaders,
                 predict_history, splits=True):
    r"""Run an analysis for a given model and group.

    First, the data are loaded for each member of the analysis group.
    Then, the target value is lagged for the ``forecast_period``, and
    any ``leaders`` are lagged as well. Each frame is split along
    the ``predict_date`` from the ``analysis``, and finally the
    train and test files are generated.

    Parameters
    ----------
    analysis : alphapy.Analysis
        The analysis to run.
    lag_period : int
        The number of lagged features for the analysis.
    forecast_period : int
        The period for forecasting the target of the analysis.
    leaders : list
        The features that are contemporaneous with the target.
    predict_history : int
        The number of periods required for lookback calculations.
    splits : bool, optional
        If ``True``, then the data for each member of the analysis
        group are in separate files.

    Returns
    -------
    analysis : alphapy.Analysis
        The completed analysis.

    """

    # Unpack analysis
    name = analysis.name
    model = analysis.model
    group = analysis.group

    # Unpack model data
    predict_file = model.predict_file
    test_file = model.test_file
    train_file = model.train_file

    # Unpack model specifications
    directory = model.specs['directory']
    extension = model.specs['extension']
    predict_date = model.specs['predict_date']
    predict_mode = model.specs['predict_mode']
    separator = model.specs['separator']
    target = model.specs['target']
    train_date = model.specs['train_date']

    # Calculate split date
    logger.info("Analysis Dates")
    split_date = subtract_days(predict_date, predict_history)
    logger.info("Train Date: %s", train_date)
    logger.info("Split Date: %s", split_date)
    logger.info("Test  Date: %s", predict_date)

    # Load the data frames
    data_frames = load_frames(group, directory, extension, separator, splits)

    # Create dataframes
    if predict_mode:
        # create predict frame
        predict_frame = pd.DataFrame()
    else:
        # create train and test frames
        train_frame = pd.DataFrame()
        test_frame = pd.DataFrame()

    # Subset each individual frame and add to the master frame
    leaders.extend([TAG_ID])
    for df in data_frames:
        try:
            tag = df[TAG_ID].unique()[0]
        except:
            tag = 'Unknown'
        first_date = df.index[0]
        last_date = df.index[-1]
        logger.info("Analyzing %s from %s to %s", tag, first_date, last_date)
        # sequence leaders, laggards, and target(s)
        df = sequence_frame(df, target, forecast_period, leaders, lag_period)
        # get frame subsets
        if predict_mode:
            new_predict = df.loc[(df.index >= split_date) & (df.index <= last_date)]
            if len(new_predict) > 0:
                predict_frame = predict_frame.append(new_predict)
            else:
                logger.info("Prediction frame %s has zero rows. Check prediction date.",
                            tag)
        else:
            # split data into train and test
            new_train = df.loc[(df.index >= train_date) & (df.index < split_date)]
            if len(new_train) > 0:
                new_train = new_train.dropna()
                train_frame = train_frame.append(new_train)
                new_test = df.loc[(df.index >= split_date) & (df.index <= last_date)]
                if len(new_test) > 0:
                    # check if target column has NaN values
                    nan_count = df[target].isnull().sum()
                    forecast_check = forecast_period - 1
                    if nan_count != forecast_check:
                        logger.info("%s has %d records with NaN targets", tag, nan_count)
                    # drop records with NaN values in target column
                    new_test = new_test.dropna(subset=[target])
                    # append selected records to the test frame
                    test_frame = test_frame.append(new_test)
                else:
                    logger.info("Testing frame %s has zero rows. Check prediction date.",
                                tag)
            else:
                logger.info("Training frame %s has zero rows. Check data source.", tag)

    # Write out the frames for input into the AlphaPy pipeline
    directory = SSEP.join([directory, 'input'])
    if predict_mode:
        # write out the predict frame
        write_frame(predict_frame, directory, predict_file, extension, separator,
                    index=True, index_label='date')
    else:
        # write out the train and test frames
        write_frame(train_frame, directory, train_file, extension, separator,
                    index=True, index_label='date')
        write_frame(test_frame, directory, test_file, extension, separator,
                    index=True, index_label='date')

    # Run the AlphaPy pipeline
    analysis.model = main_pipeline(model)

    # Return the analysis
    return analysis
|
nilq/baby-python
|
python
|
import asyncio
import discord
from common import botcolour, botname, db, logo, prefix
from discord.ext import commands
from modules.utils import getChannel, isAdmin, system_notification
class New(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name="new", aliases=['create'])
async def new(self, ctx):
if isAdmin(ctx.message.author, ctx.guild.id):
sent_initial_message = await ctx.send("Welcome to the Reaction Light creation program. Please provide the required information once requested. If you would like to abort the creation, do not respond and the program will time out.")
rl_object = {}
cancelled = False
def check(message):
return message.author.id == ctx.message.author.id and message.content != ""
if cancelled == False:
error_messages = []
user_messages = []
sent_reactions_message = await ctx.send(
"Attach roles and emojis separated by one space (one combination"
" per message). When you are done type `done`. Example:\n:smile:"
" `@Role`"
)
rl_object["reactions"] = {}
try:
while True:
reactions_message = await self.bot.wait_for('message', timeout=120, check=check)
user_messages.append(reactions_message)
if reactions_message.content.lower() != "done":
reaction = (reactions_message.content.split())[0]
try:
role = reactions_message.role_mentions[0].id
except IndexError:
error_messages.append((await ctx.send(
"Mention a role after the reaction. Example:\n:smile:"
" `@Role`"
)))
continue
if reaction in rl_object["reactions"]:
error_messages.append((await ctx.send(
"You have already used that reaction for another role. Please choose another reaction"
)))
continue
else:
try:
await reactions_message.add_reaction(reaction)
rl_object["reactions"][reaction] = role
except discord.HTTPException:
error_messages.append((await ctx.send(
"You can only use reactions uploaded to servers the bot has"
" access to or standard emojis."
)))
continue
else:
break
except asyncio.TimeoutError:
await ctx.author.send("Reaction Light creation failed, you took too long to provide the requested information.")
cancelled = True
finally:
await sent_reactions_message.delete()
for message in error_messages + user_messages:
await message.delete()
if cancelled == False:
sent_oldmessagequestion_message = await ctx.send(f"Would you like to use an existing message or create one using {self.bot.user.mention}? Please react with a 🗨️ to use an existing message or a 🤖 to create one.")
def reaction_check(payload):
return payload.member.id == ctx.message.author.id and payload.message_id == sent_oldmessagequestion_message.id and (str(payload.emoji) == "🗨️" or str(payload.emoji) == "🤖")
try:
await sent_oldmessagequestion_message.add_reaction("🗨️")
await sent_oldmessagequestion_message.add_reaction("🤖")
oldmessagequestion_response_payload = await self.bot.wait_for('raw_reaction_add', timeout=120, check=reaction_check)
if str(oldmessagequestion_response_payload.emoji) == "🗨️":
rl_object["old_message"] = True
else:
rl_object["old_message"] = False
except asyncio.TimeoutError:
await ctx.author.send("Reaction Light creation failed, you took too long to provide the requested information.")
cancelled = True
finally:
await sent_oldmessagequestion_message.delete()
if cancelled == False:
error_messages = []
user_messages = []
if rl_object["old_message"] == True:
sent_oldmessage_message = await ctx.send(f"Which message would you like to use? Please react with a 🔧 on the message you would like to use.")
def reaction_check2(payload):
return payload.member.id == ctx.message.author.id and payload.guild_id == sent_oldmessage_message.guild.id and str(payload.emoji) == "🔧"
try:
while True:
                            oldmessage_response_payload = await self.bot.wait_for('raw_reaction_add', timeout=120, check=reaction_check2)
try:
channel = await getChannel(self.bot, oldmessage_response_payload.channel_id)
if channel is None:
raise discord.NotFound
try:
message = await channel.fetch_message(oldmessage_response_payload.message_id)
except discord.HTTPException:
raise discord.NotFound
try:
await message.add_reaction("👌")
await message.remove_reaction("👌", message.guild.me)
await message.remove_reaction("🔧", ctx.author)
except discord.HTTPException:
raise discord.NotFound
if db.exists(message.id):
raise ValueError
rl_object["message"] = dict(
message_id=message.id, channel_id=message.channel.id, guild_id=message.guild.id)
final_message = message
break
except discord.NotFound:
error_messages.append((await ctx.send("I can not access or add reactions to the requested message. Do I have sufficent permissions?")))
except ValueError:
error_messages.append((await ctx.send(f"This message already got a reaction light instance attached to it, consider running `{prefix}edit` instead.")))
except asyncio.TimeoutError:
await ctx.author.send("Reaction Light creation failed, you took too long to provide the requested information.")
cancelled = True
finally:
await sent_oldmessage_message.delete()
for message in error_messages:
await message.delete()
else:
sent_channel_message = await ctx.send("Mention the #channel where to send the auto-role message.")
try:
while True:
channel_message = await self.bot.wait_for('message', timeout=120, check=check)
if channel_message.channel_mentions:
rl_object["target_channel"] = channel_message.channel_mentions[0]
break
else:
error_messages.append((await message.channel.send("The channel you mentioned is invalid.")))
except asyncio.TimeoutError:
await ctx.author.send("Reaction Light creation failed, you took too long to provide the requested information.")
cancelled = True
finally:
await sent_channel_message.delete()
for message in error_messages:
await message.delete()
if cancelled == False and 'target_channel' in rl_object:
error_messages = []
selector_embed = discord.Embed(
title="Embed_title",
description="Embed_content",
colour=botcolour,
)
selector_embed.set_footer(text=f"{botname}", icon_url=logo)
                sent_message_message = await ctx.send(
"What would you like the message to say?\nFormatting is:"
" `Message // Embed_title // Embed_content`.\n\n`Embed_title`"
" and `Embed_content` are optional. You can type `none` in any"
" of the argument fields above (e.g. `Embed_title`) to make the"
" bot ignore it.\n\n\nMessage",
embed=selector_embed,
)
try:
while True:
message_message = await self.bot.wait_for('message', timeout=120, check=check)
# I would usually end up deleting message_message in the end but users usually want to be able to access the
# format they once used incase they want to make any minor changes
msg_values = message_message.content.split(" // ")
# This whole system could also be re-done using wait_for to make the syntax easier for the user
# But it would be a breaking change that would be annoying for thoose who have saved their message commands
# for editing.
selector_msg_body = (
msg_values[0] if msg_values[0].lower(
) != "none" else None
)
selector_embed = discord.Embed(colour=botcolour)
selector_embed.set_footer(
text=f"{botname}", icon_url=logo)
if len(msg_values) > 1:
if msg_values[1].lower() != "none":
selector_embed.title = msg_values[1]
if len(msg_values) > 2 and msg_values[2].lower() != "none":
selector_embed.description = msg_values[2]
# Prevent sending an empty embed instead of removing it
selector_embed = (
selector_embed
if selector_embed.title or selector_embed.description
else None
)
if selector_msg_body or selector_embed:
target_channel = rl_object["target_channel"]
sent_final_message = None
try:
sent_final_message = await target_channel.send(
content=selector_msg_body, embed=selector_embed
)
rl_object["message"] = dict(
message_id=sent_final_message.id, channel_id=sent_final_message.channel.id, guild_id=sent_final_message.guild.id)
final_message = sent_final_message
break
except discord.Forbidden:
                                error_messages.append((await ctx.send(
"I don't have permission to send messages to"
f" the channel {target_channel.mention}. Please check my permissions and try again."
)))
except asyncio.TimeoutError:
await ctx.author.send("Reaction Light creation failed, you took too long to provide the requested information.")
cancelled = True
finally:
await sent_message_message.delete()
for message in error_messages:
await message.delete()
if cancelled == False:
# Ait we are (almost) all done, now we just need to insert that into the database and add the reactions 💪
try:
r = db.add_reaction_role(rl_object)
except Exception:
await ctx.send(f"The requested message already got a reaction light instance attached to it, consider running `{prefix}edit` instead.")
return
if isinstance(r, Exception):
await system_notification(self.bot,
ctx.message.guild.id,
f"Database error when creating reaction-light instance:\n```\n{r}\n```",
)
return
for reaction, _ in rl_object["reactions"].items():
await final_message.add_reaction(reaction)
await ctx.message.add_reaction("✅")
await sent_initial_message.delete()
if cancelled == True:
await ctx.message.add_reaction("❌")
else:
await ctx.send(
f"You do not have an admin role. You might want to use `{prefix}admin`"
" first."
)
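# Hedged registration stub (assumption): discord.py cogs are conventionally loaded through a
# module-level setup() hook, but this bot's loader is not shown in this file. On discord.py 2.x
# the hook must be async and add_cog must be awaited.
def setup(bot):
    bot.add_cog(New(bot))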
|
nilq/baby-python
|
python
|
import math
import random
import numpy as np
import quaternion
from scipy import integrate
import cv2
from scipy.interpolate import RegularGridInterpolator, NearestNDInterpolator
from scipy.interpolate.interpnd import LinearNDInterpolator
from visnav.algo.image import ImageProc
from visnav.algo import tools
class Particles:
# TODO: figure out a better way than the following coef (maybe px_sr involved still?)
CONE_INTENSITY_COEF = 3e-8 # heuristical coef that scales the jet intensities
(
TYPE_HAZE_ONLY,
TYPE_CONES,
TYPE_VOXELS,
) = range(3)
def __init__(self, cam, density, scale, voxels=None, cones=None, haze=0.0):
self.cam = cam
self.density = density
self.scale = scale
self.voxels = voxels
self.cones = cones
self.haze = haze
self.type = Particles.TYPE_HAZE_ONLY
if cones is not None:
self.type = Particles.TYPE_CONES
elif voxels is not None:
self.type = Particles.TYPE_VOXELS
else:
assert haze > 0, 'Need to give either cones, voxels, or haze. Not both cones and voxels though.'
def flux_density(self, img, dist, mask, lf_ast_v, lf_ast_q, lf_light_v, solar_flux):
if self.voxels is not None:
voxel_res = self.flux_density_voxels(lf_ast_v, lf_ast_q, mask, solar_flux, down_scaling=10, quad_lim=5)
else:
voxel_res = np.array([0])
if self.cones is not None:
if isinstance(self.cones, dict):
n = self.cones.pop('n')
self.cones = Cone.random(n, self.cam, img, dist, mask, lf_ast_v, lf_ast_q, **self.cones)
cone_res = self.flux_density_cones(lf_ast_v, lf_ast_q, mask, solar_flux, down_scaling=6)
else:
cone_res = np.array([0])
if self.haze > 0:
haze_res = self.flux_density_haze(mask, solar_flux)
else:
haze_res = np.array([0])
return voxel_res + cone_res + haze_res
def _px_ray_axes(self, scaling, dq): # TODO: use dq
# construct an array of unit rays, one ray per pixel
iK = self.cam.inv_intrinsic_camera_mx()
xx, yy = np.meshgrid(np.linspace(self.cam.width, 0, int(self.cam.width * scaling)),
np.linspace(0, self.cam.height, int(self.cam.height * scaling)), indexing='xy')
img_coords = np.vstack((xx.flatten() + 0.5, yy.flatten() + 0.5, np.ones(xx.size)))
ray_axes = iK.dot(img_coords).T * -1
ray_axes /= np.linalg.norm(ray_axes, axis=1).reshape((-1, 1))
return tools.q_times_mx(dq.conj(), ray_axes), xx.shape
def flux_density_voxels(self, lf_ast_v, lf_ast_q, mask, solar_flux, down_scaling=1, quad_lim=15):
# TODO: implement this someday in OpenGL, e.g. like this: http://www.alexandre-pestana.com/volumetric-lights
assert down_scaling >= 1, 'only values of >=1 make sense for down_scaling'
dq = lf_ast_q * self.voxels.lf_ast_q.conj()
dv = tools.q_times_v(dq.conj(), lf_ast_v - self.voxels.lf_ast_v)
ny, nx, nz = self.voxels.voxel_data.shape
dx, dy, dz = [self.voxels.cell_size] * 3
gx = np.linspace(-(nx-1) * dx / 2, (nx-1) * dx / 2, nx)
gy = np.linspace(-(ny-1) * dy / 2, (ny-1) * dy / 2, ny)
gz = np.linspace(-(nz-1) * dz / 2, (nz-1) * dz / 2, nz)
interp3d = RegularGridInterpolator((gx, gy, gz), self.voxels.voxel_data, bounds_error=False, fill_value=0.0)
#interp3d = NearestNDInterpolator((gx, gy, gz), self.voxels.voxel_data)
ray_axes, sc_shape = self._px_ray_axes(1 / down_scaling, dq)# tools.ypr_to_q(0, np.pi, 0) * dq)
margin = 0.0
dist = np.linalg.norm(dv)
fg_near = dist - nz*dz/2
fg_far = dist - margin/2
bg_near = dist + margin/2
bg_far = dist + nz*dz/2
def quad_fn(interp3d, ray_axes, near, far):
points = None #np.linspace(near, far, nx/2)
res = integrate.quad_vec(lambda r: interp3d(ray_axes * r - dv), near, far, points=points, limit=quad_lim)
return res[0]
# integrate density along the rays (quad_vec requires at least scipy 1.4.x)
res = quad_fn(interp3d, ray_axes, bg_near, bg_far)
bg_res = cv2.resize(res.reshape(sc_shape).astype(np.float32), mask.shape)
bg_res[mask] = 0
# integrate density along the rays (quad_vec requires at least scipy 1.4.x)
res = quad_fn(interp3d, ray_axes, fg_near, fg_far)
fg_res = cv2.resize(res.reshape(sc_shape).astype(np.float32), mask.shape)
result = ((0 if bg_res is None else bg_res) + (0 if fg_res is None else fg_res)) \
* Particles.CONE_INTENSITY_COEF * solar_flux * self.voxels.intensity
return result
def flux_density_cones(self, lf_ast_v, lf_ast_q, mask, solar_flux, down_scaling=1, quad_lim=25):
"""
- The jet generated is a truncated cone that has a density proportional to (truncation_distance/distance_from_untruncated_origin)**2
- Truncation so that base width is 0.1 of mask diameter
- base_loc gives the coordinates of the base in camera frame (opengl type, -z camera axis, +y is up)
- 95% of luminosity lost when distance from base is `length`
- angular_radius [rad] of the cone, 95% of luminosity lost if this much off axis, uses normal distribution
- intensity of the cone at truncation point (>0)
- if phase_angle < pi/2, cone not drawn on top of masked parts of image as it starts behind object
- direction: 0 - to the right, pi/2 - up, pi - left, -pi/2 - down
"""
assert down_scaling >= 1, 'only values of >=1 make sense for down_scaling'
scaling = 1 / down_scaling
base_locs = [c.base_loc for c in self.cones]
phase_angles = [c.phase_angle for c in self.cones]
directions =[c.direction for c in self.cones]
trunc_lens = [c.trunc_len for c in self.cones]
angular_radii = [c.angular_radius for c in self.cones]
intensities = [c.intensity for c in self.cones]
axes = []
for i in range(len(self.cones)):
# q = np.quaternion(math.cos(-direction / 2), 0, 0, math.sin(-direction / 2)) \
# * np.quaternion(math.cos(phase_angle / 2), 0, math.sin(phase_angle / 2), 0)
q1 = np.quaternion(math.cos(-phase_angles[i] / 2), 0, math.sin(-phase_angles[i] / 2), 0)
q2 = np.quaternion(math.cos(directions[i] / 2), 0, 0, math.sin(directions[i] / 2))
q = q2 * q1
axis = tools.q_times_v(q, np.array([0, 0, -1]))
axes.append(axis)
base_locs[i] -= axis * trunc_lens[i]
# density function of the jet
def density(loc_arr, base_loc, axis, d0, angular_radius, intensity):
loc_arr = loc_arr - base_loc
r, d = tools.dist_across_and_along_vect(loc_arr, axis)
# r, d = tools.point_vector_dist(loc_arr, axis, dist_along_v=True)
# get distance along axis
coef = np.zeros((len(loc_arr), 1))
coef[d > d0] = (d0 / d[d > d0]) ** 2
# get radial distance from axis, use normal dist pdf but scaled so that max val is 1
r_sd = d[coef > 0] * np.tan(angular_radius)
coef[coef > 0] *= np.exp((-0.5 / r_sd ** 2) * (r[coef > 0] ** 2))
return coef * intensity
        # self.cones is a list of Cone objects that all share the same reference pose,
        # so take the pose stored on the first cone
        dq = lf_ast_q * self.cones[0].lf_ast_q.conj()
        dv = tools.q_times_v(dq.conj(), lf_ast_v - self.cones[0].lf_ast_v)
ray_axes, sc_shape = self._px_ray_axes(scaling, dq)
def i_fun(r, arg_arr):
result = None
for args in arg_arr:
res = density(ray_axes * r - dv, *args)
if result is None:
result = res
else:
result += res
return result
bg_args_arr, bg_near, bg_far = [], np.inf, -np.inf
fg_args_arr, fg_near, fg_far = [], np.inf, -np.inf
for args in zip(base_locs, axes, trunc_lens, angular_radii, intensities):
base_loc, axis, trunc_len, angular_radius, intensity = args
dist = np.linalg.norm(base_loc)
if axis[2] < 0:
# z-component of axis is negative => jet goes away from cam => starts behind object
bg_args_arr.append(args)
bg_near = min(bg_near, dist - trunc_len)
bg_far = max(bg_far, 2 * dist) # crude heuristic, is it enough?
else:
# jet goes towards cam
fg_args_arr.append(args)
fg_near = min(fg_near, 0)
fg_far = max(fg_far, dist + trunc_len)
bg_res, fg_res = None, None
if bg_args_arr:
# integrate density along the rays (quad_vec requires at least scipy 1.4.x)
res = integrate.quad_vec(lambda r: i_fun(r, bg_args_arr), bg_near, bg_far, limit=quad_lim)
# bg_sc = np.max(res[0])/maxval
bg_res = cv2.resize(res[0].reshape(sc_shape).astype(np.float32), mask.shape)
# bg_res = cv2.resize((res[0]/bg_sc).reshape(xx.shape).astype(img.dtype), img.shape).astype(np.float32)*bg_sc
bg_res[mask] = 0
if fg_args_arr:
# integrate density along the rays (quad_vec requires at least scipy 1.4.x)
res = integrate.quad_vec(lambda r: i_fun(r, fg_args_arr), fg_near, fg_far, limit=quad_lim)
# fg_sc = np.max(res[0])/maxval
fg_res = cv2.resize(res[0].reshape(sc_shape).astype(np.float32), mask.shape)
# fg_res = cv2.resize((res[0]/fg_sc).reshape(xx.shape).astype(img.dtype), img.shape).astype(np.float32)*fg_sc
result = ((0 if bg_res is None else bg_res) + (0 if fg_res is None else fg_res)) \
* Particles.CONE_INTENSITY_COEF * solar_flux
# max_r = np.max(result)
# if max_r > 0:
# maxval = ImageProc._img_max_valid(img)
# result = (result / max_r) * maxval * np.max(intensities)
return result
def flux_density_haze(self, mask, solar_flux):
result = np.ones(mask.shape, dtype=np.float32) \
* Particles.CONE_INTENSITY_COEF * solar_flux * self.haze * 0.5
if mask is not None:
result[np.logical_not(mask)] *= 2
else:
result *= 2
return result
class VoxelParticles:
def __init__(self, voxel_data, cell_size, intensity, lf_ast_v=np.zeros((3,)), lf_ast_q=quaternion.one):
self.voxel_data = voxel_data
self.cell_size = cell_size
self.intensity = intensity
self.lf_ast_v = lf_ast_v
self.lf_ast_q = lf_ast_q
class Cone:
def __init__(self, base_loc, trunc_len, phase_angle, direction, intensity, angular_radius, lf_ast_v, lf_ast_q):
self.base_loc = base_loc
self.trunc_len = trunc_len
self.phase_angle = phase_angle
self.direction = direction
self.intensity = intensity
self.angular_radius = angular_radius
self.lf_ast_v = lf_ast_v
self.lf_ast_q = lf_ast_q
@staticmethod
def random(n, cam, img, dist, mask, lf_ast_v, lf_ast_q, jet_int_mode, jet_int_conc, trunc_len_m,
trunc_len_sd=0.2, ang_rad_m=np.pi/30, ang_rad_sd=0.3):
fg_yx = np.vstack(np.where(np.logical_and(mask, img > np.max(img)*0.2))).T
bg_yx = np.vstack(np.where(mask)).T
cones = []
for i in range(n):
phase_angle = np.random.uniform(0, np.pi)
if phase_angle < np.pi / 2 and len(bg_yx) > 0:
yi, xi = random.choice(bg_yx)
elif len(fg_yx) > 0:
yi, xi = random.choice(fg_yx)
else:
continue
z = -dist[yi, xi]
x, y = cam.calc_xy(xi, yi, z)
base_loc = np.array((x, y, z))
alpha = jet_int_mode * (jet_int_conc - 2) + 1
beta = (1 - jet_int_mode) * (jet_int_conc - 2) + 1
intensity = np.random.beta(alpha, beta)
direction = np.random.uniform(-np.pi, np.pi)
trunc_len = 1e-3 * trunc_len_m * np.random.lognormal(0, trunc_len_sd)
angular_radius = ang_rad_m * np.random.lognormal(0, ang_rad_sd)
cones.append(Cone(base_loc, trunc_len, phase_angle, direction, intensity,
angular_radius, lf_ast_v, lf_ast_q))
return cones
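# Hedged usage sketch (assumptions): the cone and voxel paths need a real visnav Camera and an
# asteroid pose, so this only exercises the haze-only branch, which depends on nothing but a
# boolean mask and a solar flux value. The numbers below are purely illustrative.
if __name__ == '__main__':
    example_mask = np.zeros((64, 64), dtype=bool)   # True where the target body covers the image
    example_mask[24:40, 24:40] = True
    example_solar_flux = 1361.0                      # roughly the solar constant at 1 au
    haze_only = Particles(cam=None, density=1.0, scale=1.0, haze=0.05)
    haze_flux = haze_only.flux_density_haze(example_mask, example_solar_flux)
    print(haze_flux.shape, float(haze_flux.min()), float(haze_flux.max()))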
|
nilq/baby-python
|
python
|
import os
import pickle
import json
import numpy as np
from sklearn.externals import joblib
from sklearn.linear_model import Ridge
from inference_schema.schema_decorators import input_schema, output_schema
from inference_schema.parameter_types.numpy_parameter_type import NumpyParameterType
def init():
global model
# AZUREML_MODEL_DIR is an environment variable created during deployment.
# It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)
# For multiple models, it points to the folder containing all deployed models (./azureml-models)
model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'sklearn_regression_model.pkl')
# deserialize the model file back into a sklearn model
model = joblib.load(model_path)
input_sample = np.array([[10, 9, 8, 7, 6, 5, 4, 3, 2, 1]])
output_sample = np.array([3726.995])
@input_schema('data', NumpyParameterType(input_sample))
@output_schema(NumpyParameterType(output_sample))
def run(data):
try:
result = model.predict(data)
# you can return any datatype as long as it is JSON-serializable
return result.tolist()
except Exception as e:
error = str(e)
return error
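# Hedged local smoke test (assumption): outside an Azure ML deployment AZUREML_MODEL_DIR is not
# set, so point it at a folder that contains sklearn_regression_model.pkl before calling init().
# The folder name '.' below is a placeholder.
if __name__ == '__main__':
    os.environ.setdefault('AZUREML_MODEL_DIR', '.')
    init()
    print(run(data=np.array([[10, 9, 8, 7, 6, 5, 4, 3, 2, 1]])))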
|
nilq/baby-python
|
python
|
import SimpleHTTPServer
import SocketServer
import sys
import time
class MyHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def handle_one_request(self):
time.sleep(0.1)
return SimpleHTTPServer.SimpleHTTPRequestHandler.handle_one_request(self)
print("Serving local directory")
httpd = SocketServer.TCPServer(("", 8080), MyHandler)
while True:
httpd.handle_request()
|
nilq/baby-python
|
python
|
"""
Support to interface with LGE ThinQ Devices.
"""
__version__ = "0.23.0"
PROJECT_URL = "https://github.com/ollo69/ha-smartthinq-sensors/"
ISSUE_URL = "{}issues".format(PROJECT_URL)
DOMAIN = "smartthinq_sensors"
MIN_HA_MAJ_VER = 2022
MIN_HA_MIN_VER = 5
__min_ha_version__ = f"{MIN_HA_MAJ_VER}.{MIN_HA_MIN_VER}.0"
CONF_LANGUAGE = "language"
CONF_OAUTH_URL = "outh_url"
CONF_USE_API_V2 = "use_api_v2"
CLIENT = "client"
LGE_DEVICES = "lge_devices"
DEFAULT_ICON = "def_icon"
DEFAULT_SENSOR = "default"
STARTUP = """
-------------------------------------------------------------------
{}
Version: {}
This is a custom component
If you have any issues with this you need to open an issue here:
{}
-------------------------------------------------------------------
""".format(
DOMAIN, __version__, ISSUE_URL
)
|
nilq/baby-python
|
python
|
# coding: utf-8
from sqlalchemy import CHAR, Column, Date, Float, ForeignKey, Integer, LargeBinary, SmallInteger, String, Table, Text, text
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class Category(Base):
__tablename__ = 'categories'
CategoryID = Column(SmallInteger, primary_key=True, server_default=text(
"nextval('categories_categoryid_seq'::regclass)"))
CategoryName = Column(String(15), nullable=False)
Description = Column(Text)
Picture = Column(LargeBinary)
class Customercustomerdemo(Base):
__tablename__ = 'customercustomerdemo'
CustomerID = Column(CHAR(6), primary_key=True, nullable=False)
CustomerTypeID = Column(CHAR(6), primary_key=True, nullable=False)
class Customerdemographic(Base):
__tablename__ = 'customerdemographics'
CustomerTypeID = Column(SmallInteger, primary_key=True, server_default=text(
"nextval('customerdemographics_customertypeid_seq'::regclass)"))
CustomerDesc = Column(Text)
class Customer(Base):
__tablename__ = 'customers'
CustomerID = Column(CHAR(6), primary_key=True)
CompanyName = Column(String(40), nullable=False)
ContactName = Column(String(30))
ContactTitle = Column(String(30))
Address = Column(String(60))
City = Column(String(15))
Region = Column(String(15))
PostalCode = Column(String(10))
Country = Column(String(15))
Phone = Column(String(24))
Fax = Column(String(24))
class Employee(Base):
__tablename__ = 'employees'
EmployeeID = Column(SmallInteger, primary_key=True, server_default=text(
"nextval('employees_employeeid_seq'::regclass)"))
LastName = Column(String(20), nullable=False)
FirstName = Column(String(10), nullable=False)
Title = Column(String(30))
TitleOfCourtesy = Column(String(25))
BirthDate = Column(Date)
HireDate = Column(Date)
Address = Column(String(60))
City = Column(String(15))
Region = Column(String(15))
PostalCode = Column(String(10))
Country = Column(String(15))
HomePhone = Column(String(24))
Extension = Column(String(4))
Photo = Column(LargeBinary)
Notes = Column(Text)
ReportsTo = Column(SmallInteger)
PhotoPath = Column(String(255))
class Employeeterritory(Base):
__tablename__ = 'employeeterritories'
EmployeeID = Column(SmallInteger, primary_key=True, nullable=False)
TerritoryID = Column(Integer, primary_key=True, nullable=False)
class OrderDetail(Base):
__tablename__ = 'order_details'
OrderID = Column(SmallInteger, primary_key=True, nullable=False)
ProductID = Column(SmallInteger, primary_key=True, nullable=False)
UnitPrice = Column(Float, nullable=False)
Quantity = Column(SmallInteger, nullable=False)
Discount = Column(Float, nullable=False)
class Region(Base):
__tablename__ = 'region'
RegionID = Column(SmallInteger, primary_key=True, server_default=text(
"nextval('region_regionid_seq'::regclass)"))
RegionDescription = Column(CHAR(8), nullable=False)
class Shipper(Base):
__tablename__ = 'shippers'
ShipperID = Column(SmallInteger, primary_key=True, server_default=text(
"nextval('shippers_shipperid_seq'::regclass)"))
CompanyName = Column(String(40), nullable=False)
Phone = Column(String(24))
class ShippersTmp(Base):
__tablename__ = 'shippers_tmp'
ShipperID = Column(SmallInteger, primary_key=True, server_default=text(
"nextval('shippers_tmp_shipperid_seq'::regclass)"))
CompanyName = Column(String(40), nullable=False)
Phone = Column(String(24))
class Supplier(Base):
__tablename__ = 'suppliers'
SupplierID = Column(SmallInteger, primary_key=True, server_default=text(
"nextval('suppliers_supplierid_seq'::regclass)"))
CompanyName = Column(String(40), nullable=False)
ContactName = Column(String(30))
ContactTitle = Column(String(30))
Address = Column(String(60))
City = Column(String(15))
Region = Column(String(15))
PostalCode = Column(String(10))
Country = Column(String(15))
Phone = Column(String(24))
Fax = Column(String(24))
HomePage = Column(Text)
class Territory(Base):
__tablename__ = 'territories'
TerritoryID = Column(Integer, primary_key=True, server_default=text(
"nextval('territories_territoryid_seq'::regclass)"))
TerritoryDescription = Column(CHAR(64), nullable=False)
RegionID = Column(SmallInteger, nullable=False)
t_usstates = Table(
'usstates', metadata,
Column('StateID', SmallInteger, nullable=False, server_default=text(
"nextval('usstates_stateid_seq'::regclass)")),
Column('StateName', String(100)),
Column('StateAbbr', String(2)),
Column('StateRegion', String(50))
)
class Order(Base):
__tablename__ = 'orders'
OrderID = Column(SmallInteger, primary_key=True, server_default=text(
"nextval('orders_orderid_seq'::regclass)"))
CustomerID = Column(ForeignKey('customers.CustomerID'))
EmployeeID = Column(ForeignKey('employees.EmployeeID'))
OrderDate = Column(Date)
RequiredDate = Column(Date)
ShippedDate = Column(Date)
ShipVia = Column(ForeignKey('shippers.ShipperID'))
Freight = Column(Float)
ShipName = Column(String(40))
ShipAddress = Column(String(60))
ShipCity = Column(String(15))
ShipRegion = Column(String(15))
ShipPostalCode = Column(String(10))
ShipCountry = Column(String(15))
customer = relationship('Customer')
employee = relationship('Employee')
shipper = relationship('Shipper')
class Product(Base):
__tablename__ = 'products'
ProductID = Column(SmallInteger, primary_key=True, server_default=text(
"nextval('products_productid_seq'::regclass)"))
ProductName = Column(String(40), nullable=False)
SupplierID = Column(ForeignKey(
'suppliers.SupplierID', ondelete='SET NULL'), nullable=True)
CategoryID = Column(ForeignKey('categories.CategoryID'))
QuantityPerUnit = Column(String(20))
UnitPrice = Column(Float)
UnitsInStock = Column(SmallInteger)
UnitsOnOrder = Column(SmallInteger)
ReorderLevel = Column(SmallInteger)
Discontinued = Column(Integer, nullable=False)
category = relationship('Category')
supplier = relationship('Supplier')
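# Hedged usage sketch (assumption): the DSN below is a placeholder for a local Northwind
# database; swap in real credentials before running. It only shows how the declarative models
# above are typically bound to an engine and queried through the Order -> Customer relationship.
if __name__ == '__main__':
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    engine = create_engine('postgresql://user:password@localhost/northwind')  # placeholder DSN
    Session = sessionmaker(bind=engine)
    session = Session()
    for order in session.query(Order).join(Customer).limit(5):
        print(order.OrderID, order.customer.CompanyName if order.customer else None)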
|
nilq/baby-python
|
python
|
from art import logo, vs
from data import data
import random
print(logo)
my_score = 0
missed = True
while missed:
random_data1 = random.choice(data)
score_1 = random_data1["follower_count"]
random_data2 = random.choice(data)
score_2 = random_data2["follower_count"]
print(f"Compare A: {random_data1['name']}, {random_data1['description']}, from {random_data1['country']}" )
print(vs)
print(f"Compare B: {random_data2['name']}, {random_data2['description']}, from {random_data2['country']}" )
print(score_1,score_2)
answer = input("Who has more followers 'A' or 'B': ").lower()
if answer == "a" and score_1 > score_2:
my_score += 1
print("\n Yes! \n \n")
elif answer == "a" and score_1 < score_2:
print(f"No! You lost! Your score is {my_score}")
missed = False
elif answer == "b" and score_1 < score_2:
my_score += 1
print("\n Yes! \n \n")
elif answer == "b" and score_1 > score_2:
print(f"No! You lost! Your score is {my_score}")
missed = False
|
nilq/baby-python
|
python
|
from typing import List, Optional
from aws_cdk.aws_lambda import Runtime
from aws_cdk.core import Stack
from b_cfn_lambda_layer.lambda_layer import LambdaLayer
class OpensearchIndexLayer(LambdaLayer):
def __init__(
self,
scope: Stack,
name: str
) -> None:
super().__init__(
scope=scope,
name=name,
source_path=self.source_path(),
code_runtimes=self.runtimes()
)
@staticmethod
def source_path() -> str:
from . import root
return root
@staticmethod
def runtimes() -> Optional[List[Runtime]]:
return [Runtime.PYTHON_3_8]
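# Hedged usage sketch (assumptions): the app/stack names are illustrative, and source_path()
# relies on this module's package-relative import, so the snippet only makes sense when the
# package is installed rather than run as a loose script.
if __name__ == '__main__':
    from aws_cdk.core import App
    app = App()
    stack = Stack(app, 'ExampleStack')
    layer = OpensearchIndexLayer(scope=stack, name='ExampleOpensearchIndexLayer')
    app.synth()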
|
nilq/baby-python
|
python
|
import no_pda_exceptions as e
class NOPDA(object):
def __init__(self, rules, input_alphabet, states,
initial_states, terminate_states):
self.rules = rules
self.input_alphabet = input_alphabet
self.states = states
        self.current_state = set(initial_states)
        self.terminate_states = set(terminate_states)
self.crash = False
def _crash(self):
self.crash = True
def input(self, symbol):
if self.crash:
raise e.NOPDACrashException(
"Error by input. PDA is crashed!")
        new_states = set()
        for state in self.current_state:
            try:
                '''
                TODO Need add checkers for UnknowSymbols Exception
                '''
                new_states.update(self.rules[state][symbol])
            except KeyError:
                # no transition defined for this state/symbol pair
                pass
if not new_states:
self._crash()
else:
self.current_state = new_states
def in_terminate_state(self):
        return bool(self.current_state & self.terminate_states)
'''
input_alphabet = {'a', 'b', 'c'}
states = {'s1', 's2', 's3'}
rules = {'s1': {'a': ['s1'], 'b': ['s2'], 'c': ['s3']}, 's2': {'a': ['s2'], 'b': ['s2'], 'c': ['s3', 's1']}, 's3': {'a': ['s2'], 'b': ['s1'], 'c': ['s3']}}
'''
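# Hedged usage sketch: this wires up the example alphabet/states/rules from the comment above
# and feeds the input word 'abc' through the automaton.
if __name__ == '__main__':
    example_rules = {'s1': {'a': ['s1'], 'b': ['s2'], 'c': ['s3']},
                     's2': {'a': ['s2'], 'b': ['s2'], 'c': ['s3', 's1']},
                     's3': {'a': ['s2'], 'b': ['s1'], 'c': ['s3']}}
    automaton = NOPDA(rules=example_rules,
                      input_alphabet={'a', 'b', 'c'},
                      states={'s1', 's2', 's3'},
                      initial_states={'s1'},
                      terminate_states={'s3'})
    for symbol in 'abc':
        automaton.input(symbol)
    print('accepted:', automaton.in_terminate_state())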
|
nilq/baby-python
|
python
|
# Generated by Django 3.1.13 on 2022-05-28 00:03
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Course',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, help_text='Unique Record Identifier', primary_key=True, serialize=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=50)),
('code', models.CharField(max_length=10, unique=True)),
('credits', models.PositiveSmallIntegerField(default=1)),
],
options={
'abstract': False,
},
),
]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 21 16:29:21 2020
@author: willi
"""
import re
try:
from snapgene_reader import snapgene_file_to_dict, snapgene_file_to_seqrecord
except:
pass
try:
from Bio import SeqIO
from Bio import Entrez
except:
pass
import os
class FileParser():
'''
Class to parse incoming files
'''
def __init__(self):
self.file = None
self.letter_dict = ['a','c','u','g']
self.sub_dict = {'m':'a','w':'a','r':'g','y':'t','k':'g','s':'g','w':'a','h':'a','n':'a'}
self.test_seq = 'aucuguacguacguaucgaucguguacuggcaaaacguaguagcugagcaucaucuaug'
pass
def clean_seq(self,seq):
'''
clean the sequences to lowercase only a, u, g, c
'''
seq = seq.lower()
for key in self.sub_dict.keys():
seq = seq.replace(key,self.sub_dict[key])
seq = seq.replace('t','u')
return seq
def get_sequence(self,file):
'''
Return the sequence from several different file types
Supported: .txt, .dna, .gb, .fasta
'''
self.__check_valid_file(file)
extension = file.split('.')[-1]
if extension == 'dna':
try:
seq_record = snapgene_file_to_seqrecord(file)
except:
                print('To read .dna files please install snapgene_reader: pip install snapgene_reader - https://github.com/IsaacLuo/SnapGeneFileReader')
                raise
sequence_str = str(seq_record.seq)
if extension == 'txt':
sequence_str = self.__get_seq_from_txt(file)
if extension == 'gb':
gb_record = SeqIO.read(open(file, "r"), "genbank")
sequence_str = str(gb_record.seq)
if extension == 'fasta':
fastas = list(SeqIO.parse(file,'fasta'))
if len(fastas) > 1:
return 'Multiple line fastas not supported'
else:
sequence_str = (str(fastas[0].seq))
cleaned_sequence_str = self.clean_seq(sequence_str)
return cleaned_sequence_str
def __check_valid_file(self,file):
extension = file.split('.')[-1]
if extension in ['fasta','gb','txt','dna']:
return True
else:
raise Exception("Unrecognized File type, the sequence file must be a .txt, .dna, .gb, or .fasta")
def get_name(self,file):
self.__check_valid_file(file)
name = 'unknown'
extension = file.split('.')[-1]
if extension == 'fasta':
fastas = list(SeqIO.parse(file,'fasta'))
if len(fastas) > 1:
return 'Multiple line fastas not supported'
else:
name = (str(fastas[0].id))
if extension == 'gb':
gb_record = SeqIO.read(open(file, "r"), "genbank")
name = str(gb_record.id)
if extension == 'txt':
name = self.__get_name_from_text(file)
if extension == 'dna':
try:
seq_record = snapgene_file_to_seqrecord(file)
except:
                print('To read .dna files please install snapgene_reader: pip install snapgene_reader - https://github.com/IsaacLuo/SnapGeneFileReader')
                raise
name = seq_record.name
return name
def get_description(self,file):
self.__check_valid_file(file)
extension = file.split('.')[-1]
if extension == 'fasta':
fastas = list(SeqIO.parse(file,'fasta'))
if len(fastas) > 1:
return 'Multiple line fastas not supported'
else:
desc = str(fastas[0].description)
if extension == 'gb':
gb_record = SeqIO.read(open(file, "r"), "genbank")
desc = str(gb_record.description)
if extension == 'dna':
try:
seq_record = snapgene_file_to_seqrecord(file)
except:
                print('To read .dna files please install snapgene_reader: pip install snapgene_reader - https://github.com/IsaacLuo/SnapGeneFileReader')
                raise
desc = seq_record.description
if extension == 'txt':
desc = '<unknown description>'
return desc
@classmethod
def __get_seq_from_txt(cls,file):
with open(file) as f:
raw = f.readlines()
raw = ''.join(raw)
onlychar = re.split(r'[^A-Za-z]', raw)
validt = ['A', 'G', 'T', 'C']
validu = ['A', 'G', 'U', 'C']
sequence_str = ''
for i in range(len(onlychar)):
section = onlychar[i]
if set(section.upper()) == set(validt):
sequence_str += section.upper()
elif set(section.upper()) == set(validu):
sequence_str += section.upper()
return sequence_str
@classmethod
def __get_name_from_text(cls,file):
name = ''
with open(file) as f:
raw = f.readlines()
raw = ''.join(raw)
validt = ['A', 'G', 'T', 'C']
validu = ['A', 'G', 'U', 'C']
onlychar = re.split(r'[^A-Za-z]', raw)
namelen = 0
for i in range(len(onlychar)):
section = onlychar[i]
if set(section.upper()) != set(validt):
if set(section.upper()) != set(validu):
if len(section)>namelen:
name = section
namelen = len(section)
if name =='':
name = os.path.basename(file)[:-4]
return name
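# Hedged usage sketch (assumption): 'example.fasta' is a placeholder path; any single-record
# .txt, .dna, .gb or .fasta file accepted by get_sequence() would do. clean_seq() needs no file
# at all, so it is exercised on the built-in test sequence.
if __name__ == '__main__':
    parser = FileParser()
    print(parser.clean_seq(parser.test_seq))
    # seq = parser.get_sequence('example.fasta')   # placeholder file name
    # print(parser.get_name('example.fasta'), len(seq))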
|
nilq/baby-python
|
python
|
#!/usr/bin/python3.8
import time
from flask import render_template, flash, redirect, url_for, request
from flask_login import login_user, current_user, logout_user, login_required
from werkzeug.utils import secure_filename
from webapp import app, config, db, db_manager, bcrypt
from webapp.models import User
from webapp.forms import RegisterUser, Login, RequestResetForm, ResetPasswordForm, InviteUser, TableViewSelect, TableSearch, AddVehicleForm, AddCheckinForm, AddExpenseForm
from webapp.user_functions import send_reset_email, send_admin_invite
from webapp.prepare_functions import prep_expense_summary
### User Function Web Routes ###
@app.route('/register_user', methods = ['GET', 'POST'])
@login_required
def register_user():
form = RegisterUser()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user = {"first": form.first_name.data,
"middle": form.middle_name.data,
"last": form.last_name.data,
"email": form.email.data,
"password": hashed_password}
if db_manager.add_user(**user):
return redirect(url_for('home'))
return render_template('register_user.html', title = 'Register User', form = form)
@app.route('/login', methods = ['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('home'))
form = Login()
if form.validate_on_submit():
user = User.query.filter_by(email = form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
login_user(user, remember = form.remember.data)
return redirect(url_for('home'))
flash('Login Unsuccessful. Please check email and password.')
return render_template('login.html', title = 'Login', form = form)
@app.route('/logout')
def logout():
logout_user()
return redirect(url_for('home'))
@app.route('/manage_users', methods = ['GET', 'POST'])
@login_required
def manage_users():
table_cols = ["first", "middle", "last", "email", "admin"]
table_rows = None
invite_form = InviteUser()
search_form = TableSearch()
if request.method == 'POST':
if request.form.get('submit') == 'Send Invite':
if current_user.admin >= 2:
email = request.form.get('email')
send_admin_invite('name', email)
if db_manager.add_admin_invite(email):
flash(f'Admin invite sent to {email}', "success")
else:
flash('Insufficient Privileges')
elif request.form.get('submit') == 'Search':
key = request.form.get('criteria')
value = request.form.get('value')
kwarg = {key: value}
print('Applying search term', kwarg)
table_rows = [i.get_row() for i in User.query.filter_by(**kwarg).all() if i.level]
if not table_rows:
flash("Your search didn't return any results.")
if table_rows == None:
table_rows = db_manager.get_users()
return render_template('admin_users.html', title = 'Manage Admin Users', invite_form = invite_form, search_form = search_form, table_cols = table_cols, table_rows = table_rows, enumerate = enumerate)
@app.route('/reset_password', methods = ['GET', 'POST'])
def reset_request():
form = RequestResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email = form.email.data).first()
send_reset_email(user)
flash('An email has been sent with instructions to reset your password', 'info')
return redirect(url_for('login'))
return render_template('reset_request.html', title = 'Reset Password', form = form)
@app.route('/reset_password/reset_password/<token>', methods = ['GET', 'POST'])
def reset_token(token):
user = User.verify_reset_token(token)
if not user:
flash('That is an invalid or expired token', 'warning')
return redirect(url_for('reset_request'))
form = ResetPasswordForm()
if form.validate_on_submit():
hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')
user.password = hashed_password
db.session.commit()
flash('Your password has been updated! You can now log in', 'success')
return redirect(url_for('login'))
return render_template('reset_token.html', title = 'Reset Password', form = form)
### Main App Web Routes ###
@app.route('/')
@login_required
def home():
user_id = current_user.get_id()
user = db_manager.get_user(**{"id": user_id})
reminders = []
expense_summary = prep_expense_summary(user_id, "2021")
print(expense_summary)
vehicles = db_manager.get_user_vehicles(**{"user_id": user_id})
return render_template('home.html', user = user, reminders = reminders, expenses = expense_summary, vehicles = vehicles)
@app.route('/add_vehicle', methods = ['GET', 'POST'])
@login_required
def add_vehicle():
uid = {"id": current_user.get_id()}
user = db_manager.get_user(**uid)
form = AddVehicleForm()
if form.validate_on_submit():
vehicle = {"user_id": uid["id"],
"year": form.year.data,
"make": form.make.data,
"model": form.model.data,
"trim": form.trim.data,
"color": form.color.data}
if db_manager.add_vehicle(**vehicle):
flash(f"You added a {vehicle['year']} {vehicle['make']} {vehicle['model']} to your garage!", "success")
return redirect(url_for("home"))
return render_template('add_vehicle.html', user = user, form = form)
@app.route('/add_checkin/<vehicle_id>', methods = ['GET', 'POST'])
@login_required
def add_checkin(vehicle_id):
user = db_manager.get_user(**{"id": current_user.get_id()})
form = AddCheckinForm()
# vehicle = db_manager.get_vehicle(public_id = vehicle_id)
if form.validate_on_submit():
checkin = {"vehicle_id": vehicle_id,
"odometer": form.odometer.data}
checkin_id, vehicle = db_manager.add_checkin(**checkin)
checkin_type = form.checkin_type.data
if checkin_type == "shift start":
db_manager.start_shift_miles(db_manager.get_user_id(user["id"]), checkin_id)
elif checkin_type == "shift end":
if db_manager.complete_shift_miles(checkin_id):
flash(f"You completed shift miles!", "success")
flash(f"You added an odometer checkin for your {vehicle['year']} {vehicle['make']} {vehicle['model']}!", "success")
return redirect(url_for("home"))
return render_template('add_checkin.html', user = user, form = form)
@app.route('/add_expense/<vehicle_id>', methods = ['GET', 'POST'])
@login_required
def add_expense(vehicle_id):
user = db_manager.get_user(**{"id": current_user.get_id()})
form = AddExpenseForm()
if form.validate_on_submit():
if form.odometer.data:
checkin = {"odometer": form.odometer.data,
"vehicle_id": vehicle_id}
checkin_id, vehicle = db_manager.add_checkin(**checkin)
else:
checkin_id = None
expense = {"vehicle_id": vehicle["id"],
"expense_type": form.expense_type.data,
"recur_interval": form.interval.data,
"notes": form.notes.data,
"odometer_id": checkin_id}
if db_manager.add_expense(**expense):
if expense["recur_interval"] == "none":
message = f"You added a one-time {expense['expense_type']} expense!"
else:
message = f"You added a(n) {expense['expense_type']} expense that recurs every {expense['recur_interval']}!"
flash(message, "success")
return redirect(url_for("home"))
return render_template('add_expense.html', user = user, form = form)
|
nilq/baby-python
|
python
|
import numpy as np
from config import Config
def compute_challenge_metric_custom(res,lbls,normalize=True):
normal_class = '426783006'
normal_index=Config.HASH_TABLE[0][normal_class]
lbls=lbls>0
res=res>0
weights = Config.loaded_weigths
observed_score=np.sum(weights*get_confusion(lbls,res))
if normalize == False:
return observed_score
correct_score=np.sum(weights*get_confusion(lbls,lbls))
inactive_outputs = np.zeros_like(lbls)
inactive_outputs[:, normal_index] = 1
inactive_score=np.sum(weights*get_confusion(lbls,inactive_outputs))
normalized_score = float(observed_score - inactive_score) / float(correct_score - inactive_score)
return normalized_score
def get_confusion(lbls,res):
normalizer=np.sum(lbls|res,axis=1)
normalizer[normalizer<1]=1
A=lbls.astype(np.float32).T@(res.astype(np.float32)/normalizer.reshape(normalizer.shape[0],1))
# num_sigs,num_classes=lbls.shape
# A=np.zeros((num_classes,num_classes))
# for sig_num in range(num_sigs):
# A=A+lbls[[sig_num], :].T@res[[sig_num], :]/normalizer[sig_num]
# B=np.zeros((num_classes,num_classes))
# for sig_num in range(num_sigs):
# for j in range(num_classes):
# # Assign full and/or partial credit for each positive class.
# if lbls[sig_num, j]:
# for k in range(num_classes):
# if res[sig_num, k]:
# B[j, k] += 1.0/normalizer[sig_num]
return A
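# Hedged sanity check: get_confusion() has no dependency on Config, so it can be exercised
# directly on tiny boolean label/prediction matrices (rows = recordings, columns = classes).
# The toy arrays below are purely illustrative.
if __name__ == '__main__':
    toy_lbls = np.array([[1, 0, 1], [0, 1, 0]], dtype=bool)
    toy_res = np.array([[1, 0, 0], [0, 1, 0]], dtype=bool)
    print(get_confusion(toy_lbls, toy_res))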
|
nilq/baby-python
|
python
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import copy
import frappe
import frappe.share
from frappe import _, msgprint
from frappe.utils import cint
rights = ("select", "read", "write", "create", "delete", "submit", "cancel", "amend",
"print", "email", "report", "import", "export", "set_user_permissions", "share")
def check_admin_or_system_manager(user=None):
if not user: user = frappe.session.user
if ("System Manager" not in frappe.get_roles(user)) and (user!="Administrator"):
frappe.throw(_("Not permitted"), frappe.PermissionError)
def print_has_permission_check_logs(func):
def inner(*args, **kwargs):
frappe.flags['has_permission_check_logs'] = []
result = func(*args, **kwargs)
self_perm_check = True if not kwargs.get('user') else kwargs.get('user') == frappe.session.user
raise_exception = False if kwargs.get('raise_exception') == False else True
# print only if access denied
# and if user is checking his own permission
if not result and self_perm_check and raise_exception:
msgprint(('<br>').join(frappe.flags.get('has_permission_check_logs', [])))
frappe.flags.pop('has_permission_check_logs', None)
return result
return inner
@print_has_permission_check_logs
def has_permission(doctype, ptype="read", doc=None, verbose=False, user=None, raise_exception=True):
"""Returns True if user has permission `ptype` for given `doctype`.
If `doc` is passed, it also checks user, share and owner permissions.
Note: if Table DocType is passed, it always returns True.
"""
if not user: user = frappe.session.user
if not doc and hasattr(doctype, 'doctype'):
# first argument can be doc or doctype
doc = doctype
doctype = doc.doctype
if frappe.is_table(doctype):
return True
if user=="Administrator":
return True
meta = frappe.get_meta(doctype)
if doc:
if isinstance(doc, str):
doc = frappe.get_doc(meta.name, doc)
perm = get_doc_permissions(doc, user=user, ptype=ptype).get(ptype)
if not perm: push_perm_check_log(_('User {0} does not have access to this document').format(frappe.bold(user)))
else:
if ptype=="submit" and not cint(meta.is_submittable):
push_perm_check_log(_("Document Type is not submittable"))
return False
if ptype=="import" and not cint(meta.allow_import):
push_perm_check_log(_("Document Type is not importable"))
return False
role_permissions = get_role_permissions(meta, user=user)
perm = role_permissions.get(ptype)
if not perm:
push_perm_check_log(_('User {0} does not have doctype access via role permission for document {1}').format(frappe.bold(user), frappe.bold(doctype)))
def false_if_not_shared():
if ptype in ("read", "write", "share", "submit", "email", "print"):
shared = frappe.share.get_shared(doctype, user,
["read" if ptype in ("email", "print") else ptype])
if doc:
doc_name = get_doc_name(doc)
if doc_name in shared:
if ptype in ("read", "write", "share", "submit") or meta.permissions[0].get(ptype):
return True
elif shared:
                # if at least one shared doc of that type, then return True
# this is used in db_query to check if permission on DocType
return True
return False
if not perm:
perm = false_if_not_shared()
return perm
def get_doc_permissions(doc, user=None, ptype=None):
"""Returns a dict of evaluated permissions for given `doc` like `{"read":1, "write":1}`"""
if not user: user = frappe.session.user
if frappe.is_table(doc.doctype): return {"read": 1, "write": 1}
meta = frappe.get_meta(doc.doctype)
def is_user_owner():
doc_owner = doc.get('owner') or ''
doc_owner = doc_owner.lower()
session_user = frappe.session.user.lower()
return doc_owner == session_user
if has_controller_permissions(doc, ptype, user=user) == False :
push_perm_check_log('Not allowed via controller permission check')
return {ptype: 0}
permissions = copy.deepcopy(get_role_permissions(meta, user=user, is_owner=is_user_owner()))
if not cint(meta.is_submittable):
permissions["submit"] = 0
if not cint(meta.allow_import):
permissions["import"] = 0
# Override with `if_owner` perms irrespective of user
if permissions.get('has_if_owner_enabled'):
# apply owner permissions on top of existing permissions
# some access might be only for the owner
# eg. everyone might have read access but only owner can delete
permissions.update(permissions.get("if_owner", {}))
if not has_user_permission(doc, user):
if is_user_owner():
# replace with owner permissions
permissions = permissions.get("if_owner", {})
# if_owner does not come with create rights...
permissions['create'] = 0
else:
permissions = {}
return permissions
def get_role_permissions(doctype_meta, user=None, is_owner=None):
"""
Returns dict of evaluated role permissions like
{
"read": 1,
"write": 0,
// if "if_owner" is enabled
"if_owner":
{
"read": 1,
"write": 0
}
}
"""
if isinstance(doctype_meta, str):
doctype_meta = frappe.get_meta(doctype_meta) # assuming doctype name was passed
if not user: user = frappe.session.user
cache_key = (doctype_meta.name, user)
if user == 'Administrator':
return allow_everything()
if not frappe.local.role_permissions.get(cache_key):
perms = frappe._dict(
if_owner={}
)
roles = frappe.get_roles(user)
def is_perm_applicable(perm):
return perm.role in roles and cint(perm.permlevel)==0
def has_permission_without_if_owner_enabled(ptype):
return any(p.get(ptype, 0) and not p.get('if_owner', 0) for p in applicable_permissions)
applicable_permissions = list(filter(is_perm_applicable, getattr(doctype_meta, 'permissions', [])))
has_if_owner_enabled = any(p.get('if_owner', 0) for p in applicable_permissions)
perms['has_if_owner_enabled'] = has_if_owner_enabled
for ptype in rights:
pvalue = any(p.get(ptype, 0) for p in applicable_permissions)
# check if any perm object allows perm type
perms[ptype] = cint(pvalue)
if (pvalue
and has_if_owner_enabled
and not has_permission_without_if_owner_enabled(ptype)
and ptype != 'create'):
perms['if_owner'][ptype] = cint(pvalue and is_owner)
# has no access if not owner
# only provide select or read access so that user is able to at-least access list
                    # (and the documents will be filtered based on owner in further checks)
perms[ptype] = 1 if ptype in ['select', 'read'] else 0
frappe.local.role_permissions[cache_key] = perms
return frappe.local.role_permissions[cache_key]
def get_user_permissions(user):
from frappe.core.doctype.user_permission.user_permission import get_user_permissions
return get_user_permissions(user)
def has_user_permission(doc, user=None):
'''Returns True if User is allowed to view considering User Permissions'''
from frappe.core.doctype.user_permission.user_permission import get_user_permissions
user_permissions = get_user_permissions(user)
if not user_permissions:
# no user permission rules specified for this doctype
return True
# user can create own role permissions, so nothing applies
if get_role_permissions('User Permission', user=user).get('write'):
return True
apply_strict_user_permissions = frappe.get_system_settings('apply_strict_user_permissions')
doctype = doc.get('doctype')
docname = doc.get('name')
# STEP 1: ---------------------
# check user permissions on self
if doctype in user_permissions:
allowed_docs = get_allowed_docs_for_doctype(user_permissions.get(doctype, []), doctype)
# if allowed_docs is empty it states that there is no applicable permission under the current doctype
# only check if allowed_docs is not empty
if allowed_docs and docname not in allowed_docs:
# no user permissions for this doc specified
push_perm_check_log(_('Not allowed for {0}: {1}').format(_(doctype), docname))
return False
# STEP 2: ---------------------------------
# check user permissions in all link fields
def check_user_permission_on_link_fields(d):
# check user permissions for all the link fields of the given
# document object d
#
# called for both parent and child records
meta = frappe.get_meta(d.get("doctype"))
# check all link fields for user permissions
for field in meta.get_link_fields():
if field.ignore_user_permissions: continue
# empty value, do you still want to apply user permissions?
if not d.get(field.fieldname) and not apply_strict_user_permissions:
# nah, not strict
continue
if field.options not in user_permissions:
continue
# get the list of all allowed values for this link
allowed_docs = get_allowed_docs_for_doctype(user_permissions.get(field.options, []), doctype)
if allowed_docs and d.get(field.fieldname) not in allowed_docs:
# restricted for this link field, and no matching values found
# make the right message and exit
if d.get('parentfield'):
# "Not allowed for Company = Restricted Company in Row 3. Restricted field: reference_type"
msg = _('Not allowed for {0}: {1} in Row {2}. Restricted field: {3}').format(
_(field.options), d.get(field.fieldname), d.idx, field.fieldname)
else:
# "Not allowed for Company = Restricted Company. Restricted field: reference_type"
msg = _('Not allowed for {0}: {1}. Restricted field: {2}').format(
_(field.options), d.get(field.fieldname), field.fieldname)
push_perm_check_log(msg)
return False
return True
if not check_user_permission_on_link_fields(doc):
return False
for d in doc.get_all_children():
if not check_user_permission_on_link_fields(d):
return False
return True
def has_controller_permissions(doc, ptype, user=None):
"""Returns controller permissions if defined. None if not defined"""
if not user: user = frappe.session.user
methods = frappe.get_hooks("has_permission").get(doc.doctype, [])
if not methods:
return None
for method in methods:
controller_permission = frappe.call(frappe.get_attr(method), doc=doc, ptype=ptype, user=user)
if controller_permission is not None:
return controller_permission
# controller permissions could not decide on True or False
return None
def get_doctypes_with_read():
return list({p.parent if type(p.parent) == str else p.parent.encode('UTF8') for p in get_valid_perms()})
def get_valid_perms(doctype=None, user=None):
'''Get valid permissions for the current user from DocPerm and Custom DocPerm'''
roles = get_roles(user)
perms = get_perms_for(roles)
custom_perms = get_perms_for(roles, 'Custom DocPerm')
doctypes_with_custom_perms = get_doctypes_with_custom_docperms()
for p in perms:
if not p.parent in doctypes_with_custom_perms:
custom_perms.append(p)
if doctype:
return [p for p in custom_perms if p.parent == doctype]
else:
return custom_perms
def get_all_perms(role):
'''Returns valid permissions for a given role'''
perms = frappe.get_all('DocPerm', fields='*', filters=dict(role=role))
custom_perms = frappe.get_all('Custom DocPerm', fields='*', filters=dict(role=role))
doctypes_with_custom_perms = frappe.db.sql_list("""select distinct parent
from `tabCustom DocPerm`""")
for p in perms:
if p.parent not in doctypes_with_custom_perms:
custom_perms.append(p)
return custom_perms
def get_roles(user=None, with_standard=True):
"""get roles of current user"""
if not user:
user = frappe.session.user
if user=='Guest':
return ['Guest']
def get():
if user == 'Administrator':
return [r[0] for r in frappe.db.sql("select name from `tabRole`")] # return all available roles
else:
return [r[0] for r in frappe.db.sql("""select role from `tabHas Role`
where parent=%s and role not in ('All', 'Guest')""", (user,))] + ['All', 'Guest']
roles = frappe.cache().hget("roles", user, get)
# filter standard if required
if not with_standard:
roles = filter(lambda x: x not in ['All', 'Guest', 'Administrator'], roles)
return roles
def get_doctype_roles(doctype, access_type="read"):
"""Returns a list of roles that are allowed to access passed doctype."""
meta = frappe.get_meta(doctype)
return [d.role for d in meta.get("permissions") if d.get(access_type)]
def get_perms_for(roles, perm_doctype='DocPerm'):
'''Get perms for given roles'''
filters = {
'permlevel': 0,
'docstatus': 0,
'role': ['in', roles]
}
return frappe.db.get_all(perm_doctype, fields=['*'], filters=filters)
def get_doctypes_with_custom_docperms():
'''Returns all the doctypes with Custom Docperms'''
doctypes = frappe.db.get_all('Custom DocPerm', fields=['parent'], distinct=1)
return [d.parent for d in doctypes]
def can_set_user_permissions(doctype, docname=None):
# System Manager can always set user permissions
if frappe.session.user == "Administrator" or "System Manager" in frappe.get_roles():
return True
meta = frappe.get_meta(doctype)
# check if current user has read permission for docname
if docname and not has_permission(doctype, "read", docname):
return False
# check if current user has a role that can set permission
if get_role_permissions(meta).set_user_permissions!=1:
return False
return True
def set_user_permission_if_allowed(doctype, name, user, with_message=False):
if get_role_permissions(frappe.get_meta(doctype), user).set_user_permissions!=1:
add_user_permission(doctype, name, user)
def add_user_permission(doctype, name, user, ignore_permissions=False, applicable_for=None,
is_default=0, hide_descendants=0):
'''Add user permission'''
from frappe.core.doctype.user_permission.user_permission import user_permission_exists
if not user_permission_exists(user, doctype, name, applicable_for):
if not frappe.db.exists(doctype, name):
frappe.throw(_("{0} {1} not found").format(_(doctype), name), frappe.DoesNotExistError)
frappe.get_doc(dict(
doctype='User Permission',
user=user,
allow=doctype,
for_value=name,
is_default=is_default,
applicable_for=applicable_for,
hide_descendants=hide_descendants,
)).insert(ignore_permissions=ignore_permissions)
def remove_user_permission(doctype, name, user):
user_permission_name = frappe.db.get_value('User Permission',
dict(user=user, allow=doctype, for_value=name))
frappe.delete_doc('User Permission', user_permission_name)
def clear_user_permissions_for_doctype(doctype, user=None):
filters = {'allow': doctype}
if user:
filters['user'] = user
user_permissions_for_doctype = frappe.db.get_all('User Permission', filters=filters)
for d in user_permissions_for_doctype:
frappe.delete_doc('User Permission', d.name)
def can_import(doctype, raise_exception=False):
if not ("System Manager" in frappe.get_roles() or has_permission(doctype, "import")):
if raise_exception:
raise frappe.PermissionError("You are not allowed to import: {doctype}".format(doctype=doctype))
else:
return False
return True
def can_export(doctype, raise_exception=False):
if "System Manager" in frappe.get_roles():
return True
else:
role_permissions = frappe.permissions.get_role_permissions(doctype)
has_access = role_permissions.get('export') or \
role_permissions.get('if_owner').get('export')
if not has_access and raise_exception:
raise frappe.PermissionError(_("You are not allowed to export {} doctype").format(doctype))
return has_access
def update_permission_property(doctype, role, permlevel, ptype, value=None, validate=True):
'''Update a property in Custom Perm'''
from frappe.core.doctype.doctype.doctype import validate_permissions_for_doctype
out = setup_custom_perms(doctype)
name = frappe.get_value('Custom DocPerm', dict(parent=doctype, role=role,
permlevel=permlevel))
frappe.db.sql("""
update `tabCustom DocPerm`
set `{0}`=%s where name=%s""".format(ptype), (value, name))
if validate:
validate_permissions_for_doctype(doctype)
return out
def setup_custom_perms(parent):
    '''if custom permissions are not set up for the current doctype, set them up'''
if not frappe.db.exists('Custom DocPerm', dict(parent=parent)):
copy_perms(parent)
return True
def add_permission(doctype, role, permlevel=0, ptype=None):
'''Add a new permission rule to the given doctype
for the given Role and Permission Level'''
from frappe.core.doctype.doctype.doctype import validate_permissions_for_doctype
setup_custom_perms(doctype)
if frappe.db.get_value('Custom DocPerm', dict(parent=doctype, role=role,
permlevel=permlevel, if_owner=0)):
return
if not ptype:
ptype = 'read'
custom_docperm = frappe.get_doc({
"doctype":"Custom DocPerm",
"__islocal": 1,
"parent": doctype,
"parenttype": "DocType",
"parentfield": "permissions",
"role": role,
"permlevel": permlevel,
ptype: 1,
})
custom_docperm.save()
validate_permissions_for_doctype(doctype)
return custom_docperm.name
def copy_perms(parent):
'''Copy all DocPerm in to Custom DocPerm for the given document'''
for d in frappe.get_all('DocPerm', fields='*', filters=dict(parent=parent)):
custom_perm = frappe.new_doc('Custom DocPerm')
custom_perm.update(d)
custom_perm.insert(ignore_permissions=True)
def reset_perms(doctype):
"""Reset permissions for given doctype."""
from frappe.desk.notifications import delete_notification_count_for
delete_notification_count_for(doctype)
frappe.db.sql("""delete from `tabCustom DocPerm` where parent=%s""", doctype)
def get_linked_doctypes(dt):
return list(set([dt] + [d.options for d in
frappe.get_meta(dt).get("fields", {
"fieldtype":"Link",
"ignore_user_permissions":("!=", 1),
"options": ("!=", "[Select]")
})
]))
def get_doc_name(doc):
if not doc: return None
return doc if isinstance(doc, str) else doc.name
def allow_everything():
'''
returns a dict with access to everything
eg. {"read": 1, "write": 1, ...}
'''
perm = {ptype: 1 for ptype in rights}
return perm
def get_allowed_docs_for_doctype(user_permissions, doctype):
''' Returns all the docs from the passed user_permissions that are
allowed under provided doctype '''
return filter_allowed_docs_for_doctype(user_permissions, doctype, with_default_doc=False)
def filter_allowed_docs_for_doctype(user_permissions, doctype, with_default_doc=True):
''' Returns all the docs from the passed user_permissions that are
allowed under provided doctype along with default doc value if with_default_doc is set '''
allowed_doc = []
default_doc = None
for doc in user_permissions:
if not doc.get('applicable_for') or doc.get('applicable_for') == doctype:
allowed_doc.append(doc.get('doc'))
if doc.get('is_default') or len(user_permissions) == 1 and with_default_doc:
default_doc = doc.get('doc')
return (allowed_doc, default_doc) if with_default_doc else allowed_doc
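# Illustrative sketch (not part of the original module): filter_allowed_docs_for_doctype
# expects dicts shaped like user permission rows; the sample values below are hypothetical.
def _example_filter_allowed_docs():
	user_permissions = [
		{"doc": "Company A", "applicable_for": None, "is_default": 1},
		{"doc": "Company B", "applicable_for": "Sales Order", "is_default": 0},
	]
	# "Company B" only applies to Sales Order, so it is dropped for Quotation;
	# "Company A" is kept and also becomes the default because is_default is set.
	allowed, default = filter_allowed_docs_for_doctype(user_permissions, "Quotation")
	return allowed, default  # (["Company A"], "Company A")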
def push_perm_check_log(log):
if frappe.flags.get('has_permission_check_logs') == None: return
frappe.flags.get('has_permission_check_logs').append(_(log))
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import os,sys
import lcm
import time
home_dir =os.getenv("HOME")
#print home_dir
sys.path.append(home_dir + "/drc/software/build/lib/python2.7/site-packages")
sys.path.append(home_dir + "/drc/software/build/lib/python2.7/dist-packages")
from ipab.pause_command_message_t import pause_command_message_t
def timestamp_now (): return int (time.time () * 1000000)
if len(sys.argv)>1:
val = sys.argv[1].lower()
    if (val[0] == 'f'):
        pause = False
    elif (val[0] == 't'):
        pause = True
    else:
        # previously crashed here because pause was never assigned
        print 'Unrecognised argument, defaulting to pause'
        pause = True
else:
    print 'No mode specified!'
    pause = True
msg = pause_command_message_t()
msg.utime = timestamp_now()
msg.pause = pause
lc = lcm.LCM()
lc.publish("VAL_COMMAND_PAUSE", msg.encode())
print "Commanding pause: " , pause
|
nilq/baby-python
|
python
|
from exporters.logger.base_logger import TransformLogger
from exporters.pipeline.base_pipeline_item import BasePipelineItem
class BaseTransform(BasePipelineItem):
"""
    This module receives a batch, transforms it, and returns it. It can implement the following methods:
"""
def __init__(self, options, metadata=None):
super(BaseTransform, self).__init__(options, metadata)
self.logger = TransformLogger({
'log_level': options.get('log_level'),
'logger_name': options.get('logger_name')
})
def transform_batch(self, batch):
"""
Receives the batch, transforms it, and returns it.
"""
raise NotImplementedError
def set_metadata(self, key, value, module='transform'):
super(BaseTransform, self).set_metadata(key, value, module)
def update_metadata(self, data, module='transform'):
super(BaseTransform, self).update_metadata(data, module)
def get_metadata(self, key, module='transform'):
return super(BaseTransform, self).get_metadata(key, module)
def get_all_metadata(self, module='transform'):
return super(BaseTransform, self).get_all_metadata(module)
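# Illustrative sketch (not part of the original module): a minimal concrete transform.
# It assumes batch items are dicts with a 'title' key, which is purely hypothetical.
class ExampleUppercaseTitleTransform(BaseTransform):
    """Uppercase the 'title' field of every item in the batch."""

    def transform_batch(self, batch):
        transformed = []
        for item in batch:
            if 'title' in item:
                item['title'] = item['title'].upper()
            transformed.append(item)
        return transformed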
|
nilq/baby-python
|
python
|
from Acccount import Account
class Drive(Account):
type = int
activo = int
def __init__(self, name, document, type, activo):
super().__init__(name, document)
self.type = type
self.activo = activo
|
nilq/baby-python
|
python
|
import unittest
from app.models import Source
class SourceTest(unittest.TestCase):
'''
Test Class to test the behaviour of the Source class
'''
def setUp(self):
'''
Set up method that will run before every Test
'''
self.new_source = Source(1,"Washington Post", "Leading news outlet", "http://stuff.com", "business", "US", "en")
def test_instance(self):
'''
method asserts existence of an instance
:return:
'''
self.assertTrue(isinstance(self.new_source,Source))
|
nilq/baby-python
|
python
|
"""
opstate.py
"Specification of the OpState class."
@author: Johan Monster (https://github.com/Hans-Bananendans/)
"""
class OpState:
"""This class represents a separate operational state, and can be used
to calculate used power values and separate these by channel."""
def __init__(self, device_power_values, channels, device_channels, \
blips_on=1):
self.device_power_values = device_power_values
self.devices = list(device_power_values.keys())
self.channels = channels
self.device_channels = device_channels
self.blips_on = blips_on
def power_used_channel(self):
# Generate empty dictionary
channel_power = {chan: 0 for chan in self.channels}
# Calculate power for each channel
for device in self.devices:
# Find which power channel the device is connected to
chan = self.device_channels[device]
# Add power used by the device to total power used by channel
channel_power[chan] += self.device_power_values[device]
return channel_power
def power_used_device(self):
return self.device_power_values
def power_used(self):
return sum(self.device_power_values.values())
def blips(self):
return self.blips_on
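# Illustrative usage sketch (not part of the original module); the device names, power
# draws, and channel assignments below are hypothetical.
if __name__ == '__main__':
    device_power = {"obc": 0.5, "radio": 1.25, "payload": 2.25}  # watts
    channels = ["3V3", "5V"]
    device_channels = {"obc": "3V3", "radio": "5V", "payload": "5V"}
    nominal = OpState(device_power, channels, device_channels, blips_on=1)
    print(nominal.power_used_channel())  # {'3V3': 0.5, '5V': 3.5}
    print(nominal.power_used())          # 4.0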
|
nilq/baby-python
|
python
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Losses used for segmentation models."""
from typing import Optional, Sequence
import tensorflow as tf
class SegmentationLossDiceScore(object):
"""Semantic segmentation loss using generalized dice score.
Dice score (DSC) is a similarity measure that equals twice the number of
elements common to both sets divided by the sum of the number of elements
in each set. It is commonly used to evaluate segmentation performance to
measure the overlap of predicted and groundtruth regions.
(https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient)
Generalized dice score is the dice score weighted by the volume of groundtruth
labels per class. Adaptive dice score adds weights to generalized dice score.
It assigns larger weights to lower dice score, so that wrong predictions
contribute more to the total loss. Model will then be trained to focus more on
these hard examples.
"""
def __init__(self,
metric_type: Optional[str] = None,
axis: Optional[Sequence[int]] = (1, 2, 3)):
"""Initializes dice score loss object.
Args:
metric_type: An optional `str` specifying the type of the dice score to
compute. Compute generalized or adaptive dice score if metric type is
`generalized` or `adaptive`; otherwise compute original dice score.
axis: An optional sequence of `int` specifying the axis to perform reduce
ops for raw dice score.
"""
self._dice_score = 0
self._metric_type = metric_type
self._axis = axis
def __call__(self, logits: tf.Tensor, labels: tf.Tensor) -> tf.Tensor:
"""Computes and returns a loss based on 1 - dice score.
Args:
logits: A Tensor of the prediction.
labels: A Tensor of the groundtruth label.
Returns:
The loss value of (1 - dice score).
"""
labels = tf.cast(labels, logits.dtype)
if labels.get_shape().ndims < 2 or logits.get_shape().ndims < 2:
raise ValueError('The labels and logits must be at least rank 2.')
epsilon = tf.keras.backend.epsilon()
keep_label_axis = list(range(len(logits.shape) - 1))
keep_batch_axis = list(range(1, len(logits.shape)))
# Compute sample mask to filter out samples with both all-0's labels and
# predictions because such samples should not contribute to mean dice score
# in this batch.
sample_mask = tf.logical_or(
tf.cast(tf.reduce_sum(labels, axis=keep_batch_axis), dtype=tf.bool),
tf.cast(tf.reduce_sum(logits, axis=keep_batch_axis), dtype=tf.bool))
labels = tf.boolean_mask(labels, sample_mask)
logits = tf.boolean_mask(logits, sample_mask)
# If all samples are filtered out, return 0 as the loss so this batch does
# not contribute.
if labels.shape[0] == 0:
return tf.convert_to_tensor(0.0)
# Calculate intersections and unions per class.
intersection = tf.reduce_sum(labels * logits, axis=keep_label_axis)
union = tf.reduce_sum(labels + logits, axis=keep_label_axis)
if self._metric_type == 'generalized':
# Calculate the volume of groundtruth labels.
w = tf.math.reciprocal(
tf.square(tf.reduce_sum(labels, axis=keep_label_axis)) + epsilon)
# Calculate the weighted dice score and normalizer.
dice = 2 * tf.reduce_sum(w * intersection)
normalizer = tf.reduce_sum(w * union)
if normalizer == 0:
return tf.convert_to_tensor(1.0)
dice = tf.cast(dice, dtype=tf.float32)
normalizer = tf.cast(normalizer, dtype=tf.float32)
return 1 - tf.reduce_mean(dice / normalizer)
elif self._metric_type == 'adaptive':
dice = 2.0 * intersection / (union + epsilon)
# Calculate weights based on Dice scores.
weights = tf.exp(-1.0 * dice)
# Multiply weights by corresponding scores and get sum.
weighted_dice = tf.reduce_sum(weights * dice)
# Calculate normalization factor.
normalizer = tf.cast(tf.size(input=dice), dtype=tf.float32) * tf.exp(-1.0)
if normalizer == 0:
return tf.convert_to_tensor(1.0)
weighted_dice = tf.cast(weighted_dice, dtype=tf.float32)
return 1 - tf.reduce_mean(weighted_dice / normalizer)
else:
summation = tf.reduce_sum(
labels, axis=self._axis) + tf.reduce_sum(
logits, axis=self._axis)
dice = (2 * tf.reduce_sum(labels * logits, axis=self._axis)) / (
summation + epsilon)
return 1 - tf.reduce_mean(dice)
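# Illustrative usage sketch (not part of the original module): evaluating the three loss
# variants on a tiny batch of made-up one-hot labels and soft predictions.
if __name__ == '__main__':
  labels = tf.constant([[[[1.0, 0.0], [0.0, 1.0]],
                         [[1.0, 0.0], [0.0, 1.0]]]])  # shape [batch, h, w, classes]
  logits = tf.constant([[[[0.9, 0.1], [0.2, 0.8]],
                         [[0.7, 0.3], [0.4, 0.6]]]])
  for metric_type in (None, 'generalized', 'adaptive'):
    loss = SegmentationLossDiceScore(metric_type=metric_type)(logits, labels)
    print(metric_type, float(loss))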
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from ..setup.constants import NETWORK_DEVICE, SERIAL_DEVICE
from ..user import User, UserDetail
from ..settings import Setting
from ..certificate import Certificate
from ..notifications import Notification, NotificationSetting
from ..zones import Zone
from ..keypad import KeypadButton
HOSTS_FILE = '/etc/hosts'
HOSTNAME_FILE = '/etc/hostname'
NETWORK_FILE = '/etc/network/interfaces'
EXPORT_MAP = {
'settings.json': Setting,
'certificates.json': Certificate,
'notifications.json': Notification,
'notification_settings.json': NotificationSetting,
'users.json': User,
'user_details.json': UserDetail,
'zones.json': Zone,
'buttons.json': KeypadButton
}
|
nilq/baby-python
|
python
|
################################################################################
#
# Copyright (C) 2013-2015, Michele Cappellari
# E-mail: michele.cappellari_at_physics.ox.ac.uk
#
# Updated versions of the software are available from my web page
# http://purl.org/cappellari/software
#
# This software is provided as is without any warranty whatsoever.
# Permission to use, for non-commercial purposes is granted.
# Permission to modify for personal or internal use is granted,
# provided this copyright and disclaimer are included unchanged
# at the beginning of the file. All other rights are reserved.
#
################################################################################
#
# This program is a wrapper for mge_fit_sectors procedure and it accepts all
# keyword of that program. One should look at the documentation of mge_fit_sectors
# for usage details.
#
# The wrapper implements the method described in Section 2.2.2 of
# Cappellari (2002, MNRAS, 333, 400) to "regularize" an MGE model by restricting
# the allowed range in qObs of the Gaussians until the fit becomes unacceptable.
# In this way one ensures that the permitted galaxy inclinations are not being
# artificially restricted to a smaller range than allowed by the data.
#
# The detailed approach implemented here is the one used for the MGE fits of
# the galaxies in the Atlas3D project and described in Section 3.2 of
# Scott et al. (2013, MNRAS, 432, 1894).
#
# The intended usage of this wrapper is the following:
# 1. First perform a standard MGE fit with mge_fit_sectors;
# 2. Once all parameters (e.g. PA, eps, centre, sky subtraction) are OK and the
# fit looks good, simply rename "mge_fit_sectors" in your script into
#    "mge_fit_sectors_regularized" (and import the module) to clean up the final solution.
# This is because this wrapper calls mge_fit_sectors repeatedly, taking much longer,
# so it is not useful to run it until all input parameters are settled.
#
# VERSION HISTORY:
# V1.0.0: Michele Cappellari, Oxford, 22 January 2013
# V1.0.1: Fixed program stop when (qmin==qmax). Thanks to Silvia Posacki (Bologna)
# for reporting the problem and the solution. MC, Oxford, 9 May 2013
# V2.0.0: Converted from IDL into Python. MC, Oxford, 27 March 2015
# V2.0.1: Removed truncation of input eps in mge_fit_sectors.
# MC, Atlantic Ocean, 28 March 2015
# V2.0.2: Cleaned up loop. MC, Oxford, 30 May 2015
#
################################################################################
import numpy as np
from Cappellari.mge.mge_fit_sectors import mge_fit_sectors
#----------------------------------------------------------------------------
class mge_fit_sectors_regularized(object):
def __init__(self, radius, angle, counts, eps, qbounds=[0, 1], **kwargs):
qmin, qmax = qbounds
nq = int(np.ceil((qmax - qmin)/0.05) + 1) # Adopt step <= 0.05 in qObs
qrange = np.linspace(qmin, qmax, nq)
bestnorm = np.inf
frac = 1.1 # Allowed fractional increase in ABSDEV
for j in range(nq - 1):
qmin = qrange[j]
m = mge_fit_sectors(radius, angle, counts, eps, qbounds=[qmin, qmax], **kwargs)
absdev = m.absdev
print('(minloop) qbounds=%6.4f %6.4f' % (qmin, qmax))
if absdev > bestnorm*frac:
jbest = j - 1
qmin = qrange[jbest]
break # stops if error increases more than frac
else:
jbest = j
bestnorm = min(bestnorm, absdev)
self.sol = m.sol
for k in range(nq - 2, jbest, -1):
qmax = qrange[k]
m = mge_fit_sectors(radius, angle, counts, eps, qbounds=[qmin, qmax], **kwargs)
absdev = m.absdev
print('(maxloop) qbounds=%6.4f %6.4f' % (qmin, qmax))
if absdev > bestnorm*frac:
qmax = qrange[k + 1]
break # stops if error increases more than frac
else:
bestnorm = min(bestnorm, absdev)
self.sol = m.sol
print('Final qbounds=%6.4f %6.4f' % (qmin, qmax))
#----------------------------------------------------------------------------
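# Illustrative usage sketch (not part of the original file): per the intended-usage notes
# above, the wrapper is meant as a drop-in replacement for mge_fit_sectors once the
# photometry inputs are settled. The variable names (radius, angle, counts, eps from a
# prior sectors_photometry step) and keyword values below are hypothetical placeholders.
#
#   m = mge_fit_sectors_regularized(radius, angle, counts, eps,
#                                   ngauss=12, qbounds=[0, 1], plot=True)
#   total_counts, sigma, q_obs = m.sol   # best-fitting MGE parameters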
|
nilq/baby-python
|
python
|
from typing import Optional
from asserts import (
assert_equal,
assert_false,
assert_raises,
assert_succeeds,
assert_true,
)
from dectest import TestCase, test
from dbupgrade.args import Arguments
from dbupgrade.files import FileInfo
from dbupgrade.filter import (
MAX_API_LEVEL,
MAX_VERSION,
Filter,
filter_from_arguments,
)
class FilterTest(TestCase):
@test
def init(self) -> None:
filter_ = Filter("myschema", "postgres", 5, 10, 3)
assert_equal("myschema", filter_.schema)
assert_equal("postgres", filter_.dialect)
assert_equal(5, filter_.min_version)
assert_equal(10, filter_.max_version)
assert_equal(3, filter_.target_api_level)
@test
def init__max_version_equals_min_version(self) -> None:
with assert_succeeds(ValueError):
Filter("myschema", "postgres", 5, 5, 3)
@test
def init__max_version_smaller_than_min_version(self) -> None:
with assert_raises(ValueError):
Filter("myschema", "postgres", 5, 4, 3)
@test
def matches__version_matches_lower(self) -> None:
filter_ = Filter("myschema", "postgres", 5, 10, 3)
file_info = FileInfo("", "myschema", "postgres", 5, 2)
assert_true(filter_.matches(file_info))
@test
def matches__version_in_between(self) -> None:
filter_ = Filter("myschema", "postgres", 5, 10, 3)
file_info = FileInfo("", "myschema", "postgres", 7, 2)
assert_true(filter_.matches(file_info))
@test
def matches__version_matches_upper(self) -> None:
filter_ = Filter("myschema", "postgres", 5, 10, 3)
file_info = FileInfo("", "myschema", "postgres", 10, 2)
assert_true(filter_.matches(file_info))
@test
def matches__api_level_match(self) -> None:
filter_ = Filter("myschema", "postgres", 5, 10, 3)
file_info = FileInfo("", "myschema", "postgres", 7, 3)
assert_true(filter_.matches(file_info))
@test
def matches__wrong_schema(self) -> None:
filter_ = Filter("myschema", "postgres", 5, 10, 3)
file_info = FileInfo("", "wrongschema", "postgres", 5, 2)
assert_false(filter_.matches(file_info))
@test
def matches__wrong_dialect(self) -> None:
filter_ = Filter("myschema", "postgres", 5, 10, 3)
file_info = FileInfo("", "myschema", "mysql", 5, 2)
assert_false(filter_.matches(file_info))
@test
def matches__min_version_too_small(self) -> None:
filter_ = Filter("myschema", "postgres", 5, 10, 3)
file_info = FileInfo("", "myschema", "postgres", 4, 2)
assert_false(filter_.matches(file_info))
@test
def matches__min_version_too_large(self) -> None:
filter_ = Filter("myschema", "postgres", 5, 10, 3)
file_info = FileInfo("", "myschema", "postgres", 11, 2)
assert_false(filter_.matches(file_info))
@test
def matches__api_level_too_large(self) -> None:
filter_ = Filter("myschema", "postgres", 5, 10, 3)
file_info = FileInfo("", "myschema", "postgres", 7, 4)
assert_false(filter_.matches(file_info))
class FilterFromArgumentsTest(TestCase):
def _create_arguments(
self,
*,
schema: str = "testschema",
db_url: str = "postgres://localhost/foo",
max_api_level: Optional[int] = None,
max_version: Optional[int] = None,
ignore_api_level: bool = False
) -> Arguments:
return Arguments(
schema,
db_url,
"/tmp",
max_api_level,
max_version,
ignore_api_level=ignore_api_level,
)
@test
def schema(self) -> None:
args = self._create_arguments(schema="myschema")
filter_ = filter_from_arguments(args, 0, 0)
assert_equal("myschema", filter_.schema)
@test
def dialect(self) -> None:
args = self._create_arguments(
db_url="postgres+psycopg2://localhost/foo"
)
filter_ = filter_from_arguments(args, 0, 0)
assert_equal("postgres", filter_.dialect)
@test
def min_version(self) -> None:
args = self._create_arguments()
filter_ = filter_from_arguments(args, 13, 0)
assert_equal(13, filter_.min_version)
@test
def max_version_default(self) -> None:
args = self._create_arguments(max_version=None)
filter_ = filter_from_arguments(args, 0, 0)
assert_equal(MAX_VERSION, filter_.max_version)
@test
def max_version_explicit(self) -> None:
args = self._create_arguments(max_version=34)
filter_ = filter_from_arguments(args, 0, 0)
assert_equal(34, filter_.max_version)
@test
def target_api_level_default(self) -> None:
args = self._create_arguments(
max_api_level=None, ignore_api_level=False
)
filter_ = filter_from_arguments(args, 0, 12)
assert_equal(12, filter_.target_api_level)
@test
def target_api_level_ignore(self) -> None:
args = self._create_arguments(ignore_api_level=True)
filter_ = filter_from_arguments(args, 0, 0)
assert_equal(MAX_API_LEVEL, filter_.target_api_level)
@test
def target_api_level_explicit(self) -> None:
args = self._create_arguments(max_api_level=12)
filter_ = filter_from_arguments(args, 0, 0)
assert_equal(12, filter_.target_api_level)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from django.core.management import CommandError, call_command
from django.test import TestCase
from django.test.utils import override_settings
from six import StringIO
try:
from unittest import mock
except ImportError:
import mock
class ResetSchemaExceptionsTests(TestCase):
"""Tests if reset_schema command raises exceptions."""
def test_should_raise_CommandError_when_router_does_not_exist(self):
with self.assertRaisesRegexp(CommandError, 'Unknown database router non-existing_router'):
call_command('reset_schema', '--router=non-existing_router')
@override_settings(DATABASES={
'default': {
'ENGINE': 'django.db.backends.mysql',
},
})
def test_should_raise_CommandError_when_database_ENGINE_different_thant_postgresql(self):
with self.assertRaisesRegexp(CommandError, 'This command can be used only with PostgreSQL databases.'):
call_command('reset_schema')
@override_settings(DATABASES={
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'test',
'USER': 'test',
'PASSWORD': 'test',
'HOST': 'localhost',
},
})
class ResetSchemaTests(TestCase):
"""Tests for reset_chema command."""
def test_should_drop_schema_and_create_new_one(self):
m_cursor = mock.Mock()
m_router = mock.Mock()
m_router.cursor.return_value = mock.Mock(
__enter__=mock.Mock(return_value=m_cursor),
__exit__=mock.Mock(return_value=False),
)
expected_calls = [
mock.call('DROP SCHEMA test_public CASCADE'),
mock.call('CREATE SCHEMA test_public'),
]
with mock.patch('django_extensions.management.commands.reset_schema.connections', {'default': m_router}):
call_command('reset_schema', '--noinput', '--schema=test_public')
m_cursor.execute.assert_has_calls(expected_calls, any_order=False)
@mock.patch('sys.stdout', new_callable=StringIO)
@mock.patch('django_extensions.management.commands.reset_schema.input')
def test_should_cancel_reset_schema_and_print_info_if_input_is_different_than_yes(self, m_input, m_stdout):
m_input.return_value = 'no'
call_command('reset_schema')
self.assertEqual("Reset cancelled.\n", m_stdout.getvalue())
|
nilq/baby-python
|
python
|
from django.db.models.manager import BaseManager
from .querysets import LocationQuerySet
class LocationManager(BaseManager.from_queryset(LocationQuerySet)):
"""
Manager class for location models constructed from LocationQuerySet.
Uses miles as distance unit.
"""
|
nilq/baby-python
|
python
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser,BaseUserManager
from panel.models import Discount
from product.models import Product,ProductDetail
# Create your models here.
class MyAccountManager(BaseUserManager):
def create_user(self,email,first_name,last_name,password):
if not email:
raise ValueError("Users must have an email address")
if not first_name:
raise ValueError("Users must have an first name")
if not last_name:
raise ValueError("Users must have an last name")
if not password:
raise ValueError("Users must have an password")
user = self.model(
email=self.normalize_email(email),
first_name= first_name,
last_name = last_name,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email ,first_name,last_name , password):
user = self.create_user(email=self.normalize_email(email),password=password,first_name= first_name,last_name = last_name,)
user.is_admin = True
user.is_staff = True
user.is_active = True
user.is_store = True
user.is_superuser = True
user.save(using=self._db)
return user
class Account(AbstractBaseUser):
email = models.EmailField(verbose_name="email", max_length=60, unique=True)
first_name = models.CharField(max_length=200)
last_name = models.CharField(max_length=200)
phone = models.CharField(verbose_name="phone",max_length=30,blank=True,null=True)
date_joined = models.DateTimeField(verbose_name='date joined', auto_now_add=True)
last_login = models.DateTimeField(verbose_name='last login', auto_now=True)
    date_of_birth = models.DateField(verbose_name='date of birth', blank=True, null=True)
"""
Gender :
-1: Belirtmek istemiyorum
0 : Erkek
1 : K
"""
gender = models.IntegerField(blank=True,null=True,default=-1)
is_admin = models.BooleanField(default=False)
is_active = models.BooleanField(default=True)
is_superuser = models.BooleanField(default=False)
is_staff = models.BooleanField(default=False) #-1,0-1-2-3 staff perm, different perms
is_store = models.BooleanField(default=False)
is_customer = models.BooleanField(default=False)
date_activate = models.DateField(verbose_name="date of activate",blank = True, null=True)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS= ['first_name','last_name']
objects = MyAccountManager()
def has_perm(self,perm,obj=None):
return self.is_admin
def has_module_perms(self, app_label):
return True
def __str__(self):
return self.email
class Adress(models.Model):
user = models.ForeignKey(Account,on_delete=models.CASCADE,related_name="customer")
adress_first_name = models.CharField(max_length=100)
adress_last_name = models.CharField(max_length=100)
adress_phone = models.CharField(verbose_name="phone",max_length=30)
city = models.CharField(max_length=100)
country = models.CharField(max_length=100)
adresss = models.TextField(max_length=100)
def __str__(self):
return f"{self.adress_first_name} {self.adress_last_name} - {self.adresss} {self.city}/{self.country}"
class Order(models.Model):
customer = models.ForeignKey(Account,on_delete=models.CASCADE,related_name="buyer")
address = models.ForeignKey(Adress,on_delete=models.CASCADE,related_name="adress")
items = models.ManyToManyField(ProductDetail)
discount = models.ForeignKey(Discount,on_delete=models.CASCADE,related_name="indirim",blank=True,null=True)
created = models.DateTimeField(verbose_name='date created', auto_now_add=True)
"""
Status Means :
-1 : Error
0 : İade
1 : Ödeme bekleniyor
2 : Ödeme alındı
3 : Sipariş hazırlanıyor
4 : Kargoya verildi
"""
status = models.IntegerField()
amount = models.FloatField()
# def total(self):
# for item in self.items:
# total_price += item
# return total_price
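# Illustrative sketch (not part of the original module): one way to finish the
# commented-out total() above as a standalone helper. It assumes ProductDetail exposes
# a numeric `price` field, which is hypothetical here.
def order_total(order):
    """Sum the prices of all items on an order, before any discount is applied."""
    return sum(item.price for item in order.items.all())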
|
nilq/baby-python
|
python
|
#!/pxrpythonsubst
#
# Copyright 2020 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
import os, unittest
from pxr import Plug, Sdf, Usd, Vt, Tf, Gf
class TestUsdAppliedAPISchemas(unittest.TestCase):
@classmethod
def setUpClass(cls):
pr = Plug.Registry()
testPlugins = pr.RegisterPlugins(os.path.abspath("resources"))
assert len(testPlugins) == 1, \
"Failed to load expected test plugin"
assert testPlugins[0].name == "testUsdAppliedAPISchemas", \
"Failed to load expected test plugin"
cls.SingleApplyAPIType = \
Tf.Type(Usd.SchemaBase).FindDerivedByName("TestSingleApplyAPI")
cls.MultiApplyAPIType = \
Tf.Type(Usd.SchemaBase).FindDerivedByName("TestMultiApplyAPI")
cls.SingleCanApplyAPIType = \
Tf.Type(Usd.SchemaBase).FindDerivedByName("TestSingleCanApplyAPI")
cls.MultiCanApplyAPIType = \
Tf.Type(Usd.SchemaBase).FindDerivedByName("TestMultiCanApplyAPI")
cls.NestedInnerSingleApplyAPIType = \
Tf.Type(Usd.SchemaBase).FindDerivedByName("TestNestedInnerSingleApplyAPI")
cls.NestedOuterSingleApplyAPIType = \
Tf.Type(Usd.SchemaBase).FindDerivedByName("TestNestedOuterSingleApplyAPI")
cls.NestedInnerMultiApplyBaseAPIType = \
Tf.Type(Usd.SchemaBase).FindDerivedByName("TestNestedInnerMultiApplyBaseAPI")
cls.NestedInnerMultiApplyDerivedAPIType = \
Tf.Type(Usd.SchemaBase).FindDerivedByName("TestNestedInnerMultiApplyDerivedAPI")
cls.NestedOuterMultiApplyAPIType = \
Tf.Type(Usd.SchemaBase).FindDerivedByName("TestNestedOuterMultiApplyAPI")
cls.NestedMultiApplyInSingleApplyAPIType = \
Tf.Type(Usd.SchemaBase).FindDerivedByName("TestNestedMultiApplyInSingleApplyAPI")
cls.NestedCycle1APIType = \
Tf.Type(Usd.SchemaBase).FindDerivedByName("TestNestedCycle1API")
cls.NestedCycle2APIType = \
Tf.Type(Usd.SchemaBase).FindDerivedByName("TestNestedCycle2API")
cls.NestedCycle3APIType = \
Tf.Type(Usd.SchemaBase).FindDerivedByName("TestNestedCycle3API")
cls.NestedMultiApplyCycle1APIType = \
Tf.Type(Usd.SchemaBase).FindDerivedByName("TestNestedMultiApplyCycle1API")
cls.NestedMultiApplyCycle2APIType = \
Tf.Type(Usd.SchemaBase).FindDerivedByName("TestNestedMultiApplyCycle2API")
cls.NestedMultiApplyCycle3APIType = \
Tf.Type(Usd.SchemaBase).FindDerivedByName("TestNestedMultiApplyCycle3API")
cls.AutoAppliedToAPIType = \
Tf.Type(Usd.SchemaBase).FindDerivedByName("TestAutoAppliedToAPI")
cls.NestedAutoAppliedToAPIType = \
Tf.Type(Usd.SchemaBase).FindDerivedByName("TestNestedAutoAppliedToAPI")
cls.NestedAutoAppliedToAPIAppliedToPrimType = \
Tf.Type(Usd.SchemaBase).FindDerivedByName("TestNestedAutoAppliedToAPIAppliedToPrim")
def test_SimpleTypedSchemaPrimDefinition(self):
"""
Tests the prim definition for a simple typed schema that has no
built-in API schemas
"""
primDef = Usd.SchemaRegistry().FindConcretePrimDefinition(
"TestTypedSchema")
self.assertTrue(primDef)
self.assertEqual(primDef.GetPropertyNames(), ["testAttr", "testRel"])
self.assertEqual(primDef.GetAppliedAPISchemas(), [])
self.assertEqual(primDef.GetDocumentation(), "Testing typed schema")
# Verify property specs for named properties.
for propName in primDef.GetPropertyNames():
self.assertTrue(primDef.GetSchemaPropertySpec(propName))
# Verify the attribute spec and its fallback value and type
testAttr = primDef.GetSchemaAttributeSpec("testAttr")
self.assertEqual(testAttr.default, "foo")
self.assertEqual(testAttr.typeName.cppTypeName, "std::string")
# Verify the relationship spec
self.assertTrue(primDef.GetSchemaRelationshipSpec("testRel"))
def test_TypedSchemaWithBuiltinAPISchemas(self):
"""
Tests the prim definition for schema prim type that has API schemas
applied to it in its generated schema.
"""
# Find the prim definition for the test single apply schema. It has
# some properties defined.
singleApplyAPIDef = Usd.SchemaRegistry().FindAppliedAPIPrimDefinition(
"TestSingleApplyAPI")
self.assertTrue(singleApplyAPIDef)
self.assertEqual(singleApplyAPIDef.GetAppliedAPISchemas(),
["TestSingleApplyAPI"])
self.assertEqual(singleApplyAPIDef.GetPropertyNames(), [
"single:bool_attr", "single:token_attr", "single:relationship"])
self.assertEqual(singleApplyAPIDef.GetDocumentation(),
"Test single apply API schema")
# Find the prim definition for the test multi apply schema. It has
# some properties defined. Note that the properties in the multi apply
# definition are not prefixed yet.
multiApplyAPIDef = Usd.SchemaRegistry().FindAppliedAPIPrimDefinition(
"TestMultiApplyAPI")
self.assertTrue(multiApplyAPIDef)
self.assertEqual(multiApplyAPIDef.GetAppliedAPISchemas(),
["TestMultiApplyAPI:__INSTANCE_NAME__"])
self.assertEqual(multiApplyAPIDef.GetPropertyNames(), [
"multi:__INSTANCE_NAME__:bool_attr",
"multi:__INSTANCE_NAME__:token_attr",
"multi:__INSTANCE_NAME__:relationship"])
self.assertEqual(multiApplyAPIDef.GetDocumentation(),
"Test multi-apply API schema")
# Find the prim definition for the concrete prim type with built-in
# API schemas. You can query its API schemas and it will have properties
# from those schemas already.
primDef = Usd.SchemaRegistry().FindConcretePrimDefinition(
"TestWithBuiltinAppliedSchema")
self.assertTrue(primDef)
self.assertEqual(primDef.GetAppliedAPISchemas(), [
"TestSingleApplyAPI", "TestMultiApplyAPI:builtin"])
self.assertEqual(sorted(primDef.GetPropertyNames()), [
"multi:builtin:bool_attr",
"multi:builtin:relationship",
"multi:builtin:token_attr",
"single:bool_attr",
"single:relationship",
"single:token_attr",
"testAttr",
"testRel"])
# Note that prim def documentation does not come from the built-in API
# schemas.
self.assertEqual(primDef.GetDocumentation(),
"Test with built-in API schemas")
# Verify property specs for all named properties.
for propName in primDef.GetPropertyNames():
self.assertTrue(primDef.GetSchemaPropertySpec(propName))
# Verify fallback value and type for properties defined in the
# concrete prim
testAttr = primDef.GetSchemaAttributeSpec("testAttr")
self.assertEqual(testAttr.default, "foo")
self.assertEqual(testAttr.typeName.cppTypeName, "std::string")
self.assertTrue(primDef.GetSchemaRelationshipSpec("testRel"))
# Verify fallback value and type for properties from the single applied
# schema. These properties will return the same property spec as the
# API schema prim definition.
singleBoolAttr = primDef.GetSchemaAttributeSpec("single:bool_attr")
self.assertEqual(singleBoolAttr,
singleApplyAPIDef.GetSchemaAttributeSpec("single:bool_attr"))
self.assertEqual(singleBoolAttr.default, True)
self.assertEqual(singleBoolAttr.typeName.cppTypeName, "bool")
singleTokenAttr = primDef.GetSchemaAttributeSpec("single:token_attr")
self.assertEqual(singleTokenAttr,
singleApplyAPIDef.GetSchemaAttributeSpec("single:token_attr"))
self.assertEqual(singleTokenAttr.default, "bar")
self.assertEqual(singleTokenAttr.typeName.cppTypeName, "TfToken")
singleRelationship = primDef.GetSchemaRelationshipSpec(
"single:relationship")
self.assertTrue(singleRelationship)
self.assertEqual(singleRelationship,
singleApplyAPIDef.GetSchemaRelationshipSpec("single:relationship"))
# Verify fallback value and type for properties from the multi applied
# schema. These properties will return the same property spec as the
        # API schema prim definition even though the properties on the concrete prim
        # definition are namespace prefixed.
multiTokenAttr = primDef.GetSchemaAttributeSpec(
"multi:builtin:token_attr")
self.assertEqual(multiTokenAttr,
multiApplyAPIDef.GetSchemaAttributeSpec(
"multi:__INSTANCE_NAME__:token_attr"))
self.assertEqual(multiTokenAttr.default, "foo")
self.assertEqual(multiTokenAttr.typeName.cppTypeName, "TfToken")
multiRelationship = primDef.GetSchemaRelationshipSpec(
"multi:builtin:relationship")
self.assertTrue(multiRelationship)
self.assertEqual(multiRelationship,
multiApplyAPIDef.GetSchemaRelationshipSpec(
"multi:__INSTANCE_NAME__:relationship"))
# Verify the case where the concrete type overrides a property from
# one of its applied API schemas. In this case the property spec from
# the concrete prim is returned instead of the property spec from the
# API schema.
multiBoolAttr = primDef.GetSchemaAttributeSpec(
"multi:builtin:bool_attr")
apiBoolAttr = multiApplyAPIDef.GetSchemaAttributeSpec(
"multi:__INSTANCE_NAME__:bool_attr")
self.assertNotEqual(multiBoolAttr, apiBoolAttr)
self.assertEqual(multiBoolAttr.default, False)
self.assertEqual(apiBoolAttr.default, True)
self.assertEqual(multiBoolAttr.typeName.cppTypeName, "bool")
self.assertEqual(apiBoolAttr.typeName.cppTypeName, "bool")
def test_UntypedPrimOnStage(self):
"""
Tests the fallback properties of untyped prims on a stage when API
schemas are applied
"""
stage = Usd.Stage.CreateInMemory()
# Add a prim with no type. It has no applied schemas or properties.
untypedPrim = stage.DefinePrim("/Untyped")
self.assertEqual(untypedPrim.GetTypeName(), '')
self.assertEqual(untypedPrim.GetAppliedSchemas(), [])
self.assertEqual(untypedPrim.GetPrimTypeInfo().GetTypeName(), '')
self.assertEqual(untypedPrim.GetPrimTypeInfo().GetAppliedAPISchemas(),
[])
self.assertEqual(untypedPrim.GetPropertyNames(), [])
# Add an api schema to the prim's metadata.
untypedPrim.ApplyAPI(self.SingleApplyAPIType)
# Prim still has no type but does have applied schemas
self.assertEqual(untypedPrim.GetTypeName(), '')
self.assertEqual(untypedPrim.GetAppliedSchemas(), ["TestSingleApplyAPI"])
self.assertEqual(untypedPrim.GetPrimTypeInfo().GetTypeName(), '')
self.assertEqual(untypedPrim.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestSingleApplyAPI"])
self.assertTrue(untypedPrim.HasAPI(self.SingleApplyAPIType))
# The prim has properties from the applied schema and value resolution
# returns the applied schema's property fallback value.
self.assertEqual(untypedPrim.GetPropertyNames(), [
"single:bool_attr", "single:relationship", "single:token_attr"])
self.assertEqual(untypedPrim.GetAttribute("single:token_attr").Get(),
"bar")
# Applied schemas are unable to define fallback metadata values for
# prims. Just verifying that no fallback exists for "hidden" here as
# a contrast to the other cases below where this metadata fallback will
# be defined.
self.assertFalse("hidden" in untypedPrim.GetAllMetadata())
self.assertIsNone(untypedPrim.GetMetadata("hidden"))
self.assertFalse(untypedPrim.HasAuthoredMetadata("hidden"))
# Untyped prim still has no documentation even with API schemas applied.
self.assertIsNone(untypedPrim.GetMetadata("documentation"))
def test_TypedPrimOnStage(self):
"""
Tests the fallback properties of typed prims on a stage when API
schemas are applied when the prim type does not start with API schemas.
"""
stage = Usd.Stage.CreateInMemory()
# Add a typed prim. It has no API schemas but has properties from its
# type schema.
typedPrim = stage.DefinePrim("/TypedPrim", "TestTypedSchema")
self.assertEqual(typedPrim.GetTypeName(), 'TestTypedSchema')
self.assertEqual(typedPrim.GetAppliedSchemas(), [])
self.assertEqual(typedPrim.GetPrimTypeInfo().GetTypeName(),
'TestTypedSchema')
self.assertEqual(typedPrim.GetPrimTypeInfo().GetAppliedAPISchemas(), [])
self.assertEqual(typedPrim.GetPropertyNames(), ["testAttr", "testRel"])
# Add an api schemas to the prim's metadata.
typedPrim.ApplyAPI(self.SingleApplyAPIType)
typedPrim.ApplyAPI(self.MultiApplyAPIType, "garply")
# Prim has the same type and now has API schemas. The properties have
# been expanded to include properties from the API schemas
self.assertEqual(typedPrim.GetTypeName(), 'TestTypedSchema')
self.assertEqual(typedPrim.GetAppliedSchemas(),
["TestSingleApplyAPI", "TestMultiApplyAPI:garply"])
self.assertEqual(typedPrim.GetPrimTypeInfo().GetTypeName(),
'TestTypedSchema')
self.assertEqual(typedPrim.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestSingleApplyAPI", "TestMultiApplyAPI:garply"])
self.assertTrue(typedPrim.HasAPI(self.SingleApplyAPIType))
self.assertTrue(typedPrim.HasAPI(self.MultiApplyAPIType))
self.assertEqual(typedPrim.GetPropertyNames(), [
"multi:garply:bool_attr",
"multi:garply:relationship",
"multi:garply:token_attr",
"single:bool_attr",
"single:relationship",
"single:token_attr",
"testAttr",
"testRel"])
# Property fallback comes from TestSingleApplyAPI
attr = typedPrim.GetAttribute("single:token_attr")
self.assertEqual(attr.Get(), "bar")
self.assertEqual(attr.GetResolveInfo().GetSource(),
Usd.ResolveInfoSourceFallback)
# Property fallback comes from TestMultiApplyAPI
attr = typedPrim.GetAttribute("multi:garply:bool_attr")
self.assertEqual(attr.Get(), True)
self.assertEqual(attr.GetResolveInfo().GetSource(),
Usd.ResolveInfoSourceFallback)
# Property fallback comes from TestTypedSchema
attr = typedPrim.GetAttribute("testAttr")
self.assertEqual(attr.Get(), "foo")
self.assertEqual(attr.GetResolveInfo().GetSource(),
Usd.ResolveInfoSourceFallback)
# Metadata "hidden" has a fallback value defined in TestTypedSchema. It
# will be returned by GetMetadata and GetAllMetadata but will return
# false for queries about whether it's authored
self.assertEqual(typedPrim.GetAllMetadata()["hidden"], True)
self.assertEqual(typedPrim.GetMetadata("hidden"), True)
self.assertFalse(typedPrim.HasAuthoredMetadata("hidden"))
self.assertFalse("hidden" in typedPrim.GetAllAuthoredMetadata())
# Documentation metadata comes from prim type definition even with API
# schemas applied.
self.assertEqual(typedPrim.GetMetadata("documentation"),
"Testing typed schema")
def test_TypedPrimsOnStageWithBuiltinAPISchemas(self):
"""
Tests the fallback properties of typed prims on a stage when new API
schemas are applied to a prim whose type already has built-in applied
API schemas.
"""
stage = Usd.Stage.CreateInMemory()
# Add a typed prim. It has API schemas already from its prim definition
# and has properties from both its type and its APIs.
typedPrim = stage.DefinePrim("/TypedPrim", "TestWithBuiltinAppliedSchema")
self.assertEqual(typedPrim.GetTypeName(), 'TestWithBuiltinAppliedSchema')
self.assertEqual(typedPrim.GetAppliedSchemas(),
["TestSingleApplyAPI", "TestMultiApplyAPI:builtin"])
self.assertEqual(typedPrim.GetPrimTypeInfo().GetTypeName(),
'TestWithBuiltinAppliedSchema')
# Note that prim type info does NOT contain the built-in applied API
# schemas from the concrete type's prim definition as these are not part
# of the type identity.
self.assertEqual(typedPrim.GetPrimTypeInfo().GetAppliedAPISchemas(), [])
self.assertTrue(typedPrim.HasAPI(self.SingleApplyAPIType))
self.assertTrue(typedPrim.HasAPI(self.MultiApplyAPIType))
self.assertEqual(typedPrim.GetPropertyNames(), [
"multi:builtin:bool_attr",
"multi:builtin:relationship",
"multi:builtin:token_attr",
"single:bool_attr",
"single:relationship",
"single:token_attr",
"testAttr",
"testRel"])
# Add a new api schemas to the prim's metadata.
typedPrim.ApplyAPI(self.MultiApplyAPIType, "garply")
# Prim has the same type and now has both its original API schemas and
# the new one. Note that the new schema was added using an explicit
# list op but was still prepended to the original list. Built-in API
# schemas cannot be deleted and any authored API schemas will always be
# prepended to the built-ins.
self.assertEqual(typedPrim.GetTypeName(), 'TestWithBuiltinAppliedSchema')
self.assertEqual(typedPrim.GetAppliedSchemas(),
["TestMultiApplyAPI:garply",
"TestSingleApplyAPI", "TestMultiApplyAPI:builtin"])
self.assertEqual(typedPrim.GetPrimTypeInfo().GetTypeName(),
'TestWithBuiltinAppliedSchema')
# Note that prim type info does NOT contain the built-in applied API
# schemas from the concrete type's prim definition as these are not part
# of the type identity.
self.assertEqual(typedPrim.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestMultiApplyAPI:garply"])
self.assertTrue(typedPrim.HasAPI(self.SingleApplyAPIType))
self.assertTrue(typedPrim.HasAPI(self.MultiApplyAPIType))
# Properties have been expanded to include the new API schema
self.assertEqual(typedPrim.GetPropertyNames(), [
"multi:builtin:bool_attr",
"multi:builtin:relationship",
"multi:builtin:token_attr",
"multi:garply:bool_attr",
"multi:garply:relationship",
"multi:garply:token_attr",
"single:bool_attr",
"single:relationship",
"single:token_attr",
"testAttr",
"testRel"])
# Property fallback comes from TestSingleApplyAPI
attr = typedPrim.GetAttribute("single:token_attr")
self.assertEqual(attr.Get(), "bar")
self.assertEqual(attr.GetResolveInfo().GetSource(),
Usd.ResolveInfoSourceFallback)
# Property fallback comes from TestMultiApplyAPI
attr = typedPrim.GetAttribute("multi:garply:bool_attr")
self.assertEqual(attr.Get(), True)
self.assertEqual(attr.GetResolveInfo().GetSource(),
Usd.ResolveInfoSourceFallback)
# Property fallback actually comes from TestWithBuiltinAppliedSchema as
# the typed schema overrides this property from its built-in API schema.
attr = typedPrim.GetAttribute("multi:builtin:bool_attr")
self.assertEqual(attr.Get(), False)
self.assertEqual(attr.GetResolveInfo().GetSource(),
Usd.ResolveInfoSourceFallback)
# Property fallback comes from TestWithBuiltinAppliedSchema
attr = typedPrim.GetAttribute("testAttr")
self.assertEqual(attr.Get(), "foo")
self.assertEqual(attr.GetResolveInfo().GetSource(),
Usd.ResolveInfoSourceFallback)
# Metadata "hidden" has a fallback value defined in
# TestWithBuiltinAppliedSchema. It will be returned by GetMetadata and
# GetAllMetadata but will return false for queries about whether it's
# authored
self.assertEqual(typedPrim.GetAllMetadata()["hidden"], False)
self.assertEqual(typedPrim.GetMetadata("hidden"), False)
self.assertFalse(typedPrim.HasAuthoredMetadata("hidden"))
self.assertFalse("hidden" in typedPrim.GetAllAuthoredMetadata())
# Documentation metadata comes from prim type definition even with API
# schemas applied.
self.assertEqual(typedPrim.GetMetadata("documentation"),
"Test with built-in API schemas")
def test_TypedPrimsOnStageWithBuiltinReapply(self):
"""
Tests the fallback properties of typed prims on a stage when the same
API schemas are applied again to a prim whose type already has applied
API schemas.
"""
stage = Usd.Stage.CreateInMemory()
# Add a typed prim. It has API schemas already from its prim definition
# and has properties from both its type and its APIs.
typedPrim = stage.DefinePrim("/TypedPrim", "TestWithBuiltinAppliedSchema")
self.assertEqual(typedPrim.GetTypeName(), 'TestWithBuiltinAppliedSchema')
self.assertEqual(typedPrim.GetAppliedSchemas(),
["TestSingleApplyAPI", "TestMultiApplyAPI:builtin"])
self.assertEqual(typedPrim.GetPrimTypeInfo().GetTypeName(),
'TestWithBuiltinAppliedSchema')
# Note that prim type info does NOT contain the built-in applied API
# schemas from the concrete type's prim definition as these are not part
# of the type identity.
self.assertEqual(typedPrim.GetPrimTypeInfo().GetAppliedAPISchemas(), [])
self.assertTrue(typedPrim.HasAPI(self.SingleApplyAPIType))
self.assertTrue(typedPrim.HasAPI(self.MultiApplyAPIType))
self.assertEqual(typedPrim.GetPropertyNames(), [
"multi:builtin:bool_attr",
"multi:builtin:relationship",
"multi:builtin:token_attr",
"single:bool_attr",
"single:relationship",
"single:token_attr",
"testAttr",
"testRel"])
# Property fallback comes from TestSingleApplyAPI
attr = typedPrim.GetAttribute("single:token_attr")
self.assertEqual(attr.Get(), "bar")
self.assertEqual(attr.GetResolveInfo().GetSource(),
Usd.ResolveInfoSourceFallback)
# Property fallback actually comes from TestTypedSchema as the typed
# schema overrides this property from its built-in API schema.
attr = typedPrim.GetAttribute("multi:builtin:bool_attr")
self.assertEqual(attr.Get(), False)
self.assertEqual(attr.GetResolveInfo().GetSource(),
Usd.ResolveInfoSourceFallback)
# Property fallback comes from TestTypedSchema
attr = typedPrim.GetAttribute("testAttr")
self.assertEqual(attr.Get(), "foo")
self.assertEqual(attr.GetResolveInfo().GetSource(),
Usd.ResolveInfoSourceFallback)
# Add the built-in api schemas again to the prim's metadata.
typedPrim.ApplyAPI(self.MultiApplyAPIType, "builtin")
typedPrim.ApplyAPI(self.SingleApplyAPIType)
        # Prim has the same type and now has its original API schemas
# plus the same schemas again appended to the list (i.e. both schemas
# now show up twice).
self.assertEqual(typedPrim.GetTypeName(), 'TestWithBuiltinAppliedSchema')
self.assertEqual(typedPrim.GetAppliedSchemas(),
["TestMultiApplyAPI:builtin", "TestSingleApplyAPI",
"TestSingleApplyAPI", "TestMultiApplyAPI:builtin"])
self.assertEqual(typedPrim.GetPrimTypeInfo().GetTypeName(),
'TestWithBuiltinAppliedSchema')
# Note that prim type info does NOT contain the built-in applied API
# schemas from the concrete type's prim definition as these are not part
# of the type identity.
self.assertEqual(typedPrim.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestMultiApplyAPI:builtin", "TestSingleApplyAPI"])
self.assertTrue(typedPrim.HasAPI(self.SingleApplyAPIType))
self.assertTrue(typedPrim.HasAPI(self.MultiApplyAPIType))
# The list of properties hasn't changed as there are no "new" schemas,
# however the defaults may have changed.
self.assertEqual(typedPrim.GetPropertyNames(), [
"multi:builtin:bool_attr",
"multi:builtin:relationship",
"multi:builtin:token_attr",
"single:bool_attr",
"single:relationship",
"single:token_attr",
"testAttr",
"testRel"])
# Property fallback comes from TestSingleApplyAPI - no change
attr = typedPrim.GetAttribute("single:token_attr")
self.assertEqual(attr.Get(), "bar")
self.assertEqual(attr.GetResolveInfo().GetSource(),
Usd.ResolveInfoSourceFallback)
# Property fallback has now changed from False to True as the
# TestTypedSchema originally overrode the fallback from
# TestMultiApplyAPI. But by applying TestMultiApplyAPI again with the
        # same instance, we've re-overridden the attribute so it gets the default
# from the applied schema.
attr = typedPrim.GetAttribute("multi:builtin:bool_attr")
self.assertEqual(attr.Get(), True)
self.assertEqual(attr.GetResolveInfo().GetSource(),
Usd.ResolveInfoSourceFallback)
# Property fallback comes from TestTypedSchema - no change
attr = typedPrim.GetAttribute("testAttr")
self.assertEqual(attr.Get(), "foo")
self.assertEqual(attr.GetResolveInfo().GetSource(),
Usd.ResolveInfoSourceFallback)
# Prim metadata is unchanged from the case above as there is still
# no way for applied API schemas to impart prim metadata defaults.
self.assertEqual(typedPrim.GetAllMetadata()["hidden"], False)
self.assertEqual(typedPrim.GetMetadata("hidden"), False)
self.assertFalse(typedPrim.HasAuthoredMetadata("hidden"))
self.assertFalse("hidden" in typedPrim.GetAllAuthoredMetadata())
# Documentation metadata comes from prim type definition even with API
# schemas applied.
self.assertEqual(typedPrim.GetMetadata("documentation"),
"Test with built-in API schemas")
@unittest.skipIf(Tf.GetEnvSetting('USD_DISABLE_AUTO_APPLY_API_SCHEMAS'),
"Auto apply API schemas are disabled")
def test_TypedPrimsOnStageWithAutoAppliedAPIs(self):
"""
Tests the fallback properties of typed prims on a stage where API
schemas are auto applied.
"""
stage = Usd.Stage.CreateInMemory()
# Add a typed prim that has two types of built-in applied schemas.
# TestMultiApplyAPI:builtin comes from the apiSchemas metadata defined
# in TestTypedSchemaForAutoApply's schema definition.
# TestSingleApplyAPI and TestMultiApplyAPI:autoFoo come from
# TestTypedSchemaForAutoApply being listed in the "AutoApplyAPISchemas"
# plugInfo metadata for both API schemas.
# The built-in applied schemas that come from the apiSchemas metadata
# will always be listed before (and be stronger than) any applied
# schemas that come from apiSchemaAutoApplyTo.
typedPrim = stage.DefinePrim("/TypedPrim", "TestTypedSchemaForAutoApply")
self.assertEqual(typedPrim.GetTypeName(), 'TestTypedSchemaForAutoApply')
self.assertEqual(typedPrim.GetAppliedSchemas(),
["TestMultiApplyAPI:builtin",
"TestMultiApplyAPI:autoFoo",
"TestSingleApplyAPI"])
self.assertEqual(typedPrim.GetPrimTypeInfo().GetTypeName(),
'TestTypedSchemaForAutoApply')
# Note that prim type info does NOT contain the applied API
# schemas from the concrete type's prim definition as these are not part
# of the type identity.
self.assertEqual(typedPrim.GetPrimTypeInfo().GetAppliedAPISchemas(), [])
self.assertTrue(typedPrim.HasAPI(self.MultiApplyAPIType))
self.assertTrue(typedPrim.HasAPI(self.SingleApplyAPIType))
self.assertEqual(typedPrim.GetPropertyNames(), [
"multi:autoFoo:bool_attr",
"multi:autoFoo:relationship",
"multi:autoFoo:token_attr",
"multi:builtin:bool_attr",
"multi:builtin:relationship",
"multi:builtin:token_attr",
"single:bool_attr",
"single:relationship",
"single:token_attr",
"testAttr",
"testRel"])
# Add a concrete typed prim which receives an auto applied API schema.
# TestSingleApplyAPI comes from TestTypedSchemaForAutoApplyConcreteBase
# being listed in TestSingleApplyAPI's apiSchemaAutoApplyTo data.
typedPrim.SetTypeName("TestTypedSchemaForAutoApplyConcreteBase")
self.assertEqual(typedPrim.GetTypeName(),
'TestTypedSchemaForAutoApplyConcreteBase')
self.assertEqual(typedPrim.GetAppliedSchemas(),
["TestSingleApplyAPI"])
self.assertEqual(typedPrim.GetPrimTypeInfo().GetTypeName(),
'TestTypedSchemaForAutoApplyConcreteBase')
# Note that prim type info does NOT contain the auto applied API
# schemas from the concrete type's prim definition as these are not part
# of the type identity.
self.assertEqual(typedPrim.GetPrimTypeInfo().GetAppliedAPISchemas(), [])
self.assertTrue(typedPrim.HasAPI(self.SingleApplyAPIType))
self.assertEqual(typedPrim.GetPropertyNames(), [
"single:bool_attr",
"single:relationship",
"single:token_attr",
"testAttr",
"testRel"])
# Add a concrete typed prim which receives an auto applied API schema
# because it is derived from a base class type that does.
# TestSingleApplyAPI comes from the base class of this type,
# TestTypedSchemaForAutoApplyConcreteBase, being listed in
# TestSingleApplyAPI's apiSchemaAutoApplyTo data.
typedPrim.SetTypeName("TestDerivedTypedSchemaForAutoApplyConcreteBase")
self.assertEqual(typedPrim.GetTypeName(),
'TestDerivedTypedSchemaForAutoApplyConcreteBase')
self.assertEqual(typedPrim.GetAppliedSchemas(),
["TestSingleApplyAPI"])
self.assertEqual(typedPrim.GetPrimTypeInfo().GetTypeName(),
'TestDerivedTypedSchemaForAutoApplyConcreteBase')
# Note that prim type info does NOT contain the auto applied API
# schemas from the concrete type's prim definition as these are not part
# of the type identity.
self.assertEqual(typedPrim.GetPrimTypeInfo().GetAppliedAPISchemas(), [])
self.assertTrue(typedPrim.HasAPI(self.SingleApplyAPIType))
self.assertEqual(typedPrim.GetPropertyNames(), [
"single:bool_attr",
"single:relationship",
"single:token_attr",
"testAttr",
"testRel"])
# Add a concrete typed prim which receives an auto applied API schema
# because it is derived from a base class type that does.
# TestSingleApplyAPI comes from the base class of this type,
# TestTypedSchemaForAutoApplyAbstractBase, being listed in
# TestSingleApplyAPI's apiSchemaAutoApplyTo data. This is different
        # from the case above in that the base class is an abstract type and cannot
# be instantiated as a prim type, but API schemas can still be
# designated to auto apply to abstract types to have the API applied
# to prims of all derived types.
typedPrim.SetTypeName("TestDerivedTypedSchemaForAutoApplyAbstractBase")
self.assertEqual(typedPrim.GetTypeName(),
'TestDerivedTypedSchemaForAutoApplyAbstractBase')
self.assertEqual(typedPrim.GetAppliedSchemas(), ['TestSingleApplyAPI'])
self.assertEqual(typedPrim.GetPrimTypeInfo().GetTypeName(),
'TestDerivedTypedSchemaForAutoApplyAbstractBase')
# Note that prim type info does NOT contain the auto applied API
# schemas from the concrete type's prim definition as these are not part
# of the type identity.
self.assertEqual(typedPrim.GetPrimTypeInfo().GetAppliedAPISchemas(), [])
self.assertTrue(typedPrim.HasAPI(self.SingleApplyAPIType))
self.assertEqual(typedPrim.GetPropertyNames(), [
"single:bool_attr",
"single:relationship",
"single:token_attr",
"testAttr",
"testRel"])
# Verify that we can get the value of the auto apply API metadata for
# TestSingleApplyAPI from the schema registry.
self.assertEqual(
Usd.SchemaRegistry.GetAutoApplyAPISchemas()['TestSingleApplyAPI'],
['TestTypedSchemaForAutoApplyConcreteBase',
'TestTypedSchemaForAutoApplyAbstractBase',
'TestAutoAppliedToAPI',
'TestTypedSchemaForAutoApply'])
@unittest.skipIf(not Tf.GetEnvSetting('USD_DISABLE_AUTO_APPLY_API_SCHEMAS'),
"Auto apply API schemas are not disabled")
def test_TypedPrimsOnStageWithAutoAppliedAPIs_AutoApplyDisabled(self):
"""
Tests the disabling of auto apply schemas through the environment
variable USD_DISABLE_AUTO_APPLY_API_SCHEMAS.
"""
stage = Usd.Stage.CreateInMemory()
# Add a typed prim that has two types of built-in applied schemas.
# TestMultiApplyAPI:builtin comes from the apiSchemas metadata defined
# in TestTypedSchemaForAutoApply's schema definition and is NOT affected
# by disabling auto apply API schemas.
#
# TestSingleApplyAPI and TestMultiApplyAPI:autoFoo would come from
# TestTypedSchemaForAutoApply being listed in the "AutoApplyAPISchemas"
# plugInfo metadata for both API schemas, but with auto apply disabled,
# they are not applied to this type.
typedPrim = stage.DefinePrim("/TypedPrim", "TestTypedSchemaForAutoApply")
self.assertEqual(typedPrim.GetTypeName(),
'TestTypedSchemaForAutoApply')
self.assertEqual(typedPrim.GetAppliedSchemas(),
["TestMultiApplyAPI:builtin"])
self.assertTrue(typedPrim.HasAPI(self.MultiApplyAPIType, 'builtin'))
self.assertFalse(typedPrim.HasAPI(self.MultiApplyAPIType, 'autoFoo'))
self.assertFalse(typedPrim.HasAPI(self.SingleApplyAPIType))
self.assertEqual(typedPrim.GetPropertyNames(), [
"multi:builtin:bool_attr",
"multi:builtin:relationship",
"multi:builtin:token_attr",
"testAttr",
"testRel"])
# Add a concrete typed prim which receives an auto applied API schema.
# TestSingleApplyAPI would be auto applied to this type, but with auto
        # apply disabled, this type has no applied API schemas.
typedPrim.SetTypeName("TestTypedSchemaForAutoApplyConcreteBase")
self.assertEqual(typedPrim.GetTypeName(),
'TestTypedSchemaForAutoApplyConcreteBase')
self.assertEqual(typedPrim.GetAppliedSchemas(), [])
self.assertFalse(typedPrim.HasAPI(self.SingleApplyAPIType))
self.assertEqual(typedPrim.GetPropertyNames(), [
"testAttr",
"testRel"])
# Verify that the auto apply API schema dictionary is empty when auto
        # apply is disabled.
self.assertEqual(Usd.SchemaRegistry.GetAutoApplyAPISchemas(), {})
def test_ApplyRemoveAPI(self):
"""
Tests the detail of the Apply and Remove API for API schemas.
"""
stage = Usd.Stage.CreateInMemory()
rootLayer = stage.GetRootLayer()
sessionLayer = stage.GetSessionLayer()
self.assertTrue(rootLayer)
self.assertTrue(sessionLayer)
# Add a basic prim with no type. It has no applied schemas or properties.
prim = stage.DefinePrim("/Prim")
self.assertEqual(prim.GetAppliedSchemas(), [])
self.assertEqual(prim.GetPrimTypeInfo().GetAppliedAPISchemas(), [])
# Helper function for verifying the state of the 'apiSchemas' list op
# field in the prim spec for the test prim on the specified layer.
def _VerifyListOp(layer, explicit = [], prepended = [],
appended = [], deleted = []):
spec = layer.GetPrimAtPath('/Prim')
listOp = spec.GetInfo('apiSchemas')
self.assertEqual(listOp.explicitItems, explicit)
self.assertEqual(listOp.prependedItems, prepended)
self.assertEqual(listOp.appendedItems, appended)
self.assertEqual(listOp.deletedItems, deleted)
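# As a rough sketch of how a single (non-explicit) apiSchemas list op is
# applied over the weaker composed list: deleted items are removed first,
# then prepended items are added to the front and appended items to the
# end. For example, applying a list op with deleted = [X] and
# prepended = [X] over an empty weaker list still yields [X], because the
# delete runs before the prepend.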
# Apply a single API schema with the default edit target. This adds it to
# the end of the prepend list.
prim.ApplyAPI(self.SingleApplyAPIType)
self.assertEqual(prim.GetAppliedSchemas(), ["TestSingleApplyAPI"])
self.assertTrue(prim.HasAPI(self.SingleApplyAPIType))
_VerifyListOp(rootLayer, prepended = ["TestSingleApplyAPI"])
# Apply the same API schema again. This will not update the list.
prim.ApplyAPI(self.SingleApplyAPIType)
self.assertEqual(prim.GetAppliedSchemas(), ["TestSingleApplyAPI"])
self.assertTrue(prim.HasAPI(self.SingleApplyAPIType))
_VerifyListOp(rootLayer, prepended = ["TestSingleApplyAPI"])
# Remove the API schema. This removes the schema from the prepend and
# puts it in the deleted list.
prim.RemoveAPI(self.SingleApplyAPIType)
self.assertEqual(prim.GetAppliedSchemas(), [])
_VerifyListOp(rootLayer, deleted = ["TestSingleApplyAPI"])
# Remove the same API again. This is a no op.
prim.RemoveAPI(self.SingleApplyAPIType)
self.assertEqual(prim.GetAppliedSchemas(), [])
_VerifyListOp(rootLayer, deleted = ["TestSingleApplyAPI"])
# Remove a multi apply schema which is not currently in the list. The
# schema instance name is still added to the deleted list.
prim.RemoveAPI(self.MultiApplyAPIType, "foo")
self.assertEqual(prim.GetAppliedSchemas(), [])
_VerifyListOp(rootLayer,
deleted = ["TestSingleApplyAPI", "TestMultiApplyAPI:foo"])
# Apply the same instance of the multi-apply schema we just deleted. It
# is added to the prepend list but is NOT removed from the deleted list.
# It still ends up in the composed API schemas since deletes are
# processed before prepends in the same list op.
prim.ApplyAPI(self.MultiApplyAPIType, "foo")
self.assertEqual(prim.GetAppliedSchemas(), ["TestMultiApplyAPI:foo"])
_VerifyListOp(rootLayer,
prepended = ["TestMultiApplyAPI:foo"],
deleted = ["TestSingleApplyAPI", "TestMultiApplyAPI:foo"])
# Apply a different instance of the multi-apply schema. It is added to
# the end of the prepends list.
prim.ApplyAPI(self.MultiApplyAPIType, "bar")
self.assertEqual(prim.GetAppliedSchemas(),
["TestMultiApplyAPI:foo", "TestMultiApplyAPI:bar"])
_VerifyListOp(rootLayer,
prepended = ["TestMultiApplyAPI:foo", "TestMultiApplyAPI:bar"],
deleted = ["TestSingleApplyAPI", "TestMultiApplyAPI:foo"])
# Remove the "bar" instance of the multi-apply schema on the session
# layer. The schema is added to the deleted list on the session layer
# and the root layer remains the same. It does not show up in the composed
# API schemas after composition.
with Usd.EditContext(stage, sessionLayer):
prim.RemoveAPI(self.MultiApplyAPIType, "bar")
self.assertEqual(prim.GetAppliedSchemas(), ["TestMultiApplyAPI:foo"])
_VerifyListOp(rootLayer,
prepended = ["TestMultiApplyAPI:foo", "TestMultiApplyAPI:bar"],
deleted = ["TestSingleApplyAPI", "TestMultiApplyAPI:foo"])
_VerifyListOp(sessionLayer,
deleted = ["TestMultiApplyAPI:bar"])
# Re-apply the "bar" instance of the multi-apply schema on the session
# layer. It is added to the prepend list in the session layer but still
# remains in the delete list. Note that the "bar" instance is back in
# the composed API schemas list but now it is first instead of second
# like it was before, as it gets deleted and prepended by the session
# layer.
with Usd.EditContext(stage, sessionLayer):
prim.ApplyAPI(self.MultiApplyAPIType, "bar")
self.assertEqual(prim.GetAppliedSchemas(),
["TestMultiApplyAPI:bar", "TestMultiApplyAPI:foo"])
_VerifyListOp(rootLayer,
prepended = ["TestMultiApplyAPI:foo", "TestMultiApplyAPI:bar"],
deleted = ["TestSingleApplyAPI", "TestMultiApplyAPI:foo"])
_VerifyListOp(sessionLayer,
prepended = ["TestMultiApplyAPI:bar"],
deleted = ["TestMultiApplyAPI:bar"])
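# Sketch of the composition at this point: the root layer op (prepend
# TestMultiApplyAPI:foo and TestMultiApplyAPI:bar, delete TestSingleApplyAPI
# and TestMultiApplyAPI:foo) composes to [foo, bar]; the stronger session
# layer op (delete bar, then prepend bar) is applied over that result,
# which is why bar now sits at the front of the composed list.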
# These next few cases verify the behavior when the list op has
# appends or explicit entries. (Note that we don't define behaviors for
# add or reorder).
# Update the session layer to have an appended API schema.
with Usd.EditContext(stage, sessionLayer):
appendedListOp = Sdf.TokenListOp()
appendedListOp.appendedItems = ["TestMultiApplyAPI:bar"]
prim.SetMetadata('apiSchemas', appendedListOp)
# Update the root layer to have an explicit list op.
explicitListOp = Sdf.TokenListOp()
explicitListOp.explicitItems = ["TestMultiApplyAPI:foo"]
prim.SetMetadata('apiSchemas', explicitListOp)
# Verify the initial authored and composed lists.
self.assertEqual(prim.GetAppliedSchemas(),
["TestMultiApplyAPI:foo", "TestMultiApplyAPI:bar"])
_VerifyListOp(rootLayer,
explicit = ["TestMultiApplyAPI:foo"])
_VerifyListOp(sessionLayer,
appended = ["TestMultiApplyAPI:bar"])
# On the session and root layers, try to apply the API schema that
# is already in each respective list. This will be a no op even though
# the schemas aren't in the prepended lists.
with Usd.EditContext(stage, sessionLayer):
prim.ApplyAPI(self.MultiApplyAPIType, "bar")
prim.ApplyAPI(self.MultiApplyAPIType, "foo")
self.assertEqual(prim.GetAppliedSchemas(),
["TestMultiApplyAPI:foo", "TestMultiApplyAPI:bar"])
_VerifyListOp(rootLayer,
explicit = ["TestMultiApplyAPI:foo"])
_VerifyListOp(sessionLayer,
appended = ["TestMultiApplyAPI:bar"])
# Apply the single apply schema to both layers. The root layer adds it
# to the end of its explicit list while the session layer adds it to
# the prepends. The composed API schemas will only contain the schema
# once with the prepend from the stronger session layer winning for
# ordering.
with Usd.EditContext(stage, sessionLayer):
prim.ApplyAPI(self.SingleApplyAPIType)
prim.ApplyAPI(self.SingleApplyAPIType)
self.assertEqual(prim.GetAppliedSchemas(),
["TestSingleApplyAPI", "TestMultiApplyAPI:foo",
"TestMultiApplyAPI:bar"])
_VerifyListOp(rootLayer,
explicit = ["TestMultiApplyAPI:foo", "TestSingleApplyAPI"])
_VerifyListOp(sessionLayer,
prepended = ["TestSingleApplyAPI"],
appended = ["TestMultiApplyAPI:bar"])
# Remove the starting API schemas from the root and session layers. In
# the root layer it is just removed from the explicit list. In the
# session layer it is removed from appends and added to the deletes.
with Usd.EditContext(stage, sessionLayer):
prim.RemoveAPI(self.MultiApplyAPIType, "bar")
prim.RemoveAPI(self.MultiApplyAPIType, "foo")
self.assertEqual(prim.GetAppliedSchemas(),
["TestSingleApplyAPI"])
_VerifyListOp(rootLayer,
explicit = ["TestSingleApplyAPI"])
_VerifyListOp(sessionLayer,
prepended = ["TestSingleApplyAPI"],
deleted = ["TestMultiApplyAPI:bar"])
# Clear the apiSchemas in both layers for the next tests.
# XXX: Should we have additional API for clearing the list op like we
# do for other list op fields?
with Usd.EditContext(stage, sessionLayer):
prim.SetMetadata('apiSchemas', Sdf.TokenListOp())
prim.SetMetadata('apiSchemas', Sdf.TokenListOp())
self.assertEqual(prim.GetAppliedSchemas(), [])
_VerifyListOp(rootLayer)
_VerifyListOp(sessionLayer)
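# Note that authoring a default-constructed (empty, non-explicit)
# Sdf.TokenListOp replaces the previously authored opinion in that layer,
# which is why both layers now verify as having no list op items at all.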
# Trying to apply or remove a multi-apply schema with no instance name
# is an error.
with self.assertRaises(Tf.ErrorException):
prim.ApplyAPI(self.MultiApplyAPIType)
self.assertEqual(prim.GetAppliedSchemas(), [])
_VerifyListOp(rootLayer)
with self.assertRaises(Tf.ErrorException):
prim.RemoveAPI(self.MultiApplyAPIType)
self.assertEqual(prim.GetAppliedSchemas(), [])
_VerifyListOp(rootLayer)
# Trying to apply or remove a single apply schema with an instance name
# is an error.
with self.assertRaises(Tf.ErrorException):
prim.ApplyAPI(self.SingleApplyAPIType, "foo")
self.assertEqual(prim.GetAppliedSchemas(), [])
_VerifyListOp(rootLayer)
with self.assertRaises(Tf.ErrorException):
prim.RemoveAPI(self.SingleApplyAPIType, "foo")
self.assertEqual(prim.GetAppliedSchemas(), [])
_VerifyListOp(rootLayer)
# Trying to apply or remove a no apply schema is an error.
with self.assertRaises(Tf.ErrorException):
prim.ApplyAPI(Usd.ModelAPI)
self.assertEqual(prim.GetAppliedSchemas(), [])
_VerifyListOp(rootLayer)
with self.assertRaises(Tf.ErrorException):
prim.RemoveAPI(Usd.ModelAPI)
self.assertEqual(prim.GetAppliedSchemas(), [])
_VerifyListOp(rootLayer)
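# In contrast to the validated ApplyAPI/RemoveAPI calls exercised above,
# the AddAppliedSchema/RemoveAppliedSchema calls below edit the raw
# apiSchemas token list and accept any string, leaving validation to prim
# definition composition.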
# AddAppliedSchema will just add by schema type name with no validity
# checks. But it still won't add duplicates.
#
# Valid type names
prim.AddAppliedSchema("TestSingleApplyAPI")
prim.AddAppliedSchema("TestMultiApplyAPI:bar")
# Invalid type names.
prim.AddAppliedSchema("BogusTypeName")
prim.AddAppliedSchema("TestMultiApplyAPI")
# Duplicate.
prim.AddAppliedSchema("TestSingleApplyAPI")
# Even though invalid type names get added to the apiSchemas metadata,
# they won't show up in GetAppliedSchemas as they won't be composed into
# the prim's UsdPrimDefinition.
self.assertEqual(prim.GetAppliedSchemas(),
["TestSingleApplyAPI", "TestMultiApplyAPI:bar"])
_VerifyListOp(rootLayer,
prepended = ["TestSingleApplyAPI", "TestMultiApplyAPI:bar",
"BogusTypeName", "TestMultiApplyAPI"])
# RemoveAppliedSchema will just delete by schema type name with no
# validity checks. But it still won't add duplicate delete entries.
#
# Valid type names
prim.RemoveAppliedSchema("TestSingleApplyAPI")
prim.RemoveAppliedSchema("TestMultiApplyAPI:bar")
# Invalid type names.
prim.RemoveAppliedSchema("BogusTypeName")
prim.RemoveAppliedSchema("TestMultiApplyAPI")
# Duplicate.
prim.RemoveAppliedSchema("TestSingleApplyAPI")
self.assertEqual(prim.GetAppliedSchemas(), [])
_VerifyListOp(rootLayer,
deleted = ["TestSingleApplyAPI", "TestMultiApplyAPI:bar",
"BogusTypeName", "TestMultiApplyAPI"])
def test_CanApplyAPI(self):
"""
Tests the details of the Usd.Prim.CanApplyAPI.
"""
stage = Usd.Stage.CreateInMemory()
rootLayer = stage.GetRootLayer()
sessionLayer = stage.GetSessionLayer()
self.assertTrue(rootLayer)
self.assertTrue(sessionLayer)
# Add prims of various types to test CanApplyAPI.
prim = stage.DefinePrim(
"/Prim")
prim2 = stage.DefinePrim(
"/Prim2", "TestTypedSchema")
prim3 = stage.DefinePrim(
"/Prim3", "TestTypedSchemaForAutoApply")
prim4 = stage.DefinePrim(
"/Prim4", "TestDerivedTypedSchemaForAutoApplyConcreteBase")
prim5 = stage.DefinePrim(
"/Prim5", "TestDerivedTypedSchemaForAutoApplyAbstractBase")
# Single apply schema with no specified "apiSchemaCanOnlyApplyTo"
# metadata. Can apply to all prims.
self.assertEqual(Usd.SchemaRegistry.GetAPISchemaCanOnlyApplyToTypeNames(
"TestSingleApplyAPI"), [])
self.assertTrue(prim.CanApplyAPI(self.SingleApplyAPIType))
self.assertTrue(prim2.CanApplyAPI(self.SingleApplyAPIType))
self.assertTrue(prim3.CanApplyAPI(self.SingleApplyAPIType))
self.assertTrue(prim4.CanApplyAPI(self.SingleApplyAPIType))
self.assertTrue(prim5.CanApplyAPI(self.SingleApplyAPIType))
# Multiple apply schema with no specified "apiSchemaCanOnlyApplyTo"
# metadata and no "allowedInstanceNames". Can apply to all prims with
# all instance names (with the notable exception of instance names that
# match a property name from the schema).
self.assertEqual(Usd.SchemaRegistry.GetAPISchemaCanOnlyApplyToTypeNames(
"TestMultiApplyAPI", "foo"), [])
self.assertTrue(Usd.SchemaRegistry.IsAllowedAPISchemaInstanceName(
"TestMultiApplyAPI", "foo"))
self.assertTrue(prim.CanApplyAPI(self.MultiApplyAPIType, "foo"))
self.assertTrue(prim2.CanApplyAPI(self.MultiApplyAPIType, "foo"))
self.assertTrue(prim3.CanApplyAPI(self.MultiApplyAPIType, "foo"))
self.assertTrue(prim4.CanApplyAPI(self.MultiApplyAPIType, "foo"))
self.assertTrue(prim5.CanApplyAPI(self.MultiApplyAPIType, "foo"))
self.assertEqual(Usd.SchemaRegistry.GetAPISchemaCanOnlyApplyToTypeNames(
"TestMultiApplyAPI", "bar"), [])
self.assertTrue(Usd.SchemaRegistry.IsAllowedAPISchemaInstanceName(
"TestMultiApplyAPI", "bar"))
self.assertTrue(prim.CanApplyAPI(self.MultiApplyAPIType, "bar"))
self.assertTrue(prim2.CanApplyAPI(self.MultiApplyAPIType, "bar"))
self.assertTrue(prim3.CanApplyAPI(self.MultiApplyAPIType, "bar"))
self.assertTrue(prim4.CanApplyAPI(self.MultiApplyAPIType, "bar"))
self.assertTrue(prim5.CanApplyAPI(self.MultiApplyAPIType, "bar"))
self.assertEqual(Usd.SchemaRegistry.GetAPISchemaCanOnlyApplyToTypeNames(
"TestMultiApplyAPI", "baz"), [])
self.assertTrue(Usd.SchemaRegistry.IsAllowedAPISchemaInstanceName(
"TestMultiApplyAPI", "baz"))
self.assertTrue(prim.CanApplyAPI(self.MultiApplyAPIType, "baz"))
self.assertTrue(prim2.CanApplyAPI(self.MultiApplyAPIType, "baz"))
self.assertTrue(prim3.CanApplyAPI(self.MultiApplyAPIType, "baz"))
self.assertTrue(prim4.CanApplyAPI(self.MultiApplyAPIType, "baz"))
self.assertTrue(prim5.CanApplyAPI(self.MultiApplyAPIType, "baz"))
self.assertEqual(Usd.SchemaRegistry.GetAPISchemaCanOnlyApplyToTypeNames(
"TestMultiApplyAPI", "Bar"), [])
self.assertTrue(Usd.SchemaRegistry.IsAllowedAPISchemaInstanceName(
"TestMultiApplyAPI", "Bar"))
self.assertTrue(prim.CanApplyAPI(self.MultiApplyAPIType, "Bar"))
self.assertTrue(prim2.CanApplyAPI(self.MultiApplyAPIType, "Bar"))
self.assertTrue(prim3.CanApplyAPI(self.MultiApplyAPIType, "Bar"))
self.assertTrue(prim4.CanApplyAPI(self.MultiApplyAPIType, "Bar"))
self.assertTrue(prim5.CanApplyAPI(self.MultiApplyAPIType, "Bar"))
self.assertEqual(Usd.SchemaRegistry.GetAPISchemaCanOnlyApplyToTypeNames(
"TestMultiApplyAPI", "qux"), [])
self.assertTrue(Usd.SchemaRegistry.IsAllowedAPISchemaInstanceName(
"TestMultiApplyAPI", "qux"))
self.assertTrue(prim.CanApplyAPI(self.MultiApplyAPIType, "qux"))
self.assertTrue(prim2.CanApplyAPI(self.MultiApplyAPIType, "qux"))
self.assertTrue(prim3.CanApplyAPI(self.MultiApplyAPIType, "qux"))
self.assertTrue(prim4.CanApplyAPI(self.MultiApplyAPIType, "qux"))
self.assertTrue(prim5.CanApplyAPI(self.MultiApplyAPIType, "qux"))
# As mentioned above, property names of the API schema are not valid
# instance names and will return false for CanApplyAPI
self.assertFalse(Usd.SchemaRegistry.IsAllowedAPISchemaInstanceName(
"TestMultiApplyAPI", "bool_attr"))
self.assertFalse(prim.CanApplyAPI(self.MultiApplyAPIType, "bool_attr"))
self.assertFalse(prim2.CanApplyAPI(self.MultiApplyAPIType, "bool_attr"))
self.assertFalse(prim3.CanApplyAPI(self.MultiApplyAPIType, "bool_attr"))
self.assertFalse(prim4.CanApplyAPI(self.MultiApplyAPIType, "bool_attr"))
self.assertFalse(prim5.CanApplyAPI(self.MultiApplyAPIType, "bool_attr"))
self.assertFalse(Usd.SchemaRegistry.IsAllowedAPISchemaInstanceName(
"TestMultiApplyAPI", "relationship"))
self.assertFalse(prim.CanApplyAPI(self.MultiApplyAPIType, "relationship"))
self.assertFalse(prim2.CanApplyAPI(self.MultiApplyAPIType, "relationship"))
self.assertFalse(prim3.CanApplyAPI(self.MultiApplyAPIType, "relationship"))
self.assertFalse(prim4.CanApplyAPI(self.MultiApplyAPIType, "relationship"))
self.assertFalse(prim5.CanApplyAPI(self.MultiApplyAPIType, "relationship"))
# Single apply API schema that does specify an "apiSchemaCanOnlyApplyTo"
# list. prim3 is a type in the list and prim4 derives from a type in the
# list so only these return true.
self.assertEqual(Usd.SchemaRegistry.GetAPISchemaCanOnlyApplyToTypeNames(
"TestSingleCanApplyAPI"),
["TestTypedSchemaForAutoApply",
"TestTypedSchemaForAutoApplyConcreteBase"])
self.assertFalse(prim.CanApplyAPI(self.SingleCanApplyAPIType))
self.assertFalse(prim2.CanApplyAPI(self.SingleCanApplyAPIType))
self.assertTrue(prim3.CanApplyAPI(self.SingleCanApplyAPIType))
self.assertTrue(prim4.CanApplyAPI(self.SingleCanApplyAPIType))
self.assertFalse(prim5.CanApplyAPI(self.SingleCanApplyAPIType))
# Multiple apply API schema that specifies allow instance names
# "foo", "bar", and "baz". All other instance names aren't allowed
# and will return false.
self.assertFalse(Usd.SchemaRegistry.IsAllowedAPISchemaInstanceName(
"TestMultiCanApplyAPI", "Bar"))
self.assertFalse(prim.CanApplyAPI(self.MultiCanApplyAPIType, "Bar"))
self.assertFalse(prim2.CanApplyAPI(self.MultiCanApplyAPIType, "Bar"))
self.assertFalse(prim3.CanApplyAPI(self.MultiCanApplyAPIType, "Bar"))
self.assertFalse(prim4.CanApplyAPI(self.MultiCanApplyAPIType, "Bar"))
self.assertFalse(prim5.CanApplyAPI(self.MultiCanApplyAPIType, "Bar"))
self.assertFalse(Usd.SchemaRegistry.IsAllowedAPISchemaInstanceName(
"TestMultiCanApplyAPI", "qux"))
self.assertFalse(prim.CanApplyAPI(self.MultiCanApplyAPIType, "qux"))
self.assertFalse(prim2.CanApplyAPI(self.MultiCanApplyAPIType, "qux"))
self.assertFalse(prim3.CanApplyAPI(self.MultiCanApplyAPIType, "qux"))
self.assertFalse(prim4.CanApplyAPI(self.MultiCanApplyAPIType, "qux"))
self.assertFalse(prim5.CanApplyAPI(self.MultiCanApplyAPIType, "qux"))
# Same multiple apply API schema with allowed instance name "baz".
# The API schema type specifies an "apiSchemaCanOnlyApplyTo" list so
# this instance can only be applied to those types. prim3 is a type in
# the list and prim5 derives from a type in the list so only these
# return true.
self.assertEqual(Usd.SchemaRegistry.GetAPISchemaCanOnlyApplyToTypeNames(
"TestMultiCanApplyAPI", "baz"),
["TestTypedSchemaForAutoApply",
"TestTypedSchemaForAutoApplyAbstractBase"])
self.assertEqual(
Usd.SchemaRegistry.GetAPISchemaCanOnlyApplyToTypeNames(
"TestMultiCanApplyAPI", "baz"),
Usd.SchemaRegistry.GetAPISchemaCanOnlyApplyToTypeNames(
"TestMultiCanApplyAPI"))
self.assertTrue(Usd.SchemaRegistry.IsAllowedAPISchemaInstanceName(
"TestMultiCanApplyAPI", "baz"))
self.assertFalse(prim.CanApplyAPI(self.MultiCanApplyAPIType, "baz"))
self.assertFalse(prim2.CanApplyAPI(self.MultiCanApplyAPIType, "baz"))
self.assertTrue(prim3.CanApplyAPI(self.MultiCanApplyAPIType, "baz"))
self.assertFalse(prim4.CanApplyAPI(self.MultiCanApplyAPIType, "baz"))
self.assertTrue(prim5.CanApplyAPI(self.MultiCanApplyAPIType, "baz"))
# Same multiple apply API schema with allowed instance name "foo".
# The API schema type specifies an "apiSchemaCanOnlyApplyTo" list
# specifically for "foo" so this instance can only be applied to those
# types. prim3 is a type in the list and prim4 derives from a type in
# the list so only these return true.
self.assertEqual(Usd.SchemaRegistry.GetAPISchemaCanOnlyApplyToTypeNames(
"TestMultiCanApplyAPI", "foo"),
["TestTypedSchemaForAutoApply",
"TestTypedSchemaForAutoApplyConcreteBase"])
self.assertNotEqual(
Usd.SchemaRegistry.GetAPISchemaCanOnlyApplyToTypeNames(
"TestMultiCanApplyAPI", "foo"),
Usd.SchemaRegistry.GetAPISchemaCanOnlyApplyToTypeNames(
"TestMultiCanApplyAPI"))
self.assertTrue(Usd.SchemaRegistry.IsAllowedAPISchemaInstanceName(
"TestMultiCanApplyAPI", "foo"))
self.assertFalse(prim.CanApplyAPI(self.MultiCanApplyAPIType, "foo"))
self.assertFalse(prim2.CanApplyAPI(self.MultiCanApplyAPIType, "foo"))
self.assertTrue(prim3.CanApplyAPI(self.MultiCanApplyAPIType, "foo"))
self.assertTrue(prim4.CanApplyAPI(self.MultiCanApplyAPIType, "foo"))
self.assertFalse(prim5.CanApplyAPI(self.MultiCanApplyAPIType, "foo"))
# Same multiple apply API schema with allowed instance name "bar".
# The API schema type specifies yet another "apiSchemaCanOnlyApplyTo"
# list specifically for "bar" so this instance can only be applied to
# those types. prim4 and prim5 each derive from a different type in
# the list so only these return true.
self.assertEqual(Usd.SchemaRegistry.GetAPISchemaCanOnlyApplyToTypeNames(
"TestMultiCanApplyAPI", "bar"),
["TestTypedSchemaForAutoApplyAbstractBase",
"TestTypedSchemaForAutoApplyConcreteBase"])
self.assertNotEqual(
Usd.SchemaRegistry.GetAPISchemaCanOnlyApplyToTypeNames(
"TestMultiCanApplyAPI", "bar"),
Usd.SchemaRegistry.GetAPISchemaCanOnlyApplyToTypeNames(
"TestMultiCanApplyAPI"))
self.assertTrue(Usd.SchemaRegistry.IsAllowedAPISchemaInstanceName(
"TestMultiCanApplyAPI", "bar"))
self.assertFalse(prim.CanApplyAPI(self.MultiCanApplyAPIType, "bar"))
self.assertFalse(prim2.CanApplyAPI(self.MultiCanApplyAPIType, "bar"))
self.assertFalse(prim3.CanApplyAPI(self.MultiCanApplyAPIType, "bar"))
self.assertTrue(prim4.CanApplyAPI(self.MultiCanApplyAPIType, "bar"))
self.assertTrue(prim5.CanApplyAPI(self.MultiCanApplyAPIType, "bar"))
# Error conditions
# Coding error if called on single apply schema with an instance name.
with self.assertRaises(Tf.ErrorException):
self.assertFalse(prim.CanApplyAPI(self.SingleApplyAPIType, "foo"))
# Coding error if called on multiple apply schema without an instance
# name.
with self.assertRaises(Tf.ErrorException):
self.assertFalse(prim.CanApplyAPI(self.MultiApplyAPIType))
# Coding error if called on multiple apply schema with an empty instance
# name.
with self.assertRaises(Tf.ErrorException):
self.assertFalse(prim.CanApplyAPI(self.MultiApplyAPIType, ""))
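# Illustrative usage sketch (not part of the test): calling code would
# typically check CanApplyAPI before ApplyAPI and surface the whyNot
# annotation on failure, e.g. (apiType and instanceName are placeholders):
#   result = prim.CanApplyAPI(apiType, instanceName)
#   if result:
#       prim.ApplyAPI(apiType, instanceName)
#   else:
#       print(result.whyNot)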
# Verify whyNot annotations when CanApplyAPI is false.
result = prim4.CanApplyAPI(self.MultiCanApplyAPIType, "bar")
self.assertTrue(result)
self.assertEqual(result.whyNot, "")
result = prim.CanApplyAPI(self.MultiCanApplyAPIType, "qux")
self.assertFalse(result)
self.assertEqual(result.whyNot,
"'qux' is not an allowed instance name for multiple "
"apply API schema 'TestMultiCanApplyAPI'.")
result = prim.CanApplyAPI(self.MultiCanApplyAPIType, "bar")
self.assertFalse(result)
self.assertEqual(result.whyNot,
"API schema 'TestMultiCanApplyAPI:bar' can only be "
"applied to prims of the following types: "
"TestTypedSchemaForAutoApplyAbstractBase, "
"TestTypedSchemaForAutoApplyConcreteBase.")
result = prim.CanApplyAPI(self.MultiApplyAPIType, "bool_attr")
self.assertFalse(result)
self.assertEqual(result.whyNot,
"'bool_attr' is not an allowed instance name for "
"multiple apply API schema 'TestMultiApplyAPI'.")
def test_NestedSingleApplyAPISchemas(self):
"""
Tests the application of single apply API schemas that have nested
built-in API schemas
"""
stage = Usd.Stage.CreateInMemory()
# Simple helper for testing that a prim has expected attributes that
# resolve to expected values.
def _VerifyAttrValues(prim, expectedAttrValues):
values = {name : prim.GetAttribute(name).Get()
for name in expectedAttrValues.keys()}
self.assertEqual(values, expectedAttrValues)
# Add a prim with no type and apply the TestNestedInnerSingleApplyAPI.
innerSinglePrim = stage.DefinePrim("/InnerSingle")
innerSinglePrim.ApplyAPI(self.NestedInnerSingleApplyAPIType)
# The only authored applied API schema for the prim is the applied
# TestNestedInnerSingleApplyAPI.
self.assertEqual(innerSinglePrim.GetPrimTypeInfo().GetTypeName(), '')
self.assertEqual(innerSinglePrim.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestNestedInnerSingleApplyAPI"])
# The composed applied API schemas however also contain the
# TestSingleApplyAPI and the "bar" instance of TestMultiApplyAPI as
# these are built-in APIs of TestNestedInnerSingleApplyAPI
expectedAPISchemas = [
"TestNestedInnerSingleApplyAPI",
"TestSingleApplyAPI",
"TestMultiApplyAPI:bar"]
self.assertEqual(innerSinglePrim.GetAppliedSchemas(),
expectedAPISchemas)
# The prim "has" all these built-in APIs as well.
self.assertTrue(innerSinglePrim.HasAPI(self.NestedInnerSingleApplyAPIType))
self.assertTrue(innerSinglePrim.HasAPI(self.SingleApplyAPIType))
self.assertTrue(innerSinglePrim.HasAPI(self.MultiApplyAPIType))
self.assertTrue(innerSinglePrim.HasAPI(self.MultiApplyAPIType, "bar"))
# Properties come from all composed built-in APIs
expectedPropNames = [
# Properties from TestNestedInnerSingleApplyAPI
"innerSingle:int_attr",
"innerSingle:relationship",
"innerSingle:token_attr",
# Properties from TestMultiApplyAPI:bar
"multi:bar:bool_attr",
"multi:bar:relationship",
"multi:bar:token_attr",
# Properties from TestSingleApplyAPI
"single:bool_attr",
"single:relationship",
"single:token_attr"]
self.assertEqual(innerSinglePrim.GetPropertyNames(), expectedPropNames)
# Verify that the attribute fallback values come from the API schemas
# that define them. The attribute "multi:bar:token_attr" is defined in
# TestNestedInnerSingleApplyAPI and overrides the attr fallback value
# defined in TestMultiApplyAPI:bar
expectedAttrValues = {
"multi:bar:token_attr" : "inner_override",
"multi:bar:bool_attr" : True,
"innerSingle:token_attr" : "inner",
"innerSingle:int_attr" : 3,
"single:token_attr" : "bar",
"single:bool_attr" : True}
_VerifyAttrValues(innerSinglePrim, expectedAttrValues)
# Get the prim definition for the API schema and verify its applied
# API schemas and properties match what was imparted on the prim.
innerSingleApplyAPIDef = \
Usd.SchemaRegistry().FindAppliedAPIPrimDefinition(
"TestNestedInnerSingleApplyAPI")
self.assertTrue(innerSingleApplyAPIDef)
self.assertEqual(innerSingleApplyAPIDef.GetAppliedAPISchemas(),
expectedAPISchemas)
self.assertEqual(sorted(innerSingleApplyAPIDef.GetPropertyNames()),
expectedPropNames)
self.assertEqual(innerSingleApplyAPIDef.GetDocumentation(),
"Test nested single apply API schema: inner schema")
# Add a prim with no type and apply the TestNestedOuterSingleApplyAPI.
outerSinglePrim = stage.DefinePrim("/OuterSingle")
outerSinglePrim.ApplyAPI(self.NestedOuterSingleApplyAPIType)
# The only authored applied API schema for the prim is the applied
# TestNestedOuterSingleApplyAPI.
self.assertEqual(outerSinglePrim.GetPrimTypeInfo().GetTypeName(), '')
self.assertEqual(outerSinglePrim.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestNestedOuterSingleApplyAPI"])
# The composed applied API schemas, however, also contain the
# "foo" instance of TestMultiApplyAPI and
# TestNestedInnerSingleApplyAPI, as these are built-in APIs of
# TestNestedOuterSingleApplyAPI. However, because
# TestNestedInnerSingleApplyAPI has its own built-in API schemas, those
# are also pulled into the composed applied API schemas for this prim
self.assertEqual(outerSinglePrim.GetTypeName(), '')
expectedAPISchemas = [
"TestNestedOuterSingleApplyAPI",
"TestMultiApplyAPI:foo",
"TestNestedInnerSingleApplyAPI",
"TestSingleApplyAPI",
"TestMultiApplyAPI:bar"]
self.assertEqual(outerSinglePrim.GetAppliedSchemas(),
expectedAPISchemas)
# Properties come from all composed built-in APIs
expectedPropNames = [
"innerSingle:int_attr",
"innerSingle:relationship",
"innerSingle:token_attr",
# Properties from TestMultiApplyAPI:bar (included from
# TestNestedInnerSingleApplyAPI)
"multi:bar:bool_attr",
"multi:bar:relationship",
"multi:bar:token_attr",
# Properties from TestMultiApplyAPI:foo
"multi:foo:bool_attr",
"multi:foo:relationship",
"multi:foo:token_attr",
# Properties from TestNestedOuterSingleApplyAPI
"outerSingle:int_attr",
"outerSingle:relationship",
"outerSingle:token_attr",
# Properties from TestSingleApplyAPI (included from
# TestNestedInnerSingleApplyAPI)
"single:bool_attr",
"single:relationship",
"single:token_attr"]
self.assertEqual(outerSinglePrim.GetPropertyNames(), expectedPropNames)
# Verify that the attribute fallback values come from the API schemas
# that define them. The attribute override for "multi:bar:token_attr"
# from TestNestedInnerSingleApplyAPI still comes through.
# Also, the attribute "single:token_attr" is defined in
# TestNestedOuterSingleApplyAPI and overrides the attr fallback value
# defined in TestSingleApplyAPI.
expectedAttrValues = {
"innerSingle:token_attr" : "inner",
"innerSingle:int_attr" : 3,
"multi:bar:token_attr" : "inner_override",
"multi:bar:bool_attr" : True,
"multi:foo:token_attr" : "foo",
"multi:foo:bool_attr" : True,
"outerSingle:token_attr" : "outer",
"outerSingle:int_attr" : 4,
"single:token_attr" : "outer_override",
"single:bool_attr" : True}
_VerifyAttrValues(outerSinglePrim, expectedAttrValues)
# Get the prim definition for the API schema and verify its applied
# API schemas and properties match what was imparted on the prim.
outerSingleApplyAPIDef = \
Usd.SchemaRegistry().FindAppliedAPIPrimDefinition(
"TestNestedOuterSingleApplyAPI")
self.assertTrue(outerSingleApplyAPIDef)
self.assertEqual(outerSingleApplyAPIDef.GetAppliedAPISchemas(),
expectedAPISchemas)
self.assertEqual(sorted(outerSingleApplyAPIDef.GetPropertyNames()),
expectedPropNames)
self.assertEqual(outerSingleApplyAPIDef.GetDocumentation(),
"Test nested single apply API schema: outer schema")
# Apply the TestNestedInnerSingleApplyAPI to the same prim. This API
# is already included through the TestNestedOuterSingleApplyAPI.
outerSinglePrim.ApplyAPI(self.NestedInnerSingleApplyAPIType)
# The authored applied API schemas for the prim now contain both the
# applied APIs.
self.assertEqual(outerSinglePrim.GetPrimTypeInfo().GetTypeName(), '')
self.assertEqual(outerSinglePrim.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestNestedOuterSingleApplyAPI",
"TestNestedInnerSingleApplyAPI"])
# The composed applied API schemas are fully expanded from both authored
# APIs and will contain duplicates. We don't check for duplicates that
# occur because different authored APIs include the same applied schema
# even though we do remove duplicates when determining what API schemas
# a single API includes.
self.assertEqual(outerSinglePrim.GetTypeName(), '')
expectedAPISchemas = [
# API schemas from expanded TestNestedOuterSingleApplyAPI
"TestNestedOuterSingleApplyAPI",
"TestMultiApplyAPI:foo",
"TestNestedInnerSingleApplyAPI",
"TestSingleApplyAPI",
"TestMultiApplyAPI:bar",
# API schemas from expanded TestNestedInnerSingleApplyAPI (even
# though they're already in the list)
"TestNestedInnerSingleApplyAPI",
"TestSingleApplyAPI",
"TestMultiApplyAPI:bar"]
self.assertEqual(outerSinglePrim.GetAppliedSchemas(),
expectedAPISchemas)
# The properties list and attribute values haven't changed though
# because no actual new API schemas were added and the strength order
# of the APIs hasn't changed.
self.assertEqual(outerSinglePrim.GetPropertyNames(), expectedPropNames)
_VerifyAttrValues(outerSinglePrim, expectedAttrValues)
# Now let's swap the order of these two APIs so that the inner comes
# before the outer.
outerSinglePrim.RemoveAPI(self.NestedOuterSingleApplyAPIType)
outerSinglePrim.ApplyAPI(self.NestedOuterSingleApplyAPIType)
self.assertEqual(outerSinglePrim.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestNestedInnerSingleApplyAPI",
"TestNestedOuterSingleApplyAPI"])
# The order of the expanded API schemas has now changed too.
expectedAPISchemas = [
# API schemas from expanded TestNestedInnerSingleApplyAPI
"TestNestedInnerSingleApplyAPI",
"TestSingleApplyAPI",
"TestMultiApplyAPI:bar",
# API schemas from expanded TestNestedOuterSingleApplyAPI
"TestNestedOuterSingleApplyAPI",
"TestMultiApplyAPI:foo",
"TestNestedInnerSingleApplyAPI",
"TestSingleApplyAPI",
"TestMultiApplyAPI:bar"]
self.assertEqual(outerSinglePrim.GetAppliedSchemas(),
expectedAPISchemas)
# Once again, the properties list doesn't change.
self.assertEqual(outerSinglePrim.GetPropertyNames(), expectedPropNames)
# However, the attribute value for single:token_attr has changed
# because the override from TestNestedOuterSingleApplyAPI is no longer
# stronger than the value from TestSingleApplyAPI as the first instance
# of TestSingleApplyAPI now comes before it.
expectedAttrValues["single:token_attr"] = "bar"
_VerifyAttrValues(outerSinglePrim, expectedAttrValues)
def test_NestedMultiApplyAPISchemas(self):
"""
Tests the application of multiple apply API schemas that have nested
built-in API schemas
"""
stage = Usd.Stage.CreateInMemory()
# Simple helper for testing that a prim has expected attributes that
# resolve to expected values.
def _VerifyAttrValues(prim, expectedAttrValues):
values = {name : prim.GetAttribute(name).Get()
for name in expectedAttrValues.keys()}
self.assertEqual(values, expectedAttrValues)
# Add a prim with no type and apply the
# TestNestedInnerMultiApplyDerivedAPI using the instance "foo".
innerMultiPrim = stage.DefinePrim("/InnerMulti")
innerMultiPrim.ApplyAPI(self.NestedInnerMultiApplyDerivedAPIType, "foo")
# The only authored applied API schema for the prim is the "foo"
# instance of the applied TestNestedInnerMultiApplyDerivedAPI.
self.assertEqual(innerMultiPrim.GetPrimTypeInfo().GetTypeName(), '')
self.assertEqual(innerMultiPrim.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestNestedInnerMultiApplyDerivedAPI:foo"])
# The composed applied API schemas however also contain the
# the "foo" instance of TestNestedInnerMultiApplyBaseAPI as it is a
# built-in API of TestNestedInnerMultiApplyDerivedAPI
expectedAPISchemas = [
"TestNestedInnerMultiApplyDerivedAPI:foo",
"TestNestedInnerMultiApplyBaseAPI:foo"]
self.assertEqual(innerMultiPrim.GetAppliedSchemas(),
expectedAPISchemas)
# The prim "has" all these built-in APIs as well.
self.assertTrue(innerMultiPrim.HasAPI(self.NestedInnerMultiApplyDerivedAPIType))
self.assertTrue(innerMultiPrim.HasAPI(self.NestedInnerMultiApplyBaseAPIType))
self.assertTrue(innerMultiPrim.HasAPI(self.NestedInnerMultiApplyDerivedAPIType, "foo"))
self.assertTrue(innerMultiPrim.HasAPI(self.NestedInnerMultiApplyBaseAPIType, "foo"))
# Properties come from all composed built-in APIs
expectedPropNames = [
# Properties from TestNestedInnerMultiApplyDerivedAPI:foo
"innerMulti:foo:derived:int_attr",
# Properties from TestNestedInnerMultiApplyBaseAPI:foo
"innerMulti:foo:int_attr",
"innerMulti:foo:relationship",
"innerMulti:foo:token_attr"]
self.assertEqual(innerMultiPrim.GetPropertyNames(), expectedPropNames)
# Verify that the attribute fallback values come from the API schemas
# that define them. The attribute "innerMulti:foo:token_attr" is defined
# in TestNestedInnerMultiApplyDerivedAPI and overrides the attr fallback
# value defined in TestNestedInnerMultiApplyBaseAPI
expectedAttrValues = {
"innerMulti:foo:derived:int_attr" : 4,
"innerMulti:foo:int_attr" : 3,
"innerMulti:foo:token_attr" : "inner_derived"}
_VerifyAttrValues(innerMultiPrim, expectedAttrValues)
# Apply the TestNestedInnerMultiApplyDerivedAPI to the same prim again,
# now with the instance "bar"
innerMultiPrim.ApplyAPI(self.NestedInnerMultiApplyDerivedAPIType, "bar")
self.assertEqual(innerMultiPrim.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestNestedInnerMultiApplyDerivedAPI:foo",
"TestNestedInnerMultiApplyDerivedAPI:bar"])
# Now the same API schemas included for "foo" are also included for "bar"
expectedAPISchemas = [
"TestNestedInnerMultiApplyDerivedAPI:foo",
"TestNestedInnerMultiApplyBaseAPI:foo",
"TestNestedInnerMultiApplyDerivedAPI:bar",
"TestNestedInnerMultiApplyBaseAPI:bar"]
self.assertEqual(innerMultiPrim.GetAppliedSchemas(),
expectedAPISchemas)
# There are now also "bar" instances of all the same properties.
expectedPropNames = [
"innerMulti:bar:derived:int_attr",
"innerMulti:bar:int_attr",
"innerMulti:bar:relationship",
"innerMulti:bar:token_attr",
"innerMulti:foo:derived:int_attr",
"innerMulti:foo:int_attr",
"innerMulti:foo:relationship",
"innerMulti:foo:token_attr"]
self.assertEqual(innerMultiPrim.GetPropertyNames(), expectedPropNames)
# And the "bar" instances of the attributes have the same fallback
# values.
expectedAttrValues = {
"innerMulti:foo:derived:int_attr" : 4,
"innerMulti:foo:int_attr" : 3,
"innerMulti:foo:token_attr" : "inner_derived",
"innerMulti:bar:derived:int_attr" : 4,
"innerMulti:bar:int_attr" : 3,
"innerMulti:bar:token_attr" : "inner_derived"}
_VerifyAttrValues(innerMultiPrim, expectedAttrValues)
# Get the prim definition for the API schema and verify its applied
# API schemas and properties are template versions of the properties.
innerMultiApplyAPIDef = \
Usd.SchemaRegistry().FindAppliedAPIPrimDefinition(
"TestNestedInnerMultiApplyDerivedAPI")
self.assertTrue(innerMultiApplyAPIDef)
expectedAPISchemas = [
"TestNestedInnerMultiApplyDerivedAPI:__INSTANCE_NAME__",
"TestNestedInnerMultiApplyBaseAPI:__INSTANCE_NAME__"]
self.assertEqual(innerMultiApplyAPIDef.GetAppliedAPISchemas(),
expectedAPISchemas)
expectedPropNames = [
"innerMulti:__INSTANCE_NAME__:derived:int_attr",
"innerMulti:__INSTANCE_NAME__:int_attr",
"innerMulti:__INSTANCE_NAME__:relationship",
"innerMulti:__INSTANCE_NAME__:token_attr"]
self.assertEqual(sorted(innerMultiApplyAPIDef.GetPropertyNames()),
expectedPropNames)
self.assertEqual(innerMultiApplyAPIDef.GetDocumentation(),
"Test nested multi apply API schema: inner schema derived")
# Add a prim with no type and apply the TestNestedOuterMultiApplyAPI
# with the instance "foo".
outerMultiPrim = stage.DefinePrim("/OuterMulti")
outerMultiPrim.ApplyAPI(self.NestedOuterMultiApplyAPIType, "foo")
# The only authored applied API schema for the prim is the applied
# TestNestedOuterMultiApplyAPI:foo.
self.assertEqual(outerMultiPrim.GetPrimTypeInfo().GetTypeName(), '')
self.assertEqual(outerMultiPrim.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestNestedOuterMultiApplyAPI:foo"])
# TestNestedOuterMultiApplyAPI's definition includes
# TestNestedInnerMultiApplyDerivedAPI:builtin and
# TestNestedInnerMultiApplyDerivedAPI:outerMulti. Thus, the composed
# applied API schemas also contain both a "foo:builtin" and a
# "foo:outerMulti" instance of TestNestedInnerMultiApplyDerivedAPI,
# which in turn include "foo:builtin" and "foo:outerMulti" instances of
# TestNestedInnerMultiApplyBaseAPI. Since TestNestedOuterMultiApplyAPI
# also includes TestNestedInnerMultiApplyBaseAPI, a "foo" instance of it
# is included as well.
self.assertEqual(outerMultiPrim.GetTypeName(), '')
expectedAPISchemas = [
"TestNestedOuterMultiApplyAPI:foo",
"TestNestedInnerMultiApplyDerivedAPI:foo:builtin",
"TestNestedInnerMultiApplyBaseAPI:foo:builtin",
"TestNestedInnerMultiApplyDerivedAPI:foo:outerMulti",
"TestNestedInnerMultiApplyBaseAPI:foo:outerMulti",
"TestNestedInnerMultiApplyBaseAPI:foo"]
self.assertEqual(outerMultiPrim.GetAppliedSchemas(),
expectedAPISchemas)
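# In other words, when a multiple apply schema is included as a built-in
# of another multiple apply schema, the applied instance name ("foo" here)
# is prefixed onto the built-in's authored sub-instance name (e.g.
# "builtin" becomes "foo:builtin"), while same-name "inheritance" style
# built-ins simply reuse the applied instance name.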
# Properties come from all composed built-in APIs
expectedPropNames = sorted([
# Properties from TestNestedOuterMultiApplyAPI:foo
"outerMulti:foo:int_attr",
"outerMulti:foo:relationship",
"outerMulti:foo:token_attr",
# Properties from TestNestedInnerMultiApplyDerivedAPI:foo:builtin
"innerMulti:foo:builtin:derived:int_attr",
# Properties from TestNestedInnerMultiApplyBaseAPI:foo:builtin
"innerMulti:foo:builtin:int_attr",
"innerMulti:foo:builtin:relationship",
"innerMulti:foo:builtin:token_attr",
# Properties from TestNestedInnerMultiApplyDerivedAPI:foo:outerMulti
"innerMulti:foo:outerMulti:derived:int_attr",
# Properties from TestNestedInnerMultiApplyBaseAPI:foo:outerMulti
"innerMulti:foo:outerMulti:int_attr",
"innerMulti:foo:outerMulti:relationship",
"innerMulti:foo:outerMulti:token_attr",
# Properties from TestNestedInnerMultiApplyBaseAPI:foo
"innerMulti:foo:int_attr",
"innerMulti:foo:relationship",
"innerMulti:foo:token_attr"])
self.assertEqual(outerMultiPrim.GetPropertyNames(), expectedPropNames)
# Verify that the attribute fallback values come from the API schemas
# that define them. The "innerMulti:foo:XXX:token_attr" values
# from TestNestedInnerMultiApplyDerivedAPI override the values from
# TestNestedInnerMultiApplyBaseAPI. innerMulti:foo:token_attr uses
# the BaseAPI value since this instance is from the BaseAPI being
# included on its own.
expectedAttrValues = {
# Properties from TestNestedOuterMultiApplyAPI:foo
"outerMulti:foo:int_attr" : 5,
"outerMulti:foo:token_attr" : "outer",
# Properties from TestNestedInnerMultiApplyDerivedAPI:foo:builtin
"innerMulti:foo:builtin:derived:int_attr" : 4,
"innerMulti:foo:builtin:token_attr" : "inner_derived",
# Properties from TestNestedInnerMultiApplyBaseAPI:foo:builtin
"innerMulti:foo:builtin:int_attr" : 3,
# Properties from TestNestedInnerMultiApplyDerivedAPI:foo:outerMulti
"innerMulti:foo:outerMulti:derived:int_attr" : 4,
"innerMulti:foo:outerMulti:token_attr" : "inner_derived",
# Properties from TestNestedInnerMultiApplyBaseAPI:foo:outerMulti
"innerMulti:foo:outerMulti:int_attr" : 3,
# Properties from TestNestedInnerMultiApplyBaseAPI:foo
"innerMulti:foo:int_attr" : 3,
"innerMulti:foo:token_attr" : "inner_base"}
_VerifyAttrValues(outerMultiPrim, expectedAttrValues)
# Get the prim definition for the API schema and verify its applied
# API schemas and properties match what was imparted on the prim.
outerMultiApplyAPIDef = \
Usd.SchemaRegistry().FindAppliedAPIPrimDefinition(
"TestNestedOuterMultiApplyAPI")
self.assertTrue(outerMultiApplyAPIDef)
# Note that the __INSTANCE_NAME__ is always directly after the API
# schema name even when it is included as an encapsulated subinstance.
expectedAPISchemas = [
"TestNestedOuterMultiApplyAPI:__INSTANCE_NAME__",
"TestNestedInnerMultiApplyDerivedAPI:__INSTANCE_NAME__:builtin",
"TestNestedInnerMultiApplyBaseAPI:__INSTANCE_NAME__:builtin",
"TestNestedInnerMultiApplyDerivedAPI:__INSTANCE_NAME__:outerMulti",
"TestNestedInnerMultiApplyBaseAPI:__INSTANCE_NAME__:outerMulti",
"TestNestedInnerMultiApplyBaseAPI:__INSTANCE_NAME__"]
self.assertEqual(outerMultiApplyAPIDef.GetAppliedAPISchemas(),
expectedAPISchemas)
expectedPropNames = sorted([
# Properties from TestNestedOuterMultiApplyAPI
"outerMulti:__INSTANCE_NAME__:int_attr",
"outerMulti:__INSTANCE_NAME__:relationship",
"outerMulti:__INSTANCE_NAME__:token_attr",
# Properties from TestNestedInnerMultiApplyDerivedAPI:builtin
"innerMulti:__INSTANCE_NAME__:builtin:derived:int_attr",
# Properties from TestNestedInnerMultiApplyBaseAPI:builtin
"innerMulti:__INSTANCE_NAME__:builtin:int_attr",
"innerMulti:__INSTANCE_NAME__:builtin:relationship",
"innerMulti:__INSTANCE_NAME__:builtin:token_attr",
# Properties from TestNestedInnerMultiApplyDerivedAPI:outerMulti
"innerMulti:__INSTANCE_NAME__:outerMulti:derived:int_attr",
# Properties from TestNestedInnerMultiApplyBaseAPI:outerMulti
"innerMulti:__INSTANCE_NAME__:outerMulti:int_attr",
"innerMulti:__INSTANCE_NAME__:outerMulti:relationship",
"innerMulti:__INSTANCE_NAME__:outerMulti:token_attr",
# Properties from TestNestedInnerMultiApplyBaseAPI
"innerMulti:__INSTANCE_NAME__:int_attr",
"innerMulti:__INSTANCE_NAME__:relationship",
"innerMulti:__INSTANCE_NAME__:token_attr"])
self.assertEqual(sorted(outerMultiApplyAPIDef.GetPropertyNames()),
expectedPropNames)
self.assertEqual(outerMultiApplyAPIDef.GetDocumentation(),
"Test nested multi apply API schema: outer schema")
# Add a prim with no type and apply the
# TestNestedMultiApplyInSingleApplyAPI.
singleApplyPrim = stage.DefinePrim("/SingleApply")
singleApplyPrim.ApplyAPI(self.NestedMultiApplyInSingleApplyAPIType)
# The only authored applied API schema for the prim is the applied
# TestNestedMultiApplyInSingleApplyAPI.
self.assertEqual(singleApplyPrim.GetPrimTypeInfo().GetTypeName(), '')
self.assertEqual(singleApplyPrim.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestNestedMultiApplyInSingleApplyAPI"])
# TestNestedMultiApplyInSingleApplyAPI includes
# TestNestedOuterMultiApplyAPI:foo and
# TestNestedInnerMultiApplyDerivedAPI:bar so "foo" and "bar" instances
# of these multi apply schemas are fully expanded into the composed API
# schemas under TestNestedMultiApplyInSingleApplyAPI.
self.assertEqual(singleApplyPrim.GetTypeName(), '')
expectedAPISchemas = [
"TestNestedMultiApplyInSingleApplyAPI",
# Expanded from TestNestedOuterMultiApplyAPI:foo
"TestNestedOuterMultiApplyAPI:foo",
"TestNestedInnerMultiApplyDerivedAPI:foo:builtin",
"TestNestedInnerMultiApplyBaseAPI:foo:builtin",
"TestNestedInnerMultiApplyDerivedAPI:foo:outerMulti",
"TestNestedInnerMultiApplyBaseAPI:foo:outerMulti",
"TestNestedInnerMultiApplyBaseAPI:foo",
# Expanded from TestNestedInnerMultiApplyDerivedAPI:bar
"TestNestedInnerMultiApplyDerivedAPI:bar",
"TestNestedInnerMultiApplyBaseAPI:bar"]
self.assertEqual(singleApplyPrim.GetAppliedSchemas(),
expectedAPISchemas)
# Properties come from all composed built-in APIs
expectedPropNames = sorted([
# Properties from TestNestedMultiApplyInSingleApplyAPI
"int_attr",
# Properties from TestNestedOuterMultiApplyAPI:foo
"outerMulti:foo:int_attr",
"outerMulti:foo:relationship",
"outerMulti:foo:token_attr",
# Properties from TestNestedInnerMultiApplyDerivedAPI:foo:builtin
"innerMulti:foo:builtin:derived:int_attr",
# Properties from TestNestedInnerMultiApplyBaseAPI:foo:builtin
"innerMulti:foo:builtin:int_attr",
"innerMulti:foo:builtin:relationship",
"innerMulti:foo:builtin:token_attr",
# Properties from TestNestedInnerMultiApplyDerivedAPI:foo:outerMulti
"innerMulti:foo:outerMulti:derived:int_attr",
# Properties from TestNestedInnerMultiApplyBaseAPI:foo:outerMulti
"innerMulti:foo:outerMulti:int_attr",
"innerMulti:foo:outerMulti:relationship",
"innerMulti:foo:outerMulti:token_attr",
# Properties from TestNestedInnerMultiApplyBaseAPI:foo
"innerMulti:foo:int_attr",
"innerMulti:foo:relationship",
"innerMulti:foo:token_attr",
# Properties from TestNestedInnerMultiApplyDerivedAPI:bar
"innerMulti:bar:derived:int_attr",
# Properties from TestNestedInnerMultiApplyBaseAPI:bar
"innerMulti:bar:int_attr",
"innerMulti:bar:relationship",
"innerMulti:bar:token_attr"])
self.assertEqual(singleApplyPrim.GetPropertyNames(), expectedPropNames)
# Verify that the attribute fallback values come from the API schemas
# that define them.
expectedAttrValues = {
# Property only defined in TestNestedMultiApplyInSingleApplyAPI
"int_attr" : 10,
# Property from TestNestedInnerMultiApplyDerivedAPI:foo:builtin
# overridden in TestNestedMultiApplyInSingleApplyAPI
"innerMulti:foo:builtin:derived:int_attr" : 20,
# Property from TestNestedInnerMultiApplyBaseAPI:bar overridden in
# TestNestedMultiApplyInSingleApplyAPI
"innerMulti:bar:int_attr" : 30,
# Properties from TestNestedOuterMultiApplyAPI:foo
"outerMulti:foo:int_attr" : 5,
"outerMulti:foo:token_attr" : "outer",
# Properties from TestNestedInnerMultiApplyDerivedAPI:foo:builtin
"innerMulti:foo:builtin:token_attr" : "inner_derived",
# Properties from TestNestedInnerMultiApplyBaseAPI:foo:builtin
"innerMulti:foo:builtin:int_attr" : 3,
# Properties from TestNestedInnerMultiApplyDerivedAPI:foo:outerMulti
"innerMulti:foo:outerMulti:derived:int_attr" : 4,
"innerMulti:foo:outerMulti:token_attr" : "inner_derived",
# Properties from TestNestedInnerMultiApplyBaseAPI:foo:outerMulti
"innerMulti:foo:outerMulti:int_attr" : 3,
# Properties from TestNestedInnerMultiApplyBaseAPI:foo
"innerMulti:foo:int_attr" : 3,
"innerMulti:foo:token_attr" : "inner_base",
# Properties from TestNestedInnerMultiApplyDerivedAPI:bar
"innerMulti:bar:derived:int_attr" : 4,
# Properties from TestNestedInnerMultiApplyBaseAPI:bar
"innerMulti:bar:token_attr" : "inner_derived"}
_VerifyAttrValues(singleApplyPrim, expectedAttrValues)
# Get the prim definition for the API schema and verify its applied
# API schemas and properties match what was imparted on the prim. This
# is an exact match because this is a single apply API schema that contains
# specific instances of the multi apply API schemas.
singleApplyAPIDef = \
Usd.SchemaRegistry().FindAppliedAPIPrimDefinition(
"TestNestedMultiApplyInSingleApplyAPI")
self.assertTrue(singleApplyAPIDef)
self.assertEqual(singleApplyAPIDef.GetAppliedAPISchemas(),
expectedAPISchemas)
self.assertEqual(sorted(singleApplyAPIDef.GetPropertyNames()),
expectedPropNames)
self.assertEqual(singleApplyAPIDef.GetDocumentation(),
"Test single apply API with builtin nested multi apply API schema "
"instances")
def test_NestedCycleAPISchema(self):
"""
Tests the application of API schemas that have nested built-in API
schemas that would cause an inclusion cycle
"""
stage = Usd.Stage.CreateInMemory()
# Simple helper for testing that a prim has expected attributes that
# resolve to expected values.
def _VerifyAttrValues(prim, expectedAttrValues):
values = {name : prim.GetAttribute(name).Get()
for name in expectedAttrValues.keys()}
self.assertEqual(values, expectedAttrValues)
# Test behavior when nested API schemas form a cycle. In this example
# TestNestedCycle1API includes TestNestedCycle2API which includes
# TestNestedCycle3API which includes TestNestedCycle1API again. Create
# three prims, each applying one of the API schemas.
nestedCyclePrim1 = stage.DefinePrim("/Cycle1")
nestedCyclePrim2 = stage.DefinePrim("/Cycle2")
nestedCyclePrim3 = stage.DefinePrim("/Cycle3")
nestedCyclePrim1.ApplyAPI(self.NestedCycle1APIType)
nestedCyclePrim2.ApplyAPI(self.NestedCycle2APIType)
nestedCyclePrim3.ApplyAPI(self.NestedCycle3APIType)
# For each prim the authored applied API schemas for the prim are still
# only the single API that was applied.
self.assertEqual(nestedCyclePrim1.GetPrimTypeInfo().GetTypeName(), '')
self.assertEqual(nestedCyclePrim1.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestNestedCycle1API"])
self.assertEqual(nestedCyclePrim2.GetPrimTypeInfo().GetTypeName(), '')
self.assertEqual(nestedCyclePrim2.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestNestedCycle2API"])
self.assertEqual(nestedCyclePrim3.GetPrimTypeInfo().GetTypeName(), '')
self.assertEqual(nestedCyclePrim3.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestNestedCycle3API"])
# The composed applied API schemas all contain all three API schemas,
# however the order is different based on which schema was actually
# the authored one. Note that during API schema expansion we don't try
# to include the same API schema more than once which allows us to
# handle cycles in this way.
self.assertEqual(nestedCyclePrim1.GetTypeName(), '')
self.assertEqual(nestedCyclePrim1.GetAppliedSchemas(), [
"TestNestedCycle1API",
"TestNestedCycle2API",
"TestNestedCycle3API"])
self.assertEqual(nestedCyclePrim2.GetTypeName(), '')
self.assertEqual(nestedCyclePrim2.GetAppliedSchemas(), [
"TestNestedCycle2API",
"TestNestedCycle3API",
"TestNestedCycle1API"])
self.assertEqual(nestedCyclePrim3.GetTypeName(), '')
self.assertEqual(nestedCyclePrim3.GetAppliedSchemas(), [
"TestNestedCycle3API",
"TestNestedCycle1API",
"TestNestedCycle2API"])
# All three prims "has" all three built-in APIs as well.
self.assertTrue(nestedCyclePrim1.HasAPI(self.NestedCycle1APIType))
self.assertTrue(nestedCyclePrim1.HasAPI(self.NestedCycle2APIType))
self.assertTrue(nestedCyclePrim1.HasAPI(self.NestedCycle3APIType))
self.assertTrue(nestedCyclePrim2.HasAPI(self.NestedCycle1APIType))
self.assertTrue(nestedCyclePrim2.HasAPI(self.NestedCycle2APIType))
self.assertTrue(nestedCyclePrim2.HasAPI(self.NestedCycle3APIType))
self.assertTrue(nestedCyclePrim3.HasAPI(self.NestedCycle1APIType))
self.assertTrue(nestedCyclePrim3.HasAPI(self.NestedCycle2APIType))
self.assertTrue(nestedCyclePrim3.HasAPI(self.NestedCycle3APIType))
# All three prims have all the same properties since they all have all
# three API schemas applied.
expectedPropNames = [
"cycle1:token_attr",
"cycle2:token_attr",
"cycle3:token_attr",
"cycle:int_attr"]
self.assertEqual(nestedCyclePrim1.GetPropertyNames(), expectedPropNames)
self.assertEqual(nestedCyclePrim2.GetPropertyNames(), expectedPropNames)
self.assertEqual(nestedCyclePrim3.GetPropertyNames(), expectedPropNames)
# For the three token attributes, each is only defined in its respective
# API schema so they have the same fallback in each prim.
# 'cycle:int_attr' is defined in all three of the cycle API schemas but
# with different default values. Here the order of the applied schemas
# matters and the strongest applied API's value wins.
expectedAttrValues = {
"cycle1:token_attr" : "cycle1",
"cycle2:token_attr" : "cycle2",
"cycle3:token_attr" : "cycle3"}
expectedAttrValues["cycle:int_attr"] = 1
_VerifyAttrValues(nestedCyclePrim1, expectedAttrValues)
expectedAttrValues["cycle:int_attr"] = 2
_VerifyAttrValues(nestedCyclePrim2, expectedAttrValues)
expectedAttrValues["cycle:int_attr"] = 3
_VerifyAttrValues(nestedCyclePrim3, expectedAttrValues)
# Get the prim definitions for each of these API schemas and verify its
# applied API schemas and properties match what was imparted on the
# prims.
cycle1APIDef = Usd.SchemaRegistry().FindAppliedAPIPrimDefinition(
"TestNestedCycle1API")
self.assertTrue(cycle1APIDef)
self.assertEqual(cycle1APIDef.GetAppliedAPISchemas(),
["TestNestedCycle1API",
"TestNestedCycle2API",
"TestNestedCycle3API"])
self.assertEqual(sorted(cycle1APIDef.GetPropertyNames()),
expectedPropNames)
self.assertEqual(cycle1APIDef.GetDocumentation(),
"Test nested single apply API schema with a cycle #1")
cycle2APIDef = Usd.SchemaRegistry().FindAppliedAPIPrimDefinition(
"TestNestedCycle2API")
self.assertTrue(cycle2APIDef)
self.assertEqual(cycle2APIDef.GetAppliedAPISchemas(),
["TestNestedCycle2API",
"TestNestedCycle3API",
"TestNestedCycle1API"])
self.assertEqual(sorted(cycle2APIDef.GetPropertyNames()),
expectedPropNames)
self.assertEqual(cycle2APIDef.GetDocumentation(),
"Test nested single apply API schema with a cycle #2")
cycle3APIDef = Usd.SchemaRegistry().FindAppliedAPIPrimDefinition(
"TestNestedCycle3API")
self.assertTrue(cycle3APIDef)
self.assertEqual(cycle3APIDef.GetAppliedAPISchemas(),
["TestNestedCycle3API",
"TestNestedCycle1API",
"TestNestedCycle2API"])
self.assertEqual(sorted(cycle3APIDef.GetPropertyNames()),
expectedPropNames)
self.assertEqual(cycle3APIDef.GetDocumentation(),
"Test nested single apply API schema with a cycle #3")
def test_NestedMultiApplyCycleAPISchema(self):
"""
Tests the handling of inclusion cycles that are particular to how
built-in multiple apply schemas are processed.
"""
stage = Usd.Stage.CreateInMemory()
# Test behavior when nested API schemas form a cycle. In this example we
# have two types of cycles.
#
# The first is an "inheritance" cycle where
# TestNestedMultiApplyCycle1API includes TestNestedMultiApplyCycle3API
# directly, which includes TestNestedMultiApplyCycle2API, which in turn
# includes TestNestedMultiApplyCycle1API again. Since these are
# all "inheritance" style built-ins, these will all use the same
# instance name and can be handled gracefully, like we do with single
# apply built-in cycles, by always skipping duplicate API schemas when
# expanding.
#
# The second type of cycle comes from encapsulated sub-instance built-in
# API schemas, where TestNestedMultiApplyCycle1API includes a "cycle1"
# sub-instance of TestNestedMultiApplyCycle2API which includes a
# "cycle2" sub-instance of TestNestedMultiApplyCycle3API which then
# includes a "cycle3" sub-instance of TestNestedMultiApplyCycle1API.
# Because of the way instance names nest (e.g. applying Cycle1:foo
# will include Cycle2:foo:cycle1 will include Cycle3:foo:cycle1:cycle2
# will include Cycle1:foo:cycle1:cycle2:cycle3 and so on) these kinds of
# cycles will become infinite as every instance will be unique as we
# expand. These types of cycles must be broken by making sure that we
# don't add a built-in API if it is the same schema type as one of its
# direct ancestors during the depth first expansion of included API
# schemas.
nestedCyclePrim1 = stage.DefinePrim("/Cycle1")
nestedCyclePrim2 = stage.DefinePrim("/Cycle2")
nestedCyclePrim3 = stage.DefinePrim("/Cycle3")
nestedCyclePrim1.ApplyAPI(self.NestedMultiApplyCycle1APIType, "foo")
nestedCyclePrim2.ApplyAPI(self.NestedMultiApplyCycle2APIType, "foo")
nestedCyclePrim3.ApplyAPI(self.NestedMultiApplyCycle3APIType, "foo")
# For each prim the authored applied API schemas for the prim are still
# only the single API that was applied.
self.assertEqual(nestedCyclePrim1.GetPrimTypeInfo().GetTypeName(), '')
self.assertEqual(
nestedCyclePrim1.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestNestedMultiApplyCycle1API:foo"])
self.assertEqual(nestedCyclePrim2.GetPrimTypeInfo().GetTypeName(), '')
self.assertEqual(
nestedCyclePrim2.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestNestedMultiApplyCycle2API:foo"])
self.assertEqual(nestedCyclePrim3.GetPrimTypeInfo().GetTypeName(), '')
self.assertEqual(
nestedCyclePrim3.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestNestedMultiApplyCycle3API:foo"])
# The composed applied API schemas include all the possible instances
# of all three API schemas that can be added before the cycle detection
# stops the depth first traversal of the built-ins. The commented out
# entries represent the schemas that trip the cycle detection and
# therefore were not added when expanded, halting their branch of API
# schema expansion.
self.assertEqual(nestedCyclePrim1.GetTypeName(), '')
self.assertEqual(nestedCyclePrim1.GetAppliedSchemas(), [
"TestNestedMultiApplyCycle1API:foo",
"TestNestedMultiApplyCycle2API:foo:cycle1",
"TestNestedMultiApplyCycle3API:foo:cycle1:cycle2",
# "TestNestedMultiApplyCycle1API:foo:cycle1:cycle2:cycle3",
# "TestNestedMultiApplyCycle2API:foo:cycle1:cycle2",
# "TestNestedMultiApplyCycle1API:foo:cycle1"
"TestNestedMultiApplyCycle3API:foo",
# "TestNestedMultiApplyCycle1API:foo:cycle3",
"TestNestedMultiApplyCycle2API:foo",
# "TestNestedMultiApplyCycle3API:foo:cycle2",
# "TestNestedMultiApplyCycle1API:foo"
])
self.assertEqual(nestedCyclePrim2.GetTypeName(), '')
self.assertEqual(nestedCyclePrim2.GetAppliedSchemas(), [
"TestNestedMultiApplyCycle2API:foo",
"TestNestedMultiApplyCycle3API:foo:cycle2",
"TestNestedMultiApplyCycle1API:foo:cycle2:cycle3",
# "TestNestedMultiApplyCycle2API:foo:cycle2:cycle3:cycle1",
# "TestNestedMultiApplyCycle3API:foo:cycle2:cycle3",
# "TestNestedMultiApplyCycle2API:foo:cycle2"
"TestNestedMultiApplyCycle1API:foo",
# "TestNestedMultiApplyCycle2API:foo:cycle1",
"TestNestedMultiApplyCycle3API:foo",
# "TestNestedMultiApplyCycle1API:foo:cycle3",
# "TestNestedMultiApplyCycle2API:foo"
])
self.assertEqual(nestedCyclePrim3.GetTypeName(), '')
self.assertEqual(nestedCyclePrim3.GetAppliedSchemas(), [
"TestNestedMultiApplyCycle3API:foo",
"TestNestedMultiApplyCycle1API:foo:cycle3",
"TestNestedMultiApplyCycle2API:foo:cycle3:cycle1",
# "TestNestedMultiApplyCycle3API:foo:cycle3:cycle1:cycle2",
# "TestNestedMultiApplyCycle1API:foo:cycle3:cycle1",
# "TestNestedMultiApplyCycle3API:foo:cycle3"
"TestNestedMultiApplyCycle2API:foo",
# "TestNestedMultiApplyCycle3API:foo:cycle2",
"TestNestedMultiApplyCycle1API:foo",
# "TestNestedMultiApplyCycle2API:foo:cycle1",
# "TestNestedMultiApplyCycle3API:foo"
])
# Each of the three API schemas provides a "token_attr", so each of the
# prims has the prefixed "token_attr" for the API schemas that managed
# to be included for each one.
expectedPropNames = [
"cycle1:foo:token_attr",
"cycle2:foo:cycle1:token_attr",
"cycle2:foo:token_attr",
"cycle3:foo:cycle1:cycle2:token_attr",
"cycle3:foo:token_attr"
]
self.assertEqual(nestedCyclePrim1.GetPropertyNames(), expectedPropNames)
expectedPropNames = [
"cycle1:foo:cycle2:cycle3:token_attr",
"cycle1:foo:token_attr",
"cycle2:foo:token_attr",
"cycle3:foo:cycle2:token_attr",
"cycle3:foo:token_attr"
]
self.assertEqual(nestedCyclePrim2.GetPropertyNames(), expectedPropNames)
expectedPropNames = [
"cycle1:foo:cycle3:token_attr",
"cycle1:foo:token_attr",
"cycle2:foo:cycle3:cycle1:token_attr",
"cycle2:foo:token_attr",
"cycle3:foo:token_attr"
]
self.assertEqual(nestedCyclePrim3.GetPropertyNames(), expectedPropNames)
# Get the prim definitions for each of these API schemas and verify its
# applied API schemas and properties match what was imparted on the
# prims.
cycle1APIDef = Usd.SchemaRegistry().FindAppliedAPIPrimDefinition(
"TestNestedMultiApplyCycle1API")
self.assertTrue(cycle1APIDef)
self.assertEqual(cycle1APIDef.GetAppliedAPISchemas(),
["TestNestedMultiApplyCycle1API:__INSTANCE_NAME__",
"TestNestedMultiApplyCycle2API:__INSTANCE_NAME__:cycle1",
"TestNestedMultiApplyCycle3API:__INSTANCE_NAME__:cycle1:cycle2",
"TestNestedMultiApplyCycle3API:__INSTANCE_NAME__",
"TestNestedMultiApplyCycle2API:__INSTANCE_NAME__"])
self.assertEqual(sorted(cycle1APIDef.GetPropertyNames()),
["cycle1:__INSTANCE_NAME__:token_attr",
"cycle2:__INSTANCE_NAME__:cycle1:token_attr",
"cycle2:__INSTANCE_NAME__:token_attr",
"cycle3:__INSTANCE_NAME__:cycle1:cycle2:token_attr",
"cycle3:__INSTANCE_NAME__:token_attr"])
cycle2APIDef = Usd.SchemaRegistry().FindAppliedAPIPrimDefinition(
"TestNestedMultiApplyCycle2API")
self.assertTrue(cycle2APIDef)
self.assertEqual(cycle2APIDef.GetAppliedAPISchemas(),
["TestNestedMultiApplyCycle2API:__INSTANCE_NAME__",
"TestNestedMultiApplyCycle3API:__INSTANCE_NAME__:cycle2",
"TestNestedMultiApplyCycle1API:__INSTANCE_NAME__:cycle2:cycle3",
"TestNestedMultiApplyCycle1API:__INSTANCE_NAME__",
"TestNestedMultiApplyCycle3API:__INSTANCE_NAME__"])
self.assertEqual(sorted(cycle2APIDef.GetPropertyNames()),
["cycle1:__INSTANCE_NAME__:cycle2:cycle3:token_attr",
"cycle1:__INSTANCE_NAME__:token_attr",
"cycle2:__INSTANCE_NAME__:token_attr",
"cycle3:__INSTANCE_NAME__:cycle2:token_attr",
"cycle3:__INSTANCE_NAME__:token_attr"])
cycle3APIDef = Usd.SchemaRegistry().FindAppliedAPIPrimDefinition(
"TestNestedMultiApplyCycle3API")
self.assertTrue(cycle3APIDef)
self.assertEqual(cycle3APIDef.GetAppliedAPISchemas(),
["TestNestedMultiApplyCycle3API:__INSTANCE_NAME__",
"TestNestedMultiApplyCycle1API:__INSTANCE_NAME__:cycle3",
"TestNestedMultiApplyCycle2API:__INSTANCE_NAME__:cycle3:cycle1",
"TestNestedMultiApplyCycle2API:__INSTANCE_NAME__",
"TestNestedMultiApplyCycle1API:__INSTANCE_NAME__"])
self.assertEqual(sorted(cycle3APIDef.GetPropertyNames()),
["cycle1:__INSTANCE_NAME__:cycle3:token_attr",
"cycle1:__INSTANCE_NAME__:token_attr",
"cycle2:__INSTANCE_NAME__:cycle3:cycle1:token_attr",
"cycle2:__INSTANCE_NAME__:token_attr",
"cycle3:__INSTANCE_NAME__:token_attr"])
def test_ConcreteTypeWithBuiltinNestedAPISchemas(self):
"""
Tests a concrete schema type with built-in API schemas that include
other API schemas.
"""
stage = Usd.Stage.CreateInMemory()
# Simple helper for testing that a prim has expected attributes that
# resolve to expected values.
def _VerifyAttrValues(prim, expectedAttrValues):
values = {name : prim.GetAttribute(name).Get()
for name in expectedAttrValues.keys()}
self.assertEqual(values, expectedAttrValues)
# Test a typed prim whose concrete typed schema has built-in API schemas
# that nest other API schemas.
typedPrim = stage.DefinePrim(
"/TypedPrim", "TestWithBuiltinNestedAppliedSchema")
# The prim has a type but no authored API schemas.
self.assertEqual(typedPrim.GetPrimTypeInfo().GetTypeName(),
"TestWithBuiltinNestedAppliedSchema")
self.assertEqual(typedPrim.GetPrimTypeInfo().GetAppliedAPISchemas(), [])
# The composed API schemas are fully expanded from the two built-in API
# schemas of the TestWithBuiltinNestedAppliedSchema type.
expectedAPISchemas = [
# Expanded API schemas from built-in TestNestedOuterSingleApplyAPI
"TestNestedOuterSingleApplyAPI",
"TestMultiApplyAPI:foo",
"TestNestedInnerSingleApplyAPI",
"TestSingleApplyAPI",
"TestMultiApplyAPI:bar",
# Expanded API schemas from built-in TestNestedCycle1API
"TestNestedCycle1API",
"TestNestedCycle2API",
"TestNestedCycle3API"]
self.assertEqual(typedPrim.GetAppliedSchemas(),
expectedAPISchemas)
# Properties come from the type and all composed built-in APIs
expectedPropNames = [
# Properties from expanded built-in TestNestedCycle1API
"cycle1:token_attr",
"cycle2:token_attr",
"cycle3:token_attr",
"cycle:int_attr",
# Properties from expanded built-in TestNestedOuterSingleApplyAPI
"innerSingle:int_attr",
"innerSingle:relationship",
"innerSingle:token_attr",
"multi:bar:bool_attr",
"multi:bar:relationship",
"multi:bar:token_attr",
"multi:foo:bool_attr",
"multi:foo:relationship",
"multi:foo:token_attr",
"outerSingle:int_attr",
"outerSingle:relationship",
"outerSingle:token_attr",
"single:bool_attr",
"single:relationship",
"single:token_attr",
# Properties from the prim type TestWithBuiltinNestedAppliedSchema
"testAttr",
"testRel"]
self.assertEqual(typedPrim.GetPropertyNames(), expectedPropNames)
# Get the prim definition for the concrete typed schema and verify its
# applied API schemas and properties match what was imparted on the
# prim.
typedPrimDef = \
Usd.SchemaRegistry().FindConcretePrimDefinition(
"TestWithBuiltinNestedAppliedSchema")
self.assertTrue(typedPrimDef)
self.assertEqual(typedPrimDef.GetAppliedAPISchemas(),
expectedAPISchemas)
self.assertEqual(sorted(typedPrimDef.GetPropertyNames()),
expectedPropNames)
self.assertEqual(typedPrimDef.GetDocumentation(),
"Test with built-in nested API schemas")
@unittest.skipIf(Tf.GetEnvSetting('USD_DISABLE_AUTO_APPLY_API_SCHEMAS'),
"Auto apply API schemas are disabled")
def test_APISchemasAutoAppliedToAPISchemas(self):
"""
Tests the behaviors of API schemas that are auto applied to other API
schemas.
"""
stage = Usd.Stage.CreateInMemory()
# Define a prim with an empty type name and apply TestAutoAppliedToAPI.
# TestAutoAppliedToAPI includes other API schemas through a combination
# of built-in APIs and auto applied APIs.
prim = stage.DefinePrim("/Prim")
prim.ApplyAPI(self.AutoAppliedToAPIType)
self.assertEqual(prim.GetTypeName(), '')
self.assertEqual(prim.GetAppliedSchemas(), [
# Authored applied API
"TestAutoAppliedToAPI",
# Built-in API of 'TestAutoAppliedToAPI'
"TestMultiApplyAPI:builtin",
# Defined in plugin metadata that 'TestMultiApplyAPI:autoFoo' auto
# applies to 'TestAutoAppliedToAPI'
"TestMultiApplyAPI:autoFoo",
# 'TestSingleApplyAPI' defines in its schema def that it auto
# applies to 'TestAutoAppliedToAPI'
"TestSingleApplyAPI"])
self.assertTrue(prim.HasAPI(self.AutoAppliedToAPIType))
self.assertTrue(prim.HasAPI(self.MultiApplyAPIType, "builtin"))
self.assertTrue(prim.HasAPI(self.MultiApplyAPIType, "autoFoo"))
self.assertTrue(prim.HasAPI(self.SingleApplyAPIType))
        # Prim's authored type is empty and its authored API schemas are just
        # the single authored schema.
self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), '')
self.assertEqual(prim.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestAutoAppliedToAPI"])
# Prim's built-in properties come from all of the applied API schemas.
self.assertEqual(prim.GetPropertyNames(), [
"multi:autoFoo:bool_attr",
"multi:autoFoo:relationship",
"multi:autoFoo:token_attr",
"multi:builtin:bool_attr",
"multi:builtin:relationship",
"multi:builtin:token_attr",
"single:bool_attr",
"single:relationship",
"single:token_attr",
"testAttr",
"testRel"])
# Define a prim with an empty type name and apply
# TestNestedAutoAppliedToAPI.
# TestAutoAppliedToAPI auto applies to TestNestedAutoAppliedToAPI and
# brings with it all of the API schemas that are built-in to it and auto
# applied to it.
prim = stage.DefinePrim("/Prim2")
prim.ApplyAPI(self.NestedAutoAppliedToAPIType)
self.assertEqual(prim.GetTypeName(), '')
self.assertEqual(prim.GetAppliedSchemas(), [
# Authored applied API
"TestNestedAutoAppliedToAPI",
# Built-in API of 'TestNestedAutoAppliedToAPI'
"TestMultiApplyAPI:foo",
# 'TestAutoAppliedToAPI' defines in its schema def that it auto
# applies to 'TestNestedAutoAppliedToAPI'
"TestAutoAppliedToAPI",
# Built-in API of 'TestAutoAppliedToAPI'
"TestMultiApplyAPI:builtin",
# Defined in plugin metadata that 'TestMultiApplyAPI:autoFoo' auto
# applies to 'TestAutoAppliedToAPI'
"TestMultiApplyAPI:autoFoo",
# 'TestSingleApplyAPI' defines in its schema def that it auto
# applies to 'TestAutoAppliedToAPI'
"TestSingleApplyAPI"])
self.assertTrue(prim.HasAPI(self.NestedAutoAppliedToAPIType))
self.assertTrue(prim.HasAPI(self.AutoAppliedToAPIType))
self.assertTrue(prim.HasAPI(self.MultiApplyAPIType, "foo"))
self.assertTrue(prim.HasAPI(self.MultiApplyAPIType, "builtin"))
self.assertTrue(prim.HasAPI(self.MultiApplyAPIType, "autoFoo"))
self.assertTrue(prim.HasAPI(self.SingleApplyAPIType))
        # Prim's authored type is empty and its authored API schemas are just
        # the single authored schema.
self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), '')
self.assertEqual(prim.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestNestedAutoAppliedToAPI"])
# Prim's built-in properties come from all of the applied API schemas.
self.assertEqual(prim.GetPropertyNames(), [
"multi:autoFoo:bool_attr",
"multi:autoFoo:relationship",
"multi:autoFoo:token_attr",
"multi:builtin:bool_attr",
"multi:builtin:relationship",
"multi:builtin:token_attr",
"multi:foo:bool_attr",
"multi:foo:relationship",
"multi:foo:token_attr",
"single:bool_attr",
"single:relationship",
"single:token_attr",
"testAttr",
"testRel"])
# Define a prim with type name TestNestedAutoAppliedToAPIAppliedToPrim.
# TestNestedAutoAppliedToAPI is defined to auto apply to this prim type
# and brings with it all of the API schemas that are built-in to it and
# auto applied to it.
prim = stage.DefinePrim("/Prim3",
"TestNestedAutoAppliedToAPIAppliedToPrim")
self.assertEqual(prim.GetTypeName(),
'TestNestedAutoAppliedToAPIAppliedToPrim')
self.assertEqual(prim.GetAppliedSchemas(), [
# 'TestNestedAutoAppliedToAPI' defines in its schema def that it
# auto applies to 'TestNestedAutoAppliedToAPIAppliedToPrim'
"TestNestedAutoAppliedToAPI",
# Built-in API of 'TestNestedAutoAppliedToAPI'
"TestMultiApplyAPI:foo",
# 'TestAutoAppliedToAPI' defines in its schema def that it auto
# applies to 'TestNestedAutoAppliedToAPI'
"TestAutoAppliedToAPI",
# Built-in API of 'TestAutoAppliedToAPI'
"TestMultiApplyAPI:builtin",
# Defined in plugin metadata that 'TestMultiApplyAPI:autoFoo' auto
# applies to 'TestAutoAppliedToAPI'
"TestMultiApplyAPI:autoFoo",
# 'TestSingleApplyAPI' defines in its schema def that it auto
# applies to 'TestAutoAppliedToAPI'
"TestSingleApplyAPI"])
        # Prim's authored applied API schemas are empty as the API schemas are
        # part of the type (through auto apply).
self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(),
'TestNestedAutoAppliedToAPIAppliedToPrim')
self.assertEqual(prim.GetPrimTypeInfo().GetAppliedAPISchemas(), [])
# Prim's built-in properties come from all of the applied API schemas.
self.assertTrue(prim.HasAPI(self.NestedAutoAppliedToAPIType))
self.assertTrue(prim.HasAPI(self.AutoAppliedToAPIType))
self.assertTrue(prim.HasAPI(self.MultiApplyAPIType, "foo"))
self.assertTrue(prim.HasAPI(self.MultiApplyAPIType, "builtin"))
self.assertTrue(prim.HasAPI(self.MultiApplyAPIType, "autoFoo"))
self.assertTrue(prim.HasAPI(self.SingleApplyAPIType))
self.assertEqual(prim.GetPropertyNames(), [
"multi:autoFoo:bool_attr",
"multi:autoFoo:relationship",
"multi:autoFoo:token_attr",
"multi:builtin:bool_attr",
"multi:builtin:relationship",
"multi:builtin:token_attr",
"multi:foo:bool_attr",
"multi:foo:relationship",
"multi:foo:token_attr",
"single:bool_attr",
"single:relationship",
"single:token_attr",
"testAttr",
"testRel"])
@unittest.skipIf(not Tf.GetEnvSetting('USD_DISABLE_AUTO_APPLY_API_SCHEMAS'),
"Auto apply API schemas are NOT disabled")
def test_APISchemasAutoAppliedToAPISchemas_AutoApplyDisabled(self):
"""
Tests the behaviors of API schemas that are auto applied to other API
schemas.
"""
stage = Usd.Stage.CreateInMemory()
# Define a prim with an empty type name and apply TestAutoAppliedToAPI.
        # TestAutoAppliedToAPI includes other API schemas through a combination
        # of built-in APIs and auto applied APIs. The auto applied schemas are
        # disabled in this test case.
prim = stage.DefinePrim("/Prim")
prim.ApplyAPI(self.AutoAppliedToAPIType)
self.assertEqual(prim.GetTypeName(), '')
self.assertEqual(prim.GetAppliedSchemas(), [
# Authored applied API
"TestAutoAppliedToAPI",
# Built-in API of 'TestAutoAppliedToAPI'
"TestMultiApplyAPI:builtin"
# 'TestMultiApplyAPI:autoFoo' and 'TestSingleApplyAPI' would be
# auto applied so they do not show up when auto apply is disabled
])
self.assertTrue(prim.HasAPI(self.AutoAppliedToAPIType))
self.assertTrue(prim.HasAPI(self.MultiApplyAPIType, "builtin"))
self.assertFalse(prim.HasAPI(self.MultiApplyAPIType, "autoFoo"))
self.assertFalse(prim.HasAPI(self.SingleApplyAPIType))
        # Prim's authored type is empty and its authored API schemas are just
        # the single authored schema.
self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), '')
self.assertEqual(prim.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestAutoAppliedToAPI"])
# Prim's built-in properties come from all of the applied API schemas.
self.assertEqual(prim.GetPropertyNames(), [
"multi:builtin:bool_attr",
"multi:builtin:relationship",
"multi:builtin:token_attr",
"testAttr",
"testRel"])
# Define a prim with an empty type name and apply
# TestNestedAutoAppliedToAPI.
        # TestAutoAppliedToAPI auto applies to TestNestedAutoAppliedToAPI but
        # auto apply is disabled in this test case.
prim = stage.DefinePrim("/Prim2")
prim.ApplyAPI(self.NestedAutoAppliedToAPIType)
self.assertEqual(prim.GetTypeName(), '')
self.assertEqual(prim.GetAppliedSchemas(), [
# Authored applied API
"TestNestedAutoAppliedToAPI",
# Built-in API of 'TestNestedAutoAppliedToAPI'
"TestMultiApplyAPI:foo",
            # 'TestAutoAppliedToAPI' would be auto applied so it doesn't show up
# when auto apply is disabled, nor do any of the API schemas that
# would be included by it.
])
self.assertTrue(prim.HasAPI(self.NestedAutoAppliedToAPIType))
self.assertFalse(prim.HasAPI(self.AutoAppliedToAPIType))
self.assertTrue(prim.HasAPI(self.MultiApplyAPIType, "foo"))
self.assertFalse(prim.HasAPI(self.MultiApplyAPIType, "builtin"))
self.assertFalse(prim.HasAPI(self.MultiApplyAPIType, "autoFoo"))
self.assertFalse(prim.HasAPI(self.SingleApplyAPIType))
        # Prim's authored type is empty and its authored API schemas are just
        # the single authored schema.
self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(), '')
self.assertEqual(prim.GetPrimTypeInfo().GetAppliedAPISchemas(),
["TestNestedAutoAppliedToAPI"])
# Prim's built-in properties come from all of the applied API schemas.
self.assertEqual(prim.GetPropertyNames(), [
"multi:foo:bool_attr",
"multi:foo:relationship",
"multi:foo:token_attr"])
# Define a prim with type name TestNestedAutoAppliedToAPIAppliedToPrim.
        # TestNestedAutoAppliedToAPI is defined to auto apply to this prim type
        # but auto apply is disabled in this test case.
prim = stage.DefinePrim("/Prim3",
"TestNestedAutoAppliedToAPIAppliedToPrim")
self.assertEqual(prim.GetTypeName(),
'TestNestedAutoAppliedToAPIAppliedToPrim')
# 'TestNestedAutoAppliedToAPI' would be auto applied so it doesn't show
# up when auto apply is disabled, nor do any of the API schemas that
# would be included by it.
self.assertEqual(prim.GetAppliedSchemas(), [])
self.assertEqual(prim.GetPrimTypeInfo().GetTypeName(),
'TestNestedAutoAppliedToAPIAppliedToPrim')
self.assertEqual(prim.GetPrimTypeInfo().GetAppliedAPISchemas(), [])
self.assertEqual(prim.GetPropertyNames(), [])
@unittest.skipIf(Tf.GetEnvSetting('USD_DISABLE_AUTO_APPLY_API_SCHEMAS'),
"Auto apply API schemas are disabled")
def test_PropertyTypeConflicts(self):
"""
Test the resolution of property type conflicts between prim type and
API schema prim definitions when API schemas are applied to prims.
"""
stage = Usd.Stage.CreateInMemory()
# Helper for verifying the attribute types and computed values of
# any arbitrary set of attributes on the prim. The arguments are the
# prim followed by any number of keyword arguments of the form:
# <attrName> = (<attrTypeNameStr>, <attrValue>, <docStr>)
# For each keyword arg, it verifies that the prim has an attribute
# named <attrName> whose typeName computes to <attrTypeNameStr> and
        # computed value computes to <attrValue>. Also verifies that the
# attribute's documentation matches <docStr>
def _VerifyAttrTypes(prim, **kwargs):
for name, (attrTypeNameStr, attrValue, docStr) in kwargs.items():
attr = prim.GetAttribute(name)
self.assertEqual(attr.GetTypeName(), attrTypeNameStr)
self.assertEqual(attr.Get(), attrValue)
self.assertEqual(attr.GetDocumentation(), docStr)
# Helper for verifying that an arbitrary set of properties is or isn't
# a relationship. The arguments are the prim followed by any number of
# keyword arguments of the form:
# <propertyName> = (<isRelationship>, <docStr>)
# For each keyword arg, it verifies that the prim has a property
# named <propertyName> that is a relationship iff <isRelationship> is
# True. Also verifies that the property's documentation matches <docStr>
def _VerifyIsRel(prim, **kwargs):
for name, (isRelationship, docStr) in kwargs.items():
prop = prim.GetProperty(name)
self.assertTrue(prop)
if isRelationship:
self.assertTrue(prim.GetRelationship(name))
else:
self.assertFalse(prim.GetRelationship(name))
self.assertEqual(prop.GetDocumentation(), docStr)
# We've defined 3 API schema types for applying directly to a prim
authoredOneAPIName = "TestPropTypeConflictAuthoredOneAPI"
authoredTwoAPIName = "TestPropTypeConflictAuthoredTwoAPI"
nestedAPIName = "TestPropTypeConflictNestedAuthoredAPI"
        # We also have a typed prim schema for this test case that includes a
        # separate built-in API schema type.
conflictPrimTypeName = "TestPropTypeConflictsPrim"
builtinAPIName = "TestPropTypeConflictBuiltinAPI"
        # Each of the defined schema types above uses a single doc string for
        # all of its properties, which we can use to help verify which schemas
        # the property definitions come from.
authoredOneAPIDocStr = "From TestPropTypeConflictAuthoredOneAPI"
authoredTwoAPIDocStr = "From TestPropTypeConflictAuthoredTwoAPI"
nestedAPIDocStr = "From TestPropTypeConflictNestedAuthoredAPI"
conflictPrimTypeDocStr = "From TestPropTypeConflictsPrim"
builtinAPIDocStr = "From TestPropTypeConflictBuiltinAPI"
# Test 1: Prim with no type name; apply the authoredOneAPI and
# authoredTwoAPI schemas in that strength order.
untypedPrim = stage.DefinePrim("/UntypedPrim")
untypedPrim.AddAppliedSchema(authoredOneAPIName)
untypedPrim.AddAppliedSchema(authoredTwoAPIName)
self.assertEqual(untypedPrim.GetAppliedSchemas(),
[authoredOneAPIName,
authoredTwoAPIName])
# Only the authoredOneAPI defines attr1 and attr2
# Only the authoredTwoAPI defines attr3 and attr4
        # Both define attr5 and attr6, so authoredOneAPI's versions of those
# attributes are used since it's stronger.
_VerifyAttrTypes(untypedPrim,
attr1 = ("int", 0, authoredOneAPIDocStr),
attr2 = ("double", 0.0, authoredOneAPIDocStr),
attr3 = ("int", 10, authoredTwoAPIDocStr),
attr4 = ("string", "foo", authoredTwoAPIDocStr),
attr5 = ("point3f", Gf.Vec3f(0, 0, 0), authoredOneAPIDocStr),
attr6 = ("int", 20, authoredOneAPIDocStr))
# Both define rel1 and rel2, but since authoredOneAPI is stronger, it wins
# and only rel2 is a relationship.
_VerifyIsRel(untypedPrim,
rel1 = (False, authoredOneAPIDocStr),
rel2 = (True, authoredOneAPIDocStr))
        # Test 2: Prim with no type name; apply the authoredTwoAPI and
        # authoredOneAPI schemas in that strength order. This is the reverse
        # order of Test 1.
untypedPrim = stage.DefinePrim("/UntypedPrim2")
untypedPrim.AddAppliedSchema(authoredTwoAPIName)
untypedPrim.AddAppliedSchema(authoredOneAPIName)
self.assertEqual(untypedPrim.GetAppliedSchemas(),
[authoredTwoAPIName,
authoredOneAPIName])
# Only the authoredOneAPI defines attr1 and attr2 (same as Test 1)
# Only the authoredTwoAPI defines attr3 and attr4 (same as Test 1)
# Both define attr5 and attr6, but now authoredTwoAPI is stronger so
        # its versions of those attributes are used. Note that this leads to a
# different type and default value for attr5 and just a different
# default value for attr6.
_VerifyAttrTypes(untypedPrim,
attr1 = ("int", 0, authoredOneAPIDocStr),
attr2 = ("double", 0.0, authoredOneAPIDocStr),
attr3 = ("int", 10, authoredTwoAPIDocStr),
attr4 = ("string", "foo", authoredTwoAPIDocStr),
attr5 = ("float3", Gf.Vec3f(1, 2, 3), authoredTwoAPIDocStr),
attr6 = ("int", 10, authoredTwoAPIDocStr))
# Both define rel1 and rel2, but now since authoredTwoAPI is stronger,
# it wins and only rel1 is a relationship (opposite of Test 1).
_VerifyIsRel(untypedPrim,
rel1 = (True, authoredTwoAPIDocStr),
rel2 = (False, authoredTwoAPIDocStr))
# Test 3: Prim with type name set to the conflictPrimType; no authored
# applied API schemas. The conflictPrimType has a single built-in API
# schema builtinAPI.
prim = stage.DefinePrim("/TypedPrim", conflictPrimTypeName)
self.assertEqual(prim.GetAppliedSchemas(),
[builtinAPIName])
# The conflictPrimType schema defines all of attr1-6 attributes EXCEPT
# attr4. It also defines rel1 and rel2. So outside of attr4, the typed
        # schema's versions of these properties are used.
# For attr4, it is defined in the builtinAPI schema so that attribute's
# opinion comes from the builtinAPI. Note that builtinAPI does define
# attr2, rel1, and rel2 (all with different types than the
# conflictPrimType schema) but the stronger conflictPrimType still wins
# over its built-in APIs.
_VerifyAttrTypes(prim,
attr1 = ("int", 1, conflictPrimTypeDocStr),
attr2 = ("int", 2, conflictPrimTypeDocStr),
attr3 = ("int", 3, conflictPrimTypeDocStr),
attr4 = ("int", 4, builtinAPIDocStr),
attr5 = ("color3f", Gf.Vec3f(1, 1, 1), conflictPrimTypeDocStr),
attr6 = ("int", 6, conflictPrimTypeDocStr))
_VerifyIsRel(prim,
rel1 = (True, conflictPrimTypeDocStr),
rel2 = (False, conflictPrimTypeDocStr))
# Test 4: Take the same prim from Test 3 above, with type name set to
# the conflictPrimType, and author authoredOneAPI and authoredTwoAPI
# applied schemas in that strength order (like in Test 1).
prim.AddAppliedSchema(authoredOneAPIName)
prim.AddAppliedSchema(authoredTwoAPIName)
self.assertEqual(prim.GetAppliedSchemas(),
[authoredOneAPIName,
authoredTwoAPIName,
builtinAPIName])
_VerifyAttrTypes(prim,
# attr1 is defined in authoredOneAPI which is now the strongest
# opinion. Since its type name matches attr1's type name in the prim
# type's definition, it can be used for attr1 in the composed prim
# definition.
attr1 = ("int", 0, authoredOneAPIDocStr),
# attr2 is defined in authoredOneAPI which would be the strongest
# opinion. However, its type name is "double" which doesn't match
# the existing prim type's definition of the attribute which has it
# as "int". We use the prim type's definition for this attribute
# ignoring the API schema.
attr2 = ("int", 2, conflictPrimTypeDocStr),
# attr3 is defined in authoredTwoAPI (and not authoredOneAPI) which
# makes it the strongest opinion. Since its type name matches
# attr3's type name in the prim type's definition, it can be used
# for attr3 in the composed prim definition.
attr3 = ("int", 10, authoredTwoAPIDocStr),
# attr4 is defined in authoredTwoAPI (and not authoredOneAPI) which
# would be the strongest opinion. However, its type name is "string"
# which doesn't match the existing prim type's definition of the
# attribute which has it as "int". We use the prim type's definition
# for this attribute ignoring the API schema. Note the prim type's
# definition actually gets its opinion on attr4 from the builtinAPI
# (as the typed schema itself doesn't define attr4). The builtinAPI
            # attr4 opinion is part of the composed prim type definition which,
            # as the prim type, wins over any applied API schemas authored on
            # the prim.
attr4 = ("int", 4, builtinAPIDocStr),
# attr5 is defined in both authoredOneAPI and authoredTwoAPI which
            # makes authoredOneAPI the strongest opinion. However, its type of
            # "point3f" doesn't match the prim type's opinion of the type name
            # "color3f". Even though the default values for these types are
            # stored as GfVec3f, the type name mismatch means we still use the
# prim type's version in the composed prim definition.
attr5 = ("color3f", Gf.Vec3f(1, 1, 1), conflictPrimTypeDocStr),
# attr6 is defined in both authoredOneAPI and authoredTwoAPI which
            # makes authoredOneAPI the strongest opinion. And since its type
# name matches the prim type definition's attr6 type name, we use
# the attr6 from authoredOneAPI.
attr6 = ("int", 20, authoredOneAPIDocStr))
# For rel1 and rel2, authoredOneAPI, authoredTwoAPI and the prim type
# all define them both. Both authoredTwoAPI and the prim type definition
# define rel1 as a relationship and rel2 as an attribute, but
# authoredOneAPI defines the reverse with rel2 as the relationship.
# Since authoredOneAPI is stronger than authoredTwoAPI, its opinions for
# these properties override the ones from authoredTwoAPI. However, since
        # the property types of these strongest opinions don't match the
# property types from the prim type definition, they fall back to the
# prim type definition's opinions of these properties in the composed
# prim definition.
_VerifyIsRel(prim,
rel1 = (True, conflictPrimTypeDocStr),
rel2 = (False, conflictPrimTypeDocStr))
# Test 5: Prim with no type name; apply the nestedAPI schema only to
# the prim. The nestedAPI has authoredOneAPI included as a built-in and
# authoredTwoAPI auto-applied to it. So all three schemas end up as
# applied schemas on the prim.
nestedAPIPrim = stage.DefinePrim("/NestedAPIPrim")
nestedAPIPrim.AddAppliedSchema(nestedAPIName)
self.assertEqual(nestedAPIPrim.GetAppliedSchemas(),
[nestedAPIName,
authoredOneAPIName,
authoredTwoAPIName])
# The nestedAPI schema defines its own opinions for all of attr1-6
# attributes as well as rel1 and rel2. Since it is stronger than all its
# built-in API schemas, its version of all these properties win for this
# prim, regardless of the property and attributes types.
_VerifyAttrTypes(nestedAPIPrim,
attr1 = ("int", 1, nestedAPIDocStr),
attr2 = ("int", 2, nestedAPIDocStr),
attr3 = ("int", 3, nestedAPIDocStr),
attr4 = ("int", 4, nestedAPIDocStr),
attr5 = ("color3f", Gf.Vec3f(1, 1, 1), nestedAPIDocStr),
attr6 = ("token", "bar", nestedAPIDocStr))
_VerifyIsRel(nestedAPIPrim,
rel1 = (True, nestedAPIDocStr),
rel2 = (False, nestedAPIDocStr))
        # Test 6: Prim with no type name; apply the authoredOneAPI, authoredTwoAPI,
# and nestedAPI schemas to the prim in that strength order. The same
# three schemas are applied to the prim as in Test 5, but now the
# strength order is different.
nestedAPIPrim2 = stage.DefinePrim("/NestedAPIPrim2")
nestedAPIPrim2.AddAppliedSchema(authoredOneAPIName)
nestedAPIPrim2.AddAppliedSchema(authoredTwoAPIName)
nestedAPIPrim2.AddAppliedSchema(nestedAPIName)
self.assertEqual(nestedAPIPrim2.GetAppliedSchemas(),
[authoredOneAPIName,
authoredTwoAPIName,
nestedAPIName,
authoredOneAPIName,
authoredTwoAPIName])
# Only the authoredOneAPI and nestedAPI define attr1 and attr2;
# authoredOneAPI is stronger and wins
# Only the authoredTwoAPI and nestedAPI define attr3 and attr4;
# authoredTwoAPI is stronger and wins
        # All define attr5 and attr6, so authoredOneAPI's versions of those
# attributes are used since it's strongest.
_VerifyAttrTypes(nestedAPIPrim2,
attr1 = ("int", 0, authoredOneAPIDocStr),
attr2 = ("double", 0.0, authoredOneAPIDocStr),
attr3 = ("int", 10, authoredTwoAPIDocStr),
attr4 = ("string", "foo", authoredTwoAPIDocStr),
attr5 = ("point3f", Gf.Vec3f(0, 0, 0), authoredOneAPIDocStr),
attr6 = ("int", 20, authoredOneAPIDocStr))
# All define rel1 and rel2, but since authoredOneAPI is strongest, it
# wins and only rel2 is a relationship.
_VerifyIsRel(nestedAPIPrim2,
rel1 = (False, authoredOneAPIDocStr),
rel2 = (True, authoredOneAPIDocStr))
# Test 7: Prim with type name set to the conflictPrimType; author the
# nestedAPI schema on this prim. nestedAPI still brings in the
# authoredOneAPI and authoredTwoAPI as its own built-ins to the applied
# API schemas which will be stronger than the prim type definition
# itself.
prim = stage.DefinePrim("/TypedPrim2", conflictPrimTypeName)
prim.AddAppliedSchema(nestedAPIName)
self.assertEqual(prim.GetAppliedSchemas(),
[nestedAPIName,
authoredOneAPIName,
authoredTwoAPIName,
builtinAPIName])
        # nestedAPI defines all the same properties as the prim type definition,
# is the strongest opinion, and, with the exception of attr6, uses the
# same property types and type names as the prim type definition. So
# all properties except attr6 in the composed definition use property
# definitions from nestedAPI.
# For attr6, the strongest opinion from nestedAPI has the type name
# "token" which doesn't match the type name "int" in the prim type
# definition. So, we have to use the opinion from the prim type
# definition. Note that both authoredOneAPI and authoredTwoAPI are
# technically stronger than prim type definition in this case AND have
# the attribute type of "int" for attr6. However, we still fall back to
# the prim type definition's opinion as we only consider the strongest
# opinion from the authored applied API schemas.
_VerifyAttrTypes(prim,
attr1 = ("int", 1, nestedAPIDocStr),
attr2 = ("int", 2, nestedAPIDocStr),
attr3 = ("int", 3, nestedAPIDocStr),
attr4 = ("int", 4, nestedAPIDocStr),
attr5 = ("color3f", Gf.Vec3f(1, 1, 1), nestedAPIDocStr),
attr6 = ("int", 6, conflictPrimTypeDocStr))
_VerifyIsRel(prim,
rel1 = (True, nestedAPIDocStr),
rel2 = (False, nestedAPIDocStr))
# Now also apply authoredOneAPI and authoredTwoAPI directly to the same
# prim (previously they were included as built-ins under nestedAPI).
# These are still weaker than nestedAPI but this means they're also now
# siblings of nestedAPI in the composed definition as opposed to
# built-in to nestedAPI's own prim definition.
prim.AddAppliedSchema(authoredOneAPIName)
prim.AddAppliedSchema(authoredTwoAPIName)
self.assertEqual(prim.GetAppliedSchemas(),
[nestedAPIName,
authoredOneAPIName,
authoredTwoAPIName,
authoredOneAPIName,
authoredTwoAPIName,
builtinAPIName])
# This extra condition changes nothing about the composed prim
# definition but is here to verify that the same behavior for attr6
# above (where only the strongest authored API schema opinion for an
        # attribute is considered) still holds for sibling applied API schemas.
_VerifyAttrTypes(prim,
attr1 = ("int", 1, nestedAPIDocStr),
attr2 = ("int", 2, nestedAPIDocStr),
attr3 = ("int", 3, nestedAPIDocStr),
attr4 = ("int", 4, nestedAPIDocStr),
attr5 = ("color3f", Gf.Vec3f(1, 1, 1), nestedAPIDocStr),
attr6 = ("int", 6, conflictPrimTypeDocStr))
_VerifyIsRel(prim,
rel1 = (True, nestedAPIDocStr),
rel2 = (False, nestedAPIDocStr))
# Test 8: Prim with type name set to the conflictPrimType; author the
# authoredOneAPI, authoredTwoAPI, and nestedAPI schemas on this prim in
# that strength order. This brings in the same 3 applied API schemas as
# Test 7, but now authoredOneAPI and authoredTwoAPI are stronger than
# when they were just brought in as built-ins of nestedAPI.
prim = stage.DefinePrim("/TypedPrim3", conflictPrimTypeName)
prim.AddAppliedSchema(authoredOneAPIName)
prim.AddAppliedSchema(authoredTwoAPIName)
prim.AddAppliedSchema(nestedAPIName)
self.assertEqual(prim.GetAppliedSchemas(),
[authoredOneAPIName,
authoredTwoAPIName,
nestedAPIName,
authoredOneAPIName,
authoredTwoAPIName,
builtinAPIName])
# The results of this test case are identical to Test 4 above as
        # authoredOneAPI is the strongest opinion and authoredTwoAPI is next and the
# same property type conflicts exist. Even though nestedAPI is stronger
# than the prim type definition and has opinions for attr2, attr4,
# attr5, rel1, and rel2 of the matching property/attribute type, these
# are never the strongest API schema property opinion and are not
# considered.
_VerifyAttrTypes(prim,
attr1 = ("int", 0, authoredOneAPIDocStr),
attr2 = ("int", 2, conflictPrimTypeDocStr),
attr3 = ("int", 10, authoredTwoAPIDocStr),
attr4 = ("int", 4, builtinAPIDocStr),
attr5 = ("color3f", Gf.Vec3f(1, 1, 1), conflictPrimTypeDocStr),
attr6 = ("int", 20, authoredOneAPIDocStr))
_VerifyIsRel(prim,
rel1 = (True, conflictPrimTypeDocStr),
rel2 = (False, conflictPrimTypeDocStr))
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
#! /usr/bin/env python3
# Copyright 2019 Samsung Research America
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
from action_msgs.msg import GoalStatus
from geometry_msgs.msg import PoseStamped, PoseWithCovarianceStamped
from nav2_msgs.action import FollowWaypoints
from nav2_msgs.srv import ManageLifecycleNodes
from nav2_msgs.srv import GetCostmap
from nav2_msgs.msg import Costmap
from nav_msgs.msg import OccupancyGrid
from nav_msgs.msg import Odometry
import rclpy
from rclpy.action import ActionClient
from rclpy.node import Node
from rclpy.qos import QoSDurabilityPolicy, QoSHistoryPolicy, QoSReliabilityPolicy
from rclpy.qos import QoSProfile
from enum import Enum
import numpy as np
import math
OCC_THRESHOLD = 10
MIN_FRONTIER_SIZE = 5
class Costmap2d():
class CostValues(Enum):
FreeSpace = 0
InscribedInflated = 253
LethalObstacle = 254
NoInformation = 255
def __init__(self, map):
self.map = map
def getCost(self, mx, my):
return self.map.data[self.__getIndex(mx, my)]
def getSize(self):
return (self.map.metadata.size_x, self.map.metadata.size_y)
def getSizeX(self):
return self.map.metadata.size_x
def getSizeY(self):
return self.map.metadata.size_y
def __getIndex(self, mx, my):
return my * self.map.metadata.size_x + mx
class OccupancyGrid2d():
class CostValues(Enum):
FreeSpace = 0
InscribedInflated = 100
LethalObstacle = 100
NoInformation = -1
def __init__(self, map):
self.map = map
def getCost(self, mx, my):
return self.map.data[self.__getIndex(mx, my)]
def getSize(self):
return (self.map.info.width, self.map.info.height)
def getSizeX(self):
return self.map.info.width
def getSizeY(self):
return self.map.info.height
def mapToWorld(self, mx, my):
wx = self.map.info.origin.position.x + (mx + 0.5) * self.map.info.resolution
wy = self.map.info.origin.position.y + (my + 0.5) * self.map.info.resolution
return (wx, wy)
def worldToMap(self, wx, wy):
if (wx < self.map.info.origin.position.x or wy < self.map.info.origin.position.y):
raise Exception("World coordinates out of bounds")
mx = int((wx - self.map.info.origin.position.x) / self.map.info.resolution)
my = int((wy - self.map.info.origin.position.y) / self.map.info.resolution)
        if (my >= self.map.info.height or mx >= self.map.info.width):
raise Exception("Out of bounds")
return (mx, my)
def __getIndex(self, mx, my):
return my * self.map.info.width + mx
class FrontierCache():
cache = {}
def getPoint(self, x, y):
idx = self.__cantorHash(x, y)
if idx in self.cache:
return self.cache[idx]
self.cache[idx] = FrontierPoint(x, y)
return self.cache[idx]
    def __cantorHash(self, x, y):
        # Cantor pairing function; integer division keeps the key an exact int.
        return (((x + y) * (x + y + 1)) // 2) + y
def clear(self):
self.cache = {}
class FrontierPoint():
def __init__(self, x, y):
self.classification = 0
self.mapX = x
self.mapY = y
def centroid(arr):
arr = np.array(arr)
length = arr.shape[0]
sum_x = np.sum(arr[:, 0])
sum_y = np.sum(arr[:, 1])
return sum_x/length, sum_y/length
def findFree(mx, my, costmap):
fCache = FrontierCache()
bfs = [fCache.getPoint(mx, my)]
while len(bfs) > 0:
loc = bfs.pop(0)
if costmap.getCost(loc.mapX, loc.mapY) == OccupancyGrid2d.CostValues.FreeSpace.value:
return (loc.mapX, loc.mapY)
for n in getNeighbors(loc, costmap, fCache):
if n.classification & PointClassification.MapClosed.value == 0:
n.classification = n.classification | PointClassification.MapClosed.value
bfs.append(n)
return (mx, my)
def getFrontier(pose, costmap, logger):
    # Wavefront frontier detection: breadth-first search outward from the
    # robot's map cell, grouping unknown cells that border free space into
    # frontiers and returning their centroids in world coordinates.
    fCache = FrontierCache()
fCache.clear()
mx, my = costmap.worldToMap(pose.position.x, pose.position.y)
freePoint = findFree(mx, my, costmap)
start = fCache.getPoint(freePoint[0], freePoint[1])
start.classification = PointClassification.MapOpen.value
mapPointQueue = [start]
frontiers = []
while len(mapPointQueue) > 0:
p = mapPointQueue.pop(0)
if p.classification & PointClassification.MapClosed.value != 0:
continue
if isFrontierPoint(p, costmap, fCache):
p.classification = p.classification | PointClassification.FrontierOpen.value
frontierQueue = [p]
newFrontier = []
while len(frontierQueue) > 0:
q = frontierQueue.pop(0)
if q.classification & (PointClassification.MapClosed.value | PointClassification.FrontierClosed.value) != 0:
continue
if isFrontierPoint(q, costmap, fCache):
newFrontier.append(q)
for w in getNeighbors(q, costmap, fCache):
if w.classification & (PointClassification.FrontierOpen.value | PointClassification.FrontierClosed.value | PointClassification.MapClosed.value) == 0:
w.classification = w.classification | PointClassification.FrontierOpen.value
frontierQueue.append(w)
q.classification = q.classification | PointClassification.FrontierClosed.value
newFrontierCords = []
for x in newFrontier:
x.classification = x.classification | PointClassification.MapClosed.value
newFrontierCords.append(costmap.mapToWorld(x.mapX, x.mapY))
if len(newFrontier) > MIN_FRONTIER_SIZE:
frontiers.append(centroid(newFrontierCords))
for v in getNeighbors(p, costmap, fCache):
if v.classification & (PointClassification.MapOpen.value | PointClassification.MapClosed.value) == 0:
if any(costmap.getCost(x.mapX, x.mapY) == OccupancyGrid2d.CostValues.FreeSpace.value for x in getNeighbors(v, costmap, fCache)):
v.classification = v.classification | PointClassification.MapOpen.value
mapPointQueue.append(v)
p.classification = p.classification | PointClassification.MapClosed.value
return frontiers
def getNeighbors(point, costmap, fCache):
neighbors = []
for x in range(point.mapX - 1, point.mapX + 2):
for y in range(point.mapY - 1, point.mapY + 2):
if (x > 0 and x < costmap.getSizeX() and y > 0 and y < costmap.getSizeY()):
neighbors.append(fCache.getPoint(x, y))
return neighbors
def isFrontierPoint(point, costmap, fCache):
if costmap.getCost(point.mapX, point.mapY) != OccupancyGrid2d.CostValues.NoInformation.value:
return False
hasFree = False
for n in getNeighbors(point, costmap, fCache):
cost = costmap.getCost(n.mapX, n.mapY)
if cost > OCC_THRESHOLD:
return False
if cost == OccupancyGrid2d.CostValues.FreeSpace.value:
hasFree = True
return hasFree
class PointClassification(Enum):
MapOpen = 1
MapClosed = 2
FrontierOpen = 4
FrontierClosed = 8
class WaypointFollowerTest(Node):
def __init__(self):
super().__init__(node_name='nav2_waypoint_tester', namespace='')
self.waypoints = None
self.readyToMove = True
self.currentPose = None
self.lastWaypoint = None
self.action_client = ActionClient(self, FollowWaypoints, 'FollowWaypoints')
self.initial_pose_pub = self.create_publisher(PoseWithCovarianceStamped,
'initialpose', 10)
self.costmapClient = self.create_client(GetCostmap, '/global_costmap/get_costmap')
while not self.costmapClient.wait_for_service(timeout_sec=1.0):
self.info_msg('service not available, waiting again...')
self.initial_pose_received = False
self.goal_handle = None
pose_qos = QoSProfile(
durability=QoSDurabilityPolicy.RMW_QOS_POLICY_DURABILITY_TRANSIENT_LOCAL,
reliability=QoSReliabilityPolicy.RMW_QOS_POLICY_RELIABILITY_RELIABLE,
history=QoSHistoryPolicy.RMW_QOS_POLICY_HISTORY_KEEP_LAST,
depth=1)
self.model_pose_sub = self.create_subscription(Odometry,
'/odom', self.poseCallback, pose_qos)
# self.costmapSub = self.create_subscription(Costmap(), '/global_costmap/costmap_raw', self.costmapCallback, pose_qos)
self.costmapSub = self.create_subscription(OccupancyGrid(), '/map', self.occupancyGridCallback, pose_qos)
self.costmap = None
self.get_logger().info('Running Waypoint Test')
def occupancyGridCallback(self, msg):
self.costmap = OccupancyGrid2d(msg)
def moveToFrontiers(self):
frontiers = getFrontier(self.currentPose, self.costmap, self.get_logger())
if len(frontiers) == 0:
self.info_msg('No More Frontiers')
return
location = None
largestDist = 0
for f in frontiers:
dist = math.sqrt(((f[0] - self.currentPose.position.x)**2) + ((f[1] - self.currentPose.position.y)**2))
if dist > largestDist:
largestDist = dist
location = [f]
#worldFrontiers = [self.costmap.mapToWorld(f[0], f[1]) for f in frontiers]
self.info_msg(f'World points {location}')
self.setWaypoints(location)
action_request = FollowWaypoints.Goal()
action_request.poses = self.waypoints
self.info_msg('Sending goal request...')
send_goal_future = self.action_client.send_goal_async(action_request)
try:
rclpy.spin_until_future_complete(self, send_goal_future)
self.goal_handle = send_goal_future.result()
except Exception as e:
self.error_msg('Service call failed %r' % (e,))
if not self.goal_handle.accepted:
self.error_msg('Goal rejected')
return
self.info_msg('Goal accepted')
get_result_future = self.goal_handle.get_result_async()
self.info_msg("Waiting for 'FollowWaypoints' action to complete")
try:
rclpy.spin_until_future_complete(self, get_result_future)
status = get_result_future.result().status
result = get_result_future.result().result
except Exception as e:
self.error_msg('Service call failed %r' % (e,))
#self.currentPose = self.waypoints[len(self.waypoints) - 1].pose
self.moveToFrontiers()
def costmapCallback(self, msg):
self.costmap = Costmap2d(msg)
unknowns = 0
for x in range(0, self.costmap.getSizeX()):
for y in range(0, self.costmap.getSizeY()):
if self.costmap.getCost(x, y) == 255:
unknowns = unknowns + 1
self.get_logger().info(f'Unknowns {unknowns}')
self.get_logger().info(f'Got Costmap {len(getFrontier(None, self.costmap, self.get_logger()))}')
def dumpCostmap(self):
costmapReq = GetCostmap.Request()
self.get_logger().info('Requesting Costmap')
costmap = self.costmapClient.call(costmapReq)
self.get_logger().info(f'costmap resolution {costmap.specs.resolution}')
def setInitialPose(self, pose):
self.init_pose = PoseWithCovarianceStamped()
self.init_pose.pose.pose.position.x = pose[0]
self.init_pose.pose.pose.position.y = pose[1]
self.init_pose.header.frame_id = 'map'
self.currentPose = self.init_pose.pose.pose
self.publishInitialPose()
time.sleep(5)
def poseCallback(self, msg):
self.info_msg('Received amcl_pose')
self.currentPose = msg.pose.pose
self.initial_pose_received = True
def setWaypoints(self, waypoints):
self.waypoints = []
for wp in waypoints:
msg = PoseStamped()
msg.header.frame_id = 'map'
msg.pose.position.x = wp[0]
msg.pose.position.y = wp[1]
msg.pose.orientation.w = 1.0
self.waypoints.append(msg)
def run(self, block):
if not self.waypoints:
            self.error_msg('Did not set valid waypoints before running test!')
return False
while not self.action_client.wait_for_server(timeout_sec=1.0):
self.info_msg("'FollowWaypoints' action server not available, waiting...")
action_request = FollowWaypoints.Goal()
action_request.poses = self.waypoints
self.info_msg('Sending goal request...')
send_goal_future = self.action_client.send_goal_async(action_request)
try:
rclpy.spin_until_future_complete(self, send_goal_future)
self.goal_handle = send_goal_future.result()
except Exception as e:
self.error_msg('Service call failed %r' % (e,))
if not self.goal_handle.accepted:
self.error_msg('Goal rejected')
return False
self.info_msg('Goal accepted')
if not block:
return True
get_result_future = self.goal_handle.get_result_async()
self.info_msg("Waiting for 'FollowWaypoints' action to complete")
try:
rclpy.spin_until_future_complete(self, get_result_future)
status = get_result_future.result().status
result = get_result_future.result().result
except Exception as e:
self.error_msg('Service call failed %r' % (e,))
if status != GoalStatus.STATUS_SUCCEEDED:
self.info_msg('Goal failed with status code: {0}'.format(status))
return False
if len(result.missed_waypoints) > 0:
self.info_msg('Goal failed to process all waypoints,'
' missed {0} wps.'.format(len(result.missed_waypoints)))
return False
self.info_msg('Goal succeeded!')
return True
def publishInitialPose(self):
self.initial_pose_pub.publish(self.init_pose)
def shutdown(self):
self.info_msg('Shutting down')
self.action_client.destroy()
self.info_msg('Destroyed FollowWaypoints action client')
transition_service = 'lifecycle_manager_navigation/manage_nodes'
mgr_client = self.create_client(ManageLifecycleNodes, transition_service)
while not mgr_client.wait_for_service(timeout_sec=1.0):
self.info_msg(transition_service + ' service not available, waiting...')
req = ManageLifecycleNodes.Request()
req.command = ManageLifecycleNodes.Request().SHUTDOWN
future = mgr_client.call_async(req)
try:
rclpy.spin_until_future_complete(self, future)
future.result()
except Exception as e:
self.error_msg('%s service call failed %r' % (transition_service, e,))
self.info_msg('{} finished'.format(transition_service))
transition_service = 'lifecycle_manager_localization/manage_nodes'
mgr_client = self.create_client(ManageLifecycleNodes, transition_service)
while not mgr_client.wait_for_service(timeout_sec=1.0):
self.info_msg(transition_service + ' service not available, waiting...')
req = ManageLifecycleNodes.Request()
req.command = ManageLifecycleNodes.Request().SHUTDOWN
future = mgr_client.call_async(req)
try:
rclpy.spin_until_future_complete(self, future)
future.result()
except Exception as e:
self.error_msg('%s service call failed %r' % (transition_service, e,))
self.info_msg('{} finished'.format(transition_service))
def cancel_goal(self):
cancel_future = self.goal_handle.cancel_goal_async()
rclpy.spin_until_future_complete(self, cancel_future)
def info_msg(self, msg: str):
self.get_logger().info(msg)
def warn_msg(self, msg: str):
self.get_logger().warn(msg)
def error_msg(self, msg: str):
self.get_logger().error(msg)
def main(argv=sys.argv[1:]):
rclpy.init()
# wait a few seconds to make sure entire stacks are up
#time.sleep(10)
wps = [[-0.52, -0.54], [0.58, -0.55], [0.58, 0.52]]
starting_pose = [-2.0, -0.5]
test = WaypointFollowerTest()
#test.dumpCostmap()
test.setWaypoints(wps)
retry_count = 0
retries = 2
while not test.initial_pose_received and retry_count <= retries:
retry_count += 1
test.info_msg('Setting initial pose')
test.setInitialPose(starting_pose)
test.info_msg('Waiting for amcl_pose to be received')
rclpy.spin_once(test, timeout_sec=1.0) # wait for poseCallback
    while test.costmap is None:
test.info_msg('Getting initial map')
rclpy.spin_once(test, timeout_sec=1.0)
test.moveToFrontiers()
rclpy.spin(test)
# result = test.run(True)
# assert result
# # preempt with new point
# test.setWaypoints([starting_pose])
# result = test.run(False)
# time.sleep(2)
# test.setWaypoints([wps[1]])
# result = test.run(False)
# # cancel
# time.sleep(2)
# test.cancel_goal()
# # a failure case
# time.sleep(2)
# test.setWaypoints([[100.0, 100.0]])
# result = test.run(True)
# assert not result
# result = not result
# test.shutdown()
# test.info_msg('Done Shutting Down.')
# if not result:
# test.info_msg('Exiting failed')
# exit(1)
# else:
# test.info_msg('Exiting passed')
# exit(0)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
import discord
import youtube_dl
import os
#import ffprobe
from discord.ext import commands
from discord.utils import get
from discord import FFmpegPCMAudio
from os import system
from packages.CRITICAL.CLIENT import client
from packages.CRITICAL.VERSION import EJ_DJ_VERSION
import asyncio
import subprocess
import urllib.request
import re  # regular expression
from shutil import copyfile
from commands.data import FOOTER, BOT_CHANNELS
# @client.event
# async def on_raw_reaction_add(payload):
# print("SOMEONE REACTED")
# @client.command(pass_context=True, aliases=["Update"])
# async def update(ctx):
# if ctx.channel.name in BOT_CHANNELS.channels:
# pfp = client.user.avatar_url
# update_embed = discord.Embed(
# colour=discord.Colour.green(),
# title="UPDATE",
# description=f"EJ DJ V{EJ_DJ_VERSION}")
# update_embed.set_author(name="Trinity", icon_url=(pfp))
# update_embed.set_thumbnail(url=(pfp))
# update_embed.add_field(name="NEW:", value="""
# -PROFILES!!!\n
# -Changed the radio stop command to " stop radio ".\n
# -Updated the help channel.
# """, inline=True)
# update_embed.add_field(name="FIXED:", value="""
# -Fixed the stop command from sometimes causing file leaks.
# """, inline=True)
# update_embed.add_field(name="KNOWN BUGS:", value="""
# -Their is a known bug when trying to play 2 songs at the same time.
# """, inline=True)
# update_embed.add_field(name="Report:", value="""
# If you find a bug and wish to report it, please contact\n@EJ Studios#3379 or @Happypat900#8268
# """, inline=False)
# update_embed.set_footer(text=f"{FOOTER.footer}")
# await ctx.send(embed = update_embed)
@client.command(pass_context=True, aliases=["Update"])
async def update(ctx):
pfp = client.user.avatar_url
pages = 3
cur_page = 2 #Change this to the current update !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
info_1 = discord.Embed(
colour=discord.Colour.green(),
title="UPDATE",
description=f"EJ DJ V0.0.0")
info_1.set_author(name="EJ DJ", icon_url=(pfp))
info_1.set_thumbnail(url=(pfp))
info_1.add_field(name="NEW:", value="""
-PROFILES!!!\n
-Changed the radio stop command to " stop radio ".\n
-Updated the help channel.
""", inline=True)
info_1.add_field(name="FIXED:", value="""
-Fixed the stop command from sometimes causing file leaks.
""", inline=True)
info_1.add_field(name="KNOWN BUGS:", value="""
    -There is a known bug when trying to play 2 songs at the same time.
""", inline=True)
info_1.add_field(name="Report:", value="""
If you find a bug and wish to report it, please contact\n@EJ Studios#3379
""", inline=False)
info_1.set_footer(text=f"Page 1/{pages} \n{FOOTER.footer}")
info_2 = discord.Embed(
colour=discord.Colour.green(),
title="UPDATE",
description=f"EJ DJ V0.0.1")
info_2.set_author(name="EJ DJ", icon_url=(pfp))
info_2.set_thumbnail(url=(pfp))
info_2.add_field(name="NEW:", value="""
    -Reworked the whole update command to make it more modern while still keeping it user friendly.\n
-Added a server restart for times when the bot gets completely emotional... (ADMIN ONLY)\n
""", inline=True)
info_2.add_field(name="FIXED:", value="""
-Fixed a bug that caused the automatic status changes to pause if the internet went out or was weak.
""", inline=True)
info_2.add_field(name="KNOWN BUGS:", value="""
-No known bugs at this time!
""", inline=True)
info_2.add_field(name="Report:", value="""
If you find a bug and wish to report it, please contact\n@EJ Studios#3379
""", inline=False)
info_2.set_footer(text=f"Page 2/{pages} \n{FOOTER.footer}")
info_3 = discord.Embed(
colour=discord.Colour.green(),
title="UPDATE",
description=f"EJ DJ V0.0.2 BETA")
info_3.set_author(name="EJ DJ", icon_url=(pfp))
info_3.set_thumbnail(url=(pfp))
info_3.add_field(name="Whats next?", value="""
-We are still looking at what to improve for our next update, check back soon!
""", inline=True)
info_3.add_field(name="Have an idea?", value="""
please contact: @EJ Studios#3379
""", inline=False)
info_3.set_footer(text=f"Page 3/{pages} \n{FOOTER.footer}")
message = await ctx.send(embed = info_2) #Change this to the current update !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# getting the message object for editing and reacting
await message.add_reaction("◀️")
await message.add_reaction("▶️")
#adds reactions.
def check(reaction, user):
return user == ctx.author and str(reaction.emoji) in ["◀️", "▶️"]
# This makes sure nobody except the command sender can interact with the "menu"
while True:
try:
reaction, user = await client.wait_for("reaction_add", timeout=60, check=check)
# waiting for a reaction to be added - times out after x seconds, 60 in this
# example
if str(reaction.emoji) == "▶️" and cur_page != pages:
cur_page += 1
if cur_page == 1:
await message.edit(embed=info_1)
elif cur_page == 2:
await message.edit(embed=info_2)
elif cur_page == 3:
await message.edit(embed=info_3)
# await message.edit(embed=info)
await message.remove_reaction(reaction, user)
elif str(reaction.emoji) == "◀️" and cur_page > 1:
cur_page -= 1
if cur_page == 1:
await message.edit(embed=info_1)
elif cur_page == 2:
await message.edit(embed=info_2)
elif cur_page == 3:
await message.edit(embed=info_3)
#await message.edit(content=f"Page {cur_page}/{pages}:\n{contents[cur_page-1]}")
await message.remove_reaction(reaction, user)
else:
await message.remove_reaction(reaction, user)
# removes reactions if the user tries to go forward on the last page or
# backwards on the first page
except asyncio.TimeoutError:
await message.delete()
break
# ending the loop if user doesn't react after x seconds
|
nilq/baby-python
|
python
|
from applications_superstaq._init_vars import API_URL, API_VERSION
from applications_superstaq.superstaq_exceptions import (
SuperstaQException,
SuperstaQModuleNotFoundException,
SuperstaQNotFoundException,
SuperstaQUnsuccessfulJobException,
)
from . import converters
from . import finance
from . import logistics
from . import qubo
from . import superstaq_client
from . import superstaq_exceptions
from . import user_config
__all__ = [
"API_URL",
"API_VERSION",
"SuperstaQException",
"SuperstaQModuleNotFoundException",
"SuperstaQNotFoundException",
"SuperstaQUnsuccessfulJobException",
"converters",
"finance",
"logistics",
"qubo",
"superstaq_client",
"superstaq_exceptions",
"user_config",
]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# pycreviewer: source_file_parser.py
# Dromar [https://github.com/dromar-soft]
# License: MIT
#------------------------------------------------------------------------------
from pycparser import parse_file, c_generator, c_ast
import os
def parse(filepath:str, cpp_args:list):
"""
    Parse a C source file with pycparser and return the resulting AST object.
"""
ast = parse_file(filepath, use_cpp=True,
cpp_path='gcc',
cpp_args=cpp_args)
return ast
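# Illustrative usage sketch (not part of the original module): the file path
# and include flag below are hypothetical; pycparser's bundled fake libc
# headers are typically passed through cpp_args so standard includes resolve.
if __name__ == '__main__':
    example_ast = parse('example.c',
                        cpp_args=['-E', r'-Iutils/fake_libc_include'])
    example_ast.show()  # pretty-print the parsed AST to stdout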
|
nilq/baby-python
|
python
|
import random
import sc2
from sc2 import Race, Difficulty
from sc2.constants import *
from sc2.player import Bot, Computer
from sc2.position import Point2, Point3
class RampWallBot(sc2.BotAI):
async def on_step(self, iteration):
cc = self.units(COMMANDCENTER)
if not cc.exists:
return
else:
cc = cc.first
if self.can_afford(SCV) and self.workers.amount < 16 and cc.noqueue:
await self.do(cc.train(SCV))
# Raise depos when enemies are nearby
for depo in self.units(SUPPLYDEPOT).ready:
for unit in self.known_enemy_units.not_structure:
if unit.position.to2.distance_to(depo.position.to2) < 15:
break
else:
await self.do(depo(MORPH_SUPPLYDEPOT_LOWER))
# Lower depos when no enemies are nearby
for depo in self.units(SUPPLYDEPOTLOWERED).ready:
for unit in self.known_enemy_units.not_structure:
if unit.position.to2.distance_to(depo.position.to2) < 10:
await self.do(depo(MORPH_SUPPLYDEPOT_RAISE))
break
depot_placement_positions = self.main_base_ramp.corner_depots
# Uncomment the following if you want to build 3 supplydepots in the wall instead of a barracks in the middle + 2 depots in the corner
# depot_placement_positions = self.main_base_ramp.corner_depots | {self.main_base_ramp.depot_in_middle}
barracks_placement_position = None
barracks_placement_position = self.main_base_ramp.barracks_correct_placement
# If you prefer to have the barracks in the middle without room for addons, use the following instead
# barracks_placement_position = self.main_base_ramp.barracks_in_middle
depots = self.units(SUPPLYDEPOT) | self.units(SUPPLYDEPOTLOWERED)
# Filter locations close to finished supply depots
if depots:
depot_placement_positions = {d for d in depot_placement_positions if depots.closest_distance_to(d) > 1}
# Build depots
if self.can_afford(SUPPLYDEPOT) and not self.already_pending(SUPPLYDEPOT):
if len(depot_placement_positions) == 0:
return
# Choose any depot location
target_depot_location = depot_placement_positions.pop()
ws = self.workers.gathering
if ws: # if workers were found
w = ws.random
await self.do(w.build(SUPPLYDEPOT, target_depot_location))
# Build barracks
if depots.ready.exists and self.can_afford(BARRACKS) and not self.already_pending(BARRACKS):
if self.units(BARRACKS).amount + self.already_pending(BARRACKS) > 0:
return
ws = self.workers.gathering
if ws and barracks_placement_position: # if workers were found
w = ws.random
await self.do(w.build(BARRACKS, barracks_placement_position))
def main():
sc2.run_game(sc2.maps.get("OdysseyLE"), [
Bot(Race.Terran, RampWallBot()),
Computer(Race.Zerg, Difficulty.Hard)
], realtime=False)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import force_str
from django.utils.translation import gettext_lazy as _
from .managers import MetaTagManager
from .utils import get_cache_backend, check_caching_enabled
class MetaTag(models.Model):
url = models.CharField(_('URL-path'), max_length=100, blank=True, db_index=True)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE, null=True)
object_id = models.PositiveIntegerField(null=True)
content_object = GenericForeignKey()
title = models.CharField(_('title'), max_length=80, blank=True)
keywords = models.CharField(_('keywords'), max_length=250, blank=True)
description = models.TextField(_('description'), blank=True)
objects = MetaTagManager()
class Meta:
ordering = ['id']
db_table = 'metatags'
unique_together = ['content_type', 'object_id']
verbose_name = _('meta tags')
verbose_name_plural = _('meta tags')
def __str__(self):
if self.content_object is not None:
return force_str(self.content_object)
return self.title
def save(self, *args, **kwargs):
is_inserted = not bool(self.pk)
super().save(*args, **kwargs)
if not is_inserted and check_caching_enabled():
self._invalidate_cache()
def _asdict(self):
return {
'title': self.title,
'keywords': self.keywords,
'description': self.description,
}
@staticmethod
def _compose_cache_key(identifier):
return '{cache_key_prefix}:{identifier}'.format(
cache_key_prefix=getattr(settings, 'METATAGS_CACHE_KEY_PREFIX', 'metatags'),
identifier=identifier,
)
def _get_cache_key(self):
if self.content_object is not None:
identifier = '{}:{}'.format(self.content_object._meta.label_lower, self.content_object.pk)
else:
identifier = self.url
return self._compose_cache_key(identifier)
def _rebuild_cache(self):
cache = get_cache_backend()
cache.set(
key=self._get_cache_key(),
value=self,
timeout=getattr(settings, 'METATAGS_CACHE_TIMEOUT', None),
)
def _invalidate_cache(self):
cache = get_cache_backend()
cache.delete(self._get_cache_key())
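# Illustration of the cache key layout (added note, not part of the original app).
# With the default prefix, an object-bound tag for a hypothetical article model
# with pk 42 would be cached under 'metatags:myapp.article:42', while a
# URL-bound tag for '/about/' would use 'metatags:/about/'.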
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
def weights_init(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight.data)
nn.init.constant_(m.bias, 0.1)
class PNet(nn.Module):
''' PNet '''
def __init__(self, ):
super(PNet, self).__init__()
# backend
self.pre_layer = nn.Sequential(
nn.Conv2d(3, 10, kernel_size=3, stride=1), # conv1
nn.PReLU(), # PReLU1
nn.MaxPool2d(kernel_size=2, stride=2), # pool1
nn.Conv2d(10, 16, kernel_size=3, stride=1), # conv2
nn.PReLU(), # PReLU2
nn.Conv2d(16, 32, kernel_size=3, stride=1), # conv3
nn.PReLU() # PReLU3
)
# detection
        self.conv4_1 = nn.Conv2d(32, 1, kernel_size=1, stride=1)  # only for foreground and background
        # bounding box regression
self.conv4_2 = nn.Conv2d(32, 4, kernel_size=1, stride=1)
# landmark localization
# self.conv4_3 = nn.Conv2d(32, 10, kernel_size=1, stride=1)
        # weight initialization with xavier
self.apply(weights_init)
def forward(self, x):
x = self.pre_layer(x)
        logits = self.conv4_1(x)  # batch, 1, 1, 1
offset = self.conv4_2(x)
return logits, offset
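# Quick shape check (added sketch, not part of the original file). PNet is fully
# convolutional, so the canonical 12x12 input collapses to 1x1 feature maps; the
# batch size and input size below are assumptions used only for illustration.
if __name__ == '__main__':
    dummy = torch.randn(1, 3, 12, 12)
    cls_map, box_map = PNet()(dummy)
    print(cls_map.shape, box_map.shape)  # torch.Size([1, 1, 1, 1]) torch.Size([1, 4, 1, 1])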
|
nilq/baby-python
|
python
|
from bayesnet.tensor.constant import Constant
from bayesnet.tensor.tensor import Tensor
from bayesnet.function import Function
class Negative(Function):
"""
element-wise negative
y = -x
"""
def forward(self, x):
x = self._convert2tensor(x)
self.x = x
if isinstance(self.x, Constant):
return Constant(-x.value)
return Tensor(-x.value, function=self)
def backward(self, delta):
dx = -delta
self.x.backward(dx)
def negative(x):
"""
element-wise negative
"""
return Negative().forward(x)
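# Usage sketch (added, not part of the original module): wrapping a plain value
# in Constant mirrors how forward() itself builds results; the number is arbitrary.
if __name__ == '__main__':
    print(negative(Constant(3.0)).value)  # -3.0 (stored in the library's value format)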
|
nilq/baby-python
|
python
|
"""
This script takes the LIRICAL output TSV and converts it to a format that can be uploaded
to seqr (pending implementation of https://github.com/broadinstitute/seqr/issues/2742).
"""
import argparse
import pandas as pd
import re
from utils.gene_ids import get_entrez_to_ensembl_id_map
def main():
p = argparse.ArgumentParser()
p.add_argument("--max-results", default=200, type=int, help="Output at most this many LIRICAL results")
p.add_argument("--output-suffix", default="for_seqr", help="This suffix will be added to the input filename to make the output filename")
p.add_argument("lirical_tsv", nargs="+", help="One or more LIRICAL output .tsv files")
args = p.parse_args()
gene_id_map = get_entrez_to_ensembl_id_map()
for lirical_tsv in args.lirical_tsv:
with open(lirical_tsv) as f:
next(f)
sample_id_line = next(f)
sample_id = sample_id_line.rstrip().replace("! Sample: ", "")
df = pd.read_table(lirical_tsv, comment="!", dtype=str)
#print(f"Read {len(df)} rows from {lirical_tsv}")
output_rows = []
for _, row in df.iterrows():
entrez_gene_id = str(int(re.sub("^NCBIGene:", "", row.entrezGeneId)))
ensemble_gene_id = gene_id_map.get(entrez_gene_id)
if not ensemble_gene_id:
print(f"WARNING: Entrez gene Id {row.entrezGeneId} doesn't have a matching Ensembl gene Id")
if int(row["rank"]) > args.max_results:
#print(f"Skipping row(s) after row #{args.max_results}: {row.to_dict()}")
continue
output_rows.append({
"tool": "lirical",
"sampleId": sample_id,
"rank": int(row["rank"]),
"geneId": ensemble_gene_id,
"diseaseId": row.diseaseCurie, # "OMIM:130720"
"diseaseName": row.diseaseName, # "Lateral meningocele syndrome"
"scoreName1": "post_test_probability",
"score1": float(row.posttestprob.strip("%")),
"scoreName2": "compositeLR",
"score2": float(row.compositeLR.replace(",", "")),
"scoreName3": None,
"score3": None,
})
output_path = re.sub(".tsv", f".{args.output_suffix}.tsv", lirical_tsv)
output_df = pd.DataFrame(output_rows)
output_df.to_csv(output_path, sep="\t", index=False, header=True)
print(f"Wrote {len(output_df)} rows to {output_path}")
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
import discord
from discord.ext import commands
from discord.ext.commands import cooldown
from discord.ext.commands.cooldowns import BucketType
import time
import asyncio
import asyncpg
from datetime import datetime, timedelta
from random import randint
from random import choice
import psutil
import os
import sys
colors = [0xe56b6b,0xdd5151,0xba3434,0xab1f1f,0x940808]
online = '<:online_status:506963324391653387>'
offline = '<:offline_status:506963324521414661>'
dnd = '<:dnd_status:506963324634791936>'
idle = '<:idle_status:506963324529803264>'
class misc:
def __init__(self, bot):
self.bot = bot
@commands.command(description='Give someone a slap', brief='Slap', usage='slap [user]')
async def slap(self, ctx, *, user: discord.Member):
embed = discord.Embed(colour=choice(colors))
embed.set_author(name=f"{ctx.author.name} slapped {user.name}! Ouch!")
embed.set_image(url="https://media.giphy.com/media/RXGNsyRb1hDJm/giphy.gif")
await ctx.send(embed=embed)
@commands.command(description='Give someone a hug! Awwww', brief='Hug', usage='hug [user]')
async def hug(self, ctx, *, user: discord.Member):
embed = discord.Embed(colour=choice(colors))
embed.set_author(name=f"{ctx.author.name} gave {user.name} hug! How cute!")
embed.set_image(url="https://media.giphy.com/media/lXiRKBj0SAA0EWvbG/giphy.gif")
await ctx.send(embed=embed)
@commands.command(aliases=['pong'],description='Check the bots latency to Discord Websockets',brief='Check the bots ping',usage='ping')
async def ping(self, ctx):
t_1 = time.perf_counter()
await ctx.trigger_typing()
t_2 = time.perf_counter()
time_delta = round((t_2-t_1)*1000)
await ctx.send(f':ping_pong: Websocket: {round(self.bot.latency*1000)} ms. Typing: {time_delta} ms')
@commands.command(aliases=["avy"],description='Get a users or your avatar',usage='avatar [user]',brief='Get a users or your avatar')
async def avatar(self, ctx,*,user:discord.Member=None):
user = user or ctx.author
embed = discord.Embed(colour=0x7289da,description=f"[Link]({user.avatar_url})")
embed.set_author(name=f"{user.name}'s avatar",url=user.avatar_url)
embed.set_image(url=user.avatar_url_as(static_format="png"))
await ctx.send(embed=embed)
@commands.command(aliases=['about'],description='Get stats about the bot',brief='Check bot stats',usage='info')
async def info(self, ctx):
cmds_used = await self.bot.pool.fetchval('''SELECT * FROM commands_used;''')
uwulonian = await self.bot.pool.fetchval("SELECT count(user_id) FROM user_settings;")
delta_uptime = datetime.utcnow() - self.bot.launch_time
hours, remainder = divmod(int(delta_uptime.total_seconds()),3600)
minutes, seconds = divmod(remainder,60)
days, hours = divmod(hours,24)
member_count = 0
offline_members = set()
idle_members = set()
dnd_members = set()
online_members = set()
cpu_usage = self.bot.process.cpu_percent()
cpu_count = psutil.cpu_count()
memory_usage = self.bot.process.memory_full_info().uss / (1024 ** 2)
version = sys.version_info
for g in self.bot.guilds:
member_count += g.member_count
for m in g.members:
if m.status is discord.Status.offline:
offline_members.add(m.id)
if m.status is discord.Status.idle:
idle_members.add(m.id)
if m.status is discord.Status.dnd:
dnd_members.add(m.id)
if m.status is discord.Status.online:
online_members.add(m.id)
offline_count = len(offline_members)
idle_count = len(idle_members)
dnd_count = len(dnd_members)
online_count = len(online_members)
user_count = len(self.bot.users)
embed = discord.Embed(color=0x7289da)
embed.set_author(name='Bot Stats')
embed.add_field(name='Owner',value=f'<@300088143422685185> (mellowmarshe#0001)')
embed.add_field(name='Library',value=f'[discord.py\\[rewrite\\]](https://github.com/Rapptz/discord.py/tree/rewrite)')
embed.add_field(name='Language',value=f'Python {version.major}.{version.minor}.{version.micro}')
embed.add_field(name='Uptime',value=f'{days}d {hours}h {minutes}m')
embed.add_field(name='Servers', value=len(self.bot.guilds))
embed.add_field(name='Process',value=f'Memory {round(memory_usage, 2)}MiB\nCPU {cpu_usage/cpu_count}%')
embed.add_field(name='Bot Usage',value=f'{cmds_used} commands used\n{uwulonian} uwulonians')
embed.add_field(name='Members',value=f'{online}{online_count} {idle}{idle_count} {dnd}{dnd_count} {offline}{offline_count}')
embed.add_field(name='Links',value='[Invite](https://discordapp.com/oauth2/authorize?client_id=508725128427995136&scope=bot&permissions=201718983) | [Support](https://discord.gg/494uEhN) | [Donate](https://www.patreon.com/mellOwO?alert=2) | [Vote](https://discordbots.org/bot/508725128427995136/vote)')
await ctx.send(embed=embed)
@commands.command(description="Get the link to the support server")
async def support(self,ctx):
await ctx.send("The link to the support server is https://discord.gg/494uEhN")
@commands.command(description="Check our lovely Patrons!")
async def patrons(self,ctx):
e = discord.Embed(color=0x7289da, description="Patreon https://www.patreon.com/mellOwO?alert=2")
e.set_author(name="Our awesome Patrons")
e.add_field(name="Capn", value=
"""
Supporter since 11/19/18 at 20:42:35 UTC
- [His bot](https://discordapp.com/oauth2/authorize?&client_id=448915931507458048&scope=bot&permissions=8)
""", inline=False)
e.add_field(name="Arika", value=
"""
Supporter since 11/28/18 at 02:06:13 UTC
""", inline=False)
e.add_field(name="Mary Johanna", value=
"""
Awesome since 11/29/18 at 17:21:28 UTC
""", inline=False)
e.add_field(name="XiuhcoatlSlayer", value=
"""
Supporter since 11/30/18 at 02:14:11 UTC
""", inline=False)
await ctx.send(embed=e)
@commands.command(description="Send voting link")
async def vote(self,ctx):
await ctx.send("You can vote for me here! https://discordbots.org/bot/508725128427995136/vote I greatly appreciate voting as it helps the bot a lot")
@commands.command(description="Bot rules", aliases=['tos'])
async def rules(self, ctx):
await ctx.send(
"""
uwu rules. You must follow these or we will take action
`-` Don't spam or abuse commands with the intent to harm or slow the bot down. We want the best experience for our users anything to harm this is a problem.
`-` Don't rename or create an uwulonian with a harmful, racist, or any name that can offend someone. We will most likely delete your uwulonian if you do this.
`-` The use of scripts or anything to gain an advantage is forbidden.
`-` DM staff if you are having problems with the bot don't DM regular members unless they say you can.
""")
def setup(bot):
bot.add_cog(misc(bot))
|
nilq/baby-python
|
python
|
from numbers import Number
class TresholdEqual:
""" Implementation of numeric objects considered equal when within a
specified tolerance. The optional 'tolerance' parameter defaults to 1.
    Only comparison operators are implemented. Attempting to compare with
    an object of a different type raises a TypeError. When two
    TresholdEqual objects have different tolerances, the smaller one takes
    precedence.
"""
def __init__(self, number, tolerance=1):
if not isinstance(number, Number):
raise TypeError
else:
self.number = number
if not isinstance(tolerance, Number):
raise TypeError
else:
self.tolerance = tolerance
def __eq__(self, other):
if isinstance(other, type(self)):
tolerance = min(self.tolerance, other.tolerance)
return abs(self.number - other.number) <= tolerance
else:
raise TypeError
def __ne__(self, other):
return not self == other
def __lt__(self, other):
return self != other and self.number < other.number
def __gt__(self, other):
return self != other and self.number > other.number
def __le__(self, other):
return self == other or self < other
def __ge__(self, other):
return self == other or self > other
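# Usage sketch (added, not part of the original module): numbers within the
# tolerance compare equal, and the smaller tolerance wins when the two differ.
if __name__ == '__main__':
    a = TresholdEqual(10)               # default tolerance of 1
    b = TresholdEqual(10.8)
    c = TresholdEqual(13, tolerance=5)
    print(a == b)   # True  -> |10 - 10.8| <= 1
    print(a == c)   # False -> min(1, 5) = 1 and |10 - 13| > 1
    print(a < c)    # True  -> not equal and 10 < 13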
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 24 20:43:09 2019
@author: elif.ayvali
"""
import sys
import gym
import pandas as pd
import numpy as np
import matplotlib.collections as mc
import matplotlib.pyplot as plt
from helpers import create_uniform_grid,discretize,visualize_samples
class QLearningAgent:
"""Q-Learning agent that can act on a continuous state space by discretizing it."""
def __init__(self, env, state_grid, alpha=0.02, gamma=0.99, epsilon=1.0, epsilon_decay_rate=0.9995, min_epsilon=.01, seed=505):
"""Initialize variables, create grid for discretization."""
# Environment info
self.env = env
self.state_grid = state_grid
self.state_size = tuple(len(splits) + 1 for splits in self.state_grid) # n-dimensional state space
self.action_size = self.env.action_space.n # 1-dimensional discrete action space
self.seed = np.random.seed(seed)
print("Environment:", self.env)
print("State space size:", self.state_size)
print("Action space size:", self.action_size)
# Learning parameters
self.alpha = alpha # learning rate
self.gamma = gamma # discount factor
self.epsilon = self.initial_epsilon = epsilon # initial exploration rate
self.epsilon_decay_rate = epsilon_decay_rate # how quickly should we decrease epsilon
self.min_epsilon = min_epsilon
# Create Q-table
self.q_table = np.zeros(shape=(self.state_size + (self.action_size,)))
print("Q table size:", self.q_table.shape)
def preprocess_state(self, state):
"""Map a continuous state to its discretized representation."""
return tuple(discretize(state,self.state_grid))
def reset_episode(self, state):
"""Reset variables for a new episode."""
# Gradually decrease exploration rate
self.epsilon *= self.epsilon_decay_rate
self.epsilon = max(self.epsilon, self.min_epsilon)
# Decide initial action
self.last_state = self.preprocess_state(state)
self.last_action = np.argmax(self.q_table[self.last_state])
return self.last_action
def reset_exploration(self, epsilon=None):
"""Reset exploration rate used when training."""
self.epsilon = epsilon if epsilon is not None else self.initial_epsilon
def act(self, state, reward=None, done=None, mode='train'):
"""Pick next action and update internal Q table (when mode != 'test')."""
state = self.preprocess_state(state)
if mode == 'test':
# Test mode: Simply produce an action
action = np.argmax(self.q_table[state])
else:
# Train mode (default): Update Q table, pick next action
# Note: We update the Q table entry for the *last* (state, action) pair with current state, reward
self.q_table[self.last_state + (self.last_action,)] += self.alpha * \
(reward + self.gamma * max(self.q_table[state]) - self.q_table[self.last_state + (self.last_action,)])
# Exploration vs. exploitation
do_exploration = np.random.uniform(0, 1) < self.epsilon
if do_exploration:
# Pick a random action
action = np.random.randint(0, self.action_size)
else:
# Pick the best action from Q table
action = np.argmax(self.q_table[state])
# Roll over current state, action for next step
self.last_state = state
self.last_action = action
return action
def run(agent, env, num_episodes=20000, mode='train'):
"""Run agent in given reinforcement learning environment and return scores."""
scores = []
max_avg_score = -np.inf
for i_episode in range(1, num_episodes+1):
# Initialize episode
state = env.reset()
action = agent.reset_episode(state)
total_reward = 0
done = False
# Roll out steps until done
while not done:
state, reward, done, info = env.step(action)
total_reward += reward
action = agent.act(state, reward, done, mode)
# Save final score
scores.append(total_reward)
# Print episode stats
if mode == 'train':
if len(scores) > 100:
avg_score = np.mean(scores[-100:])
if avg_score > max_avg_score:
max_avg_score = avg_score
if i_episode % 100 == 0:
print("\rEpisode {}/{} | Max Average Score: {}".format(i_episode, num_episodes, max_avg_score), end="")
sys.stdout.flush()
return scores
def plot_scores(scores, rolling_window=100):
"""Plot scores and optional rolling mean using specified window."""
plt.plot(scores); plt.title("Scores");
rolling_mean = pd.Series(scores).rolling(rolling_window).mean()
plt.plot(rolling_mean);
return rolling_mean
def plot_q_table(q_table):
"""Visualize max Q-value for each state and corresponding action.
Q-table is of size MxNxA,
where (M, N) is the size of the state space,
and A is the size of the action space.
We are interested in the maximum Q-value for each state,
and the corresponding (best) action associated with that value."""
q_image = np.max(q_table, axis=2) # max Q-value for each state
q_actions = np.argmax(q_table, axis=2) # best action for each state
fig, ax = plt.subplots(figsize=(10, 10))
cax = ax.imshow(q_image, cmap='jet');
cbar = fig.colorbar(cax)
for x in range(q_image.shape[0]):
for y in range(q_image.shape[1]):
ax.text(x, y, q_actions[x, y], color='white',
horizontalalignment='center', verticalalignment='center')
ax.grid(False)
ax.set_title("Q-table, size: {}".format(q_table.shape))
ax.set_xlabel('position')
ax.set_ylabel('velocity')
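# Tiny worked example (added): for a 2x2 state grid with 3 actions, reducing over
# the last axis yields the value surface and greedy policy that plot_q_table
# draws; the numbers here are arbitrary.
_demo_q = np.arange(12).reshape(2, 2, 3)
assert np.max(_demo_q, axis=2).shape == (2, 2)      # best value per state
assert np.argmax(_demo_q, axis=2).shape == (2, 2)   # best action per state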
# Create an environment
env = gym.make('MountainCar-v0')
# Create a grid to discretize the state space of the RL problem
low = [-1.0, -5.0]
high = [1.0, 5.0]
state_grid = create_uniform_grid(env.observation_space.low, env.observation_space.high, bins=(10, 10))
q_agent = QLearningAgent(env, state_grid)
scores = run(q_agent, env)
# Plot scores obtained per episode
plt.plot(scores); plt.title("Scores")
#If the scores are noisy, it might be difficult to tell whether your agent is actually learning.
# To find the underlying trend, plot a rolling mean of the scores.
rolling_mean = plot_scores(scores)
# Run in test mode and analyze scores obtained
test_scores = run(q_agent, env, num_episodes=100, mode='test')
print("[TEST] Completed {} episodes with avg. score = {}".format(len(test_scores), np.mean(test_scores)))
_ = plot_scores(test_scores)
plot_q_table(q_agent.q_table)
# Improve the agent by increasing the state space resolution (finer discretization grid)
state_grid_new = create_uniform_grid(env.observation_space.low, env.observation_space.high, bins=(20, 20))
q_agent_new = QLearningAgent(env, state_grid_new)
q_agent_new.scores = [] # initialize a list to store scores for this agent
#This line can be run multiple times, and scores will get accumulated
q_agent_new.scores += run(q_agent_new, env, num_episodes=50000) # accumulate scores
rolling_mean_new = plot_scores(q_agent_new.scores)
# Run in test mode and analyze scores obtained
test_scores = run(q_agent_new, env, num_episodes=100, mode='test')
print("[TEST] Completed {} episodes with avg. score = {}".format(len(test_scores), np.mean(test_scores)))
_ = plot_scores(test_scores)
# Visualize the learned Q-table
plot_q_table(q_agent_new.q_table)
|
nilq/baby-python
|
python
|
import numpy as np
from numpy.testing import assert_  # used by the error initializers in the tests below
from dgl.frame import Frame, FrameRef
from dgl.utils import Index, toindex
import backend as F
import dgl
import unittest
N = 10
D = 5
def check_fail(fn):
try:
fn()
return False
except:
return True
def create_test_data(grad=False):
c1 = F.randn((N, D))
c2 = F.randn((N, D))
c3 = F.randn((N, D))
if grad:
c1 = F.attach_grad(c1)
c2 = F.attach_grad(c2)
c3 = F.attach_grad(c3)
return {'a1' : c1, 'a2' : c2, 'a3' : c3}
def test_create():
data = create_test_data()
f1 = Frame(num_rows=N)
for k, v in data.items():
f1.update_column(k, v)
print(f1.schemes)
assert f1.keys() == set(data.keys())
assert f1.num_columns == 3
assert f1.num_rows == N
f2 = Frame(data)
assert f2.keys() == set(data.keys())
assert f2.num_columns == 3
assert f2.num_rows == N
f1.clear()
assert len(f1.schemes) == 0
assert f1.num_rows == 0
def test_column1():
# Test frame column getter/setter
data = create_test_data()
f = Frame(data)
assert f.num_rows == N
assert len(f) == 3
assert F.allclose(f['a1'].data, data['a1'])
f['a1'] = data['a2']
assert F.allclose(f['a2'].data, data['a2'])
# add a different length column should fail
def failed_add_col():
f['a4'] = F.zeros([N+1, D])
assert check_fail(failed_add_col)
# delete all the columns
del f['a1']
del f['a2']
assert len(f) == 1
del f['a3']
assert len(f) == 0
def test_column2():
# Test frameref column getter/setter
data = Frame(create_test_data())
f = FrameRef(data, toindex([3, 4, 5, 6, 7]))
assert f.num_rows == 5
assert len(f) == 3
assert F.allclose(f['a1'], F.narrow_row(data['a1'].data, 3, 8))
# set column should reflect on the referenced data
f['a1'] = F.zeros([5, D])
assert F.allclose(F.narrow_row(data['a1'].data, 3, 8), F.zeros([5, D]))
# add new partial column should fail with error initializer
f.set_initializer(lambda shape, dtype : assert_(False))
def failed_add_col():
f['a4'] = F.ones([5, D])
assert check_fail(failed_add_col)
def test_append1():
# test append API on Frame
data = create_test_data()
f1 = Frame()
f2 = Frame(data)
f1.append(data)
assert f1.num_rows == N
f1.append(f2)
assert f1.num_rows == 2 * N
c1 = f1['a1']
assert tuple(F.shape(c1.data)) == (2 * N, D)
truth = F.cat([data['a1'], data['a1']], 0)
assert F.allclose(truth, c1.data)
# append dict of different length columns should fail
f3 = {'a1' : F.zeros((3, D)), 'a2' : F.zeros((3, D)), 'a3' : F.zeros((2, D))}
def failed_append():
f1.append(f3)
assert check_fail(failed_append)
def test_append2():
# test append on FrameRef
data = Frame(create_test_data())
f = FrameRef(data)
assert f.is_contiguous()
assert f.is_span_whole_column()
assert f.num_rows == N
# append on the underlying frame should not reflect on the ref
data.append(data)
assert f.is_contiguous()
assert not f.is_span_whole_column()
assert f.num_rows == N
# append on the FrameRef should work
f.append(data)
assert not f.is_contiguous()
assert not f.is_span_whole_column()
assert f.num_rows == 3 * N
new_idx = list(range(N)) + list(range(2*N, 4*N))
assert F.array_equal(f._index.tousertensor(), F.copy_to(F.tensor(new_idx, dtype=F.int64), F.cpu()))
assert data.num_rows == 4 * N
def test_append3():
# test append on empty frame
f = Frame(num_rows=5)
data = {'h' : F.ones((3, 2))}
f.append(data)
assert f.num_rows == 8
ans = F.cat([F.zeros((5, 2)), F.ones((3, 2))], 0)
assert F.allclose(f['h'].data, ans)
# test append with new column
data = {'h' : 2 * F.ones((3, 2)), 'w' : 2 * F.ones((3, 2))}
f.append(data)
assert f.num_rows == 11
ans1 = F.cat([ans, 2 * F.ones((3, 2))], 0)
ans2 = F.cat([F.zeros((8, 2)), 2 * F.ones((3, 2))], 0)
assert F.allclose(f['h'].data, ans1)
assert F.allclose(f['w'].data, ans2)
def test_row1():
# test row getter/setter
data = create_test_data()
f = FrameRef(Frame(data))
# getter
# test non-duplicate keys
rowid = Index(F.tensor([0, 2]))
rows = f[rowid]
for k, v in rows.items():
assert tuple(F.shape(v)) == (len(rowid), D)
assert F.allclose(v, F.gather_row(data[k], F.tensor(rowid.tousertensor())))
# test duplicate keys
rowid = Index(F.tensor([8, 2, 2, 1]))
rows = f[rowid]
for k, v in rows.items():
assert tuple(F.shape(v)) == (len(rowid), D)
assert F.allclose(v, F.gather_row(data[k], F.tensor(rowid.tousertensor())))
# setter
rowid = Index(F.tensor([0, 2, 4]))
vals = {'a1' : F.zeros((len(rowid), D)),
'a2' : F.zeros((len(rowid), D)),
'a3' : F.zeros((len(rowid), D)),
}
f[rowid] = vals
for k, v in f[rowid].items():
assert F.allclose(v, F.zeros((len(rowid), D)))
# setting rows with new column should raise error with error initializer
f.set_initializer(lambda shape, dtype : assert_(False))
def failed_update_rows():
vals['a4'] = F.ones((len(rowid), D))
f[rowid] = vals
assert check_fail(failed_update_rows)
def test_row2():
# test row getter/setter autograd compatibility
data = create_test_data(grad=True)
f = FrameRef(Frame(data))
with F.record_grad():
# getter
c1 = f['a1']
# test non-duplicate keys
rowid = Index(F.tensor([0, 2]))
rows = f[rowid]
y = rows['a1']
F.backward(y, F.ones((len(rowid), D)))
assert F.allclose(F.grad(c1)[:,0], F.tensor([1., 0., 1., 0., 0., 0., 0., 0., 0., 0.]))
f['a1'] = F.attach_grad(f['a1'])
with F.record_grad():
c1 = f['a1']
# test duplicate keys
rowid = Index(F.tensor([8, 2, 2, 1]))
rows = f[rowid]
y = rows['a1']
F.backward(y, F.ones((len(rowid), D)))
assert F.allclose(F.grad(c1)[:,0], F.tensor([0., 1., 2., 0., 0., 0., 0., 0., 1., 0.]))
f['a1'] = F.attach_grad(f['a1'])
with F.record_grad():
# setter
c1 = f['a1']
rowid = Index(F.tensor([0, 2, 4]))
vals = {'a1' : F.attach_grad(F.zeros((len(rowid), D))),
'a2' : F.attach_grad(F.zeros((len(rowid), D))),
'a3' : F.attach_grad(F.zeros((len(rowid), D))),
}
f[rowid] = vals
c11 = f['a1']
F.backward(c11, F.ones((N, D)))
assert F.allclose(F.grad(c1)[:,0], F.tensor([0., 1., 0., 1., 0., 1., 1., 1., 1., 1.]))
assert F.allclose(F.grad(vals['a1']), F.ones((len(rowid), D)))
assert F.is_no_grad(vals['a2'])
def test_row3():
# test row delete
data = Frame(create_test_data())
f = FrameRef(data)
assert f.is_contiguous()
assert f.is_span_whole_column()
assert f.num_rows == N
del f[toindex(F.tensor([2, 3]))]
assert not f.is_contiguous()
assert not f.is_span_whole_column()
# delete is lazy: only reflect on the ref while the
# underlying storage should not be touched
assert f.num_rows == N - 2
assert data.num_rows == N
newidx = list(range(N))
newidx.pop(2)
newidx.pop(2)
newidx = toindex(newidx)
for k, v in f.items():
assert F.allclose(v, data[k][newidx])
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support inplace update")
def test_row4():
# test updating row with empty frame but has preset num_rows
f = FrameRef(Frame(num_rows=5))
rowid = Index(F.tensor([0, 2, 4]))
f[rowid] = {'h' : F.ones((3, 2))}
ans = F.zeros((5, 2))
ans[F.tensor([0, 2, 4])] = F.ones((3, 2))
assert F.allclose(f['h'], ans)
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support inplace update")
def test_sharing():
data = Frame(create_test_data())
f1 = FrameRef(data, index=toindex([0, 1, 2, 3]))
f2 = FrameRef(data, index=toindex([2, 3, 4, 5, 6]))
# test read
for k, v in f1.items():
assert F.allclose(F.narrow_row(data[k].data, 0, 4), v)
for k, v in f2.items():
assert F.allclose(F.narrow_row(data[k].data, 2, 7), v)
f2_a1 = f2['a1']
# test write
# update own ref should not been seen by the other.
f1[Index(F.tensor([0, 1]))] = {
'a1' : F.zeros([2, D]),
'a2' : F.zeros([2, D]),
'a3' : F.zeros([2, D]),
}
assert F.allclose(f2['a1'], f2_a1)
# update shared space should been seen by the other.
f1[Index(F.tensor([2, 3]))] = {
'a1' : F.ones([2, D]),
'a2' : F.ones([2, D]),
'a3' : F.ones([2, D]),
}
F.narrow_row_set(f2_a1, 0, 2, F.ones([2, D]))
assert F.allclose(f2['a1'], f2_a1)
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support inplace update")
def test_slicing():
data = Frame(create_test_data(grad=True))
f1 = FrameRef(data, index=toindex(slice(1, 5)))
f2 = FrameRef(data, index=toindex(slice(3, 8)))
# test read
for k, v in f1.items():
assert F.allclose(F.narrow_row(data[k].data, 1, 5), v)
f2_a1 = f2['a1'] # is a tensor
# test write
f1[Index(F.tensor([0, 1]))] = {
'a1': F.zeros([2, D]),
'a2': F.zeros([2, D]),
'a3': F.zeros([2, D]),
}
assert F.allclose(f2['a1'], f2_a1)
f1[Index(F.tensor([2, 3]))] = {
'a1': F.ones([2, D]),
'a2': F.ones([2, D]),
'a3': F.ones([2, D]),
}
F.narrow_row_set(f2_a1, 0, 2, 1)
assert F.allclose(f2['a1'], f2_a1)
f1[toindex(slice(2, 4))] = {
'a1': F.zeros([2, D]),
'a2': F.zeros([2, D]),
'a3': F.zeros([2, D]),
}
F.narrow_row_set(f2_a1, 0, 2, 0)
assert F.allclose(f2['a1'], f2_a1)
def test_add_rows():
data = Frame()
f1 = FrameRef(data)
f1.add_rows(4)
x = F.randn((1, 4))
f1[Index(F.tensor([0]))] = {'x': x}
ans = F.cat([x, F.zeros((3, 4))], 0)
assert F.allclose(f1['x'], ans)
f1.add_rows(4)
f1[toindex(slice(4, 8))] = {'x': F.ones((4, 4)), 'y': F.ones((4, 5))}
ans = F.cat([ans, F.ones((4, 4))], 0)
assert F.allclose(f1['x'], ans)
ans = F.cat([F.zeros((4, 5)), F.ones((4, 5))], 0)
assert F.allclose(f1['y'], ans)
@unittest.skipIf(dgl.backend.backend_name == "tensorflow", reason="TF doesn't support inplace update")
def test_inplace():
f = FrameRef(Frame(create_test_data()))
print(f.schemes)
a1addr = id(f['a1'])
a2addr = id(f['a2'])
a3addr = id(f['a3'])
# column updates are always out-of-place
f['a1'] = F.ones((N, D))
newa1addr = id(f['a1'])
assert a1addr != newa1addr
a1addr = newa1addr
# full row update that becomes column update
f[toindex(slice(0, N))] = {'a1' : F.ones((N, D))}
assert id(f['a1']) != a1addr
# row update (outplace) w/ slice
f[toindex(slice(1, 4))] = {'a2' : F.ones((3, D))}
newa2addr = id(f['a2'])
assert a2addr != newa2addr
a2addr = newa2addr
# row update (outplace) w/ list
f[toindex([1, 3, 5])] = {'a2' : F.ones((3, D))}
newa2addr = id(f['a2'])
assert a2addr != newa2addr
a2addr = newa2addr
# row update (inplace) w/ slice
f.update_data(toindex(slice(1, 4)), {'a2' : F.ones((3, D))}, True)
newa2addr = id(f['a2'])
assert a2addr == newa2addr
# row update (inplace) w/ list
f.update_data(toindex([1, 3, 5]), {'a2' : F.ones((3, D))}, True)
newa2addr = id(f['a2'])
assert a2addr == newa2addr
if __name__ == '__main__':
test_create()
test_column1()
test_column2()
test_append1()
test_append2()
test_append3()
test_row1()
test_row2()
test_row3()
test_row4()
test_sharing()
test_slicing()
test_add_rows()
test_inplace()
|
nilq/baby-python
|
python
|
# Open The Text File & 'Read' It
# Insert The File Path Between The Brackets & Quotation Marks
with open(" ", "r") as file:
# Read The File & Write The First Replace Method
remove_1 = file.read().replace(" .", ".")
# Remaining Replace Methods
remove_2 = remove_1.replace(" , ", ", ")
remove_3 = remove_2.replace(" ( ", " (")
remove_4 = remove_3.replace(" )", ")")
remove_5 = remove_4.replace(" ? ", "? ")
remove_6 = remove_5.replace(" ‘ ", " ‘")
remove_7 = remove_6.replace(" ’ ", "’ ")
remove_8 = remove_7.replace(" : ", ": ")
remove_9 = remove_8.replace(" (yellow) ", "")
remove_10 = remove_9.replace("Highlight-","")
# 'Write' To A New Text File (Or Update Existing)
# Insert The File Path Between The Brackets & Quotation Marks
with open (" ", "w") as file:
# Call The Last Replace Method
file.write(remove_10)
|
nilq/baby-python
|
python
|
import os,re,sys
# initializing bad_chars_list
bad_chars = [',', ';', ':', '!', '.', '(', ')', '"', "*"]
#filename="mat2vec-1a5b3240-abstracts-head.csv"
filename=sys.argv[1] #"mat2vec-1a5b3240-abstracts.csv"
print(filename)
filename_w="cleaned_"+filename
with open( filename_w, 'a+') as corpus_w:
with open(filename,"r") as corpus:
data=corpus.readlines()
for line in data:
#words=line.split(',') #(" ")
#split_again=str(words[0]).split(".")
#year=split_again[3]
#if int(year)>2020 or int(year)<1900:
# year=split_again[2]
#filename_i=os.path.join("corpus_by_year",str(year)+".csv")
#words_j=''
#for j in range(1,len(words)):
# words_j +=words[j]
#words_jj=re.split('.|,|"|:', str(words_j))
#words_k=''
#for k in range(0,len(words_jj)):
# words_k += words_jj[k]
            # remove bad_chars (accumulate the replacements so every character is stripped)
            words = line
            for ii in bad_chars:
                words = words.replace(ii, '')
            corpus_w.write(words + '\n')
|
nilq/baby-python
|
python
|
### INTERACTIVE NAPLPS DEMO ###
### Interact with NAPLPS Client (PP3) over a serial connection ###
### John Durno, January 2017 ###
### Added auto-advance April 2017 ###
### Added chunks April 2017 ###
### Added quit (q) April 2017 ###
### Added timeout (auto=M) May 2017 ###
## Imports ###
import collections
import serial
import time
from threading import Thread
from Queue import Queue
import sys
### Data dictionary ###
def rec_dd():
return collections.defaultdict(rec_dd)
j = rec_dd()
# DICT PARAMs #
# fn - name of file to load
# auto - auto-advance to the next slide? Values Y, N, M
# Y=Yes, N=No, M=Maybe [if no input received before timeout]
# goto - cmd to invoke for auto-advance, refers to numbers in NAPLPS frames
# renderwait - interval to pause after file is sent, in seconds
# inputwait - how long to wait for input when auto=M
# chunk - number of bytes to send before pausing, 0=send whole file with no breaks
# chunkwait - pause interval before sending the next chunk in seconds, 0=no wait
# cmd - nested dict of one or more commands indicating file to move to next
# NOTES re PARAMS
# cmd can be entered either by auto-advance(goto) or through user input
# files that exceed some undetermined maximum size have transmission problems
# use chunk feature to sends parts of files as chunks
# use chunkwait to specify delays between chunks
# wait and chunkwait values are based on manual timings of file renderings
### INTERACTIVE NAPLPS DEMO ###
j['idemo010']['fn'] = 'idemo.010'
j['idemo010']['auto'] = 'N'
j['idemo010']['goto'] = '1'
j['idemo010']['renderwait'] = 4
j['idemo010']['inputwait'] = 0
j['idemo010']['chunksize'] = 0
j['idemo010']['chunkwait'] = 0
j['idemo010']['cmd']['1'] = 'idemo021'
j['idemo010']['cmd']['2'] = 'idemo022'
j['idemo021']['fn'] = 'idemo.021'
j['idemo021']['auto'] = 'Y'
j['idemo021']['goto'] = '1'
j['idemo021']['renderwait'] = 4
j['idemo021']['inputwait'] = 0
j['idemo021']['chunksize'] = 0
j['idemo021']['chunkwait'] = 0
j['idemo021']['cmd']['1'] = 'idemo023'
j['idemo022']['fn'] = 'idemo.022'
j['idemo022']['auto'] = 'Y'
j['idemo022']['goto'] = '1'
j['idemo022']['renderwait'] = 6
j['idemo022']['inputwait'] = 0
j['idemo022']['chunksize'] = 0
j['idemo022']['chunkwait'] = 0
j['idemo022']['cmd']['1'] = 'idemo010'
j['idemo023']['fn'] = 'idemo.023'
j['idemo023']['auto'] = 'Y'
j['idemo023']['goto'] = '1'
j['idemo023']['renderwait'] = 5
j['idemo023']['inputwait'] = 0
j['idemo023']['chunksize'] = 78
j['idemo023']['chunkwait'] = 6
j['idemo023']['cmd']['1'] = 'idemo024'
j['idemo024']['fn'] = 'idemo.024'
j['idemo024']['auto'] = 'M'
j['idemo024']['goto'] = '2'
j['idemo024']['renderwait'] = 1
j['idemo024']['inputwait'] = 10
j['idemo024']['chunksize'] = 0
j['idemo024']['chunkwait'] = 0
j['idemo024']['cmd']['1'] = 'idemo025'
j['idemo024']['cmd']['2'] = 'boom'
j['idemo025']['fn'] = 'idemo.025'
j['idemo025']['auto'] = 'Y'
j['idemo025']['goto'] = '1'
j['idemo025']['renderwait'] = 5
j['idemo025']['inputwait'] = 0
j['idemo025']['chunksize'] = 0
j['idemo025']['chunkwait'] = 0
j['idemo025']['cmd']['1'] = 'idemo030'
j['boom']['fn'] = 'boom.nap'
j['boom']['auto'] = 'Y'
j['boom']['goto'] = '1'
j['boom']['renderwait'] = 15
j['boom']['inputwait'] = 0
j['boom']['chunksize'] = 0
j['boom']['chunkwait'] = 0
j['boom']['cmd']['1'] = 'idemo030'
j['idemo030']['fn'] = 'idemo.030'
j['idemo030']['auto'] = 'N'
j['idemo030']['goto'] = '1'
j['idemo030']['renderwait'] = 6
j['idemo030']['inputwait'] = 0
j['idemo030']['chunksize'] = 0
j['idemo030']['chunkwait'] = 0
j['idemo030']['cmd']['1'] = 'idemo041'
j['idemo030']['cmd']['2'] = 'idemo042'
j['idemo030']['cmd']['3'] = 'idemo043'
j['idemo041']['fn'] = 'idemo.041'
j['idemo041']['auto'] = 'Y'
j['idemo041']['goto'] = '1'
j['idemo041']['renderwait'] = 4
j['idemo041']['inputwait'] = 0
j['idemo041']['chunksize'] = 0
j['idemo041']['chunkwait'] = 0
j['idemo041']['cmd']['1'] = 'idemo030'
j['idemo042']['fn'] = 'idemo.042'
j['idemo042']['auto'] = 'Y'
j['idemo042']['goto'] = '1'
j['idemo042']['renderwait'] = 4
j['idemo042']['inputwait'] = 0
j['idemo042']['chunksize'] = 0
j['idemo042']['chunkwait'] = 0
j['idemo042']['cmd']['1'] = 'idemo050'
j['idemo043']['fn'] = 'idemo.043'
j['idemo043']['auto'] = 'N'
j['idemo043']['goto'] = '1'
j['idemo043']['renderwait'] = 6
j['idemo043']['inputwait'] = 0
j['idemo043']['chunksize'] = 0
j['idemo043']['chunkwait'] = 0
j['idemo043']['cmd']['1'] = 'idemo010'
j['idemo043']['cmd']['2'] = 'idemo050'
j['idemo050']['fn'] = 'idemo.050'
j['idemo050']['auto'] = 'N'
j['idemo050']['goto'] = '1'
j['idemo050']['renderwait'] = 6
j['idemo050']['inputwait'] = 0
j['idemo050']['chunksize'] = 0
j['idemo050']['chunkwait'] = 0
j['idemo050']['cmd']['1'] = 'idemo010'
### Serial Connection Settings ###
ser = serial.Serial()
ser.port = "/dev/tnt1"
ser.baudrate = 1200
ser.bytesize = serial.EIGHTBITS #number of bits per bytes
ser.parity = serial.PARITY_NONE #set parity check: no parity
ser.stopbits = serial.STOPBITS_ONE #number of stop bits
ser.xonxoff = None #software flow control
ser.rtscts = None #hardware (RTS/CTS) flow control
ser.dsrdtr = None #hardware (DSR/DTR) flow control
ser.timeout = 0 #read timeout, non-blocking mode
ser.writeTimeout = 1000 #timeout for write - needs a nice long one
### Send File Function ###
def sendFile (ser,fname, renderwait, chunk, chunkwait):
if ser.isOpen():
ser.flushInput()
f = open(fname, "rb")
try:
byte = f.read(1)
bytecount = 1
while byte != "":
ser.write(byte)
bytecount += 1
if bytecount == chunk:
print "Sent " + str(chunk) + " bytes"
print "Pausing " + str(chunkwait) + " seconds"
time.sleep(chunkwait)
bytecount = 1
byte = f.read(1)
finally:
ser.write(byte)
f.close()
print "End of File"
print "Waiting " + str(renderwait) + " seconds for render"
time.sleep(renderwait)
return
### Listen Function (Listen for commands)###
def listen (ser,cmds):
print "listening ... "
keepgoing = 1
while keepgoing:
for cmd in ser.read():
print "got " + str(cmd)
if (cmd in cmds) or (cmd == "*") or (cmd == "q"):
print "Recieved: " + str(cmd)
keepgoing = 0
return cmd
### Listen for a while (like above, but with timeout) ###
def listen_awhile(ser,cmds,goto,q1):
print "listening for a while ..."
while q1.qsize() < 1:
for cmd in ser.read():
print "got " + str(cmd)
if (cmd in cmds) or (cmd == "*") or (cmd == "q"):
print "Recieved: " + str(cmd)
q1.put("hello") #ends while loop
print "returning input " + str(cmd)
return cmd
print "returning default " + str(goto)
return goto
### Timeout function ###
def timeout(q1,t1):
time.sleep(t1)
q1.put('timedout')
print "Timed out"
### Open Serial Connection ###
print "Initiating ..."
print "Opening serial connection ..."
try:
ser.open()
except Exception, e:
print "error open serial port: " + str(e)
exit()
print "Connection opened"
print "Waiting for client connection"
### Parse commands & dictionary and figure out what to do next ###
next = "start"  # placeholder key; the "*" command below jumps to the first frame
while True:
cmds = list(j[str(next)]['cmd'].keys())
print " "
print "Commands: " + str(cmds)
auto = j[str(next)]['auto']
inputwait = j[str(next)]['inputwait']
goto = j[str(next)]['goto']
#if we want to automatically advance to the next frame
if auto == "M": #auto = maybe [if no input received in time]
#so we need a timer, ie the timeout function above
q1 = Queue(maxsize = 0)
worker1 = Thread(target=timeout, args=(q1,inputwait))
worker1.setDaemon(True)
worker1.start()
#returns either user input cmd or goto [default] if it times out
cmd = listen_awhile(ser,cmds,goto,q1)
elif auto == "Y": #just go automatically, don't wait for input
cmd = goto
#otherwise listen for user input to determine where to go next
else:
cmd = listen(ser,cmds)
# "*" starts the presentation
if cmd == "*":
next = 'idemo010' #first frame
elif cmd == "q":
print "Shutting down now ..."
exit()
else:
next = j[str(next)]['cmd'][cmd]
# sendFile needs to know some things
renderwait = j[str(next)]['renderwait']
chunk = j[str(next)]['chunksize']
chunkwait = j[str(next)]['chunkwait']
fname = j[str(next)]['fn']
print "Initiating send with " + cmd
print "Sending " + str(fname)
sendFile(ser, fname, renderwait, chunk, chunkwait)
|
nilq/baby-python
|
python
|
from flask import Flask, abort, jsonify, request
from flask_restful import Resource, reqparse
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
## DUMMY DATA ##
currencies = [
{
"name": "btc",
"balance": "0.65448",
"address": "1BvBMSEYstWetqTFn5Au4m4GFg7xJaNVN2",
"seeds": "slim sugar lizard predict state cute awkward asset inform blood civil sugar"
},
{
"name": "tft",
"balance": "742",
"address": "019314d28462f0c15bc900c47afeb456e1db234418eccf17c913260386d73a736f097130677a5b",
"seeds": "nut blossom milk special answer best school joke tree ice enhance trend spoil fabric cereal display nurse axis champion hill enough cigar evidence rigid"
},
{
"name": "eth",
"balance": "3.14785",
"address": "0xe99356bde974bbe08721d77712168fa070aa8da4",
"seeds": "gravity trophy shrimp suspect sheriff avocado label trust dove tragic pitch title network myself spell task protect smooth sword diary brain blossom under bulb"
},
{
"name": "xrp",
"balance": "2541",
"address": "rG1QQv2nh2gr7RCZ1P8YYcBUKCCN633jCn",
"seeds": "avoid theory group elder tennis dentist wheat machine motion settle valid alter balcony deliver clog pipe antenna clutch route debate screen tomato obey keen"
}
]
wallets = [
]
orders = [
{
"id":"0",
"currencyBuy":"tft",
"currencySell":"btc",
"amountBuy":"100",
"amountSell":"0.1"
},
{
"id":"1",
"currencyBuy":"eth",
"currencySell":"btc",
"amountBuy":"12",
"amountSell":"4"
},
{
"id":"2",
"currencyBuy":"btc",
"currencySell":"tft",
"amountBuy":"1",
"amountSell":"10000"
},
{
"id":"3",
"currencyBuy":"xrp",
"currencySell":"eth",
"amountBuy":"1000",
"amountSell":"5"
}
]
## CURRENCY ##
@app.route('/<string:cur>/balance', methods=['GET'])
def get_currency_balance(cur):
# return a JSON with specific currency balance
## method for testing with dummy data ##
for c in currencies:
if(cur == c["name"]):
return jsonify({'balance' : c['balance']})
@app.route('/<string:cur>/address', methods=['GET'])
def get_currency_address(cur):
# return a JSON with specific currency address
## method for testing with dummy data ##
for c in currencies:
if(cur == c["name"]):
return jsonify({'address' : c['address']})
## WALLET ##
@app.route('/<string:cur>/wallet', methods=['GET'])
def get_currency_wallet(cur):
# return a JSON with
# a url to download an encrypted file
# with the specified currency wallet seeds
# and a randomly generated decryption password
## method for testing with dummy data ##
for c in currencies:
if(cur == c["name"]):
return jsonify({'key':'randomlyGeneratedSuperStrongPassword', 'url':'/download/14548745642sdwe'})
return abort(404)
@app.route('/<string:cur>/wallet', methods=['POST'])
def create_currency_wallet(cur):
# decrypts seed file in JSON using key
# recovers specified currency wallet from seeds
## method for testing with dummy data ##
if not request.json or not 'key' in request.json or not 'file' in request.json:
abort(400)
for c in currencies:
if(cur == c["name"]):
wallet = {
'key': request.json['key'],
'file': request.json['file'],
}
wallets.append(wallet)
return jsonify({"wallet": wallet}), 201
return abort(404)
## ORDERS ##
@app.route('/orders', methods=['GET'])
def get_orders():
# return JSON with array of orders
return jsonify({'orders': orders})
@app.route('/orders', methods=['POST'])
def create_order():
# creates order in orderbook with
# currencyBuy (the currency that the user creating the order wants)
# currencySell (the currency that the user creating the order has)
# amountBuy (the amount of currencyBuy the user wants)
# amountSell (the amount of currencySell the user is offering)
## method for testing with dummy data ##
if not request.json or not 'currencyBuy' in request.json:
abort(400)
order = {
'currencyBuy': request.json['currencyBuy'],
'currencySell': request.json['currencySell'],
'amountBuy': request.json['amountBuy'],
'amountSell': request.json['amountSell']
}
orders.append(order)
return jsonify({'order': order}), 201
@app.route('/orders/<string:order_id>', methods=['GET'])
def get_order(order_id):
# returns a JSON of order matching order ID
## method for testing with dummy data ##
for o in orders:
if(order_id == o["id"]):
return jsonify({'order':o})
return abort(404)
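# Example client calls (added sketch): the host/port assume Flask's defaults, and
# the `requests` package is not a dependency of this module, only an illustration.
#   import requests
#   requests.get('http://127.0.0.1:5000/btc/balance').json()
#   requests.post('http://127.0.0.1:5000/orders', json={
#       'currencyBuy': 'tft', 'currencySell': 'btc',
#       'amountBuy': '100', 'amountSell': '0.1'}).json()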
app.run(debug=True)
|
nilq/baby-python
|
python
|
import os
import copy
import random
import functools
from datetime import datetime
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
import torch.utils.tensorboard as tensorboard
from core.model import NAC
from core.metric import AverageMetric, MovingAverageMetric
from core.genotypes import Genotype
from core.dataset.database import DataBase
from core.dataset.architecture.common_nas import PRIMITIVES
from core.dataset.utils import ControllerDataset
from core.dataset.seq2arch import seq2arch_fn
from core.dataset.tensorize import tensorize_fn, nasbench_tensor2arch
from core.controller import NASBenchController
from core.config import args
from core.utils import *
def single_batchify(*items):
return [item.unsqueeze(0) for item in items]
best_iters = -1
def train_controller(max_iter: int, database: DataBase,
entropy_coeff: float, grad_clip: int,
controller: NASBenchController, nac: NAC,
optimizer: optim.Optimizer, writer: tensorboard.SummaryWriter,
alternate_train, alternate_evaluate, random_baseline=False,
log_frequence: int = 10, search_space=None):
controller.train()
nac.eval()
optimizer.zero_grad()
policy_loss_avg = MovingAverageMetric()
entropy_mavg = MovingAverageMetric()
logp_mavg = MovingAverageMetric()
score_avg = MovingAverageMetric()
pseudo_architecture_set = None
with torch.no_grad():
*arch_seq, _, _ = controller(force_uniform=True)
raw_arch = seq2arch_fn(arch_seq)
baseline_arch = [tensorize_fn(raw_arch, device=device)]
best_collect_archs = [arch_seq]
for iter_ in range(max_iter):
if iter_ % args.n_iteration_update_pseudoset == 0 and args.pseudo_ratio != 0:
if pseudo_architecture_set is None:
pseudo_architecture_set = \
generate_architecture_with_pseudo_labels(
nac, controller,
2*int(args.pseudo_ratio*args.train_batch_size),
int(args.pseudo_ratio*args.train_batch_size))
else:
pseudo_architecture_set = list_concat(
pseudo_architecture_set,
generate_architecture_with_pseudo_labels(
nac, controller,
2*args.n_sample_architectures, args.n_sample_architectures)
)
epoch = args.nac_epochs + iter_
accuracy, rank_loss = alternate_train(epoch=epoch, pseudo_set=pseudo_architecture_set)
writer.add_scalar("nac/train_accuracy", accuracy, epoch)
writer.add_scalar("nac/loss", rank_loss, epoch)
KTau = alternate_evaluate(epoch=epoch)
writer.add_scalar("nac/ktau", KTau, epoch)
*arch_seq, logp, entropy = controller()
with torch.no_grad():
sample_arch = [tensorize_fn(seq2arch_fn(arch_seq), device=device)]
score = nac(batchify(sample_arch), batchify(baseline_arch))
score = score.mean().item()
policy_loss = -logp * score - entropy_coeff * entropy
        optimizer.zero_grad()
        policy_loss.backward()
        if grad_clip is not None:
            nn.utils.clip_grad_norm_(controller.parameters(), grad_clip)
        optimizer.step()
policy_loss_avg.update(policy_loss)
entropy_mavg.update(entropy)
logp_mavg.update(logp)
score_avg.update(score)
if iter_ % log_frequence == 0:
logger.info(
", ".join([
"Policy Learning",
f"iter={iter_:03d}",
f"policy loss={policy_loss_avg.compute():.4f}",
f"entropy={entropy_mavg.compute():.4f}",
f"logp={logp_mavg.compute():.4f}",
])
)
writer.add_scalar("policy_learning/loss", policy_loss_avg.compute(), iter_)
writer.add_scalar("policy_learning/entropy", entropy_mavg.compute(), iter_)
writer.add_scalar("policy_learning/logp", logp_mavg.compute(), iter_)
writer.add_scalar("policy_learning/reward", score_avg.compute(), iter_)
if iter_ % args.evaluate_controller_freq == 0:
baseline_arch, best_collect_archs = derive(iter_, controller, nac, 10,
database, writer, best_collect_archs,
random_baseline, search_space)
        torch.save(controller.state_dict(), os.path.join(args.output, f"controller-{iter_}.pth"))
def generate_architecture_with_pseudo_labels(nac, controller, total, k):
with torch.no_grad():
arch_seqs = [controller()[:-2] for _ in range(total)]
sample_archs = [seq2arch_fn(seq) for seq in arch_seqs]
arch0 = [tensorize_fn(arch, device=device) for arch in sample_archs]
arch0 = batchify(arch0)
if not isinstance(arch0, (list, tuple)):
arch0 = [arch0]
arch1 = shuffle(*arch0)
# import ipdb; ipdb.set_trace()
p = nac(arch0, arch1)
select_p, index = torch.topk(p, k=k)
arch0 = list_select(arch0, index)
arch1 = list_select(arch1, index)
labels = (select_p > 0.5).float()
return arch0, arch1, labels
def derive(iter_, controller: NASBenchController, nac: NAC, n_derive: int,
database, writer, best_collect_archs, random_baseline=False, search_space=None):
controller.eval()
with torch.no_grad():
arch_seqs = [controller()[:-2] for _ in range(n_derive)]
sample_archs = [seq2arch_fn(seq) for seq in arch_seqs]
arch_tensor = [tensorize_fn(arch, device=device) for arch in sample_archs]
if random_baseline:
location = random.choice(list(range(len(arch_tensor))))
else:
outputs = cartesian_traverse(arch_tensor, arch_tensor, nac)
outputs.fill_diagonal_(0)
max_p, location = outputs.sum(dim=1).max(dim=0, keepdim=True)
max_p = max_p.view([]).item() / n_derive
location = location.view([]).item()
if database is not None:
arch = database.fetch_by_spec(sample_archs[location])
writer.add_scalar("policy_learning/besttop", arch.rank/database.size*100, iter_)
history_arch_seqs = arch_seqs + best_collect_archs
history_arch_tensor = arch_tensor + [tensorize_fn(seq2arch_fn(arch), device=device) for arch in best_collect_archs]
his_outputs = cartesian_traverse(history_arch_tensor, history_arch_tensor, nac)
his_outputs.fill_diagonal_(0)
his_max_p, his_location = his_outputs.sum(dim=1).max(dim=0, keepdim=True)
his_max_p = his_max_p.view([]).item() / (n_derive-1)
his_location = his_location.view([]).item()
global best_iters
if his_location < n_derive:
best_iters = iter_
if database is not None:
his_best_arch = database.fetch_by_spec(seq2arch_fn(history_arch_seqs[his_location]))
writer.add_scalar("policy_learning/history_top", his_best_arch.rank/database.size*100, iter_)
if search_space == "nasbench":
logger.info(
", ".join([
"DERIVE",
f"iters={iter_}",
f"derive {n_derive} archs, the best arch id = {location:02d}, p={max_p*100:.2f}%",
f"test acc={arch.test_accuracy*100:.2f}%",
f"rank={arch.rank}/{database.size}({arch.rank/database.size*100:.4f}%)",
f"history best sampled in {best_iters} iters",
f"test acc={his_best_arch.test_accuracy*100:.2f}%",
f"rank={his_best_arch.rank}/{database.size}({his_best_arch.rank/database.size*100:.4f}%)",
])
)
elif search_space == "darts":
best_geno = Genotype.from_ordinal_arch(ordinal_normal_arch=arch_seqs[location][0],
ordinal_reduced_arch=arch_seqs[location][1],
primitives=PRIMITIVES)
his_best_geno = Genotype.from_ordinal_arch(ordinal_normal_arch=history_arch_seqs[his_location][0],
ordinal_reduced_arch=history_arch_seqs[his_location][1],
primitives=PRIMITIVES)
logger.info(
", ".join([
f"DERIVE",
f"iters={iter_}",
f"derive {n_derive} archs, the best arch id = {location:02d}, p={max_p*100:.2f}%",
f"genotype={best_geno}",
f"history best sampled in {best_iters} iters",
f"genotype={his_best_geno}",
])
)
elif search_space == "mobilespace":
best_arch = arch_seqs[location]
his_best_arch = history_arch_seqs[his_location]
logger.info(
", ".join([
f"DERIVE",
f"iters={iter_}",
f"derive {n_derive} archs, the best arch id = {location:02d}, p={max_p*100:.2f}%",
f"best_arch={best_arch}",
f"history best sampled in {best_iters} iters",
f"his_best_arch={his_best_arch}",
])
)
os.makedirs(os.path.join(args.output, "controllers"), exist_ok=True)
torch.save(controller.state_dict(), os.path.join(args.output, "controllers", f"controller-{iter_}.pth"))
return [arch_tensor[location]], [history_arch_seqs[his_location]]
|
nilq/baby-python
|
python
|
from crowd_sim.envs.utils.agent import Agent
from crowd_sim.envs.utils.state import JointState
class Human(Agent):
# see Agent class in agent.py for details!!!
def __init__(self, config, section):
super().__init__(config, section)
self.isObstacle = False # whether the human is a static obstacle (part of wall) or a moving agent
def act(self, ob):
"""
        The state for a human is its own full state plus all other agents' observable states
:param ob:
:return:
"""
state = JointState(self.get_full_state(), ob)
action = self.policy.predict(state)
return action
|
nilq/baby-python
|
python
|
# _*_ coding: utf-8 _*_
import multiprocessing
from gensim.test.utils import datapath
from gensim.models.word2vec import LineSentence
from gensim.models.word2vec import PathLineSentences
from gensim.models import word2vec
from gensim.models import Word2Vec
from gensim.models import KeyedVectors
from gensim.utils.util import SplitSentences
class Word2Vector(object):
def __init__(self, src_file, dst_file, size=300, window=5, min_count=10, sg=0, learning_rate=0.025):
self.src_file = src_file
self.model_file = dst_file
self.size = size
self.window = window
self.min_count = min_count
self.sg = sg # 1 : skip-gram, 0: CBOW
self.alpha = learning_rate
self.workers = multiprocessing.cpu_count()
def train(self, sentences):
model = Word2Vec(sentences, size=self.size, window=self.window, min_count=self.min_count,
sg=self.sg, workers=self.workers)
model.save(self.model_file)
model.save_word2vec_format(self.model_file + '.bin', binary=True)
def train_model(self):
sentences = LineSentence(self.src_file)
self.train(sentences)
def train_dir_model(self, custom=True):
if custom:
sentences = SplitSentences(self.src_file)
else:
sentences = PathLineSentences(self.src_file)
self.train(sentences)
def load_model(self, model_name):
self.model = word2vec.Word2Vec.load(model_name)
def load_google_model(self, model_name, binary=True):
self.model = KeyedVectors.load_word2vec_format(datapath(model_name), binary=binary)
def show_similarity(self, word1, word2):
return self.model.wv.similarity(word1, word2)
def show_similarity_by_word(self, word, topn=10):
return self.model.wv.similar_by_word(word, topn)
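# Usage sketch (added, not part of the original module): the corpus and model
# paths are placeholders, and train_model() expects one pre-tokenised sentence
# per line in the source file.
if __name__ == '__main__':
    w2v = Word2Vector('corpus.txt', 'w2v.model', size=100, min_count=5, sg=1)
    w2v.train_model()
    w2v.load_model('w2v.model')
    print(w2v.show_similarity_by_word('science', topn=5))  # 'science' must be in the vocabulary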
|
nilq/baby-python
|
python
|
"""
This package contains utilities that are only used when developing drms in a
copy of the source repository.
These files are not installed, and should not be assumed to exist at
runtime.
"""
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=invalid-name
# To disable 'invalid constant name' warnings.
# pylint: disable=import-error
# Testing environment may not have all dependencies.
"""
This script uses the Jenkins REST API to collect test results of the given build(s) and generates
flakiness data about unit tests.
Print help: report-flakies.py -h
"""
import argparse
import logging
import os
import time
from collections import OrderedDict
from jinja2 import Template
import requests
import findHangingTests
parser = argparse.ArgumentParser()
parser.add_argument(
'--urls', metavar='URL', action='append', required=True,
help='Urls to analyze, which can refer to simple projects, multi-configuration projects or '
'individual build run.')
parser.add_argument('--excluded-builds', metavar='n1,n2', action='append',
help='List of build numbers to exclude (or "None"). Not required, '
'but if specified, number of uses should be same as that of --urls '
'since the values are matched.')
parser.add_argument('--max-builds', metavar='n', action='append', type=int,
help='The maximum number of builds to use (if available on jenkins). Specify '
'0 to analyze all builds. Not required, but if specified, number of uses '
'should be same as that of --urls since the values are matched.')
parser.add_argument(
"--mvn", action="store_true",
help="Writes two strings for including/excluding these flaky tests using maven flags. These "
"strings are written to files so they can be saved as artifacts and easily imported in "
"other projects. Also writes timeout and failing tests in separate files for "
"reference.")
parser.add_argument("-v", "--verbose", help="Prints more logs.", action="store_true")
args = parser.parse_args()
logging.basicConfig()
logger = logging.getLogger(__name__)
if args.verbose:
logger.setLevel(logging.INFO)
def get_bad_tests(build_url):
"""
    Given url of an executed build, analyzes its console text, and returns
    [list of all tests, list of failed tests, list of timeout tests, list of hanging tests].
Returns None if can't get console text or if there is any other error.
"""
logger.info("Analyzing %s", build_url)
response = requests.get(build_url + "/api/json").json()
if response["building"]:
logger.info("Skipping this build since it is in progress.")
return {}
console_url = build_url + "/consoleText"
build_result = findHangingTests.get_bad_tests(console_url)
if not build_result:
logger.info("Ignoring build %s", build_url)
return
return build_result
def expand_multi_config_projects(cli_args):
"""
If any url is of type multi-configuration project (i.e. has key 'activeConfigurations'),
get urls for individual jobs.
"""
job_urls = cli_args.urls
excluded_builds_arg = cli_args.excluded_builds
max_builds_arg = cli_args.max_builds
if excluded_builds_arg is not None and len(excluded_builds_arg) != len(job_urls):
raise Exception("Number of --excluded-builds arguments should be same as that of --urls "
"since values are matched.")
if max_builds_arg is not None and len(max_builds_arg) != len(job_urls):
raise Exception("Number of --max-builds arguments should be same as that of --urls "
"since values are matched.")
final_expanded_urls = []
for (i, job_url) in enumerate(job_urls):
max_builds = 10000 # Some high number
if max_builds_arg is not None and max_builds_arg[i] != 0:
max_builds = int(max_builds_arg[i])
excluded_builds = []
if excluded_builds_arg is not None and excluded_builds_arg[i] != "None":
excluded_builds = [int(x) for x in excluded_builds_arg[i].split(",")]
response = requests.get(job_url + "/api/json").json()
if response.has_key("activeConfigurations"):
for config in response["activeConfigurations"]:
final_expanded_urls.append({'url':config["url"], 'max_builds': max_builds,
'excludes': excluded_builds})
else:
final_expanded_urls.append({'url':job_url, 'max_builds': max_builds,
'excludes': excluded_builds})
return final_expanded_urls
# Set of timeout/failed tests across all given urls.
all_timeout_tests = set()
all_failed_tests = set()
all_hanging_tests = set()
# Contains { <url> : { <bad_test> : { 'all': [<build ids>], 'failed': [<build ids>],
# 'timeout': [<build ids>], 'hanging': [<builds ids>] } } }
url_to_bad_test_results = OrderedDict()
# Iterates over each url, gets test results and prints flaky tests.
expanded_urls = expand_multi_config_projects(args)
for url_max_build in expanded_urls:
url = url_max_build["url"]
excludes = url_max_build["excludes"]
json_response = requests.get(url + "/api/json").json()
if json_response.has_key("builds"):
builds = json_response["builds"]
logger.info("Analyzing job: %s", url)
else:
builds = [{'number' : json_response["id"], 'url': url}]
logger.info("Analyzing build : %s", url)
build_id_to_results = {}
num_builds = 0
build_ids = []
build_ids_without_tests_run = []
for build in builds:
build_id = build["number"]
if build_id in excludes:
continue
result = get_bad_tests(build["url"])
if not result:
continue
if len(result[0]) > 0:
build_id_to_results[build_id] = result
else:
build_ids_without_tests_run.append(build_id)
num_builds += 1
build_ids.append(build_id)
if num_builds == url_max_build["max_builds"]:
break
# Collect list of bad tests.
bad_tests = set()
for build in build_id_to_results:
[_, failed_tests, timeout_tests, hanging_tests] = build_id_to_results[build]
all_timeout_tests.update(timeout_tests)
all_failed_tests.update(failed_tests)
all_hanging_tests.update(hanging_tests)
# Note that timedout tests are already included in failed tests.
bad_tests.update(failed_tests.union(hanging_tests))
# For each bad test, get build ids where it ran, timed out, failed or hanged.
test_to_build_ids = {key : {'all' : set(), 'timeout': set(), 'failed': set(),
'hanging' : set(), 'bad_count' : 0}
for key in bad_tests}
for build in build_id_to_results:
[all_tests, failed_tests, timeout_tests, hanging_tests] = build_id_to_results[build]
for bad_test in test_to_build_ids:
is_bad = False
if all_tests.issuperset([bad_test]):
test_to_build_ids[bad_test]["all"].add(build)
if timeout_tests.issuperset([bad_test]):
test_to_build_ids[bad_test]['timeout'].add(build)
is_bad = True
if failed_tests.issuperset([bad_test]):
test_to_build_ids[bad_test]['failed'].add(build)
is_bad = True
if hanging_tests.issuperset([bad_test]):
test_to_build_ids[bad_test]['hanging'].add(build)
is_bad = True
if is_bad:
test_to_build_ids[bad_test]['bad_count'] += 1
# Calculate flakyness % and successful builds for each test. Also sort build ids.
for bad_test in test_to_build_ids:
test_result = test_to_build_ids[bad_test]
test_result['flakyness'] = test_result['bad_count'] * 100.0 / len(test_result['all'])
test_result['success'] = (test_result['all'].difference(
test_result['failed'].union(test_result['hanging'])))
for key in ['all', 'timeout', 'failed', 'hanging', 'success']:
test_result[key] = sorted(test_result[key])
# Sort tests in descending order by flakyness.
sorted_test_to_build_ids = OrderedDict(
sorted(test_to_build_ids.iteritems(), key=lambda x: x[1]['flakyness'], reverse=True))
url_to_bad_test_results[url] = sorted_test_to_build_ids
if len(sorted_test_to_build_ids) > 0:
print "URL: {}".format(url)
print "{:>60} {:10} {:25} {}".format(
"Test Name", "Total Runs", "Bad Runs(failed/timeout/hanging)", "Flakyness")
for bad_test in sorted_test_to_build_ids:
test_status = sorted_test_to_build_ids[bad_test]
print "{:>60} {:10} {:7} ( {:4} / {:5} / {:5} ) {:2.0f}%".format(
bad_test, len(test_status['all']), test_status['bad_count'],
len(test_status['failed']), len(test_status['timeout']),
len(test_status['hanging']), test_status['flakyness'])
else:
        print "No flaky tests found."
if len(build_ids) == len(build_ids_without_tests_run):
print "None of the analyzed builds have test result."
print "Builds analyzed: {}".format(build_ids)
print "Builds without any test runs: {}".format(build_ids_without_tests_run)
print ""
all_bad_tests = all_hanging_tests.union(all_failed_tests)
if args.mvn:
includes = ",".join(all_bad_tests)
with open("./includes", "w") as inc_file:
inc_file.write(includes)
excludes = ["**/{0}.java".format(bad_test) for bad_test in all_bad_tests]
with open("./excludes", "w") as exc_file:
exc_file.write(",".join(excludes))
with open("./timeout", "w") as timeout_file:
timeout_file.write(",".join(all_timeout_tests))
with open("./failed", "w") as failed_file:
failed_file.write(",".join(all_failed_tests))
dev_support_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(dev_support_dir, "flaky-dashboard-template.html"), "r") as f:
template = Template(f.read())
with open("dashboard.html", "w") as f:
datetime = time.strftime("%m/%d/%Y %H:%M:%S")
f.write(template.render(datetime=datetime, bad_tests_count=len(all_bad_tests),
results=url_to_bad_test_results))
|
nilq/baby-python
|
python
|
from .tictactoe import *
from .connectfour import *
|
nilq/baby-python
|
python
|
from django.urls import path
from . import views
urlpatterns = [
path('users/', views.usersView.get, name='usersUrl'),
path('users/search', views.usersView.search, name='usersSearchUrl'),
path('users/<str:username>/', views.usersView.selected, name="selectedUserUrl"),
path('repos/', views.reposView.get, name='reposUrl'),
path('repos/search', views.reposView.search, name='reposSearchUrl'),
path('repos/<int:id>/', views.reposView.selected, name="selectedRepoUrl"),
path('repos/<int:id>/commits/<str:hash>',
views.commitView.get, name="selectedRepoCommitUrl"),
path('repos/<int:id>/issues/',
views.reposView.issues, name="repoIssuesUrl"),
path('repos/<int:id>/issues/<str:title>',
views.reposView.issueSelected, name="selectedRepoIssueUrl"),
path('repos/<int:id>/commits/<str:hash>/files/<str:filename>',
views.commitView.commitSelected, name="selectedRepoCommitFileUrl"),
path('orgs/', views.orgsView.get, name='orgsUrl'),
path('orgs/search', views.orgsView.search, name='orgsSearchUrl'),
path('orgs/<str:org_name>/', views.orgsView.selected, name="selectedOrgUrl"),
path('orgs/<str:org_name>/teams/', views.orgsView.teams, name='orgTeamsUrl'),
path('orgs/<str:org_name>/teams/<str:team_name>',
views.orgsView.teamSelected, name="selectedOrgTeamUrl"),
path('', views.indexView.index, name='indexUrl'),
path('contact/', views.contactView.contact, name='contactUrl'),
path('stats/', views.statsView.get, name='statsUrl')
]
|
nilq/baby-python
|
python
|
import argparse
from core import compat, app, utils, colors
from core.ctrl import api
compat.check_version()
app.mode = 'cli'
parser = argparse.ArgumentParser(
description="CTRL command line tool",
epilog="All accessible arguments listed above"
)
parser.add_argument('method', help="Specify API method")
parser.add_argument('-id', type=str)
parser.add_argument('-username', type=str)
parser.add_argument('-email', type=str)
parser.add_argument('-firstname', type=str)
parser.add_argument('-lastname', type=str)
parser.add_argument('-role', type=str)
parser.add_argument('-filter', type=str, nargs='+')
parser.add_argument('-ulc', type=str)
parser.add_argument('-pin', type=int)
parser.add_argument('-sort', type=str, nargs='+')
parser.add_argument('-domain', type=str)
parser.add_argument('-ssh_key', type=str)
parser.add_argument('-ssh_pwd', type=str)
parser.add_argument('-ipv4', type=str)
parser.add_argument('-ip', type=str)
parser.add_argument('-name', type=str)
parser.add_argument('-http_origin', type=str)
args, unknown = parser.parse_known_args()
data_pass = utils.validate_data_pass(dict(vars(args)))
method = data_pass.pop('method', None)
# for key, value in data_pass.items():
if method != None and method in dir(api) and method in app.config['api']['cli']:
app.config['user'] = api.get_system_user()
#data_pass['config'] = app.config
result = getattr(api, str(method))(data_pass)
if type(result) == dict:
status = True
message = 'completed'
if 'status' in result:
status = result['status']
result.pop('status', None)
if 'message' in result:
message = result['message']
result.pop('message', None)
print(utils.format_response(status, message))
for key, value in result.items():
print(f"{colors.blue(key)} : {str(value)}")
if type(result) == list or type(result) == tuple or type(result) == set:
for value in result:
print(str(value))
else:
print(utils.format_response(False, f"Method {method} is not allowed"))
|
nilq/baby-python
|
python
|
from k5test import *
plugin = os.path.join(buildtop, "plugins", "hostrealm", "test",
"hostrealm_test.so")
# Disable the "dns" module (we can't easily test TXT lookups) and
# arrange the remaining modules in an order which makes sense for most
# tests.
conf = {'plugins': {'hostrealm': {'module': ['test1:' + plugin,
'test2:' + plugin],
'enable_only': ['test2', 'profile',
'domain', 'test1']}},
'domain_realm': {'.x': 'DOTMATCH', 'x': 'MATCH', '.1': 'NUMMATCH'}}
realm = K5Realm(krb5_conf=conf, create_kdb=False)
def test(realm, args, expected_realms, msg, env=None):
out = realm.run(['./hrealm'] + args, env=env)
if out.split('\n') != expected_realms + ['']:
fail(msg)
def test_error(realm, args, expected_error, msg, env=None):
realm.run(['./hrealm'] + args, env=env, expected_code=1,
expected_msg=expected_error)
def testh(realm, host, expected_realms, msg, env=None):
test(realm, ['-h', host], expected_realms, msg, env=env)
def testf(realm, host, expected_realms, msg, env=None):
test(realm, ['-f', host], expected_realms, msg, env=env)
def testd(realm, expected_realm, msg, env=None):
test(realm, ['-d'], [expected_realm], msg, env=env)
def testh_error(realm, host, expected_error, msg, env=None):
test_error(realm, ['-h', host], expected_error, msg, env=env)
def testf_error(realm, host, expected_error, msg, env=None):
test_error(realm, ['-f', host], expected_error, msg, env=env)
def testd_error(realm, expected_error, msg, env=None):
test_error(realm, ['-d'], expected_error, msg, env=env)
###
### krb5_get_host_realm tests
###
# The test2 module returns a fatal error on hosts beginning with 'z',
# and an answer on hosts beginning with 'a'.
mark('test2 module')
testh_error(realm, 'zoo', 'service not available', 'host_realm test2 z')
testh(realm, 'abacus', ['a'], 'host_realm test2 a')
# The profile module gives answers for hostnames equal to or ending in
# 'X', due to [domain_realms]. There is also an entry for hostnames
# ending in '1', but hostnames which appear to be IP or IPv6 addresses
# should instead fall through to test1.
mark('profile module')
testh(realm, 'x', ['MATCH'], 'host_realm profile x')
testh(realm, '.x', ['DOTMATCH'], 'host_realm profile .x')
testh(realm, 'b.x', ['DOTMATCH'], 'host_realm profile b.x')
testh(realm, '.b.c.x', ['DOTMATCH'], 'host_realm profile .b.c.x')
testh(realm, 'b.1', ['NUMMATCH'], 'host_realm profile b.1')
testh(realm, '4.3.2.1', ['4', '3', '2', '1'], 'host_realm profile 4.3.2.1')
testh(realm, 'b:c.x', ['b:c', 'x'], 'host_realm profile b:c.x')
# hostname cleaning should convert "X." to "x" before matching.
testh(realm, 'X.', ['MATCH'], 'host_realm profile X.')
# The test1 module returns a list of the hostname components.
mark('test1 module')
testh(realm, 'b.c.d', ['b', 'c', 'd'], 'host_realm test1')
# If no module returns a result, we should get the referral realm.
mark('no result')
testh(realm, '', [''], 'host_realm referral realm')
###
### krb5_get_fallback_host_realm tests
###
# Return a special environment with realm_try_domains set to n.
def try_env(realm, testname, n):
conf = {'libdefaults': {'realm_try_domains': str(n)}}
return realm.special_env(testname, False, krb5_conf=conf)
# The domain module will answer with the uppercased parent domain,
# with no special configuration.
mark('fallback: domain module')
testf(realm, 'a.b.c', ['B.C'], 'fallback_realm domain a.b.c')
# With realm_try_domains = 0, the hostname itself will be looked up as
# a realm and returned if found.
mark('fallback: realm_try_domains = 0')
try0 = try_env(realm, 'try0', 0)
testf(realm, 'krbtest.com', ['KRBTEST.COM'], 'fallback_realm try0', env=try0)
testf(realm, 'a.b.krbtest.com', ['B.KRBTEST.COM'],
'fallback_realm try0 grandparent', env=try0)
testf(realm, 'a.b.c', ['B.C'], 'fallback_realm try0 nomatch', env=try0)
# With realm_try_domains = 2, the parent and grandparent will be
# checked as well, but it stops there.
mark('fallback: realm_try_domains = 2')
try2 = try_env(realm, 'try2', 2)
testf(realm, 'krbtest.com', ['KRBTEST.COM'], 'fallback_realm try2', env=try2)
testf(realm, 'a.b.krbtest.com', ['KRBTEST.COM'],
'fallback_realm try2 grandparent', env=try2)
testf(realm, 'a.b.c.krbtest.com', ['B.C.KRBTEST.COM'],
'fallback_realm try2 great-grandparent', env=try2)
# The test1 module answers with a list of components. Use an IPv4
# address to bypass the domain module.
mark('fallback: test1 module')
testf(realm, '1.2.3.4', ['1', '2', '3', '4'], 'fallback_realm test1')
# If no module answers, the default realm is returned. The test2
# module returns an error when we try to look that up.
mark('fallback: default realm')
testf_error(realm, '', 'service not available', 'fallback_realm default')
###
### krb5_get_default_realm tests
###
# The test2 module returns an error.
mark('default_realm: test2 module')
testd_error(realm, 'service not available', 'default_realm test2')
# The profile module returns the default realm from the profile.
# Disable test2 to expose this behavior.
mark('default_realm: profile module')
disable_conf = {'plugins': {'hostrealm': {'disable': 'test2'}}}
notest2 = realm.special_env('notest2', False, krb5_conf=disable_conf)
testd(realm, 'KRBTEST.COM', 'default_realm profile', env=notest2)
# The test1 module returns a list of two realms, of which we can only
# see the first. Remove the profile default_realm setting to expose
# this behavior.
mark('default_realm: test1 module')
remove_default = {'libdefaults': {'default_realm': None}}
# Python 3.5+: nodefault_conf = {**disable_conf, **remove_default}
nodefault_conf = dict(list(disable_conf.items()) +
list(remove_default.items()))
nodefault = realm.special_env('nodefault', False, krb5_conf=nodefault_conf)
testd(realm, 'one', 'default_realm test1', env=nodefault)
success('hostrealm interface tests')
|
nilq/baby-python
|
python
|
"""
@brief: fill queue with new tasks read from tasks file.
"""
import pika
import sys
from .ip_provider import get_valid_ip
def create_new_tasks(fn,broker):
tasks=[]
with open(fn,"r") as f:
for line in f:
tasks.append(line)
connection = pika.BlockingConnection(pika.ConnectionParameters(host=get_valid_ip(broker)))
channel = connection.channel()
channel.queue_declare(queue='task_queue', durable=True)
for task in tasks:
task = task.strip("\n")
channel.basic_publish(
exchange='',
routing_key='task_queue',
body=task,
properties=pika.BasicProperties(
                delivery_mode=2, # make message persistent
))
print(" [x] Sent %r" % task)
connection.close()
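# Hedged usage sketch (added for illustration; not part of the original file): assuming a
# tasks file with one task per line and a resolvable RabbitMQ broker name, the queue could
# be filled like this. Both arguments below are hypothetical.
if __name__ == "__main__":
    create_new_tasks("tasks.txt", "rabbitmq-broker")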
|
nilq/baby-python
|
python
|
'''
@file: MPNCOV.py
@author: Jiangtao Xie
@author: Peihua Li
Copyright (C) 2018 Peihua Li and Jiangtao Xie
All rights reserved.
'''
import torch
import numpy as np
from torch.autograd import Function
class Covpool(Function):
@staticmethod
def forward(ctx, input):
x = input
batchSize = x.data.shape[0]
dim = x.data.shape[1]
h = x.data.shape[2]
w = x.data.shape[3]
M = h*w
x = x.reshape(batchSize,dim,M)
I_hat = (-1./M/M)*torch.ones(M,M,device = x.device) + (1./M)*torch.eye(M,M,device = x.device)
I_hat = I_hat.view(1,M,M).repeat(batchSize,1,1).type(x.dtype)
y = x.bmm(I_hat).bmm(x.transpose(1,2))
ctx.save_for_backward(input,I_hat)
return y
@staticmethod
def backward(ctx, grad_output):
input,I_hat = ctx.saved_tensors
x = input
batchSize = x.data.shape[0]
dim = x.data.shape[1]
h = x.data.shape[2]
w = x.data.shape[3]
M = h*w
x = x.reshape(batchSize,dim,M)
grad_input = grad_output + grad_output.transpose(1,2)
grad_input = grad_input.bmm(x).bmm(I_hat)
grad_input = grad_input.reshape(batchSize,dim,h,w)
return grad_input
class Sqrtm(Function):
@staticmethod
def forward(ctx, input, iterN):
x = input
batchSize = x.data.shape[0]
dim = x.data.shape[1]
dtype = x.dtype
I3 = 3.0*torch.eye(dim,dim,device = x.device).view(1, dim, dim).repeat(batchSize,1,1).type(dtype)
normA = (1.0/3.0)*x.mul(I3).sum(dim=1).sum(dim=1)
A = x.div(normA.view(batchSize,1,1).expand_as(x))
Y = torch.zeros(batchSize, iterN, dim, dim, requires_grad = False, device = x.device)
Z = torch.eye(dim,dim,device = x.device).view(1,dim,dim).repeat(batchSize,iterN,1,1)
if iterN < 2:
ZY = 0.5*(I3 - A)
Y[:,0,:,:] = A.bmm(ZY)
else:
ZY = 0.5*(I3 - A)
Y[:,0,:,:] = A.bmm(ZY)
Z[:,0,:,:] = ZY
for i in range(1, iterN-1):
ZY = 0.5*(I3 - Z[:,i-1,:,:].bmm(Y[:,i-1,:,:]))
Y[:,i,:,:] = Y[:,i-1,:,:].bmm(ZY)
Z[:,i,:,:] = ZY.bmm(Z[:,i-1,:,:])
ZY = 0.5*Y[:,iterN-2,:,:].bmm(I3 - Z[:,iterN-2,:,:].bmm(Y[:,iterN-2,:,:]))
y = ZY*torch.sqrt(normA).view(batchSize, 1, 1).expand_as(x)
ctx.save_for_backward(input, A, ZY, normA, Y, Z)
ctx.iterN = iterN
return y
@staticmethod
def backward(ctx, grad_output):
input, A, ZY, normA, Y, Z = ctx.saved_tensors
iterN = ctx.iterN
x = input
batchSize = x.data.shape[0]
dim = x.data.shape[1]
dtype = x.dtype
der_postCom = grad_output*torch.sqrt(normA).view(batchSize, 1, 1).expand_as(x)
der_postComAux = (grad_output*ZY).sum(dim=1).sum(dim=1).div(2*torch.sqrt(normA))
I3 = 3.0*torch.eye(dim,dim,device = x.device).view(1, dim, dim).repeat(batchSize,1,1).type(dtype)
if iterN < 2:
            # the original line referenced an undefined name 'der_sacleTrace'; by symmetry
            # with the iterN >= 2 branch below, the second operand should be der_postCom
            der_NSiter = 0.5*(der_postCom.bmm(I3 - A) - A.bmm(der_postCom))
else:
dldY = 0.5*(der_postCom.bmm(I3 - Y[:,iterN-2,:,:].bmm(Z[:,iterN-2,:,:])) -
Z[:,iterN-2,:,:].bmm(Y[:,iterN-2,:,:]).bmm(der_postCom))
dldZ = -0.5*Y[:,iterN-2,:,:].bmm(der_postCom).bmm(Y[:,iterN-2,:,:])
for i in range(iterN-3, -1, -1):
YZ = I3 - Y[:,i,:,:].bmm(Z[:,i,:,:])
ZY = Z[:,i,:,:].bmm(Y[:,i,:,:])
dldY_ = 0.5*(dldY.bmm(YZ) -
Z[:,i,:,:].bmm(dldZ).bmm(Z[:,i,:,:]) -
ZY.bmm(dldY))
dldZ_ = 0.5*(YZ.bmm(dldZ) -
Y[:,i,:,:].bmm(dldY).bmm(Y[:,i,:,:]) -
dldZ.bmm(ZY))
dldY = dldY_
dldZ = dldZ_
der_NSiter = 0.5*(dldY.bmm(I3 - A) - dldZ - A.bmm(dldY))
grad_input = der_NSiter.div(normA.view(batchSize,1,1).expand_as(x))
grad_aux = der_NSiter.mul(x).sum(dim=1).sum(dim=1)
for i in range(batchSize):
grad_input[i,:,:] += (der_postComAux[i] \
- grad_aux[i] / (normA[i] * normA[i])) \
*torch.ones(dim,device = x.device).diag()
return grad_input, None
class Triuvec(Function):
@staticmethod
def forward(ctx, input):
x = input
batchSize = x.data.shape[0]
dim = x.data.shape[1]
dtype = x.dtype
x = x.reshape(batchSize, dim*dim)
I = torch.ones(dim,dim).triu().t().reshape(dim*dim)
index = I.nonzero()
        y = torch.zeros(batchSize,dim*(dim+1)//2,device = x.device)  # integer division keeps the size argument an int
for i in range(batchSize):
y[i, :] = x[i, index].t()
ctx.save_for_backward(input,index)
return y
@staticmethod
def backward(ctx, grad_output):
input,index = ctx.saved_tensors
x = input
batchSize = x.data.shape[0]
dim = x.data.shape[1]
dtype = x.dtype
grad_input = torch.zeros(batchSize,dim,dim,device = x.device,requires_grad=False)
grad_input = grad_input.reshape(batchSize,dim*dim)
for i in range(batchSize):
grad_input[i,index] = grad_output[i,:].reshape(index.size(),1)
grad_input = grad_input.reshape(batchSize,dim,dim)
return grad_input
def CovpoolLayer(var):
return Covpool.apply(var)
def SqrtmLayer(var, iterN):
return Sqrtm.apply(var, iterN)
def TriuvecLayer(var):
return Triuvec.apply(var)
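# Hedged usage sketch (added for illustration; not part of the original file): CovpoolLayer
# and SqrtmLayer are typically chained on a CNN feature map of shape (N, C, H, W) to obtain
# a matrix-square-root normalized covariance representation. The tensor below is a dummy
# stand-in for real backbone features.
if __name__ == '__main__':
    feats = torch.randn(2, 64, 7, 7, requires_grad=True)   # dummy feature map (N, C, H, W)
    cov = CovpoolLayer(feats)                               # (2, 64, 64) covariance pooling
    cov_sqrt = SqrtmLayer(cov, 5)                           # matrix square root, 5 Newton-Schulz iterations
    cov_sqrt.sum().backward()                               # exercises the custom backward passes
    print(cov_sqrt.shape, feats.grad.shape)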
|
nilq/baby-python
|
python
|
# Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Object client"""
import logging
from openstackclient.common import utils
LOG = logging.getLogger(__name__)
API_NAME = 'object-store'
API_VERSIONS = {
'1': 'openstackclient.object.client.ObjectClientv1',
}
def make_client(instance):
"""Returns an object service client."""
object_client = utils.get_client_class(
API_NAME,
instance._api_version[API_NAME],
API_VERSIONS)
if instance._url:
endpoint = instance._url
else:
endpoint = instance.get_endpoint_for_service_type(API_NAME)
LOG.debug('instantiating object client')
client = object_client(
endpoint=endpoint,
token=instance._token,
)
return client
class ObjectClientv1(object):
def __init__(
self,
endpoint_type='publicURL',
endpoint=None,
token=None,
):
self.endpoint_type = endpoint_type
self.endpoint = endpoint
self.token = token
|
nilq/baby-python
|
python
|
import pytest
from constants.pipelines import OperationStatuses, PipelineStatuses
from factories.factory_pipelines import OperationRunFactory
from pipelines.celery_task import ClassBasedTask, OperationTask
from polyaxon.celery_api import app as celery_app
from tests.utils import BaseTest
@pytest.mark.pipelines_mark
class TestOperationTask(BaseTest):
def setUp(self):
self.operation_run = OperationRunFactory()
self.pipeline_run = self.operation_run.pipeline_run
# Manually set status to scheduled
self.operation_run.on_scheduled()
return super().setUp()
def test_task_without_operation_run_raises(self):
@celery_app.task(base=OperationTask, shared=False)
def dummy_task():
return
with self.assertRaises(TypeError):
dummy_task.apply_async()
def test_task_with_operation_run_succeeds(self):
@celery_app.task(base=OperationTask, shared=False)
def dummy_task(operation_run_id):
return
kwargs = {'operation_run_id': self.operation_run.id}
dummy_task.apply_async(kwargs=kwargs)
self.operation_run.refresh_from_db()
assert self.operation_run.succeeded is True
assert set(self.operation_run.statuses.values_list('status', flat=True)) == {
OperationStatuses.CREATED,
OperationStatuses.SCHEDULED,
OperationStatuses.RUNNING,
OperationStatuses.SUCCEEDED,
}
self.pipeline_run.refresh_from_db()
assert self.operation_run.pipeline_run.last_status == PipelineStatuses.FINISHED
assert set(self.operation_run.pipeline_run.statuses.values_list('status', flat=True)) == {
PipelineStatuses.CREATED,
PipelineStatuses.SCHEDULED,
PipelineStatuses.RUNNING,
PipelineStatuses.FINISHED,
}
def test_task_with_error_fails(self):
@celery_app.task(base=OperationTask, shared=False)
def raising_task(operation_run_id):
raise KeyError
kwargs = {'operation_run_id': self.operation_run.id}
raising_task.apply_async(kwargs=kwargs)
self.operation_run.refresh_from_db()
assert self.operation_run.failed is True
assert set(self.operation_run.statuses.values_list('status', flat=True)) == {
OperationStatuses.CREATED,
OperationStatuses.SCHEDULED,
OperationStatuses.RUNNING,
OperationStatuses.FAILED,
}
self.pipeline_run.refresh_from_db()
assert self.operation_run.pipeline_run.last_status == PipelineStatuses.FINISHED
assert set(self.operation_run.pipeline_run.statuses.values_list('status', flat=True)) == {
PipelineStatuses.CREATED,
PipelineStatuses.SCHEDULED,
PipelineStatuses.RUNNING,
PipelineStatuses.FINISHED,
}
def test_task_retries_for_specified_exception(self):
class RetryTask(ClassBasedTask):
retry_for = (KeyError, )
@staticmethod
def _run(task_bind, *args, **kwargs):
raise KeyError
@celery_app.task(base=OperationTask, bind=True, shared=False)
def retry_task(task_bind, operation_run_id):
assert task_bind.max_retries == 2
assert task_bind.countdown == 0
RetryTask.run(task_bind=task_bind, operation_run_id=operation_run_id)
# Add retries and count to the operation
self.operation_run.operation.max_retries = 2
self.operation_run.operation.retry_delay = 0
self.operation_run.operation.save()
kwargs = {'operation_run_id': self.operation_run.id}
retry_task.apply_async(kwargs=kwargs)
self.operation_run.refresh_from_db()
assert self.operation_run.last_status == OperationStatuses.RETRYING
assert set(self.operation_run.statuses.values_list('status', flat=True)) == {
OperationStatuses.CREATED,
OperationStatuses.SCHEDULED,
OperationStatuses.RUNNING,
OperationStatuses.RETRYING,
}
self.pipeline_run.refresh_from_db()
assert self.operation_run.pipeline_run.last_status == PipelineStatuses.RUNNING
assert set(self.operation_run.pipeline_run.statuses.values_list('status', flat=True)) == {
PipelineStatuses.CREATED,
PipelineStatuses.SCHEDULED,
PipelineStatuses.RUNNING,
}
|
nilq/baby-python
|
python
|
import os
import pygame
from game_defines import DIRECTIONS
ASSET_BASE = os.path.join(os.path.dirname(__file__), "assets")
class Actor(object):
@staticmethod
def asset(name):
return os.path.join(ASSET_BASE, name)
def __init__(self, name, image_path, actor_type, startx, starty):
self.image = pygame.image.load(Actor.asset(image_path))
self.name = name
self.x = startx
self.y = starty
self.actor_type = actor_type
self.map_object = None
def process(self, sensor_input):
        raise AssertionError("Process needs to be overridden")
def get_image(self):
return self.image
def get_type(self):
return self.actor_type
def get_x(self):
return self.x
def get_y(self):
return self.y
def set_map(self, map_object):
self.map_object = map_object
def move(self, move_to):
x_offset = 0
y_offset = 0
if move_to == DIRECTIONS.UP:
x_offset = 0
y_offset = -1
elif move_to == DIRECTIONS.UPRIGHT:
x_offset = 1
y_offset = -1
elif move_to == DIRECTIONS.UPLEFT:
x_offset = -1
y_offset = -1
elif move_to == DIRECTIONS.RIGHT:
x_offset = 1
y_offset = 0
elif move_to == DIRECTIONS.DOWN:
x_offset = 0
y_offset = 1
elif move_to == DIRECTIONS.DOWNRIGHT:
x_offset = 1
y_offset = 1
elif move_to == DIRECTIONS.DOWNLEFT:
x_offset = -1
y_offset = 1
elif move_to == DIRECTIONS.LEFT:
x_offset = -1
y_offset = 0
if self.map_object.is_blocked(self.x + x_offset, self.y + y_offset):
return False
self.x += x_offset
self.y += y_offset
return True
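# Hedged usage sketch (added for illustration; not part of the original file): process()
# must be overridden by concrete actors. A minimal subclass that ignores its sensor input
# and always tries to walk right could look like this.
class WalkRightActor(Actor):
    def process(self, sensor_input):
        # always head right; the returned direction is presumably fed to move() by the game loop
        return DIRECTIONS.RIGHT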
|
nilq/baby-python
|
python
|
"""
File: 240.py
Title: Search a 2D Matrix II
Difficulty: Medium
URL: https://leetcode.com/problems/search-a-2d-matrix-ii/
"""
import unittest
from typing import List
class Solution:
def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
m = len(matrix)
n = len(matrix[0])
i = 0
j = m - 1
while (i < n) and (j >= 0):
if matrix[j][i] == target:
return True
if matrix[j][i] > target:
j -= 1
else:
i += 1
return False
class SolutionTestCase(unittest.TestCase):
def test_example1(self):
# Input
matrix = [[1, 4, 7, 11, 15],
[2, 5, 8, 12, 19],
[3, 6, 9, 16, 22],
[10, 13, 14, 17, 24],
[18, 21, 23, 26, 30]]
target = 5
# Output
output = True
solution = Solution()
self.assertEqual(solution.searchMatrix(matrix, target), output)
def test_example2(self):
# Input
matrix = [[1, 4, 7, 11, 15],
[2, 5, 8, 12, 19],
[3, 6, 9, 16, 22],
[10, 13, 14, 17, 24],
[18, 21, 23, 26, 30]]
target = 20
# Output
output = False
solution = Solution()
self.assertEqual(solution.searchMatrix(matrix, target), output)
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
safedun-server
Created on Sun Oct 13 00:00:00 2019
Author: Adil Rahman
GitHub: https://github.com/adildsw/safedun-server
"""
import argparse
import socket
from backend import safedun
from flask import Flask, render_template, request, send_file
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/execute', methods=['POST'])
def execute():
mode = request.form['mode']
key = request.form['key']
cycle = int(request.form['cycle'])
file = request.files['file']
scrambler = safedun()
output_file = scrambler.generate(mode, cycle, key, file)
return send_file(output_file, as_attachment=True, attachment_filename="output.png")
if __name__ == "__main__":
host_ip = socket.gethostbyname(socket.gethostname())
parser = argparse.ArgumentParser(description="safedun Server Option Description")
parser.add_argument("-H", "--host", help="specify IP address to host server", required=False, default=host_ip)
parser.add_argument("-p", "--port", help="specify Port number to host server", required=False, default="5000")
parser.add_argument("-d", "--debug", help="specify whether the server will run on debug mode", required=False, default=False)
parser.add_argument("-l", "--local", help="host server in localhost", required=False, default=False)
argument = parser.parse_args()
if not argument.local == False:
argument.host = '127.0.0.1'
app.run(host=argument.host, port=argument.port, debug=argument.debug)
|
nilq/baby-python
|
python
|
"""The Policy can use these classes to communicate with Vizier."""
import abc
import collections
import dataclasses
import datetime
from typing import Dict, Iterable, List, Optional
from vizier import pyvizier as vz
@dataclasses.dataclass(frozen=True)
class MetadataDelta:
"""Carries cumulative delta for a batch metadata update.
Attributes:
on_study: Updates to be made on study-level metadata.
on_trials: Maps trial id to updates.
"""
on_study: vz.Metadata = dataclasses.field(default_factory=vz.Metadata)
on_trials: Dict[int, vz.Metadata] = dataclasses.field(
default_factory=lambda: collections.defaultdict(vz.Metadata))
class _MetadataUpdateContext:
"""Metadata update context.
Usage:
# All metadata updates in the context are queued, not immediately applied.
# Upon exit, supporter handles all metadata updates in a batch.
with pythia2._MetadataUpdateContext(policy_supporter) as mu:
# Study-level metadata.
mu.assign('namespace', 'key', 'value')
# Trial-level metadata.
mu.assign('namespace', 'key', 'value', trial_id=1)
# Same as above but with a side effect. After this line the following
# line is True:
# trial.metadata.ns('namespace')['key'] == 'value'
mu.assign('namespace', 'key', 'value', trial)
"""
def __init__(self, supporter: 'PolicySupporter'):
self._supporter = supporter
self._delta = MetadataDelta()
# pylint: disable=invalid-name
def assign(self,
namespace: str,
key: str,
value: vz.MetadataValue,
trial: Optional[vz.Trial] = None,
*,
trial_id: Optional[int] = None):
"""Assigns metadata.
Args:
namespace: Namespace of the metadata. See vz.Metadata doc for more
details.
key:
value:
trial: If specified, `trial_id` must be None. It behaves the same as when
`trial_id=trial.id`, except that `trial` is immediately modified.
trial_id: If specified, `trial` must be None. If both `trial` and
`trial_id` are None, then the key-value pair will be assigned to the
study.
Raises:
ValueError:
"""
if trial is None and trial_id is None:
self._delta.on_study.ns(namespace)[key] = value
elif trial is not None and trial_id is not None:
raise ValueError(
'At most one of `trial` and `trial_id` can be specified.')
elif trial is not None:
self._delta.on_trials[trial.id].ns(namespace)[key] = value
trial.metadata.ns(namespace)[key] = value
elif trial_id is not None:
self._delta.on_trials[trial_id].ns(namespace)[key] = value
def __enter__(self):
return self
def __exit__(self, *args):
"""upon exit, sends a batch update request."""
self._supporter.SendMetadata(self._delta)
class PolicySupporter(abc.ABC):
"""Used by Policy instances to communicate with Vizier."""
# TODO: Change to GetStudyDescriptor.
@abc.abstractmethod
def GetStudyConfig(self, study_guid: Optional[str] = None) -> vz.StudyConfig:
"""Requests a StudyConfig from Vizier.
This sends a PythiaToVizier.trial_selector packet and waits for the
response(s). You can call this multiple times, and it is thread-friendly,
so you can even overlap calls.
Args:
study_guid: The GUID of the study whose StudyConfig you want. Note that
access control applies. By default, use the current study's GUID.
Returns:
The requested StudyConfig proto.
Raises:
CancelComputeError: (Do not catch.)
PythiaProtocolError: (Do not catch.)
VizierDatabaseError: If the database operation raises an error, e.g. if
$study_guid refers to a nonexistent or inaccessible study.
"""
@abc.abstractmethod
def GetTrials(
self,
*,
study_guid: Optional[str] = None,
trial_ids: Optional[Iterable[int]] = None,
min_trial_id: Optional[int] = None,
max_trial_id: Optional[int] = None,
status_matches: Optional[vz.TrialStatus] = None,
include_intermediate_measurements: bool = True) -> List[vz.Trial]:
"""Requests Trials from Vizier.
Args:
study_guid: The GUID of the study to get Trials from. Default is None,
which means the current Study.
trial_ids: a list of Trial id numbers to acquire.
min_trial_id: Trials in [min_trial_id, max_trial_id] are selected, if at
least one of the two is not None.
max_trial_id: Trials in [min_trial_id, max_trial_id] are selected, if at
least one of the two is not None.
status_matches: If not None, filters for Trials where
Trial.status==status_matches. The default passes all types of Trial.
include_intermediate_measurements: If True (default), the returned Trials
must have all measurements. Note that the final Measurement is always
included for COMPLETED Trials. If False, PolicySupporter _may_ leave
`measurements` field empty in the returned Trials in order to optimize
speed, but it is not required to do so.
Returns:
Trials obtained from Vizier.
Raises:
CancelComputeError: (Do not catch.)
PythiaProtocolError: (Do not catch.)
VizierDatabaseError: If the database operation raises an error, e.g. if
$study_guid refers to a nonexistent or inaccessible study.
NOTE: if $trial_ids is set, $min_trial_id, $max_trial_id, and
$status_matches will be ignored.
"""
def CheckCancelled(self, note: Optional[str] = None) -> None:
"""Throws a CancelComputeError on timeout or if Vizier cancels.
This should be called occasionally by any long-running computation.
Raises an exception if the interaction has been cancelled by the Vizier
side of the protocol; the exception shuts down the Pythia server.
Args:
note: for debugging.
Raises:
CancelComputeError: (Do not catch.)
"""
pass
def TimeRemaining(self) -> datetime.timedelta:
"""The time remaining to compute a result.
Returns:
The remaining time before the RPC is considered to have timed out; it
returns datetime.timedelta.max if no deadline was specified in the RPC.
This is an alternative to calling CheckCancelled(); both have the goal of
terminating runaway computations. If your computation times out,
you should raise TemporaryPythiaError (if you want a retry) or
InactivateStudyError (if not).
"""
return datetime.timedelta(hours=1.0)
def MetadataUpdate(self) -> _MetadataUpdateContext:
"""Queues metadata updates, then passes them to UpdateMetadata().
Usage:
ps = PolicySupporter()
with ps.MetadataUpdate() as mu:
# Study-level metadata.
mu.assign('namespace', 'key', 'value')
# Trial-level metadata.
mu.assign('namespace', 'key', 'value', trial_id=1)
Returns:
A _MetadataUpdateContext instance to use as a context.
Raises:
CancelComputeError: (Do not catch.)
PythiaProtocolError: (Do not catch.)
VizierDatabaseError: If the database operation raises an error.
"""
return _MetadataUpdateContext(self)
@abc.abstractmethod
def SendMetadata(self, delta: MetadataDelta) -> None:
"""Updates the Study's metadata in Vizier's database.
The MetadataUpdate() method is preferred for normal use.
Args:
delta: Metadata to be uploaded to the Vizier database.
Raises:
CancelComputeError: (Do not catch.)
PythiaProtocolError: (Do not catch.)
VizierDatabaseError: If the database operation raises an error.
"""
|
nilq/baby-python
|
python
|
from django.conf.urls import url
from . import views
from django.contrib.auth import views as auth_views
urlpatterns = [
url(r'^$', views.home, name='home'),
url(r'^sponsor/$', views.sponsor, name='sponsor'),
url(r'^hospitality/$', views.hospitality, name='hospitality'),
url(r'^transport/$', views.transport, name='transport'),
url(r'^accomodation/$', views.accomodation, name='accomodation'),
url(r'^events/technical/$',views.technical, name='technical'),
url(r'^events/sports/$',views.sports, name='sports'),
url(r'^events/cultural/$',views.cultural, name='cultural'),
url(r'^events/register/$',views.event_register, name='event_register'),
url(r'^accomodation/register/$', views.accom_register, name='accom_register'),
url(r'^events/register2/$',views.event_register2, name='event_register2'),
url(r'^accomodation/register2/$',views.accom_register2, name='accom_register2'),
url(r'^events/(?P<category>\w+)/(?P<subcategory>\w+)/$', views.specificEventView, name='specificView'),
url(r'^events/(?P<category>\w+)/$', views.specificEventView, name='specificView'),
url(r'^pronights/', views.pronights, name='pronights'),
url(r'^forgotPassMail/', views.forgotmail, name='forgotmail'),
url(r'^forgotPassword/(?P<hashkey>\w+)', views.forgot, name='forgot'),
url(r'^me/', views.profile, name='profile'),
url(r'^login/', views.login1, name='login'),
url(r'^register/', views.register, name='register'),
url(r'^logout/$', auth_views.logout, {'next_page': '/'}),
]
|
nilq/baby-python
|
python
|
import numpy as np
import math
import pandas as pd
#######################################################################
"""
These functions are applied to hierarchically classify the images.
level_n(x): determines the prediction at level n. If there is no prediction to be made at level n, the function returns nan.
level_n_p(x): determines the probability associated with the prediction at level n. This probability is needed to classify the sequences using the top-k method.
"""
def level_1(x):
if x['blank'] > x['not_blank']:
return 'blank'
else:
return 'not_blank'
def level_1_p(x):
if x['blank'] > x['not_blank']:
return x['blank']
else:
return x['not_blank']
def level_2(x):
if x['level_1'] == 'not_blank':
if x['animal'] > x['no_animal']:
return 'animal'
else:
return 'no_animal'
else:
return math.nan
def level_2_p(x):
if x['animal'] > x['no_animal']:
return x['animal']
else:
return x['no_animal']
def level_3(x):
if x['level_2'] == 'animal':
if x['bird'] > x['mammal']:
return 'bird'
else:
return 'mammal'
elif x['level_2'] == 'no_animal':
if x['human'] > x['pickup']:
return 'human'
else:
return 'pickup'
else:
return math.nan
def level_3_p(x):
if x['level_2'] == 'animal':
if x['bird'] > x['mammal']:
return x['bird']
else:
return x['mammal']
elif x['level_2'] == 'no_animal':
if x['human'] > x['pickup']:
return x['human']
else:
return x['pickup']
else:
return math.nan
def level_4(x):
if x['level_3'] == 'mammal':
if x['small_mammal'] > x['large_mammal']:
return 'small_mammal'
else:
return 'large_mammal'
else:
return math.nan
def level_4_p(x):
if x['level_3'] == 'mammal':
if x['small_mammal'] > x['large_mammal']:
return x['small_mammal']
else:
return x['large_mammal']
else:
return math.nan
def level_5(x,pred):
if x['level_4'] == 'small_mammal':
p = int(pred.iloc[x.name,[9,5,6,14]].idxmax())
return p
elif x['level_4'] == 'large_mammal':
p = int(pred.iloc[x.name,[0,7,11,1,4,12,13,15,16]].idxmax())
return p
else:
return math.nan
def level_5_p(x,pred):
if x['level_4'] == 'small_mammal':
p = np.asarray(pred.iloc[x.name,[9,5,6,14]]).max(axis=0)
return p
elif x['level_4'] == 'large_mammal':
p = np.asarray(pred.iloc[x.name,[0,7,11,1,4,12,13,15,16]]).max(axis=0)
return p
else:
return math.nan
########################################################################
def top_predictions(data, data_hierarchy):
"""
This function determines the prediction of the sequences based on the top-k predictions at every level.
"""
sequences = data_hierarchy['sequence'].drop_duplicates()
data_seq_top = pd.DataFrame(columns=['sequence','level_1','level_1_p','level_2','level_2_p','level_3','level_3_p','level_4','level_4_p','level_5','level_5_p'])
for s, seq in enumerate(sequences):
data_sequence = data_hierarchy[data_hierarchy['sequence'] == seq]
pred_sequence = data.loc[:,'0':'16'].loc[data['sequence'] == seq]
#Level 1
p_l1 = max([data_sequence['blank'].mean(), data_sequence['not_blank'].mean()])
l1 = ['blank', 'not_blank'][np.argmax([data_sequence['blank'].mean(), data_sequence['not_blank'].mean()])]
#level 2
if l1 == 'not_blank':
p_l2 = max([data_sequence['animal'].mean(), data_sequence['no_animal'].mean()])
l2 = ['animal', 'no_animal'][np.argmax([data_sequence['animal'].mean(), data_sequence['no_animal'].mean()])]
else:
p_l2 = math.nan
l2 = math.nan
#Level 3
if l2 == 'animal':
p_l3 = max([data_sequence['bird'].mean(), data_sequence['mammal'].mean()])
l3 = ['bird', 'mammal'][np.argmax([data_sequence['bird'].mean(), data_sequence['mammal'].mean()])]
elif l2 == 'no_animal':
p_l3 = max([data_sequence['human'].mean(), data_sequence['pickup'].mean()])
l3 = ['human', 'pickup'][np.argmax([data_sequence['human'].mean(), data_sequence['pickup'].mean()])]
else:
p_l3 = math.nan
l3 = math.nan
#Level 4
if l3 == 'mammal':
p_l4 = max([data_sequence['small_mammal'].mean(), data_sequence['large_mammal'].mean()])
l4 = ['small_mammal', 'large_mammal'][np.argmax([data_sequence['small_mammal'].mean(), data_sequence['large_mammal'].mean()])]
else:
p_l4 = math.nan
l4 = math.nan
#Level 5
if l4 == 'small_mammal':
p_l5 = max(pred_sequence.iloc[:,[9,5,6,14]].mean())
l5 = int(np.argmax(pred_sequence.iloc[:,[9,5,6,14]].mean()))
elif l4 == 'large_mammal':
large = pred_sequence.iloc[:,[0,7,11,1,4,12,13,15,16]]
top5_p = []
top5_pred = []
#Top-5 for every image
for i, row in large.iterrows():
top5_p += np.sort(row.values.tolist())[-5:].tolist()
top5_pred += np.array([0,7,11,1,4,12,13,15,16])[np.argsort(row.values.tolist())[-5:].tolist()].tolist()
df_top5 = pd.DataFrame({'top5_p': top5_p, 'top5_pred':top5_pred})
top5_seq = df_top5.groupby('top5_pred').sum().divide(len(data_sequence)).sort_values('top5_p',ascending=False)[:5]
p_l5 = top5_seq.max()[0]
l5 = int(top5_seq.idxmax()[0])
else:
p_l5 = math.nan
l5 = math.nan
data_seq_top.loc[s] = [seq, l1, p_l1, l2, p_l2, l3, p_l3, l4, p_l4, l5, p_l5]
return data_seq_top
#########################################################################################
def hierarchical_predictions(data):
"""
This function determines the hierarchical prediction for the individual images, based on the output of the neural network.
These predictions can then be used to classify a sequence.
"""
predictions = data.loc[:,'0':'16']
index_small = [9,5,6,14]
index_large = [0,7,11,1,4,12,13,15,16]
hierarchy = pd.DataFrame()
hierarchy['blank'] = predictions.iloc[:,3]
hierarchy['small_mammal'] = predictions.iloc[:,index_small].sum(axis=1)
hierarchy['large_mammal'] = predictions.iloc[:,index_large].sum(axis=1)
hierarchy['mammal'] = hierarchy['small_mammal'] + hierarchy['large_mammal']
hierarchy['bird'] = predictions.iloc[:,2]
hierarchy['animal'] = hierarchy['bird'] + hierarchy['mammal']
hierarchy['human'] = predictions.iloc[:,8]
hierarchy['pickup'] = predictions.iloc[:,10]
hierarchy['no_animal'] = hierarchy['human'] + hierarchy['pickup']
hierarchy['not_blank'] = hierarchy['no_animal'] + hierarchy['animal']
hierarchy['level_1'] = hierarchy.apply(level_1, axis=1)
hierarchy['level_1_p'] = hierarchy.apply(level_1_p, axis=1)
hierarchy['level_2'] = hierarchy.apply(level_2, axis=1)
hierarchy['level_2_p'] = hierarchy.apply(level_2_p, axis=1)
hierarchy['level_3'] = hierarchy.apply(level_3, axis=1)
hierarchy['level_3_p'] = hierarchy.apply(level_3_p, axis=1)
hierarchy['level_4'] = hierarchy.apply(level_4, axis=1)
hierarchy['level_4_p'] = hierarchy.apply(level_4_p, axis=1)
mammals = pd.DataFrame()
mammals['small_pred_max'] = np.asarray(predictions.iloc[:,index_small]).argmax(axis=1)
mammals['large_pred_max'] = np.asarray(predictions.iloc[:,index_large]).argmax(axis=1)
mammals['small_max_p'] = np.asarray(predictions.iloc[:,index_small]).max(axis=1)
mammals['large_max_p'] = np.asarray(predictions.iloc[:,index_large]).max(axis=1)
hierarchy['level_5'] = hierarchy.apply(level_5, pred = predictions, axis=1)
hierarchy['level_5_p'] = hierarchy.apply(level_5_p, pred = predictions, axis=1)
return hierarchy
############################################################################################
def bottom_hierarchical_prediction(x):
"""
This function determines the final prediction for a sequence, based on the hierarchical prediction at every level.
"""
if pd.isnull(x['level_5']) == False:
        label = int(x['level_5'])  # cast to int: the column holds floats because it also contains NaNs
elif pd.isnull(x['level_3']) == False:
label = x['level_3']
if label == 'bird':
label = 2
elif label == 'human':
label = 8
else:
label = 10
else:
label = 3 #blank
labels = ['Ass','Beech Marten','Bird','Blank','Cat','Squirrel','Hare','Horse','Human','Mouse','PickupSetup','Fox','Dog','Mouflon','Hedgehog','Roe Deer','Wild Boar']
label = labels[label]
return label
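# Hedged usage sketch (added for illustration; not part of the original file): assuming a
# CSV of per-image softmax scores with columns '0'..'16' plus a 'sequence' column, the
# helpers above could be chained like this; 'predictions.csv' is a hypothetical file name.
if __name__ == '__main__':
    data = pd.read_csv('predictions.csv')
    data_hierarchy = hierarchical_predictions(data)               # per-image hierarchical levels
    data_hierarchy['sequence'] = data['sequence'].values          # carry the sequence ids over
    data_hierarchy['label'] = data_hierarchy.apply(bottom_hierarchical_prediction, axis=1)
    sequence_predictions = top_predictions(data, data_hierarchy)  # per-sequence top-k labels
    print(sequence_predictions.head())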
|
nilq/baby-python
|
python
|
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.utils import timezone
from django.views import generic
from .models import Choice, Question
# Create your views here.
class IndexView(generic.ListView):
template_name = 'polls/index.html'
context_object_name = 'latest_question_list'
def get_queryset_rev(self):
return Question.objects.order_by('-pub_date').reverse()[1:]
def get_queryset(self):
"""
Return the last five published questions (not including those set to be
published in the future).
"""
return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
model = Question
template_name = 'polls/detail.html'
def get_queryset(self):
"""
Excludes any questions that aren't published yet.
"""
return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
model = Question
template_name = 'polls/results.html'
def summary(request):
summary = {}
for choice in Choice.objects.all():
q_text = choice.question.question_text
c_text = choice.choice_text
votes = choice.votes
if not q_text in summary.keys():
summary[q_text] = {c_text: votes}
else:
summary[q_text][c_text] = votes
return render(request, 'polls/summary.html', {
'summary': summary
})
def vote(request, pk):
question = get_object_or_404(Question, pk=pk)
try:
selected_choice = question.choice_set.get(pk=request.POST['choice'])
except (KeyError, Choice.DoesNotExist):
return render(request, 'polls/detail.html', {
'question': question,
'error_message' :"You didn't select a choice.",
})
else:
selected_choice.votes += 1
selected_choice.save()
return HttpResponseRedirect(reverse('polls:results', args=(question.id, )))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import sys
from os import listdir
def proc_files(files):
fs = []
for f in files:
try:
fs.append(open(f, "r"))
except Exception as exct:
print("Failed to read file {:s}".format(f))
sys.exit(1)
done = False
result = []
while not done:
index = "NONE"
val_sum = 0
for f in fs:
try:
line = f.__next__()
except Exception:
done = True
break
sline = line.split()
assert(len(sline) == 2)
index, value = sline
value = float(value)
val_sum += value
# print(sline)
else:
val_avg = val_sum/len(fs)
result.append("{:s}\t{:f}".format(index, val_avg))
result.append("")
return result
def proc_dirs(dirs):
first_dir = dirs[0]
for f in listdir(first_dir):
avg_out = proc_files(["{:s}/{:s}".format(d, f) for d in dirs])
with open(f, "w") as outfile:
outfile.write("\n".join(avg_out))
def main():
proc_dirs(sys.argv[1:])
# proc_files(sys.argv[1:])
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
"""Define language independent properties at the module level"""
from adam.language.lexicon import LexiconEntry, LexiconProperty
from adam.language.dependency import MorphosyntacticProperty
# Define universal morphosyntactic properties
FIRST_PERSON = MorphosyntacticProperty("1p")
SECOND_PERSON = MorphosyntacticProperty("2p")
THIRD_PERSON = MorphosyntacticProperty("3p")
NOMINATIVE = MorphosyntacticProperty("nom")
ACCUSATIVE = MorphosyntacticProperty("acc")
# Define universal lexicon properties
MASS_NOUN = LexiconProperty("mass-noun")
ALLOWS_DITRANSITIVE = LexiconProperty("allows-ditransitive")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 6 16:31:59 2021
@author: msantamaria
"""
# Artificial Neural Networks
# Part 1 - Data preprocessing
# Import the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Import the dataset
dataset = pd.read_csv("Churn_Modelling.csv")
X = dataset.iloc[:,3:13].values
y = dataset.iloc[:,13].values
# Encode categorical data
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelEncoder_X_1 = LabelEncoder()
X[:,1] = labelEncoder_X_1.fit_transform(X[:,1])
labelEncoder_X_2 = LabelEncoder()
X[:,2] = labelEncoder_X_2.fit_transform(X[:,2])
from sklearn.compose import ColumnTransformer
ct = ColumnTransformer(
    [('one_hot_encoder', OneHotEncoder(categories='auto'), [1])], # The column numbers to be transformed (here column [1])
remainder='passthrough' # Leave the rest of the columns untouched
)
X = np.array(ct.fit_transform(X), dtype=float)  # note: np.float was removed in recent NumPy versions
X = X[:,1:]
# Split the dataset into training and test sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.20,random_state = 0)
# Feature scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# Part 2 - Build the ANN
# Import Keras and additional libraries
import keras
from keras.models import Sequential
from keras.layers import Dense
# Initialize the ANN
classifier = Sequential()
# Add the input layer and the first hidden layer
classifier.add(Dense(units=6,
kernel_initializer="uniform",
activation="relu",
input_dim=11))
# Add the second hidden layer
classifier.add(Dense(units=6,
kernel_initializer="uniform",
activation="relu"))
# Add the output layer
classifier.add(Dense(units=1,
kernel_initializer="uniform",
activation="sigmoid"))
# Compile the ANN
classifier.compile(optimizer="adam",
loss="binary_crossentropy",
metrics=["accuracy"])
# Fit the ANN to the training set
classifier.fit(X_train, y_train, batch_size=10, epochs=100)
# How to build the same neural network for Regression?
# Initialising the ANN
# regressor = Sequential()
# # Adding the input layer and the first hidden layer
# regressor.add(Dense(units = 6,
#                     kernel_initializer = 'uniform',
# activation = 'relu',
# input_dim = 11))
# # Adding the second hidden layer
# regressor.add(Dense(units = 6,
# kernel_initializer = 'uniform',
# activation = 'relu'))
# # Adding the output layer
# regressor.add(Dense(units = 1,
# kernel_initializer = 'uniform'))
# # Compiling the ANN
# regressor.compile(optimizer = 'adam',
# loss = 'mean_squared_error')
# # Fitting the ANN to the Training set
# regressor.fit(X_train,
# y_train,
# batch_size = 10,
# epochs = 100)
# Part 3 - Evaluate the model and compute the final predictions
# Predict the results with the test set
y_pred = classifier.predict(X_test)
y_pred = (y_pred > 0.5)
# Build a confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test,y_pred)
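# Hedged addition (not part of the original script): a quick accuracy readout computed
# from the confusion matrix above.
accuracy = (cm[0, 0] + cm[1, 1]) / cm.sum()
print("Test accuracy: {:.2%}".format(accuracy))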
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-01-29 08:44
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('codenerix_products', '0008_auto_20180126_1711'),
('codenerix_invoicing', '0003_auto_20180129_0941'),
]
operations = [
migrations.CreateModel(
name='SalesLines',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Created')),
('updated', models.DateTimeField(auto_now=True, verbose_name='Updated')),
('removed', models.BooleanField(default=False, editable=False, verbose_name='Removed')),
('price_recommended', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Recomended price base')),
('quantity', models.FloatField(verbose_name='Quantity')),
('subtotal', models.DecimalField(decimal_places=2, default=0, editable=False, max_digits=10, verbose_name='Subtotal')),
('discounts', models.DecimalField(decimal_places=2, default=0, editable=False, max_digits=10, verbose_name='Discounts')),
('taxes', models.DecimalField(decimal_places=2, default=0, editable=False, max_digits=10, verbose_name='Taxes')),
('equivalence_surcharges', models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=10, null=True, verbose_name='Equivalence surcharge')),
('total', models.DecimalField(decimal_places=2, default=0, editable=False, max_digits=10, verbose_name='Total')),
('code', models.CharField(blank=True, default=None, max_length=250, null=True, verbose_name='Code')),
('description_basket', models.CharField(blank=True, max_length=256, null=True, verbose_name='Description')),
('price_base_basket', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Price base')),
('discount_basket', models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='Discount (%)')),
('tax_basket', models.FloatField(blank=True, default=0, null=True, verbose_name='Tax (%)')),
('equivalence_surcharge_basket', models.FloatField(blank=True, default=0, null=True, verbose_name='Equivalence surcharge (%)')),
('tax_label_basket', models.CharField(blank=True, max_length=250, null=True, verbose_name='Tax Name')),
('notes_basket', models.CharField(blank=True, max_length=256, null=True, verbose_name='Notes')),
('description_order', models.CharField(blank=True, max_length=256, null=True, verbose_name='Description')),
('price_base_order', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Price base')),
('discount_order', models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='Discount (%)')),
('tax_order', models.FloatField(blank=True, default=0, null=True, verbose_name='Tax (%)')),
('equivalence_surcharge_order', models.FloatField(blank=True, default=0, null=True, verbose_name='Equivalence surcharge (%)')),
('tax_label_order', models.CharField(blank=True, max_length=250, null=True, verbose_name='Tax Name')),
('notes_order', models.CharField(blank=True, max_length=256, null=True, verbose_name='Notes')),
('notes_albaran', models.CharField(blank=True, max_length=256, null=True, verbose_name='Notes')),
('description_ticket', models.CharField(blank=True, max_length=256, null=True, verbose_name='Description')),
('price_base_ticket', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Price base')),
('discount_ticket', models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='Discount (%)')),
('tax_ticket', models.FloatField(blank=True, default=0, null=True, verbose_name='Tax (%)')),
('equivalence_surcharge_ticket', models.FloatField(blank=True, default=0, null=True, verbose_name='Equivalence surcharge (%)')),
('tax_label_ticket', models.CharField(blank=True, max_length=250, null=True, verbose_name='Tax Name')),
('notes_ticket', models.CharField(blank=True, max_length=256, null=True, verbose_name='Notes')),
('notes_ticket_rectification', models.CharField(blank=True, max_length=256, null=True, verbose_name='Notes')),
('description_invoice', models.CharField(blank=True, max_length=256, null=True, verbose_name='Description')),
('price_base_invoice', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Price base')),
('discount_invoice', models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='Discount (%)')),
('tax_invoice', models.FloatField(blank=True, default=0, null=True, verbose_name='Tax (%)')),
('equivalence_surcharge_invoice', models.FloatField(blank=True, default=0, null=True, verbose_name='Equivalence surcharge (%)')),
('tax_label_invoice', models.CharField(blank=True, max_length=250, null=True, verbose_name='Tax Name')),
('notes_invoice', models.CharField(blank=True, max_length=256, null=True, verbose_name='Notes')),
('notes_invoice_rectification', models.CharField(blank=True, max_length=256, null=True, verbose_name='Notes')),
('albaran', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='lines_sales', to='codenerix_invoicing.SalesAlbaran', verbose_name='Albaran')),
('basket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lines_sales', to='codenerix_invoicing.SalesBasket', verbose_name='Basket')),
('invoice', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='lines_sales', to='codenerix_invoicing.SalesInvoice', verbose_name='Invoice')),
('invoice_rectification', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='lines_sales', to='codenerix_invoicing.SalesInvoiceRectification', verbose_name='Invoice rectification')),
('order', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='lines_sales', to='codenerix_invoicing.SalesOrder', verbose_name='Sales order')),
('product_final', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lines_sales', to='codenerix_products.ProductFinal', verbose_name='Product')),
('product_unique', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='lines_sales', to='codenerix_products.ProductUnique', verbose_name='Product Unique')),
('ticket', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='lines_sales', to='codenerix_invoicing.SalesTicket', verbose_name='Ticket')),
('ticket_rectification', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='lines_sales', to='codenerix_invoicing.SalesTicketRectification', verbose_name='Ticket rectification')),
],
options={
'default_permissions': ('add', 'change', 'delete', 'view', 'list'),
'abstract': False,
},
),
]
|
nilq/baby-python
|
python
|
# %%
from sklearn.metrics import r2_score
import pandas as pd
import numpy as np
import matplotlib.pyplot as pl
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
# %%
# Any other file containing tabular data could be used here instead of this CSV
advertising = pd.read_csv("advertising.csv")
# The target column must be last for the code below to work correctly. If your target column is not last, adjust the two lines defining X and y below accordingly.
TV = 'TV'
Radio = "Radio"
Newspaper = 'Newspaper'
Sales = 'Sales'
X = advertising.iloc[:, :1]
y = advertising.iloc[:, -1]
# %%
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=100
)
# %%
X_train
# %%
# `normalize` was removed from LinearRegression in recent scikit-learn releases; for
# plain OLS it only rescaled internally, so dropping it does not change the predictions.
model = LinearRegression(fit_intercept=True, n_jobs=-1).fit(X_train, y_train)
# %%
y_predicted = model.predict(X_test)
# %%
r2_score(y_test, y_predicted)  # r2_score expects (y_true, y_pred)
# %%
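# Illustrative extra cell (not part of the original script): inspect the fitted line
# and compare a few predictions with the held-out targets. `model`, `X_test`,
# `y_test` and `y_predicted` all come from the cells above.
print("intercept:", model.intercept_)
print("coefficients:", model.coef_)
comparison = pd.DataFrame({"actual": y_test.values, "predicted": y_predicted})
print(comparison.head())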
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from collections import OrderedDict
import os
import re
from lxml import etree
from django.db.models import Count, Prefetch
from selections.models import PreProcessFragment
from stats.utils import get_label_properties_from_cache, prepare_label_cache
from .models import Corpus, Tense, Alignment, Source, Annotation, Fragment, Sentence, Word
def get_next_alignment(user, language_from, language_to, corpus=None):
"""
Retrieves a random Alignment from the database.
:param user: The current User
:param language_from: The source language
:param language_to: The target language
:param corpus: (if supplied) The Corpus where to draw an Alignment from
(otherwise: select from the available Corpora for a user)
:return: A random Alignment object
"""
target_words = Sentence.objects. \
prefetch_related(Prefetch('word_set', queryset=Word.objects.filter(is_target=True)))
alignments = Alignment.objects \
.filter(original_fragment__language=language_from) \
.filter(translated_fragment__language=language_to) \
.filter(annotation=None) \
.select_related('original_fragment__document') \
.prefetch_related(Prefetch('original_fragment__sentence_set', queryset=target_words,
to_attr='targets_prefetched'),)
corpora = [corpus] if corpus else get_available_corpora(user)
alignments = alignments.filter(original_fragment__document__corpus__in=corpora)
for corpus in corpora:
if corpus.current_subcorpus:
alignments = alignments.filter(original_fragment__in=corpus.current_subcorpus.get_fragments())
if not alignments:
return None
elif corpora[0].random_next_item:
return alignments.order_by('?').first()
else:
# Sort by Document title and the xml_id of the first target Word
return sorted(alignments, key=lambda a: (a.original_fragment.document.title,
a.original_fragment.sort_key_target()))[0]
def get_available_corpora(user):
"""
Returns the available Corpora for a User.
    A superuser can see data from all corpora; other users are limited to corpora where they are an annotator.
:param user: The current User
:return: The available Corpora for this User
"""
if user.is_superuser:
return Corpus.objects.all()
elif user.is_authenticated:
return user.corpus_set.all()
else:
return Corpus.objects.filter(is_public=True)
def get_most_frequent_tenses(language):
"""
Returns the most frequently annotated tenses for a language.
:param language: The given Language
:return: A list of tenses
"""
most_frequent_by_language = Annotation.objects \
.filter(alignment__translated_fragment__language=language) \
.values('tense') \
.annotate(Count('tense')) \
.order_by('-tense__count')
return Tense.objects.filter(pk__in=[t.get('tense') for t in most_frequent_by_language])
def get_tenses(language):
"""
Returns tenses for a language.
:param language: The given Language
:return: A list of tenses
"""
return [t.title for t in Tense.objects.filter(language=language).order_by('title')]
def update_dialogue(in_dialogue, fragment=None, sentence=None, word_range=None):
"""
Updates the dialogue marking for Words and Fragments.
:param in_dialogue: whether the Words should be in_dialogue
:param fragment: a Fragment for which to change the dialogue marking
:param sentence: a Sentence for which to change the dialogue marking
:param word_range: a Word range for which to change the dialogue marking
"""
words = Word.objects.none()
if not any([fragment, sentence, word_range]):
raise ValueError('No words selected')
if fragment:
words |= Word.objects.filter(sentence__fragment=fragment)
if sentence:
words |= Word.objects.filter(sentence=sentence)
if word_range:
words |= Word.objects.filter(pk__in=word_range)
fragments = set()
for word in words:
word.is_in_dialogue = in_dialogue
word.is_in_dialogue_prob = 1.0 if in_dialogue else 0.0
word.save()
fragments.add(word.sentence.fragment)
for fragment in fragments:
fragment.save()
XML_ID_REGEX = re.compile(r'w?(\d[\.\d]*)')
def is_before(xml_id1, xml_id2):
result = False
match1 = re.match(XML_ID_REGEX, xml_id1)
match2 = re.match(XML_ID_REGEX, xml_id2)
if match1 and match2:
parts1 = [int(i) for i in match1.group(1).split('.')]
parts2 = [int(i) for i in match2.group(1).split('.')]
        for p1, p2 in zip(parts1, parts2):
            if p1 < p2:
                result = True
                break
            if p1 > p2:
                # a larger leading part means xml_id1 comes at or after xml_id2
                break
return result
def sort_key(xml_id, xml_tag):
result = [xml_id]
if xml_id.isdigit():
result = int(xml_id)
else:
if xml_id[0] == xml_tag and xml_id[1:].split('.'):
result = list(map(int, xml_id[1:].split('.')))
return result
def natural_sort_key(s, _nsre=re.compile('([0-9]+)')):
"""
Allows natural sorting, e.g. 2.xml is before 16.xml
"""
return [int(text) if text.isdigit() else text.lower() for text in _nsre.split(s)]
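# Illustrative helper (not part of the original module): a tiny self-check showing
# how is_before() and natural_sort_key() order typical xml ids and file names.
def _sorting_helpers_example():
    assert is_before('w3.4', 'w3.10')       # parts compare numerically: 3.4 < 3.10
    assert not is_before('w3.10', 'w3.4')
    assert sorted(['16.xml', '2.xml'], key=natural_sort_key) == ['2.xml', '16.xml']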
def get_xml_sentences(fragment, limit):
"""
Retrieves sentences in the XML in the vicinity of the given xml_id
"""
try:
source = Source.objects.get(document=fragment.document, language=fragment.language)
except Source.DoesNotExist:
source = None
results = []
if source and source.xml_file and os.path.exists(source.xml_file.path):
xml_id = fragment.xml_ids() # TODO: this works, as source Fragments have only one Sentence
# TODO: limit to Fragments that are the source of an Alignment
related_fragments = Fragment.objects.filter(
document=fragment.document,
language=fragment.language,
preprocessfragment=None,
)
related_fragments = related_fragments.exclude(original=None)
# Loop over p/head/s elements
prev_el = []
found = False
added = 0
for _, el in etree.iterparse(source.xml_file.path, tag=['p', 'head', 's']):
if el.get('id') == xml_id:
found = True
if found:
if added <= limit:
position = 'current' if added == 0 else 'after'
results.append(add_element(el, fragment, related_fragments, position))
if el.tag == 's':
added += 1
else:
break
else:
prev_el.append(el)
# Inserts previous elements before the results
added = 0
for el in list(reversed(prev_el)):
results.insert(0, add_element(el, fragment, related_fragments, 'before'))
if el.tag == 's':
added += 1
if added == limit:
break
return results
def add_element(el, current_fragment, related_fragments, position):
sentence = None
sentence_content_xml = None
if el.tag == 's':
# For s elements, look up the Sentence in the same Corpus as the current Fragment
sentences = Sentence.objects.filter(
xml_id=el.get('id'),
fragment__in=related_fragments
).select_related('fragment').prefetch_related('word_set')
if sentences:
xml_id = None
fragment_pks = []
words = OrderedDict()
for s in sentences:
xml_id = s.xml_id
fragment_pks.append(s.fragment.pk)
is_current = current_fragment == s.fragment
for w in s.word_set.all():
if w.xml_id in words:
words[w.xml_id]['is_target'] |= w.is_target and is_current
words[w.xml_id]['is_other_target'] |= w.is_target and not is_current
else:
word = {
'word': w.word,
'xml_id': w.xml_id,
'pos': w.pos,
'lemma': w.lemma,
'is_in_dialogue': w.is_in_dialogue,
'is_target': w.is_target and is_current,
'is_other_target': w.is_target and not is_current,
}
words[w.xml_id] = word
fragment_pks.sort(reverse=True)
sentence = {
'xml_id': xml_id,
'fragment_pks': fragment_pks,
'words': list(words.values()),
}
# If the Sentence is not there, create a mock Sentence from the XML
else:
words = []
for w in el.xpath('.//w'):
word = {
'word': w.text,
'xml_id': w.get('id'),
'pos': w.get('tree') or w.get('pos') or w.get('hun') or '?',
'lemma': w.get('lem'),
'is_in_dialogue': float(w.get('dialog', 0)) > 0,
}
words.append(word)
sentence_content_xml = {
'xml_id': el.get('id'),
'words': words
}
return {'tag': el.tag,
'id': el.get('id'),
'position': position,
'content': sentence,
'content_xml': sentence_content_xml,
}
def bind_annotations_to_xml(source):
# Retrieve the Annotations
annotations = Annotation.objects. \
filter(alignment__translated_fragment__language=source.language,
alignment__translated_fragment__document=source.document). \
select_related('alignment__original_fragment', 'tense'). \
prefetch_related('labels', 'words')
# Only include correct Annotations
annotations = annotations.filter(is_no_target=False, is_translation=True)
tree = etree.parse(source.xml_file)
label_cache = prepare_label_cache(source.document.corpus)
labels = set()
failed_lookups = []
words_by_xml_id = dict()
all_w_elements = tree.xpath('//w')
if annotations:
# Attach Annotations to the XML tree
for annotation in annotations:
label, color, _ = get_label_properties_from_cache(
annotation.get_labels(as_pk=True, include_labels=True), label_cache, len(labels))
labels.add(label)
words = annotation.words.all()
for w in words:
words_by_xml_id[w.xml_id] = dict(annotation=annotation, label=label, color=color, found=False)
for xml_w in all_w_elements:
word = words_by_xml_id.get(xml_w.get('id'))
if word:
annotation = word['annotation']
label = word['label']
color = word['color']
xml_w.set('annotation-pk', str(annotation.pk))
xml_w.set('fragment-pk', str(annotation.alignment.original_fragment.pk))
xml_w.set('label', label)
xml_w.set('color', color)
del words_by_xml_id[xml_w.get('id')]
else:
# Assume we are dealing with a source language here
# Retrieve the fragments
target_words = Sentence.objects. \
prefetch_related(Prefetch('word_set', queryset=Word.objects.filter(is_target=True)))
pp_fragments = PreProcessFragment.objects.filter(language=source.language, document=source.document)
fragments = Fragment.objects.filter(language=source.language, document=source.document). \
exclude(pk__in=pp_fragments). \
select_related('tense'). \
prefetch_related('labels',
Prefetch('sentence_set', queryset=target_words, to_attr='targets_prefetched'))
# Attach Fragments to the XML tree
for fragment in fragments:
label, color, _ = get_label_properties_from_cache(
fragment.get_labels(as_pk=True, include_labels=True), label_cache, len(labels))
labels.add(label)
sentences = fragment.targets_prefetched
for s in sentences:
for w in s.word_set.all():
words_by_xml_id[w.xml_id] = dict(fragment=fragment, label=label, color=color, found=False)
for xml_w in all_w_elements:
word = words_by_xml_id.get(xml_w.get('id'))
if word:
fragment = word['fragment']
label = word['label']
color = word['color']
xml_w.set('fragment-pk', str(fragment.pk))
xml_w.set('label', label)
xml_w.set('color', color)
del words_by_xml_id[xml_w.get('id')]
for word in words_by_xml_id.values():
# all words that were assigned to the xml tree were removed from words_by_xml_id
failed_lookups.append(word.get('fragment', word.get('annotation')))
return tree, failed_lookups
def labels_to_choices(queryset):
return [(label['pk'], '{}:{}'.format(label['key__title'], label['title']))
for label in queryset.values('pk', 'key__title', 'title')]
|
nilq/baby-python
|
python
|
from math import exp
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
# from kornia.color import rgb_to_yuv
from torch.nn.modules.loss import _Loss
import numpy as np
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
return gauss/gauss.sum()
def create_window(window_size, channel=1):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
return window
def ssim(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None):
# Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh).
if val_range is None:
if torch.max(img1) > 128:
max_val = 255
else:
max_val = 1
if torch.min(img1) < -0.5:
min_val = -1
else:
min_val = 0
L = max_val - min_val
else:
L = val_range
padd = 0
(_, channel, height, width) = img1.size()
if window is None:
real_size = min(window_size, height, width)
window = create_window(real_size, channel=channel).to(img1.device)
mu1 = F.conv2d(img1, window, padding=padd, groups=channel)
mu2 = F.conv2d(img2, window, padding=padd, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(img1 * img1, window, padding=padd, groups=channel) - mu1_sq
sigma2_sq = F.conv2d(img2 * img2, window, padding=padd, groups=channel) - mu2_sq
sigma12 = F.conv2d(img1 * img2, window, padding=padd, groups=channel) - mu1_mu2
C1 = (0.01 * L) ** 2
C2 = (0.03 * L) ** 2
v1 = 2.0 * sigma12 + C2
v2 = sigma1_sq + sigma2_sq + C2
cs = torch.mean(v1 / v2) # contrast sensitivity
ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)
if size_average:
ret = ssim_map.mean()
else:
ret = ssim_map.mean(1).mean(1).mean(1)
if full:
return ret, cs
return ret
def msssim(img1, img2, window_size=11, size_average=True, val_range=None, normalize=False):
device = img1.device
weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(device)
levels = weights.size()[0]
mssim = []
mcs = []
for _ in range(levels):
sim, cs = ssim(img1, img2, window_size=window_size, size_average=size_average, full=True, val_range=val_range)
mssim.append(sim)
mcs.append(cs)
img1 = F.avg_pool2d(img1, (2, 2))
img2 = F.avg_pool2d(img2, (2, 2))
mssim = torch.stack(mssim)
mcs = torch.stack(mcs)
    # Normalize (to avoid NaNs when training unstable models; not part of the original MS-SSIM definition)
if normalize:
mssim = (mssim + 1) / 2
mcs = (mcs + 1) / 2
pow1 = mcs ** weights
pow2 = mssim ** weights
# From Matlab implementation https://ece.uwaterloo.ca/~z70wang/research/iwssim/
    # i.e. the product of the contrast terms of all but the last scale, times the
    # SSIM term of the last (coarsest) scale, each raised to its weight
    output = torch.prod(pow1[:-1]) * pow2[-1]
return output
# Classes to re-use window
class SSIM(torch.nn.Module):
def __init__(self, window_size=11, size_average=True, val_range=None):
super(SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.val_range = val_range
# Assume 1 channel for SSIM
self.channel = 1
self.window = create_window(window_size)
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
if channel == self.channel and self.window.dtype == img1.dtype:
window = self.window
else:
window = create_window(self.window_size, channel).to(img1.device).type(img1.dtype)
self.window = window
self.channel = channel
return ssim(img1, img2, window=window, window_size=self.window_size, size_average=self.size_average)
class MSSSIM(torch.nn.Module):
def __init__(self, window_size=11, size_average=True, channel=3):
super(MSSSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = channel
def forward(self, img1, img2):
# TODO: store window between calls if possible
return msssim(img1, img2, window_size=self.window_size, size_average=self.size_average)
class MeanShift(nn.Conv2d):
def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):
super(MeanShift, self).__init__(3, 3, kernel_size=1)
std = torch.Tensor(rgb_std)
self.weight.data = torch.eye(3).view(3, 3, 1, 1)
self.weight.data.div_(std.view(3, 1, 1, 1))
self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)
self.bias.data.div_(std)
        # freeze the fixed normalization weights so they are never updated
        for p in self.parameters():
            p.requires_grad = False
class VGG(torch.nn.Module):
def __init__(self, conv_index, rgb_range=1):
super(VGG, self).__init__()
vgg_features = models.vgg19(pretrained=True).features
modules = [m for m in vgg_features]
if conv_index == '22':
self.vgg = nn.Sequential(*modules[:8])
elif conv_index == '54':
self.vgg = nn.Sequential(*modules[:35])
vgg_mean = (0.485, 0.456, 0.406)
vgg_std = (0.229 * rgb_range, 0.224 * rgb_range, 0.225 * rgb_range)
self.sub_mean = MeanShift(rgb_range, vgg_mean, vgg_std)
        # freeze the pretrained VGG feature extractor
        for p in self.vgg.parameters():
            p.requires_grad = False
def forward(self, sr, hr):
def _forward(x):
x = self.sub_mean(x)
x = self.vgg(x)
return x
vgg_sr = _forward(sr)
with torch.no_grad():
vgg_hr = _forward(hr.detach())
loss = F.l1_loss(vgg_sr, vgg_hr)
return loss
def color_loss(out, target):
    # Imported lazily so the module still imports when kornia is not installed
    # (the module-level `rgb_to_yuv` import above is commented out).
    from kornia.color import rgb_to_yuv
    out_yuv = rgb_to_yuv(out)
out_u = out_yuv[:, 1, :, :]
out_v = out_yuv[:, 2, :, :]
target_yuv = rgb_to_yuv(target)
target_u = target_yuv[:, 1, :, :]
target_v = target_yuv[:, 2, :, :]
return torch.div(torch.mean((out_u - target_u).pow(1)).abs() + torch.mean((out_v - target_v).pow(1)).abs(), 2)
class BurstLoss(_Loss):
def __init__(self, size_average=None, reduce=None, reduction='mean'):
super(BurstLoss, self).__init__(size_average, reduce, reduction)
self.reduction = reduction
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
prewitt_filter = 1 / 6 * np.array([[1, 0, -1],
[1, 0, -1],
[1, 0, -1]])
self.prewitt_filter_horizontal = torch.nn.Conv2d(in_channels=1, out_channels=1,
kernel_size=prewitt_filter.shape,
padding=prewitt_filter.shape[0] // 2).to(device)
self.prewitt_filter_horizontal.weight.data.copy_(torch.from_numpy(prewitt_filter).to(device))
self.prewitt_filter_horizontal.bias.data.copy_(torch.from_numpy(np.array([0.0])).to(device))
self.prewitt_filter_vertical = torch.nn.Conv2d(in_channels=1, out_channels=1,
kernel_size=prewitt_filter.shape,
padding=prewitt_filter.shape[0] // 2).to(device)
self.prewitt_filter_vertical.weight.data.copy_(torch.from_numpy(prewitt_filter.T).to(device))
self.prewitt_filter_vertical.bias.data.copy_(torch.from_numpy(np.array([0.0])).to(device))
def get_gradients(self, img):
img_r = img[:, 0:1, :, :]
img_g = img[:, 1:2, :, :]
img_b = img[:, 2:3, :, :]
grad_x_r = self.prewitt_filter_horizontal(img_r)
grad_y_r = self.prewitt_filter_vertical(img_r)
grad_x_g = self.prewitt_filter_horizontal(img_g)
grad_y_g = self.prewitt_filter_vertical(img_g)
grad_x_b = self.prewitt_filter_horizontal(img_b)
grad_y_b = self.prewitt_filter_vertical(img_b)
grad_x = torch.stack([grad_x_r[:, 0, :, :], grad_x_g[:, 0, :, :], grad_x_b[:, 0, :, :]], dim=1)
grad_y = torch.stack([grad_y_r[:, 0, :, :], grad_y_g[:, 0, :, :], grad_y_b[:, 0, :, :]], dim=1)
grad = torch.stack([grad_x, grad_y], dim=1)
return grad
def forward(self, input, target):
input_grad = self.get_gradients(input)
target_grad = self.get_gradients(target)
return F.l1_loss(input_grad, target_grad, reduction=self.reduction)
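# Illustrative usage sketch (not part of the original module). It exercises the
# functional ssim()/msssim() and BurstLoss on random tensors; the NCHW shape and
# the [0, 1] value range are assumptions made only for this demo.
def _loss_demo():
    device = "cuda" if torch.cuda.is_available() else "cpu"
    img_a = torch.rand(1, 3, 64, 64, device=device)
    img_b = img_a.clone()
    print("ssim   (identical images):", ssim(img_a, img_b).item())         # ~1.0
    print("msssim (identical images):", msssim(img_a, img_b).item())       # ~1.0
    print("BurstLoss (identical):    ", BurstLoss()(img_a, img_b).item())  # ~0.0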
|
nilq/baby-python
|
python
|
from rvsml.align import dtw2, OPW_w
from rvsml.EvaluateRVSML import EvaluateRVSML_dtw
from rvsml.NNClassifier import NNClassifier_dtw
from rvsml.RVSML_OT_Learning import RVSML_OT_Learning_dtw
|
nilq/baby-python
|
python
|
import sys
class Foobar:
def __init__(self, foobar="foobar"):
self.foobar = foobar
def __repr__(self):
return(self.foobar)
|
nilq/baby-python
|
python
|
import os
import json
import requests
import telegram
def custom_alert_slack(message):
    """Post `message` to the Slack webhook configured in the SLACK_WEBHOOK environment variable."""
    text = "%s" % message
    requests.post(os.getenv('SLACK_WEBHOOK'), data=json.dumps({"text": text}), headers={'Content-type': 'application/json'})
def publish_on_telegram_channel(chat_id, message, token=None, image=None):
    """Send `message` (optionally with a photo) to a Telegram chat; falls back to the TelegramBotsToken env var when no token is passed."""
    if not token:
token = os.getenv('TelegramBotsToken')
bot = telegram.Bot(token=token)
if image is None:
bot.send_message(chat_id=chat_id, text=message, parse_mode='HTML', disable_web_page_preview="true")
else:
bot.send_photo(chat_id=chat_id, photo=open(image, 'rb'), caption=message, parse_mode='HTML', disable_web_page_preview="true")
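# Illustrative usage sketch (not part of the original helpers). Both calls assume
# the SLACK_WEBHOOK and TelegramBotsToken environment variables are set; the chat
# id below is a placeholder, not a real channel.
if __name__ == "__main__":
    custom_alert_slack("deploy finished")
    publish_on_telegram_channel(chat_id="@example_channel", message="<b>deploy finished</b>")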
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author Eric Bullen <ebullen@linkedin.com>
@application jtune.py
@version 4.0.1
@abstract This tool will give detailed information about the running
JVM in real-time. It produces useful information that can
further assist the user in debugging and optimization.
@license Copyright 2015 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied.
"""
import atexit
import datetime
import getpass
import locale
import logging
import math
import os
import re
import resource
import shlex
import socket
import subprocess as sp
import sys
import textwrap
import time
from decimal import Decimal
from itertools import zip_longest, count
import argparse
import multiprocessing as mp
try:
locale.setlocale(locale.LC_ALL, 'en_US')
except locale.Error:
# Try UTF8 variant before failing
locale.setlocale(locale.LC_ALL, 'en_US.utf8')
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s: "%(name)s" (line: %(lineno)d) - %(levelname)s: %(message)s'))
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(handler)
# For me to use in PyCharm to read flight recorder files
DEBUG = False
class Display(object):
def __init__(self, textwrap_offset=80):
self.display_output = []
self.textwrap_offset = textwrap_offset
def render(self, message=None, keep_newline=True, save_output=True):
"""Basically wraps the print function so that it will also save the output to an array for pasting
Keyword arguments:
message -- the message to print
keep_newline -- if this is True, then print it, otherwise, print with no newline (like print with a comma at the end)
save_output -- if this is false, do not save the output to an array for pasting
"""
if save_output:
self.add(message)
if message.endswith("\n"):
message = message[:-1]
if keep_newline:
print(message)
else:
print(message, end=" ")
def add(self, message):
"""Append message to output items."""
self.display_output.append(message)
class GCRecord(object):
"""Object definition for a single gc record."""
_version = "1.0"
def __init__(self, raw_gc_record=None):
if raw_gc_record:
self.raw_gc_record = raw_gc_record
else:
self.raw_gc_record = list()
self.is_cms_gc = False
self.is_stw_gc = False
self.cms_sweep_time = None
self.valid_record = False
self.record_timestamp = None
self.jvm_running_time = None
self.gc_type = None
self.desired_survivor_size = None
self.curr_threshold = None
self.max_threshold = None
self.ages = list()
self.young_size_before_gc = None
self.young_size_after_gc = None
self.young_size_total = None
self.young_gc_time = 0
self.total_heap_before_gc = None
self.total_heap_after_gc = None
self.total_heap = None
self.total_gc_time = 0
self.og_used = None
self.stw_time = 0
self._parse_record()
def __repr__(self):
"""This prints out the gc record so that it looks as though it came straight from
the logs."""
output = list()
output.append("{0} Runtime: {1} GC Type: {2}".format(self.record_timestamp, self.jvm_running_time, self.gc_type))
output.append("Desired Survivor Size: {0}, Curr Threshold: {1} (Max: {2})".format(self.desired_survivor_size, self.curr_threshold, self.max_threshold))
for age in self.ages:
if age[1] > -1 or age[2] > -1:
output.append("- Age {0}: {1:>10} bytes, {2:>10} total".format(age[0], age[1], age[2]))
output.append("YG Before GC: {0}K, YG After GC: {1}K (Total: {2}K), {3} secs".format(self.young_size_before_gc, self.young_size_after_gc, self.young_size_total, self.young_gc_time))
output.append("Total Heap Before GC: {0}K, Total Heap After GC: {1}K (Total: {2}K), {3} secs".format(self.total_heap_before_gc, self.total_heap_after_gc, self.total_heap, self.total_gc_time))
return "\n".join(output)
def _parse_record(self):
"""This loops through record_array to set the class variables that make up the record."""
self.record_timestamp, record_array = self.raw_gc_record
#############################################################
# Capture STW (Full GC, remarks, etc.). Yeah, I could combine
# these three, but this is good enough for now.
if any("CMS Initial Mark" in line for line in record_array):
match = re.search(r", ([\d\.]+) secs\] ", record_array[-1])
if match:
self.gc_type = "CMS-STW"
self.is_stw_gc = True
self.valid_record = True
self.stw_time += float(match.group(1))
if any("CMS Final Remark" in line for line in record_array):
match = re.search(r", ([\d\.]+) secs\] ", record_array[-1])
if match:
self.gc_type = "CMS-STW"
self.is_stw_gc = True
self.valid_record = True
self.stw_time += float(match.group(1))
if any("Full GC" in line for line in record_array):
match = re.search(r", ([\d\.]+) secs\] ", record_array[-1])
if match:
self.gc_type = "FULL"
self.is_stw_gc = True
self.valid_record = True
self.stw_time += float(match.group(1))
if not self.is_stw_gc:
for line in record_array:
if "CMS-concurrent-sweep: " in line:
match = re.match(r"^\d+-\d+-\d+T\d+:\d+:[\d\.]+[+-]\d+: ([\d\.]+): \[CMS-concurrent-sweep: [\d\.]+/([\d\.]+) secs", line)
if match:
self.is_cms_gc = True
self.valid_record = True
self.gc_type = "CMS"
self.jvm_running_time = float(match.group(1))
self.cms_sweep_time = float(match.group(2))
break
if not (self.jvm_running_time or self.gc_type):
match = re.match(r"^\d+-\d+-\d+T\d+:\d+:[\d\.]+[+-]\d+: ([\d\.]+): .*\[(\S+)", line)
if match:
self.jvm_running_time = float(match.group(1))
self.gc_type = match.group(2)
if not (self.desired_survivor_size or self.curr_threshold or self.max_threshold):
match = re.match(r"^Desired survivor size (\d+) bytes, new threshold (\d+) \(max (\d+)\)", line)
if match:
self.valid_record = True
self.desired_survivor_size = int(match.group(1))
self.curr_threshold = int(match.group(2))
self.max_threshold = int(match.group(3))
# Here I set the survivor size beforehand, for any that
# may be missing as I want all the ages even if they aren't
# being used for comparison between GCs
for age in range(1, self.max_threshold + 1):
self.ages.append((age, -1, -1))
continue
################################################
# Skipping records when the JVM has been running
# for less than 300 seconds
if self.jvm_running_time < 300:
self.valid_record = False
break
#############################
# Capture survivor ages, etc.
match = re.match(r"^- age\s+(\d+):\s+(\d+) bytes,\s+(\d+) total", line)
if match:
############################################################
# This while logic block catches any ages that were
# fully reaped, and fills them with zeros. This is important
# as the analytics needs to know this to determine survivor
# death rates/ratios
age = int(match.group(1))
curr_size = int(match.group(2))
max_size = int(match.group(3))
self.ages[age - 1] = (age, curr_size, max_size)
continue
###############################
# Capture gc reallocation stats
match = re.match(r"^: (\d+)\w->(\d+)\w\((\d+)\w\), ([\d\.]+) secs\] (\d+)\w->(\d+)\w\((\d+)\w\), ([\d\.]+) secs\]", line)
if match:
self.young_size_before_gc = int(match.group(1)) * 1024
self.young_size_after_gc = int(match.group(2)) * 1024
self.young_size_total = int(match.group(3)) * 1024
self.young_gc_time = Decimal(match.group(4))
self.total_heap_before_gc = int(match.group(5)) * 1024
self.total_heap_after_gc = int(match.group(6)) * 1024
self.total_heap = int(match.group(7)) * 1024
self.total_gc_time = Decimal(match.group(8))
self.og_used = self.total_heap_after_gc - self.young_size_after_gc
def liverun(cmd=None):
"""Run cmd, and return an iterator of said cmd.
Keyword arguments:
cmd -- the command to run
"""
global subproc
env = dict(os.environ)
# Combining stdout and stderr. I can't find a way to keep both separate
# while getting the data 'live'. itertools.izip_longest seemed like it'd
# almost do it, but it caches the results before sending it out...
subproc = sp.Popen(shlex.split(cmd), stdout=sp.PIPE, stderr=sp.STDOUT, env=env)
return iter(subproc.stdout.readline, b'')
def reduce_seconds(secs=None):
"""Return a compressed representation of time in seconds
Keyword arguments:
secs -- a float/int representing the seconds to be 'compressed'
"""
# The nested if statements keep it from being too long,
# by lopping off the non significant values
retval = ""
secs = int(float(secs))
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
days, hours = divmod(hours, 24)
secs = int("{0:0.0f}".format(secs))
if days:
retval += "{0}d".format(days)
if hours:
retval += "{0}h".format(hours)
if days > 0:
return retval
if mins:
retval += "{0}m".format(mins)
if hours or days:
return retval
if secs:
retval += "{0:}s".format(secs)
return retval
def sec_diff(first_time=None, second_time=None):
"""Return the number of seconds between two datetime objects
Keyword arguments:
first_time -- The (typically) older time of the two
second_time -- The (typically) newer time of the two
"""
time_delta = second_time - first_time
return time_delta.days * 86400 + time_delta.seconds + Decimal(str(time_delta.microseconds / float(1000000)))
def _min(values=None):
"""A wrapper around the min() function so that it does not error on an empty list"""
try:
return min(values)
except ValueError:
return 0
def _max(values=None):
"""A wrapper around the max() function so that it does not error on an empty list"""
try:
return max(values)
except ValueError:
return 0
def median(values=None):
"""Return the median of 'values'
Keyword arguments:
values -- the list of numbers
"""
sorts = sorted(values)
length = len(sorts)
result = None
if not values:
result = 0
# raise ValueError, "I can't find the median of an empty list."
elif not length % 2:
result = (sorts[(length // 2)] + sorts[(length // 2) - 1]) / 2.0
else:
result = sorts[length // 2]
return result
def mean(values=None, _length=None):
"""Return the mean of 'values'
Keyword arguments:
values -- the list of numbers
_length -- mostly not usable for end-users, needed by the stdev function
"""
result = None
if not _length:
_length = len(values)
if _length > 0:
result = Decimal(str(sum(values))) / _length
else:
result = 0
return result
def stdev(values=None):
"""Return the standard deviation of values
Keyword arguments:
values -- The poorly named argument that contains the list of numbers
"""
values_mean = mean(values)
variance = [math.pow(Decimal(str(x)) - values_mean, 2) for x in values]
return math.sqrt(mean(variance, len(variance) - 1))
def percentile(values=None, pct=None):
"""Return the percentile of a given values
Keyword arguments:
values -- The list of numbers to be analyzed
pct -- The percentile (can be a float) to be used (100 == 100%, not 1 = 100%, etc.)
"""
watermark_index = int(round((float(pct) / 100) * len(values) + .5))
watermark = sorted(values)[watermark_index - 1]
return [element for element in values if element <= watermark]
def ord_num(number=None):
return str(number) + ("th" if 4 <= number % 100 <= 20 else {1: "st", 2: "nd", 3: "rd"}.get(number % 10, "th"))
def reduce_k(size=None, precision=2, short_form=True, _place_holder=0):
"""Return a compressed representation of a given number of bytes
Keyword arguments:
size -- the size in bytes
precision -- what precision should be used (places to the right of the decimal)
short_form -- (true/false). Use 'K' instead of 'KiB', etc.
"""
if not isinstance(size, Decimal):
size = Decimal(str(size))
# You know.. just in case we ever get to a yottabyte
if short_form:
iec_scale = ['K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
else:
iec_scale = ['KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB']
if abs(size) >= 1024:
_place_holder += 1
return reduce_k(size / Decimal("1024.0"), precision=precision, short_form=short_form, _place_holder=_place_holder)
else:
value = Decimal("{0:.{1}f}".format(size, precision))
if Decimal(str(int(value))) == value:
value = int(value)
if short_form:
return "{0}{1}".format(value, iec_scale[_place_holder])
else:
return "{0} {1}".format(value, iec_scale[_place_holder])
def _run_analysis(gc_data=None, jmap_data=None, jstat_data=None, proc_details=None, optimized_for_ygcs_rate=None):
"""The meat-and-potatoes of this tool. This takes in numerous data structures,
and prints out a report of the analysis of them."""
# Formulas to get the JVM configuration just from JMap:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# eden_size == (newsize * survivor_ratio)/(2 + survivor_ratio)
# survivor_size == eden_size * (1/survivor_ratio)
# og_size == max_heap_size - eden_size - survivor_size
# og_used == heap_used - eden_used
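    # Worked example (illustrative numbers, not taken from a real JVM): with
    # NewSize = 1024m and SurvivorRatio = 8:
    #   eden_size     = (1024m * 8) / (2 + 8)  = 819.2m
    #   survivor_size = 819.2m * (1 / 8)       = 102.4m  (per survivor space)
    #   eden + 2 survivors = 819.2m + 204.8m   = 1024m   (the full new gen)
    #   og_size       = max_heap_size - eden_size - survivor_size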
if not gc_data:
logger.error("I can't do any analysis for this sample period because there wasn't enough data in the GC log. Exiting.")
sys.exit(1)
############################################################
# Get some summary data that doesn't require GC log analysis
# Loop through the GC data array to find all CMS events, and capture
# how long they took.
cms_times = [record.cms_sweep_time for record in gc_data if record.is_cms_gc]
display.render("\n")
display.render("Meta:\n")
display.render("~~~~~\n")
sample_time_secs = sec_diff(gc_data[0].record_timestamp, gc_data[-1].record_timestamp)
if sample_time_secs < 60:
display.render("GC Sample Time: {0} seconds\n".format(sample_time_secs))
else:
display.render("GC Sample Time: {0} ({1} seconds)\n".format(reduce_seconds(sample_time_secs), sample_time_secs))
display.render("GC Sample Time from {0} to {1}\n".format(gc_data[0].record_timestamp, gc_data[-1].record_timestamp))
if proc_details:
cpu_count = mp.cpu_count()
cpu_uptime = cpu_count * proc_details['sys_uptime_seconds']
proc_utime_pct = proc_details['proc_utime_seconds'] / cpu_uptime
proc_stime_pct = proc_details['proc_stime_seconds'] / cpu_uptime
display.render("System Uptime: {0}\n".format(reduce_seconds(proc_details['sys_uptime_seconds'])))
display.render("Proc Uptime: {0}\n".format(reduce_seconds(proc_details['proc_uptime_seconds'])))
display.render("Proc Usertime: {0} ({1:0.2%})\n".format(reduce_seconds(proc_details['proc_utime_seconds']), proc_utime_pct))
display.render("Proc Systime: {0} ({1:0.2%})\n".format(reduce_seconds(proc_details['proc_stime_seconds']), proc_stime_pct))
display.render("Proc RSS: {0}\n".format(reduce_k(proc_details['proc_rss_bytes'] / 1024)))
display.render("Proc VSize: {0}\n".format(reduce_k(proc_details['proc_vsize_bytes'] / 1024)))
display.render("Proc # Threads: {0}\n".format(proc_details['num_threads']))
display.render("\n")
# Exit out as I don't have enough gc_data to do any analysis on
if len(gc_data) < 2:
display.render("\n")
display.render("* NOTE: There wasn't enough data to do any analysis. Please let the tool\n")
display.render(" gather at least 2 complete gc.log records (found {0}).\n".format(len(gc_data)))
return False
survivor_info = dict()
young_gc_count_delta = len([record.is_stw_gc for record in gc_data if not record.is_stw_gc])
full_gc_count_delta = len([record.is_stw_gc for record in gc_data if record.is_stw_gc])
sample_gc_time = sum(record.total_gc_time for record in gc_data)
sample_gc_load = (sample_gc_time / Decimal(str(sample_time_secs))) * 100
#######################################################
# Get young gen allocation rates over the sample period
yg_rates = list()
for first_gc, second_gc in zip(gc_data, gc_data[1:]):
if any([second_gc.is_stw_gc, first_gc.is_stw_gc, first_gc.is_cms_gc, second_gc.is_cms_gc]):
continue
# Iterate over the gc logs 2 at a time
# [1, 2, 3, 4] ->
# [(1, 2), (2, 3), (3, 4)]
#
time_delta = sec_diff(first_gc.record_timestamp, second_gc.record_timestamp)
try:
yg_size_delta = second_gc.young_size_before_gc - first_gc.young_size_after_gc
yg_growth_delta = second_gc.young_size_after_gc - first_gc.young_size_after_gc
except TypeError:
display.render("\n".join(textwrap.wrap("Warning: Something is really wrong with this JVM; I couldn't get correct GC data for it.", display.textwrap_offset)))
display.render("")
yg_size_delta = 0
yg_growth_delta = 0
# These are in KiB/s
yg_alloc_rate = yg_size_delta / time_delta
yg_growth_rate = yg_growth_delta / time_delta
yg_rates.append((yg_alloc_rate, yg_growth_rate))
#####################################################
# Get old gen promotion rates over the sample period
og_rates = list()
for first_gc, second_gc in zip(gc_data, gc_data[1:]):
if any([second_gc.is_stw_gc, first_gc.is_stw_gc, first_gc.is_cms_gc, second_gc.is_cms_gc]):
continue
time_delta = sec_diff(first_gc.record_timestamp, second_gc.record_timestamp)
# These are in KiB/s
og_allocation_delta = (second_gc.og_used - first_gc.og_used) / Decimal("1024")
og_allocation_rate = og_allocation_delta / time_delta
############################################################################
# I only want when the old gen is growing. If it's decreasing, it's probably
# b/c there was a FGC, and space is being reclaimed.
if og_allocation_delta > 0:
# This is in KiB/s
og_rates.append(og_allocation_rate)
############################
# Calc survivor death ratios
gc_survivor_death_rates = list()
for first_gc, second_gc in zip(gc_data, gc_data[1:]):
if any([second_gc.is_stw_gc, first_gc.is_stw_gc, first_gc.is_cms_gc, second_gc.is_cms_gc]):
continue
survivor_death_rates = list()
for first_age, second_age in zip(first_gc.ages, second_gc.ages[1:]):
# The second age CAN be bigger than the first age. I verified
# this in the gc.logs (still not sure how/why)
# ID 0 is the age number
# ID 1 is bytes in that age
# ID 2 is the total bytes for that age
if second_age[1] == -1:
# I don't think I want to capture any changes if
# the survivor space didn't exist (-1 as a default value- see above)
continue
# survivor_death_rates.append(Decimal(0))
else:
survivor_death_rates.append(1 - (Decimal(second_age[1]) / first_age[1]))
gc_survivor_death_rates.append(survivor_death_rates)
#################################################################################
# Since I have 2 in-scope valid GCs, I'm going to calculate some needed JVM sizes
# the sizes will be fixed if I have a fixed heap size (which we do in prod)
jvm_mem_cfg = dict()
try:
jvm_mem_cfg["og_size"] = (first_gc.total_heap - first_gc.young_size_total) * 1024
except TypeError:
display.render("\n".join(textwrap.wrap("Error: I could not find a non CMS/FGC GC record for analysis. Exiting.", display.textwrap_offset)))
display.render("")
sys.exit(1)
jvm_mem_cfg["survivor_size"] = (first_gc.desired_survivor_size * 2)
jvm_mem_cfg["eden_size"] = (first_gc.young_size_total * 1024) - jvm_mem_cfg["survivor_size"]
jvm_mem_cfg["total_heap"] = (first_gc.total_heap * 1024) + jvm_mem_cfg["survivor_size"]
jvm_mem_cfg["new_size"] = (jvm_mem_cfg["eden_size"] + (jvm_mem_cfg["survivor_size"] * 2))
#########################################################
# Now that I have a crap-ton of curated data, report out.
# This grabs the first part of the tuple (which is
# the total allocation for that gc (not growth!)
yg_alloc_rates = [entry[0] for entry in yg_rates]
min_yg_rate, mean_yg_rate, max_yg_rate = _min(yg_alloc_rates), mean(yg_alloc_rates), _max(yg_alloc_rates)
display.render("YG Allocation Rates*:\n")
display.render("~~~~~~~~~~~~~~~~~~~~~\n")
display.render("per sec (min/mean/max): {0:>13} {1:>13} {2:>13}\n".format(reduce_k(min_yg_rate) + "/s", reduce_k(mean_yg_rate) + "/s", reduce_k(max_yg_rate) + "/s"))
display.render("per hr (min/mean/max): {0:>13} {1:>13} {2:>13}\n".format(reduce_k(min_yg_rate * 3600) + "/h", reduce_k(mean_yg_rate * 3600) + "/h", reduce_k(max_yg_rate * 3600) + "/h"))
display.render("\n")
# This grabs the second part of the tuple (which is
# the total growth for that gc (not allocation rate!)
min_og_rate, mean_og_rate, max_og_rate = _min(og_rates), mean(og_rates), _max(og_rates)
display.render("OG Promotion Rates:\n")
display.render("~~~~~~~~~~~~~~~~~~~\n")
display.render("per sec (min/mean/max): {0:>13} {1:>13} {2:>13}\n".format(reduce_k(min_og_rate) + "/s", reduce_k(mean_og_rate) + "/s", reduce_k(max_og_rate) + "/s"))
display.render("per hr (min/mean/max): {0:>13} {1:>13} {2:>13}\n".format(reduce_k(min_og_rate * 3600) + "/h", reduce_k(mean_og_rate * 3600) + "/h", reduce_k(max_og_rate * 3600) + "/h"))
display.render("\n")
################################################
# Survivor Lengths- wanted to make a nested list
# comprehension, but I suppose that's a bit ugly
# to debug/read
display.render("Survivor Death Rates:\n")
display.render("~~~~~~~~~~~~~~~~~~~~~\n")
survivor_lengths = list()
for sub_arr in gc_survivor_death_rates:
survivor_lengths.append(len([elem for elem in sub_arr if elem > 0]))
display.render("Lengths (min/mean/max): {0}/{1:0.1f}/{2}\n".format(_min(survivor_lengths), mean(survivor_lengths), _max(survivor_lengths)))
display.render("Death Rate Breakdown:\n")
cuml_pct = 1
death_ages = list()
for survivor_num, pct_list in enumerate(zip_longest(*gc_survivor_death_rates, fillvalue=0), 1):
min_pct = min(pct_list)
mean_pct = mean(pct_list)
max_pct = max(pct_list)
cuml_pct *= 1 - mean_pct
death_ages.append(mean_pct)
survivor_info[survivor_num] = min_pct, mean_pct, max_pct
display.render(" Age {0}: {1:>5} / {2:>5} / {3:>5} / {4:>5} (min/mean/max/cuml alive %)\n".format(survivor_num, "{0:0.1%}".format(min_pct), "{0:0.1%}".format(mean_pct), "{0:0.1%}".format(max_pct),
"{0:0.1%}".format(cuml_pct)))
##################################
# GC Times
young_gc_times = [record.young_gc_time * 1000 for record in gc_data if not record.is_stw_gc]
full_gc_times = [record.stw_time * 1000 for record in gc_data if record.is_stw_gc]
if sample_time_secs:
if young_gc_count_delta:
ygc_rate = (young_gc_count_delta / sample_time_secs) * 60
else:
ygc_rate = 0
if full_gc_count_delta:
fgc_rate = (full_gc_count_delta / sample_time_secs) * 60
else:
fgc_rate = 0
display.render("\n")
display.render("GC Information:\n")
display.render("~~~~~~~~~~~~~~~\n")
display.render("YGC/FGC Count: {0}/{1} (Rate: {2:0.2f}/min, {3:0.2f}/min)\n".format(young_gc_count_delta, full_gc_count_delta, ygc_rate, fgc_rate))
display.render("\n")
display.render("Sample Period GC Load: {0:0.2f}%\n".format(sample_gc_load))
display.render("")
display.render("CMS Sweep Times: {0:0.3f}s / {1:0.3f}s / {2:0.3f}s / {3:0.2f} (min/mean/max/stdev)\n".format(_min(cms_times), mean(cms_times), _max(cms_times), stdev(cms_times)))
display.render("YGC Times: {0:0.0f}ms / {1:0.0f}ms / {2:0.0f}ms / {3:0.2f} (min/mean/max/stdev)\n".format(_min(young_gc_times), mean(young_gc_times), _max(young_gc_times), stdev(young_gc_times)))
display.render("FGC Times: {0:0.0f}ms / {1:0.0f}ms / {2:0.0f}ms / {3:0.2f} (min/mean/max/stdev)\n".format(_min(full_gc_times), mean(full_gc_times), _max(full_gc_times), stdev(full_gc_times)))
agg_ygc_time = sum(young_gc_times)
agg_fgc_time = sum(full_gc_times)
display.render("Agg. YGC Time: {0:0.0f}ms\n".format(agg_ygc_time))
display.render("Agg. FGC Time: {0:0.0f}ms\n".format(agg_fgc_time))
display.render("\n")
if og_rates:
display.render(
"Est. Time Between FGCs (min/mean/max): {0:>10} {1:>10} {2:>10}\n".format(reduce_seconds(jvm_mem_cfg["og_size"] / min_og_rate), reduce_seconds(jvm_mem_cfg["og_size"] / mean_og_rate),
reduce_seconds(jvm_mem_cfg["og_size"] / max_og_rate)))
else:
display.render("Est. Time Between FGCs (min/mean/max): {0:>10} {1:>10} {2:>10}\n".format("n/a", "n/a", "n/a"))
display.render("Est. OG Size for 1 FGC/hr (min/mean/max): {0:>10} {1:>10} {2:>10}\n".format(reduce_k(min_og_rate * 3600), reduce_k(mean_og_rate * 3600), reduce_k(max_og_rate * 3600)))
display.render("\n")
display.render("Overall JVM Efficiency Score*: {0:0.3f}%\n".format(100 - sample_gc_load))
display.render("\n")
###################################
# JMap Data
display.render("Current JVM Mem Configuration:\n")
display.render("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n")
if jmap_data:
for k, v in jmap_data.items():
if "Size" in k:
v = reduce_k(v / 1024)
display.render("{0:>17}: {1}\n".format(k, v))
else:
for k, v in jvm_mem_cfg.items():
display.render("{0:>17}: {1}\n".format(k, reduce_k(v / 1024)))
display.render("\n")
######################
# Show recommendations
_show_recommendations(death_ages, young_gc_times, full_gc_times, fgc_rate, ygc_rate, yg_alloc_rates, og_rates, jvm_mem_cfg, jmap_data, jstat_data, gc_data, cms_times, survivor_info,
optimized_for_ygcs_rate, proc_details)
display.render("~~~\n")
display.render("* The allocation rate is the increase in usage before a GC done. Growth rate\n")
display.render(" is the increase in usage after a GC is done.\n")
display.render("\n")
display.render("* The JVM efficiency score is a convenient way to quantify how efficient the\n")
display.render(" JVM is. The most efficient JVM is 100% (pretty much impossible to obtain).\n")
if full_gc_count_delta == 0:
display.render("\n")
display.render("* There were no full GCs during this sample period. This reporting will\n")
display.render(" be less useful/accurate as a result.\n")
display.render("\n")
display.render("* A copy of the critical data used to generate this report is stored\n")
display.render(" in /tmp/jtune_data-{0}.bin.bz2. Please copy this to your homedir if you\n".format(user))
display.render(" want to save/analyze this further.\n")
def _get_survivor_info(death_ages=None, survivor_info=None, gc_data=None, survivor_problem_pct=None, curr_ng_size=None, adj_ng_size=None):
"""This looks at the survivor info data structure, and will return the max
tenuring size, and max tenuring age that it feels is needed."""
# This is roughly how much larger the survivor space should be to counteract the increase
# in the frequency of ygcs caused from the smaller NG size as it pushes data into the
# survivor space more often. I don't need to change the MaxTenuringThreshold as that is
# mostly constant depending on how data ages.
#
# I'm adjusting the size of the survivor space based on the eden change. It MAY be better
# adjusting this based on time of how frequent the ygcs are happening.
ng_size_delta = curr_ng_size - adj_ng_size
    # Going to use this to change the MaxTenuringThreshold parameter. The reason is that
# ygcs will happen less/more often if I change the ng size, and I'll need to counter
# that by increasing/decreasing the tenuring threshold to keep things in balance.
ng_size_delta_pct = adj_ng_size / curr_ng_size
# Changing the 'survivor_problem_pct' which is the watermark
# for objects still alive. If it's over that amount, then the
# tenuring threshold needs to be increased, if it's less, then
    # the age is good. HOWEVER, I use the death rate, so an 85% death
# rate is a 15% survivor rate.
survivor_watermark = 100 - survivor_problem_pct
# Get the max survivor age allowed per the jvm configuration
max_survivor_age = gc_data[0].max_threshold
# The survivor_info structure is the decrease in size for that
# age going into the next, so if the max here is 6, the actual max
# survivor size used is 7.
longest_used_ratio = len(survivor_info) + 1
# Survivor percentage of surviving objects
age_objects_still_alive = list()
current_percentage = 100
for key in sorted(survivor_info):
# [1] is the average, [2] is the max
mean_death_rate_pct = survivor_info[key][1]
current_percentage *= 1 - mean_death_rate_pct
age_objects_still_alive.append(current_percentage)
error_msg = None
if max_survivor_age < 15:
if longest_used_ratio == max_survivor_age:
if age_objects_still_alive[-1] > ((100 - survivor_watermark) / 100.0):
error_msg = "The survivor ratio of {0} is too small as {1:0.1f}% of the objects are still alive. Try increasing the MaxTenuringThreshold (Max: 15) parameter, and running this analysis again.".format(
longest_used_ratio, age_objects_still_alive[-1])
elif not survivor_info:
error_msg = "For the examined sample period, I could not retrieve any meaningful survivor statistics from the gc.log. This JVM is either sick, or the sample period was too short."
elif not survivor_info:
error_msg = "For the examined sample period, I could not retrieve any meaningful survivor statistics from the gc.log. This JVM is either sick, or the sample period was too short."
elif not survivor_info:
error_msg = "For the examined sample period, I could not retrieve any meaningful survivor statistics from the gc.log. This JVM is either sick, or the sample period was too short."
if error_msg:
raise ValueError(error_msg)
###########################################################
# Don't confuse the 'min()' with the 'max' variable. I want
# the first age where it's less than survivor_problem_pct
try:
max_tenuring_age = min([k for k, v in enumerate(age_objects_still_alive, 1) if v <= survivor_problem_pct])
except ValueError:
max_tenuring_age = 0
if not max_tenuring_age:
# Not sure if I like this algorithm, but it seems close enough
below_threshold_ct = len([death_pct for death_pct in death_ages if death_pct <= Decimal(".04")])
below_threshold_pct = below_threshold_ct / float(len(death_ages))
# If more than 33% of the ages are at or below 4%, make a note of it.
if below_threshold_pct > .33:
# It's speculative that I should add to the heap any objects that aren't reaped
            # after cutting off the MaxTenuringThreshold, but since it's not getting reaped anyway,
# it may not change anything, so not adjusting for the time being.
# We're using all the available ages, but objects are still alive...
if max_survivor_age == len(death_ages):
display.render("\n".join(textwrap.wrap(
"* Warning: It looks like your tenuring threshold is too high - {0:0.0%} of your ages are reaping at or below 4% of the objects. We could make it easier for the JVM if we reduce your MaxTenuringThreshold by {1} to {2} instead of {3}.".format(
below_threshold_pct, below_threshold_ct, len(death_ages) - below_threshold_ct, max_survivor_age))))
else:
display.render("\n".join(textwrap.wrap(
"* Warning: It looks like your tenuring threshold is too high - {0:0.0%} of your ages are reaping at or below 4% of the objects. We could make it easier for the JVM if we reduce your MaxTenuringThreshold by {1} to {2} instead of {3}. BE CAREFUL - your max *used* age in the gc.logs of {4} is less than the configured max age of {3} - make sure that you used a large enough sample size, and let the JVM go through 3 FGCs (option: '-s 3') and is being checked during peak traffic.".format(
below_threshold_pct, below_threshold_ct, len(death_ages) - below_threshold_ct, max_survivor_age, len(death_ages)))))
max_tenuring_age = len(death_ages) - below_threshold_ct
else:
display.render("\n".join(textwrap.wrap(
"* Warning: Your survivor age is too short, your last age of {0} has {1:0.2f}% of its objects still alive. Because of this, I'm unable to reliably determine how your objects are aging. Unset or increase the MaxTenuringThreshold (max: 15) to mitigate this problem.".format(
len(age_objects_still_alive), age_objects_still_alive[-1]))))
tenure_sizes = list()
for gc_record in gc_data:
try:
tenure_sizes.append(gc_record.ages[max_tenuring_age - 1][2])
except IndexError:
# I saw a gc record that doesn't have that age
# level, so skip it.
pass
# It's recommended to have the tenuring size 2x the max tenure size, I then
# add in the change in newgen (ng_size_delta) to offset the decrease/increase
# in newgen as calculated in this parent's function. The 'ng_size_delta / 2' is
# such that I increase the whole max_tenuring_size by ng_size_delta, but since
# there are two survivor spaces, I need to split the ng_size_delta by 2 for each
# survivor space
max_tenuring_size = (max(tenure_sizes) * 2) + (ng_size_delta / 2)
survivor_ratio = adj_ng_size / max_tenuring_size
# Checking if survivor space is LARGER than the newgen size
if survivor_ratio < 1:
display.render("\n".join(textwrap.wrap(
"* Warning: The calculated recommended survivor ratio of {0:0.2f} is less than 1. This is not possible, so I increased the size of newgen by {1}, and set the survivor ratio to 1. Try the tuning suggestions, and watch closely.\n".format(
survivor_ratio, reduce_k((max_tenuring_size - adj_ng_size) / 1024)), display.textwrap_offset)) + "\n\n")
# This is close, but still wrong. If I run into this condition, then I need to
# also fix the newgen size b/c the tenured size is based off of the newgen
# size before I knew there was an issue. I think this is probably close enough
# for now.
survivor_ratio = 1
adj_ng_size = max_tenuring_size
else:
adj_ng_size += max_tenuring_size
# Now, change the max tenuring age/threshold
max_tenuring_age *= (1 / ng_size_delta_pct)
return adj_ng_size, survivor_ratio, max_tenuring_size, max_tenuring_age
def _show_recommendations(death_ages=None, young_gc_times=None, full_gc_times=None, fgc_rate=None, ygc_rate=None, yg_alloc_rates=None, og_rates=None, jvm_mem_cfg=None, jmap_data=None, jstat_data=None,
gc_data=None, cms_times=None, survivor_info=None, optimized_for_ygcs_rate=None, proc_details=None):
"""This is where any jvm tuning recommendations happens."""
###########################################################################
# The basis of these recommendations are as follows:
#
# 1) More frequent YGCs which take less time is almost always better
# than less frequent YGCs, but taking longer; consistently slow is
# better than periodically slower
# 2) YGC times should have a low standard deviation(<= 5)
# 3) YGC times should be low (<= 50ms, ideally)
display.render("Recommendation Summary:\n")
display.render("~~~~~~~~~~~~~~~~~~~~~~~\n")
# This is how many ygcs/sec should be happening, if the mean ygc
# times are higher than desired
ygc_time_goal_ms = 50
ygc_stdev_goal = 5
# YGC mean ms percentile - lop off the worst offenders
# I am changing it instead of a mean of the 99p, doing a
# max of the 75p; may be better
ygc_pctile = 75
# This is just for analysis purposes; need a decent sample set count
ygc_count_goal = 10
fgc_count_goal = 3
# Marker for indicating if current config is good for
# the Java G1 garbage collector
ready_for_g1 = False
survivor_problem_pct = 10
ygc_stdev = stdev(percentile(young_gc_times, ygc_pctile))
ygc_mean_ms = float(max(percentile(young_gc_times, ygc_pctile)))
if jmap_data:
curr_ng_size = jmap_data['NewSize']
curr_og_size = jmap_data['OldSize']
# Not using b/c this data is not in the GC logs (and
# really doesn't need to be tuned...
# if "PermSize" in jmap_data:
# curr_pg_ms_size = jmap_data['PermSize']
# else:
# curr_pg_ms_size = jmap_data['MetaspaceSize']
max_heap_size = jmap_data['MaxHeapSize']
else:
curr_ng_size = jvm_mem_cfg["new_size"]
curr_og_size = jvm_mem_cfg["og_size"]
max_heap_size = jvm_mem_cfg["total_heap"]
adj_ng_size = curr_ng_size
#########################################################################################################
# This is an estimate. Because we use CMS for FGCs, it's an iterative process, and while the CMS reset is
# happening, more objects are being tenured into OG. The best we can do (I think) is to find the minimum
# size of OU, and go from there. This is why it's super important to have more than 2 FGCs to look at.
#
# This is tricky. I need to find the first record where the previous og size is bigger than
# the current. This identifies when the first CMS runs, and from there, I can find the minimum
normal_gc_data = [x for x in gc_data if x.og_used > 0]
try:
record_num = [record_num for record_num, first_gc, second_gc in zip(count(), normal_gc_data, normal_gc_data[1:]) if first_gc.og_used > second_gc.og_used][0]
except IndexError:
live_data_size_bytes = None
else:
live_data_size_bytes = _min(record.og_used for record in normal_gc_data[record_num:])
if proc_details and proc_details['proc_uptime_seconds'] < 300:
display.render("\n".join(textwrap.wrap(
"Warning: The process I'm doing the analysis on has been up for {0}, and may not be in a steady-state. It's best to let it be up for more than 5 minutes to get more realistic results.\n".format(
reduce_seconds(proc_details['proc_uptime_seconds'])))) + "\n\n")
#################################################
# Find the recommended NewGen size
if len(young_gc_times) < ygc_count_goal:
display.render("\n".join(
textwrap.wrap("Warning: There were only {0} YGC entries to do the analysis on. It's better to have > {1} to get more realistic results.\n".format(len(young_gc_times), ygc_count_goal),
display.textwrap_offset)) + "\n\n")
if ygc_stdev > ygc_stdev_goal * 4:
comment = "VERY inconsistent"
elif ygc_stdev > ygc_stdev_goal * 2:
comment = "pretty inconsistent"
elif ygc_stdev > ygc_stdev_goal:
comment = "somewhat consistent"
ready_for_g1 = True
else:
comment = "very consistent"
ready_for_g1 = True
messages = list()
    # This logic block goes through different optimization scenarios that it
# uses to find an optimal setting.
# messages.append("- The mean YGC rate is {0:0.2f}/min, and the max {1} percentile YGC time is {2:0.0f}ms (stdev of {3:0.2f} which is {4}). It's best to have the mean YGC time be at or below {5}ms, and the YGC stdev at or below {6} if possible.".format(ygc_rate, ord_num(ygc_pctile), ygc_mean_ms, ygc_stdev, comment, ygc_time_goal_ms, ygc_stdev_goal))
# TODO: Too much repetition in this code block
if (optimized_for_ygcs_rate > ygc_rate) and (ygc_stdev > ygc_stdev_goal or ygc_mean_ms > ygc_time_goal_ms):
adj_ng_size = curr_ng_size * (ygc_rate / optimized_for_ygcs_rate)
######################################################################
# Figure out Tenuring Threshold & size for the survivor spaces, basing
# it on the last age where below 10% still live
try:
new_adj_ng_size, survivor_ratio, max_tenuring_size, max_tenuring_age = _get_survivor_info(death_ages, survivor_info, gc_data, survivor_problem_pct, curr_ng_size, adj_ng_size)
# Go ahead and set it regardless
adj_ng_size = new_adj_ng_size
except ValueError as msg:
display.render("\n" + "\n".join(textwrap.wrap("* Error: {0}".format(msg), display.textwrap_offset)) + "\n\n")
display.render("")
return False
messages.append(
"- With a mean YGC time goal of {0:0.0f}ms, the suggested (optimized for a YGC rate of {1:0.2f}/min) size of NewGen (including adjusting for calculated max tenuring size) considering the above criteria should be {2:0.0f} MiB (currently: {3:0.0f} MiB).".format(
ygc_time_goal_ms, optimized_for_ygcs_rate, float(adj_ng_size) / 1024.0 / 1024.0, float(curr_ng_size) / 1024.0 / 1024.0))
if new_adj_ng_size < curr_ng_size:
messages.append(
"- Because we're decreasing the size of NewGen, it can have an impact on system load due to increased memory management requirements. There's not an easy way to predict the impact to the application, so watch this after it's tuned.")
elif ygc_mean_ms > ygc_time_goal_ms:
adj_ng_size = curr_ng_size * (ygc_time_goal_ms / ygc_mean_ms)
######################################################################
# Figure out Tenuring Threshold & size for the survivor spaces, basing
# it on the last age where below 10% still live
try:
new_adj_ng_size, survivor_ratio, max_tenuring_size, max_tenuring_age = _get_survivor_info(death_ages, survivor_info, gc_data, survivor_problem_pct, curr_ng_size, adj_ng_size)
# Go ahead and set it regardless
adj_ng_size = new_adj_ng_size
except ValueError as msg:
display.render("\n" + "\n".join(textwrap.wrap("* Error: {0}".format(msg), display.textwrap_offset)) + "\n\n")
display.render("")
return False
messages.append(
"- With a mean YGC time goal of {0:0.0f}ms, the suggested (optimized for YGC time) size of NewGen (including adjusting for calculated max tenuring size) considering the above criteria should be {1:0.0f} MiB (currently: {2:0.0f} MiB).".format(
ygc_time_goal_ms, float(adj_ng_size) / 1024.0 / 1024.0, float(curr_ng_size) / 1024.0 / 1024.0))
if new_adj_ng_size < curr_ng_size:
messages.append(
"- Because we're decreasing the size of NewGen, it can have an impact on system load due to increased memory management requirements. There's not an easy way to predict the impact to the application, so watch this after it's tuned.")
else:
adj_ng_size = curr_ng_size
######################################################################
# Figure out Tenuring Threshold & size for the survivor spaces, basing
# it on the last age where below 10% still alive
try:
new_adj_ng_size, survivor_ratio, max_tenuring_size, max_tenuring_age = _get_survivor_info(death_ages, survivor_info, gc_data, survivor_problem_pct, curr_ng_size, adj_ng_size)
# Go ahead and set it regardless
adj_ng_size = new_adj_ng_size
except ValueError as msg:
display.render("\n" + "\n".join(textwrap.wrap("* Error: {0}".format(msg), display.textwrap_offset)) + "\n\n")
display.render("")
return False
messages.append("- The mean YGC rate is {0:0.2f}/min, and the mean YGC time is {1:0.0f}ms (stdev of {2:0.2f} which is {3}).".format(ygc_rate, ygc_mean_ms, ygc_stdev, comment))
for message in messages:
display.render("\n".join(textwrap.wrap(message)) + "\n")
#################################################
# Find the recommended PermGen size
#
# Removing this block b/c permgen/metaspace usage isn't in the gc.logs
#
############################################
# Find out what the survivor ratio should be
display.render("\n".join(
textwrap.wrap("- Looking at the worst (max) survivor percentages for all the ages, it looks like a TenuringThreshold of {0:0.0f} is ideal.".format(max_tenuring_age), display.textwrap_offset)) + "\n")
display.render("\n".join(textwrap.wrap(
"- The survivor size should be 2x the max size for tenuring threshold of {0:0.0f} given above. Given this, the survivor size of {1:0.0f}M is ideal.".format(max_tenuring_age,
max_tenuring_size / 1024 / 1024), display.textwrap_offset)) + "\n")
display.render("\n".join(textwrap.wrap("- To ensure enough survivor space is allocated, a survivor ratio of {0:0.0f} should be used.".format(survivor_ratio), display.textwrap_offset)) + "\n")
#################################################
# Find the recommended max heap size
if len(full_gc_times) < fgc_count_goal:
display.render("\n" + "\n".join(textwrap.wrap(
"* Error: You really need to have at least {0} (preferably more) FGCs happen (I found {1}) before doing any OG size recommendation analysis. Stopping any further analysis.\n".format(
fgc_count_goal, len(full_gc_times)), display.textwrap_offset)) + "\n\n")
display.render("\n")
return False
recommended_max_heap_size = 3.5 * float(live_data_size_bytes) + float(max_tenuring_size + adj_ng_size)
if max_heap_size != recommended_max_heap_size:
display.render("\n".join(textwrap.wrap(
"- It's recommended to have the max heap size 3-4x the size of the live data size (OldGen + PermGen), and adjusted to include the recommended survivor and newgen size. New recommended size is {0:0.0f}MiB (currently: {1:0.0f}MiB).".format(
float(recommended_max_heap_size) / 1024.0 / 1024.0, float(max_heap_size) / 1024.0 / 1024.0), display.textwrap_offset)) + "\n")
#################################################
# Figure out the occupancy fraction
max_cms_time = float(_max(cms_times))
    # Not taking the overall MAX, but the max of a percentile of the og rates - I think that's better
    # (maybe a mean of a percentile would be, too?)
pct_number = 99
# KiB -> B
max_og_rate = float(_max(percentile(og_rates, pct_number))) * 1024
oldgen_offset = curr_og_size - (float(_max(yg_alloc_rates) / 1024) * max_cms_time) - (max_cms_time * max_og_rate)
occ_fraction = math.floor((float(oldgen_offset) / curr_og_size) * 100)
display.render("\n".join(textwrap.wrap(
"- With a max {0} percentile OG promotion rate of {1}/s, and the max CMS sweep time of {2}s, you should not have a occupancy fraction any higher than {3:0.0f}.".format(ord_num(pct_number),
reduce_k(Decimal(str(
max_og_rate / 1024.0))),
max_cms_time,
occ_fraction),
display.textwrap_offset)) + "\n")
# Java 7 G1 Stuff
display.render("\n")
display.render("Java G1 Settings:\n")
display.render("~~~~~~~~~~~~~~~~~~~\n")
if ready_for_g1:
display.render("\n".join(textwrap.wrap(
"- With a max ygc stdev of {0:0.2f}, and a {1} percentile ygc mean ms of {2:0.0f}ms, your config is good enough to move to the G1 garbage collector.".format(ygc_stdev, ord_num(pct_number),
ygc_mean_ms),
display.textwrap_offset)) + "\n")
display.render("\n".join(textwrap.wrap("- Since G1 uses one space for everything, the consolidated heap size should be {0:0.0f}MiB.".format(float(recommended_max_heap_size) / 1024.0 / 1024.0),
display.textwrap_offset)) + "\n")
else:
display.render("\n".join(textwrap.wrap(
"- With a max ygc stdev of {0:0.2f}, and a {1} percentile ygc mean ms of {2:0.0f}ms, your config is probably not ready to move to the G1 garbage collector. Try tuning the JVM, and see if that improves things first.".format(
ygc_stdev, ord_num(pct_number), ygc_mean_ms), display.textwrap_offset)) + "\n")
display.render("\n")
display.render("The JVM arguments from the above recommendations:\n")
display.render("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n")
display.render("\n".join(textwrap.wrap("-Xmx{0:0.0f}m -Xms{0:0.0f}m -Xmn{1:0.0f}m -XX:SurvivorRatio={2:0.0f} -XX:MaxTenuringThreshold={3:0.0f} -XX:CMSInitiatingOccupancyFraction={4:0.0f}".format(recommended_max_heap_size / 1024.0 / 1024.0, float(adj_ng_size) / 1024.0 / 1024.0, survivor_ratio, max_tenuring_age, occ_fraction), display.textwrap_offset)) + "\n")
if ready_for_g1:
display.render("\n")
display.render("The JVM arguments for G1:\n")
display.render("~~~~~~~~~~~~~~~~~~~~~~~~~\n")
display.render("\n".join(textwrap.wrap("-XX:+UseG1GC -XX:MaxGCPauseMillis={0:0.0f} -Xms{1:0.0f}m -Xmx{1:0.0f}m ".format(ygc_mean_ms, recommended_max_heap_size / 1024.0 / 1024.0), display.textwrap_offset)) + "\n")
def get_proc_info(pid=None):
"""Return a data structure with details of the given process id
Keyword arguments:
pid -- the process id of the process to be checked
"""
details = dict()
try:
cpu_ticks_per_sec = int(os.sysconf(os.sysconf_names['SC_CLK_TCK']))
bytes_per_page = resource.getpagesize()
details['gc_file_rotation'] = False
for line in liverun("readlink /proc/{0}/cwd".format(pid)):
line = line.decode()
details['proc_cwd'] = line.strip()
with open("/proc/{0}/cmdline".format(pid), "r") as _file:
for blob in _file:
for line in blob.split("\0"):
if "-Xloggc" in line:
gc_path = line.split(":", 1)[1]
if gc_path.startswith("/"):
details['gc_log_path'] = gc_path
else:
details['gc_log_path'] = details['proc_cwd'] + "/" + gc_path
elif "/bin/java" in line:
details['java_path'] = os.path.dirname(line)
elif "-XX:+UseGCLogFileRotation" in line:
details['gc_file_rotation'] = True
elif "-Xms" in line:
details['min_heap_size'] = line.split("ms")[1]
elif "-Xmx" in line:
details['max_heap_size'] = line.split("mx")[1]
elif "-XX:+PrintGCDateStamps" in line:
details['print_gc_date_stamps'] = True
elif "-XX:+PrintGCDetails" in line:
details['print_gc_details'] = True
elif "-XX:+PrintTenuringDistribution" in line:
details['print_tenuring_distribution'] = True
elif "-XX:SurvivorRatio=" in line:
details['survivor_ratio'] = line.split("SurvivorRatio=")[1]
elif "-XX:+UseConcMarkSweepGC" in line:
details['use_cms'] = True
elif "-XX:+UseParNewGC" in line:
details['use_parnew'] = True
if 'java_path' not in details:
details['java_path'] = ''.join(liverun("which java")).strip().replace("/java", "")
with open("/proc/uptime", "r") as _file:
for line in _file:
details['sys_uptime_seconds'] = Decimal(line.split()[0])
break
with open("/proc/{0}/stat".format(pid), "r") as _file:
for line in _file:
field = line.split()
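                # /proc/<pid>/stat fields per proc(5), 0-indexed after split():
                # 13 = utime, 14 = stime, 19 = num_threads,
                # 21 = starttime (clock ticks since boot), 22 = vsize (bytes), 23 = rss (pages)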
utime_ticks = int(field[13])
stime_ticks = int(field[14])
num_threads = int(field[19])
uptime_ticks = int(field[21])
vsize_bytes = int(field[22])
rss_bytes = int(field[23]) * bytes_per_page
details['proc_uptime_seconds'] = (details['sys_uptime_seconds']) - Decimal(str(uptime_ticks / float(cpu_ticks_per_sec)))
details['proc_utime_seconds'] = utime_ticks / Decimal(cpu_ticks_per_sec)
details['proc_stime_seconds'] = stime_ticks / Decimal(cpu_ticks_per_sec)
details['proc_rss_bytes'] = rss_bytes
details['proc_vsize_bytes'] = vsize_bytes
details['num_threads'] = num_threads
break
for line in liverun("{0}/java -version".format(details['java_path'])):
line = line.decode()
if "java version" in line:
line = line.strip().replace("\"", "")
fields = line.split()
details['java_build_version'] = fields[-1]
match = re.match(r"^(\d+)\.(\d+)\.(\d+)", details['java_build_version'])
details['java_ver_int'] = match.group(2)
break
except IOError:
# The data structure will be empty, and I'll catch it when
# I get a key error on accessing it
pass
return details
def process_gclog(log_file=None, log_file_pos=0):
"""Pretty basic function that iterates through a gc log, and returns a data structure
of the log data.
Keyword arguments:
log_file -- the gc log file to be read
log_file_pos -- the offset of the log file from whence to start (as bytes)
"""
gc_log_queue = list()
try:
line_num = 0
print()
print("* Reading gc.log file...", end=" ")
current_size = os.stat(log_file).st_size
if current_size < log_file_pos:
print("log file was truncated/rotated; reading from the start", end=" ")
log_file_pos = 0
start_time = datetime.datetime.now()
with open(log_file, "r") as _file:
_file.seek(log_file_pos)
for line in _file:
gc_log_queue.append(line)
line_num += 1
elapsed_time = sec_diff(start_time, datetime.datetime.now())
print("done. Scanned {0} lines in {1:0.4f} seconds.".format(line_num, elapsed_time))
except IOError:
# I don't want/need to check the exception. If it fails, it fails.
pass
else:
gc_log_queue.append("END_OF_FILE")
return gc_log_queue
def _run_jmap(pid=None, procdetails=None):
"""Rung jmap for the given process id, and java path, returning
a data structure with the information"""
jmap_data = dict()
java_path = procdetails['java_path']
try:
for line in liverun("{0}/jmap -J-Xmx128M -heap {1}".format(java_path, pid)):
line = line.decode()
field = line.split()
if "MinHeapFreeRatio" in line:
jmap_data['MinHeapFreeRatio'] = int(field[2])
elif "MaxHeapFreeRatio" in line:
jmap_data['MaxHeapFreeRatio'] = int(field[2])
elif "MaxHeapSize" in line:
jmap_data['MaxHeapSize'] = int(field[2])
elif "NewSize" in line:
jmap_data['NewSize'] = int(field[2])
elif "MaxNewSize" in line:
jmap_data['MaxNewSize'] = int(field[2])
elif "OldSize" in line:
# JMap seems to be scaled wrong. Comparing it to jstat, it shows that
# it's off by about 1000 (1024). There's a bug in Java6 where this is in KB
# not bytes like the others. Appears to be fixed in Java8 (maybe Java7, too)
java_int = int(procdetails['java_ver_int'])
if java_int < 8:
jmap_data['OldSize'] = int(field[2]) * 1024
else:
jmap_data['OldSize'] = int(field[2])
elif "NewRatio" in line:
jmap_data['NewRatio'] = int(field[2])
elif "SurvivorRatio" in line:
jmap_data['SurvivorRatio'] = int(field[2])
elif "PermSize" in line:
jmap_data['PermSize'] = int(field[2])
elif "MaxPermSize" in line:
jmap_data['MaxPermSize'] = int(field[2])
elif "MaxMetaspaceSize" in line:
if "MB" in line:
jmap_data['MaxMetaspaceSize'] = int(field[2]) * 1024 * 1024
else:
jmap_data['MaxMetaspaceSize'] = int(field[2])
elif "MetaspaceSize" in line:
jmap_data['MetaspaceSize'] = int(field[2])
except (IOError, KeyboardInterrupt):
pass
return jmap_data
def run_jstat(pid=None, java_path=None, no_jstat_output=None, fgc_stop_count=None, max_count=None, ygc_stop_count=None):
"""Rung jstat, and outputs the data in a nice column and aligned layout.
Keyword arguments:
pid -- the process pid to run jstat against
java_path -- the path to use to run jstat
no_jstat_output -- true/false that tells this function to not output any data
fgc_stop_count -- the integer value that tells this function to stop at this number of full (cms) gcs
max_count -- the max number of lines the function should display
ygc_stop_count -- the integer value that tells this function to stop at this number of young gcs
"""
global subproc
jstat_data = dict()
jstat_data['TIME_STAMP'] = list()
# This is how the columns will be displayed in order.
ordered_fields = ["EC", "EP", "EU", "S0C/S1C", "S0C", "S1C", "S0U", "S1U", "OC", "OP", "OU", "MC", "MU", "PC", "PU", "YGC", "YGCD", "FGC", "FGCD"]
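    # Column key (standard jstat -gc columns): S0C/S1C = survivor capacities, S0U/S1U = survivor used,
    # EC/EU = eden capacity/used, OC/OU = old gen capacity/used, MC/MU = metaspace capacity/used,
    # PC/PU = permgen capacity/used (older JVMs), YGC/FGC = young/full gc counts.
    # EP, OP, YGCD, and FGCD are derived below: eden/old utilization percentages and per-sample gc count deltas.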
displayed_output = False
combined_survivors = False
field_map = dict()
line_num = 0
field_widths = dict()
first_fgc_ct = None
prev_fgc_ct = None
last_fgc_ct = None
total_fgcs = None
total_ygcs = None
short_fields = True
    # Being able to override python3's print function would work much better
    # here; instead, output has to be built up piecemeal via display.render()...
display.render("#" * 5 + "\n")
display.render("# Start Time: {0} GMT\n".format(datetime.datetime.now()))
display.render("# Host: {0}\n".format(socket.getfqdn()))
display.render("#" * 5 + "\n")
if max_count > 0:
cmd = "{0}/jstat -J-Xmx128M -gc {1} 1000 {2}".format(java_path, pid, max_count)
else:
cmd = "{0}/jstat -J-Xmx128M -gc {1} 1000".format(java_path, pid)
try:
for line in liverun(cmd):
line = line.decode()
timestamp = datetime.datetime.now()
line = line.strip()
#######################################################################
# Print the header, and first two lines should be printed. After that,
# the logic block at the end (to see if there's been a fgc or not)
# takes over, and prints the line conditionally with decoration
field_num = 0
for field in line.split():
if line_num == 0:
jstat_data[field] = list()
field_map[field_num] = field
else:
field_name = field_map[field_num]
if field_name in ['YGCT', 'FGCT', 'GCT']:
jstat_data[field_name].append(Decimal(field))
else:
# Minding sigfigs- no decimal needed for large numbers; that's
# just silly
jstat_data[field_name].append(Decimal("{0:0.0f}".format(Decimal(field))))
field_num += 1
if jstat_data['OC'] and jstat_data['OU']:
# Better to handle the percentage-awareness here instead
# of making a unique conditional later on
if "OP" not in jstat_data:
jstat_data['OP'] = list()
jstat_data['OP'].append("{0:0.1%}".format(jstat_data['OU'][-1] / jstat_data['OC'][-1]))
if jstat_data['EC'] and jstat_data['EU']:
# Better to handle the percentage-awareness here instead
# of making a unique conditional later on
if "EP" not in jstat_data:
jstat_data['EP'] = list()
jstat_data['EP'].append("{0:0.1%}".format(jstat_data['EU'][-1] / jstat_data['EC'][-1]))
if jstat_data['GCT']:
if "YGCD" not in jstat_data:
jstat_data['YGCD'] = list()
if "FGCD" not in jstat_data:
jstat_data['FGCD'] = list()
# Young gc count delta
try:
if jstat_data['YGC'][-1] > jstat_data['YGC'][-2]:
delta = "+" + str(jstat_data['YGC'][-1] - jstat_data['YGC'][-2])
else:
delta = "-"
except IndexError:
delta = "-"
jstat_data['YGCD'].append(delta)
# full gc count delta
try:
if jstat_data['FGC'][-1] > jstat_data['FGC'][-2]:
delta = "+" + str(jstat_data['FGC'][-1] - jstat_data['FGC'][-2])
else:
delta = "-"
except IndexError:
delta = "-"
jstat_data['FGCD'].append(delta)
##################################
# I need at least two lines to get
# historical data
if line_num >= 2:
# Keep a timestamp for each record (to get sub-second granularity)
first_fgc_ct = jstat_data['FGC'][0]
first_ygc_ct = jstat_data['YGC'][0]
prev_fgc_ct = jstat_data['FGC'][-2]
last_fgc_ct = jstat_data['FGC'][-1]
last_ygc_ct = jstat_data['YGC'][-1]
total_fgcs = last_fgc_ct - first_fgc_ct
total_ygcs = last_ygc_ct - first_ygc_ct
#############################################
# line 1 is actual data, 0 is just the header
if line_num > 0:
jstat_data['TIME_STAMP'].append(timestamp)
####################################################
# See if I can combine the S0C/S1C fields (probably)
if jstat_data['S0C'][-1] == jstat_data['S1C'][-1]:
if "S0C/S1C" not in jstat_data:
jstat_data['S0C/S1C'] = list()
jstat_data['S0C/S1C'].append(jstat_data['S0C'][-1])
combined_survivors = True
else:
# This is redundant as I catch it earlier. Leaving it here for now.
logger.error("Looks like you're not running with the CMS garbage collector. You can enable this option by setting your JVM arguments to use '-XX:+UseConcMarkSweepGC'.")
sys.exit(1)
if not field_widths:
field_widths = _get_widths(jstat_data, short_fields)
if not displayed_output:
displayed_output = True
#############################################
# Don't display any output, just continue to
# the next iteration. Ick, double-negative..
if no_jstat_output:
continue
# Print the column header
display.render(" ", keep_newline=False)
for field in ordered_fields:
if combined_survivors and field != "S0C" and field != "S1C":
if field in field_widths:
width = field_widths[field]
display.render("{0:>{1}}".format(field, width + 1), keep_newline=False)
display.render("\n")
# Print a nice line spacer all even-like
display.render(" ", keep_newline=False)
for field in ordered_fields:
if combined_survivors and field != "S0C" and field != "S1C":
if field in field_widths:
width = field_widths[field]
display.render("{0:>{1}}".format("~" * width, width + 1), keep_newline=False)
display.render("\n")
# Print the first row of data that was cached so it can
# be used to determine field widths
display.render(" ", keep_newline=False)
for field in ordered_fields:
if field in field_widths:
width = field_widths[field]
# Get the last value
if combined_survivors and field != "S0C" and field != "S1C":
value = jstat_data[field][0]
if short_fields and field not in ['EP', 'OP', 'YGC', 'YGCT', 'FGC', 'FGCT', 'GCT', 'FGCD', 'YGCD']:
value = reduce_k(value, precision=1)
display.render("{0:>{1}}".format(value, width + 1), keep_newline=False)
display.render("\n")
else:
#################################
# Don't display any output, just
# continue to the next iteration.
if no_jstat_output:
if last_fgc_ct > prev_fgc_ct:
display.render("* ", keep_newline=False)
else:
display.render(" ", keep_newline=False)
# Now print the actual numbers
for field in ordered_fields:
if field in field_widths:
width = field_widths[field]
# Get the last value
if combined_survivors and field != "S0C" and field != "S1C":
value = jstat_data[field][-1]
if short_fields and field not in ['EP', 'OP', 'YGC', 'YGCT', 'FGC', 'FGCT', 'GCT', 'FGCD', 'YGCD']:
value = reduce_k(value, precision=1)
display.render("{0:>{1}}".format(value, width + 1), keep_newline=False)
display.render("\n")
else:
if last_fgc_ct > prev_fgc_ct:
display.render("* ", keep_newline=False)
else:
display.render(" ", keep_newline=False)
# Now print the actual numbers
for field in ordered_fields:
if field in field_widths:
width = field_widths[field]
# Get the last value
if combined_survivors and field != "S0C" and field != "S1C":
value = jstat_data[field][-1]
if short_fields and field not in ['EP', 'OP', 'YGC', 'YGCT', 'FGC', 'FGCT', 'GCT', 'FGCD', 'YGCD']:
value = reduce_k(value, precision=1)
display.render("{0:>{1}}".format(value, width + 1), keep_newline=False)
display.render("\n")
            # total_fgcs/total_ygcs are None until two data lines have been seen
            if total_fgcs is not None and 0 < fgc_stop_count <= total_fgcs:
                break
            if total_ygcs is not None and 0 < ygc_stop_count <= total_ygcs:
                break
line_num += 1
except (IOError, KeyboardInterrupt):
# This triggers if I exit the 'liverun'
pass
finally:
if subproc and subproc.poll() is None:
# The process hasn't terminated
subproc.terminate()
return jstat_data
def _get_widths(jstat_data=None, short_fields=False):
"""Function that returns the recommended field widths of the jstat output"""
widths = dict()
for field in jstat_data:
max_width = max(list(map(len, list(map(str, jstat_data[field])))))
field_width = len(field)
if field_width > max_width:
widths[field] = field_width
else:
widths[field] = max_width
##################################################################
    # Special handling: the survivor space columns (S0C, S1C, S0U, S1U) should
    # all be the same width, and b/c S{01}U alternate, it's better to
    # set the width from S{01}C
if short_fields:
        # The '6' accounts for values like 'x.xxN' (3.23K/M/G), etc.
survivor_max = 6
newgen_max = 6
oldgen_max = 6
else:
survivor_max = max(widths['S0C'], widths['S1C'], widths['S0U'], widths['S1U'])
newgen_max = max(widths['EC'], widths['EU'])
oldgen_max = max(widths['OC'], widths['OU'])
widths['OC'] = oldgen_max
widths['OU'] = oldgen_max
widths['EC'] = newgen_max
widths['EU'] = newgen_max
widths['S0C'] = survivor_max
widths['S1C'] = survivor_max
widths['S0U'] = survivor_max
widths['S1U'] = survivor_max
widths['EP'] = 6
widths['OP'] = 6
return widths
def _at_exit(raw_gc_log=None, jmap_data=None, jstat_data=None, proc_details=None, optimized_for_ygcs_rate=None):
"""The exit function that is called when the user presses ctrl-c, or when it exits after X number
of jstat iterations. It calls various functions to display useful information to the end-user."""
gc_data = list()
in_stanza = False
date_time = None
entry = list()
# I don't know if I like this, but I wouldn't get to
# this point unless I asked for GC data from stdin...
if not raw_gc_log:
raw_gc_log = sys.stdin
for line in raw_gc_log:
#############################################################################
# Since I'm using the timestamp as the record stanza delimiter, I may as well
# convert it to a datetime object here instead of doing it later.
match = re.match(r"^(\d+)-(\d+)-(\d+)T(\d+):(\d+):([\d\.]+)[+-]\d+: ([\d\.]+):", line)
if match:
in_stanza = True
# If I'm at the start of a new block, save the previous block
if date_time and entry:
gc_record = GCRecord((date_time, entry))
if gc_data:
prev_gc_record = gc_data[-1]
if gc_record.jvm_running_time and prev_gc_record.jvm_running_time > gc_record.jvm_running_time:
logger.warning("The JVM restarted at {0}. Re-initing the internal datastructures.".format(gc_record.record_timestamp))
gc_data = list()
if gc_record.valid_record:
gc_data.append(gc_record)
entry = list()
year = int(match.group(1))
month = int(match.group(2))
day = int(match.group(3))
hour = int(match.group(4))
minute = int(match.group(5))
second = Decimal(match.group(6))
# up_time = Decimal(match.group(7))
date_time = datetime.datetime.strptime("{0}-{1}-{2} {3}:{4}:{5}".format(year, month, day, hour, minute, second), "%Y-%m-%d %H:%M:%S.%f")
if in_stanza:
entry.append(line)
_run_analysis(gc_data, jmap_data, jstat_data, proc_details, optimized_for_ygcs_rate)
def get_rotated_log_file(gc_log_file):
"""Function will scan existing log files to determine latest rotated log, if none found will return
non rotated file name.
"""
log_number = 0
while os.path.isfile("{0}.{1}".format(gc_log_file, log_number)):
log_number += 1
if log_number:
gc_log_file = "{0}.{1}".format(gc_log_file, (log_number - 1))
else:
logger.debug("\n".join(
textwrap.wrap(
"Was not able to find a rotated GC log for this process, defaulting to gc log from process.",
display.textwrap_offset)))
return gc_log_file
def get_gc_log_file(procdetails):
gc_log_file = procdetails['gc_log_path']
if not gc_log_file:
logger.error("\n".join(
textwrap.wrap(
"I was not able to find a GC log for this process. Is the instance up?",
display.textwrap_offset)))
sys.exit(1)
if procdetails['gc_file_rotation']:
return get_rotated_log_file(gc_log_file)
else:
return gc_log_file
def get_jmap_data(pid=None, procdetails=None):
"""Function that runs jmap, only needed b/c jmap may not start, and this retries on failure."""
jmap_data = None
for seconds in [x * 2 for x in range(1, 8)]:
jmap_data = _run_jmap(pid, procdetails)
if "NewSize" in jmap_data:
break
else:
logger.warning("Couldn't connect to jvm via jmap to get valid data. Sleeping {0:0.0f} seconds, and trying again.".format(seconds))
time.sleep(seconds)
return jmap_data
################################################################
# Main
user = os.environ.get("SUDO_USER", None)
if not user:
user = getpass.getuser()
subproc = None
display = Display()
def main():
parser = argparse.ArgumentParser(description="Analytics tool for tuning and analyzing GC behavior.")
parser.add_argument('-o', '--optimize', help='Optimize for latency or throughput (range 0-11, 0 = ygc @ 180/min, 11 = ygc @ 1/min). Floats allowed.', type=Decimal, required=False, default=9)
parser.add_argument('-s', '--fgc-stop-count', help='How many full gcs should happen before I stop (very important for analytics)', type=int, default=0)
parser.add_argument('-y', '--ygc-stop-count', help='How many young gcs should happen before I stop', type=int, default=0)
parser.add_argument('-c', '--stop-count', help='How many iterations of jstat to run before stopping', type=int, default=0)
parser.add_argument('-n', '--no-jstat-output', help='Do not show jstat output - only print summary', action="store_true")
if DEBUG:
group = parser.add_mutually_exclusive_group(required=False)
else:
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-p', '--pid', help='Which java PID should I attach to', type=int)
group.add_argument('--gc-stdin', help='Read GC log data from stdin', action="store_true")
cmd_args = parser.parse_args()
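    # Example invocation (hypothetical pid), given the options above:
    #   sudo jtune.py -p 12345 -s 3 -o 9
    # attaches to pid 12345, biases the tuning toward fewer (larger) young GCs
    # (-o 9), and stops the jstat sampling once 3 full GCs have been seen (-s 3).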
raw_gc_log_data = list()
jmap_data = list()
jstat_data = list()
proc_details = list()
if not (cmd_args.pid or cmd_args.gc_stdin):
logger.error("Please specify -p (pid) or --gc-stdin")
sys.exit(1)
# A ygc of 180/min (3/sec)
ygc_upper_rate_per_min = 180
# Validate the optimize range
if 0 <= cmd_args.optimize <= 11:
# You won't have to change this function if you want
# to change the ygc upper/lower bounds later on
#
# Convert from rate/min to rate/sec
optimized_for_ygcs_rate = ((-Decimal(ygc_upper_rate_per_min - 1) / 11) * Decimal(str(cmd_args.optimize)) + ygc_upper_rate_per_min)
else:
logger.error("The optimize range must be between 0 and 11.")
sys.exit(1)
######################################################################
# This should be done w/ argparse, but I haven't dedicated enough time
# to figure it out
if cmd_args.no_jstat_output and not (cmd_args.ygc_stop_count or cmd_args.stop_count or cmd_args.fgc_stop_count):
logger.error("You must specify -s, -y, or -c arguments for this option to work.")
sys.exit(1)
if not cmd_args.gc_stdin:
try:
config_error = False
proc_details = get_proc_info(cmd_args.pid)
java_path = proc_details['java_path']
if proc_details.get("min_heap_size", 0) != proc_details.get("max_heap_size", 1):
config_error = True
logger.error(
"It looks like either you didn't specify your min and max heap size (-Xms & -Xmx respectively), or they are set to two different sizes. They need to be set to the same for jtune.py to work properly.")
if not proc_details.get("print_gc_date_stamps", False):
config_error = True
logger.error("You need to include the '-XX:PrintGCDateStamps' option to the JVM for JTune to work correctly.")
if not proc_details.get("print_gc_details", False):
config_error = True
logger.error("You need to include the '-XX:PrintGCDetails' option to the JVM for JTune to work correctly.")
if not proc_details.get("print_tenuring_distribution", False):
config_error = True
logger.error("You need to include the '-XX:+PrintTenuringDistribution' option to the JVM for JTune to work correctly.")
if not proc_details.get("survivor_ratio", False):
logger.warning("You probably want to include the '-XX:SurvivorRatio=<num>' option to the JVM for JTune to work correctly.")
if not proc_details.get("use_cms", False):
config_error = True
logger.error("You need to include the '-XX:+UseConcMarkSweepGC' option to the JVM for JTune to work correctly.")
if not proc_details.get("use_parnew", False):
config_error = True
logger.error("You need to include the '-XX:+UseParNewGC' option to the JVM for JTune to work correctly.")
if config_error:
logger.error("Exiting.")
sys.exit(1)
except (TypeError, KeyError):
logger.error("I was not able to get the process data for pid {0}".format(cmd_args.pid))
sys.exit(1)
###########################################
# Start the gc log watching in a subprocess
gc_log_file = get_gc_log_file(proc_details)
if not gc_log_file:
logger.error("\n".join(textwrap.wrap("I was not able to find a GC log for this process. Is the instance up?", display.textwrap_offset)))
sys.exit(1)
####################################################
# Get the file offset before starting jstat, so
# I can use it after jstat runs to read the log file
gc_log_file_pos = os.stat(gc_log_file).st_size
jmap_data = get_jmap_data(cmd_args.pid, proc_details)
if cmd_args.no_jstat_output:
jstat_data = dict()
else:
jstat_data = run_jstat(cmd_args.pid, java_path, cmd_args.no_jstat_output, cmd_args.fgc_stop_count, cmd_args.stop_count, cmd_args.ygc_stop_count)
# This basically hits after the user ctrl-c's
raw_gc_log_data = process_gclog(gc_log_file, gc_log_file_pos)
atexit.register(_at_exit, raw_gc_log_data, jmap_data, jstat_data, proc_details, optimized_for_ygcs_rate)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
import os
base_dir = os.getcwd()
project_name = '{{cookiecutter.project_name}}'
project_path = f'{project_name}'
# https://github.com/cookiecutter/cookiecutter/issues/955
for root, dirs, files in os.walk(base_dir):
for filename in files:
# read file content
with open(os.path.join(root, filename)) as f:
content = f.read()
# replace tag by install path
content = content.replace('replace_me.base_dir', base_dir)
# replace file content
with open(os.path.join(root, filename), 'w') as f:
f.write(content)
print(f'\033[0;32mSuccessfully generated {project_path}\033[0m')
print('\033[0;33mTo make the utility functions available in your shell, source utils.sh. e.g.\033[0m')
print(f'\techo "source {project_path}/utils.sh" >> ~/.bashrc')
print()
print('\033[0;33mTo setup git hooks run the following:\033[0m')
print(f'\tln -s {project_path}/githooks/commit-msg {project_name}/.git/hooks/commit-msg')
|
nilq/baby-python
|
python
|
import unittest
from parameterized import parameterized as p
from solns.combinationSum3.combinationSum3 import *
class UnitTest_CombinationSum3(unittest.TestCase):
@p.expand([
[]
])
def test_naive(self):
pass
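        # A filled-in case might look like the following (hypothetical; it assumes the
        # solution module exposes a Solution.combinationSum3(k, n) method):
        #   @p.expand([
        #       [3, 7, [[1, 2, 4]]],
        #       [3, 9, [[1, 2, 6], [1, 3, 5], [2, 3, 4]]],
        #   ])
        #   def test_naive(self, k, n, expected):
        #       self.assertEqual(Solution().combinationSum3(k, n), expected)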
|
nilq/baby-python
|
python
|
import pymongo
class Database(object): # Database class inherits all attributes from object class
URI = "mongodb://127.0.0.1:27017" # class attributes which defines a value for every class instance
DATABASE = None
@staticmethod
    def initialize():  # create a client connection and select the desired mongodb database
client = pymongo.MongoClient(Database.URI)
Database.DATABASE = client['tooldraft']
@staticmethod
def insert(collection, data):
        Database.DATABASE[collection].insert_one(data)  # insert() is deprecated/removed in newer pymongo releases
@staticmethod
def find(collection, query):
return Database.DATABASE[collection].find(query)
@staticmethod
def find_one(collection, query):
return Database.DATABASE[collection].find_one(query)
|
nilq/baby-python
|
python
|
from talon import Context, actions
ctx = Context()
ctx.matches = r"""
app: mintty
"""
ctx.tags = ['terminal', 'user.file_manager', 'user.generic_terminal', 'user.git', 'user.kubectl']
@ctx.action_class('user')
class UserActions:
def file_manager_open_parent():
actions.insert('cd ..')
actions.key('enter')
@ctx.action_class('edit')
class EditActions:
def paste(): actions.key('shift-insert')
def copy(): actions.key('ctrl-insert')
def delete_line(): actions.key('ctrl-u')
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.